diff --git a/rosbridge_server/CMakeLists.txt b/rosbridge_server/CMakeLists.txt
index 00a49043d..ae3071b42 100644
--- a/rosbridge_server/CMakeLists.txt
+++ b/rosbridge_server/CMakeLists.txt
@@ -1,7 +1,7 @@
 cmake_minimum_required(VERSION 2.8.3)
 project(rosbridge_server)
 
-find_package(catkin REQUIRED COMPONENTS rosbridge_library rosapi rospy)
+find_package(catkin REQUIRED)
 
 catkin_python_setup()
 
diff --git a/rosbridge_server/package.xml b/rosbridge_server/package.xml
index c003db21d..9283f6a59 100644
--- a/rosbridge_server/package.xml
+++ b/rosbridge_server/package.xml
@@ -15,9 +15,8 @@
   <maintainer email="jihoonlee.in@gmail.com">Jihoon Lee</maintainer>
 
   <buildtool_depend>catkin</buildtool_depend>
-  <build_depend>rosbridge_library</build_depend>
-  <build_depend>rosapi</build_depend>
-  <build_depend>rospy</build_depend>
+  <run_depend>python-backports.ssl-match-hostname</run_depend>
+  <run_depend>python-tornado</run_depend>
   <run_depend>python-twisted-core</run_depend>
   <run_depend>rosbridge_library</run_depend>
   <run_depend>rosapi</run_depend>
diff --git a/rosbridge_server/setup.py b/rosbridge_server/setup.py
index 3f384df23..00ff34afd 100755
--- a/rosbridge_server/setup.py
+++ b/rosbridge_server/setup.py
@@ -1,96 +1,14 @@
 #!/usr/bin/env python
 
-import os
-import platform
-import sys
-import warnings
-
-from distutils.core import setup, Extension
+from distutils.core import setup
 from catkin_pkg.python_setup import generate_distutils_setup
 
 d = generate_distutils_setup(
     packages=[
         'rosbridge_server',
-        'backports',
-        'backports.ssl_match_hostname',
-        'tornado',
-        'tornado.platform'
     ],
     package_dir={'': 'src'}
 )
 
-# The following code is copied from
-# /~https://github.com/mongodb/mongo-python-driver/blob/master/setup.py
-# to support installing without the extension on platforms where
-# no compiler is available.
-from distutils.command.build_ext import build_ext
-
-
-class custom_build_ext(build_ext):
-    """Allow C extension building to fail.
-
-    The C extension speeds up websocket masking, but is not essential.
-    """
-
-    warning_message = """
-********************************************************************
-WARNING: %s could not
-be compiled. No C extensions are essential for Tornado to run,
-although they do result in significant speed improvements for
-websockets.
-%s
-
-Here are some hints for popular operating systems:
-
-If you are seeing this message on Linux you probably need to
-install GCC and/or the Python development package for your
-version of Python.
-
-Debian and Ubuntu users should issue the following command:
-
-    $ sudo apt-get install build-essential python-dev
-
-RedHat, CentOS, and Fedora users should issue the following command:
-
-    $ sudo yum install gcc python-devel
-********************************************************************
-"""
-
-    def run(self):
-        try:
-            build_ext.run(self)
-        except Exception:
-            e = sys.exc_info()[1]
-            sys.stdout.write('%s\n' % str(e))
-            warnings.warn(self.warning_message % ("Extension modules",
-                                                  "There was an issue with "
-                                                  "your platform configuration"
-                                                  " - see above."))
-
-    def build_extension(self, ext):
-        name = ext.name
-        try:
-            build_ext.build_extension(self, ext)
-        except Exception:
-            e = sys.exc_info()[1]
-            sys.stdout.write('%s\n' % str(e))
-            warnings.warn(self.warning_message % ("The %s extension "
-                                                  "module" % (name,),
-                                                  "The output above "
-                                                  "this warning shows how "
-                                                  "the compilation "
-                                                  "failed."))
-
-
-if (platform.python_implementation() == 'CPython' and
-        os.environ.get('TORNADO_EXTENSION') != '0'):
-    # This extension builds and works on pypy as well, although pypy's jit
-    # produces equivalent performance.
-    d['ext_modules'] = [
-        Extension('tornado.speedups', sources=['src/tornado/speedups.c']),
-    ]
-
-    if os.environ.get('TORNADO_EXTENSION') != '1':
-        # Unless the user has specified that the extension is mandatory,
-        # fall back to the pure-python implementation on any build failure.
-        d['cmdclass'] = {'build_ext': custom_build_ext}
-
 setup(**d)
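With the vendored copy gone, rosbridge_server resolves Tornado through the ``python-tornado`` rosdep key declared in package.xml above, so a missing system package now fails at import time rather than never. A minimal sketch of an import guard that turns that failure into an actionable message (hypothetical, not part of this patch)::

    # Hypothetical guard -- not part of this patch. The name of the
    # Debian package ('python-tornado') comes from the rosdep key above.
    try:
        import tornado  # now provided by the system, not src/tornado
    except ImportError:
        raise ImportError(
            "rosbridge_server now depends on the system Tornado package; "
            "install it via rosdep or 'pip install tornado'.")
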
diff --git a/rosbridge_server/src/backports/__init__.py b/rosbridge_server/src/backports/__init__.py
deleted file mode 100644
index 612d32836..000000000
--- a/rosbridge_server/src/backports/__init__.py
+++ /dev/null
@@ -1,3 +0,0 @@
-# This is a Python "namespace package" http://www.python.org/dev/peps/pep-0382/
-from pkgutil import extend_path
-__path__ = extend_path(__path__, __name__)
diff --git a/rosbridge_server/src/backports/ssl_match_hostname/LICENSE.txt b/rosbridge_server/src/backports/ssl_match_hostname/LICENSE.txt
deleted file mode 100644
index 58058f1bb..000000000
--- a/rosbridge_server/src/backports/ssl_match_hostname/LICENSE.txt
+++ /dev/null
@@ -1,51 +0,0 @@
-Python License (Python-2.0)
-
-Python License, Version 2 (Python-2.0)
-
-PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
---------------------------------------------
-
-1. This LICENSE AGREEMENT is between the Python Software Foundation
-("PSF"), and the Individual or Organization ("Licensee") accessing and
-otherwise using this software ("Python") in source or binary form and
-its associated documentation.
-
-2. Subject to the terms and conditions of this License Agreement, PSF
-hereby grants Licensee a nonexclusive, royalty-free, world-wide
-license to reproduce, analyze, test, perform and/or display publicly,
-prepare derivative works, distribute, and otherwise use Python
-alone or in any derivative version, provided, however, that PSF's
-License Agreement and PSF's notice of copyright, i.e., "Copyright (c)
-2001-2013 Python Software Foundation; All Rights Reserved" are retained in
-Python alone or in any derivative version prepared by Licensee.
-
-3. In the event Licensee prepares a derivative work that is based on
-or incorporates Python or any part thereof, and wants to make
-the derivative work available to others as provided herein, then
-Licensee hereby agrees to include in any such work a brief summary of
-the changes made to Python.
-
-4. PSF is making Python available to Licensee on an "AS IS"
-basis.  PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
-IMPLIED.  BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
-DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
-FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
-INFRINGE ANY THIRD PARTY RIGHTS.
-
-5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
-FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
-A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
-OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
-
-6. This License Agreement will automatically terminate upon a material
-breach of its terms and conditions.
-
-7. Nothing in this License Agreement shall be deemed to create any
-relationship of agency, partnership, or joint venture between PSF and
-Licensee.  This License Agreement does not grant permission to use PSF
-trademarks or trade name in a trademark sense to endorse or promote
-products or services of Licensee, or any third party.
-
-8. By copying, installing or otherwise using Python, Licensee
-agrees to be bound by the terms and conditions of this License
-Agreement.
diff --git a/rosbridge_server/src/backports/ssl_match_hostname/README.txt b/rosbridge_server/src/backports/ssl_match_hostname/README.txt
deleted file mode 100644
index de3910a64..000000000
--- a/rosbridge_server/src/backports/ssl_match_hostname/README.txt
+++ /dev/null
@@ -1,52 +0,0 @@
-
-The ssl.match_hostname() function from Python 3.4
-=================================================
-
-The Secure Sockets layer is only actually *secure*
-if you check the hostname in the certificate returned
-by the server to which you are connecting,
-and verify that it matches to hostname
-that you are trying to reach.
-
-But the matching logic, defined in `RFC2818`_,
-can be a bit tricky to implement on your own.
-So the ``ssl`` package in the Standard Library of Python 3.2
-and greater now includes a ``match_hostname()`` function
-for performing this check instead of requiring every application
-to implement the check separately.
-
-This backport brings ``match_hostname()`` to users
-of earlier versions of Python.
-Simply make this distribution a dependency of your package,
-and then use it like this::
-
-    from backports.ssl_match_hostname import match_hostname, CertificateError
-    ...
-    sslsock = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_SSLv3,
-                              cert_reqs=ssl.CERT_REQUIRED, ca_certs=...)
-    try:
-        match_hostname(sslsock.getpeercert(), hostname)
-    except CertificateError, ce:
-        ...
-
-Note that the ``ssl`` module is only included in the Standard Library
-for Python 2.6 and later;
-users of Python 2.5 or earlier versions
-will also need to install the ``ssl`` distribution
-from the Python Package Index to use code like that shown above.
-
-Brandon Craig Rhodes is merely the packager of this distribution;
-the actual code inside comes verbatim from Python 3.4.
-
-History
--------
-* This function was introduced in python-3.2
-* It was updated for python-3.4a1 for a CVE
-  (backports-ssl_match_hostname-3.4.0.1)
-* It was updated from RFC2818 to RFC 6125 compliance in order to fix another
-  security flaw for python-3.3.3 and python-3.4a5
-  (backports-ssl_match_hostname-3.4.0.2)
-
-
-.. _RFC2818: http://tools.ietf.org/html/rfc2818.html
-
diff --git a/rosbridge_server/src/backports/ssl_match_hostname/__init__.py b/rosbridge_server/src/backports/ssl_match_hostname/__init__.py
deleted file mode 100644
index 34f248f33..000000000
--- a/rosbridge_server/src/backports/ssl_match_hostname/__init__.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""The match_hostname() function from Python 3.3.3, essential when using SSL."""
-
-import re
-
-__version__ = '3.4.0.2'
-
-
-class CertificateError(ValueError):
-    pass
-
-
-def _dnsname_match(dn, hostname, max_wildcards=1):
-    """Matching according to RFC 6125, section 6.4.3
-
-    http://tools.ietf.org/html/rfc6125#section-6.4.3
-    """
-    pats = []
-    if not dn:
-        return False
-
-    # Ported from python3-syntax:
-    # leftmost, *remainder = dn.split(r'.')
-    parts = dn.split(r'.')
-    leftmost = parts[0]
-    remainder = parts[1:]
-
-    wildcards = leftmost.count('*')
-    if wildcards > max_wildcards:
-        # Issue #17980: avoid denials of service by refusing more
-        # than one wildcard per fragment.  A survey of established
-        # policy among SSL implementations showed it to be a
-        # reasonable choice.
-        raise CertificateError(
-            "too many wildcards in certificate DNS name: " + repr(dn))
-
-    # speed up common case w/o wildcards
-    if not wildcards:
-        return dn.lower() == hostname.lower()
-
-    # RFC 6125, section 6.4.3, subitem 1.
-    # The client SHOULD NOT attempt to match a presented identifier in which
-    # the wildcard character comprises a label other than the left-most label.
-    if leftmost == '*':
-        # When '*' is a fragment by itself, it matches a non-empty dotless
-        # fragment.
-        pats.append('[^.]+')
-    elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
-        # RFC 6125, section 6.4.3, subitem 3.
-        # The client SHOULD NOT attempt to match a presented identifier
-        # where the wildcard character is embedded within an A-label or
-        # U-label of an internationalized domain name.
-        pats.append(re.escape(leftmost))
-    else:
-        # Otherwise, '*' matches any dotless string, e.g. www*
-        pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
-    # add the remaining fragments, ignore any wildcards
-    for frag in remainder:
-        pats.append(re.escape(frag))
-
-    pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
-    return pat.match(hostname)
-
-
-def match_hostname(cert, hostname):
-    """Verify that *cert* (in decoded format as returned by
-    SSLSocket.getpeercert()) matches the *hostname*.  RFC 2818 and RFC 6125
-    rules are followed, but IP addresses are not accepted for *hostname*.
-
-    CertificateError is raised on failure. On success, the function
-    returns nothing.
-    """
-    if not cert:
-        raise ValueError("empty or no certificate")
-    dnsnames = []
-    san = cert.get('subjectAltName', ())
-    for key, value in san:
-        if key == 'DNS':
-            if _dnsname_match(value, hostname):
-                return
-            dnsnames.append(value)
-    if not dnsnames:
-        # The subject is only checked when there is no dNSName entry
-        # in subjectAltName
-        for sub in cert.get('subject', ()):
-            for key, value in sub:
-                # XXX according to RFC 2818, the most specific Common Name
-                # must be used.
-                if key == 'commonName':
-                    if _dnsname_match(value, hostname):
-                        return
-                    dnsnames.append(value)
-    if len(dnsnames) > 1:
-        raise CertificateError("hostname %r "
-                               "doesn't match either of %s"
-                               % (hostname, ', '.join(map(repr, dnsnames))))
-    elif len(dnsnames) == 1:
-        raise CertificateError("hostname %r "
-                               "doesn't match %r"
-                               % (hostname, dnsnames[0]))
-    else:
-        raise CertificateError("no appropriate commonName or "
-                               "subjectAltName fields were found")
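The deleted backport above mirrors the standard library's ``ssl.match_hostname()``; after this patch the same module is expected to come from the ``python-backports.ssl-match-hostname`` system package. A short illustrative sketch of the wildcard rules implemented above (the certificate dict is made up)::

    # Illustrative only; exercises the API of the module deleted above.
    from backports.ssl_match_hostname import match_hostname, CertificateError

    # A decoded certificate in the format returned by SSLSocket.getpeercert()
    cert = {'subjectAltName': (('DNS', '*.example.com'),)}

    match_hostname(cert, 'www.example.com')      # ok: '*' matches one label
    try:
        match_hostname(cert, 'a.b.example.com')  # fails: '*' cannot span dots
    except CertificateError as e:
        print(e)
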
diff --git a/rosbridge_server/src/tornado/__init__.py b/rosbridge_server/src/tornado/__init__.py
deleted file mode 100644
index ac2112fe5..000000000
--- a/rosbridge_server/src/tornado/__init__.py
+++ /dev/null
@@ -1,29 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""The Tornado web server and tools."""
-
-from __future__ import absolute_import, division, print_function, with_statement
-
-# version is a human-readable version number.
-
-# version_info is a four-tuple for programmatic comparison. The first
-# three numbers are the components of the version number.  The fourth
-# is zero for an official release, positive for a development branch,
-# or negative for a release candidate or beta (after the base version
-# number has been incremented)
-version = "4.0.2"
-version_info = (4, 0, 2, 0)
diff --git a/rosbridge_server/src/tornado/auth.py b/rosbridge_server/src/tornado/auth.py
deleted file mode 100644
index 7bd3fa1ed..000000000
--- a/rosbridge_server/src/tornado/auth.py
+++ /dev/null
@@ -1,1483 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2009 Facebook
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-"""This module contains implementations of various third-party
-authentication schemes.
-
-All the classes in this file are class mixins designed to be used with
-the `tornado.web.RequestHandler` class.  They are used in two ways:
-
-* On a login handler, use methods such as ``authenticate_redirect()``,
-  ``authorize_redirect()``, and ``get_authenticated_user()`` to
-  establish the user's identity and store authentication tokens to your
-  database and/or cookies.
-* In non-login handlers, use methods such as ``facebook_request()``
-  or ``twitter_request()`` to use the authentication tokens to make
-  requests to the respective services.
-
-They all take slightly different arguments due to the fact all these
-services implement authentication and authorization slightly differently.
-See the individual service classes below for complete documentation.
-
-Example usage for Google OpenID::
-
-    class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
-                                   tornado.auth.GoogleOAuth2Mixin):
-        @tornado.gen.coroutine
-        def get(self):
-            if self.get_argument('code', False):
-                user = yield self.get_authenticated_user(
-                    redirect_uri='http://your.site.com/auth/google',
-                    code=self.get_argument('code'))
-                # Save the user with e.g. set_secure_cookie
-            else:
-                yield self.authorize_redirect(
-                    redirect_uri='http://your.site.com/auth/google',
-                    client_id=self.settings['google_oauth']['key'],
-                    scope=['profile', 'email'],
-                    response_type='code',
-                    extra_params={'approval_prompt': 'auto'})
-
-.. versionchanged:: 4.0
-   All of the callback interfaces in this module are now guaranteed
-   to run their callback with an argument of ``None`` on error.
-   Previously some functions would do this while others would simply
-   terminate the request on their own.  This change also ensures that
-   errors are more consistently reported through the ``Future`` interfaces.
-""" - -from __future__ import absolute_import, division, print_function, with_statement - -import base64 -import binascii -import functools -import hashlib -import hmac -import time -import uuid - -from tornado.concurrent import TracebackFuture, chain_future, return_future -from tornado import gen -from tornado import httpclient -from tornado import escape -from tornado.httputil import url_concat -from tornado.log import gen_log -from tornado.stack_context import ExceptionStackContext -from tornado.util import bytes_type, u, unicode_type, ArgReplacer - -try: - import urlparse # py2 -except ImportError: - import urllib.parse as urlparse # py3 - -try: - import urllib.parse as urllib_parse # py3 -except ImportError: - import urllib as urllib_parse # py2 - -try: - long # py2 -except NameError: - long = int # py3 - - -class AuthError(Exception): - pass - - -def _auth_future_to_callback(callback, future): - try: - result = future.result() - except AuthError as e: - gen_log.warning(str(e)) - result = None - callback(result) - - -def _auth_return_future(f): - """Similar to tornado.concurrent.return_future, but uses the auth - module's legacy callback interface. - - Note that when using this decorator the ``callback`` parameter - inside the function will actually be a future. - """ - replacer = ArgReplacer(f, 'callback') - - @functools.wraps(f) - def wrapper(*args, **kwargs): - future = TracebackFuture() - callback, args, kwargs = replacer.replace(future, args, kwargs) - if callback is not None: - future.add_done_callback( - functools.partial(_auth_future_to_callback, callback)) - def handle_exception(typ, value, tb): - if future.done(): - return False - else: - future.set_exc_info((typ, value, tb)) - return True - with ExceptionStackContext(handle_exception): - f(*args, **kwargs) - return future - return wrapper - - -class OpenIdMixin(object): - """Abstract implementation of OpenID and Attribute Exchange. - - See `GoogleMixin` below for a customized example (which also - includes OAuth support). - - Class attributes: - - * ``_OPENID_ENDPOINT``: the identity provider's URI. - """ - @return_future - def authenticate_redirect(self, callback_uri=None, - ax_attrs=["name", "email", "language", "username"], - callback=None): - """Redirects to the authentication URL for this service. - - After authentication, the service will redirect back to the given - callback URI with additional parameters including ``openid.mode``. - - We request the given attributes for the authenticated user by - default (name, email, language, and username). If you don't need - all those attributes for your app, you can request fewer with - the ax_attrs keyword argument. - - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. - """ - callback_uri = callback_uri or self.request.uri - args = self._openid_args(callback_uri, ax_attrs=ax_attrs) - self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args)) - callback() - - @_auth_return_future - def get_authenticated_user(self, callback, http_client=None): - """Fetches the authenticated user data upon redirect. 
-
-        This method should be called by the handler that receives the
-        redirect from the `authenticate_redirect()` method (which is
-        often the same as the one that calls it; in that case you would
-        call `get_authenticated_user` if the ``openid.mode`` parameter
-        is present and `authenticate_redirect` if it is not).
-
-        The result of this method will generally be used to set a cookie.
-        """
-        # Verify the OpenID response via direct request to the OP
-        args = dict((k, v[-1]) for k, v in self.request.arguments.items())
-        args["openid.mode"] = u("check_authentication")
-        url = self._OPENID_ENDPOINT
-        if http_client is None:
-            http_client = self.get_auth_http_client()
-        http_client.fetch(url, functools.partial(
-            self._on_authentication_verified, callback),
-            method="POST", body=urllib_parse.urlencode(args))
-
-    def _openid_args(self, callback_uri, ax_attrs=[], oauth_scope=None):
-        url = urlparse.urljoin(self.request.full_url(), callback_uri)
-        args = {
-            "openid.ns": "http://specs.openid.net/auth/2.0",
-            "openid.claimed_id":
-            "http://specs.openid.net/auth/2.0/identifier_select",
-            "openid.identity":
-            "http://specs.openid.net/auth/2.0/identifier_select",
-            "openid.return_to": url,
-            "openid.realm": urlparse.urljoin(url, '/'),
-            "openid.mode": "checkid_setup",
-        }
-        if ax_attrs:
-            args.update({
-                "openid.ns.ax": "http://openid.net/srv/ax/1.0",
-                "openid.ax.mode": "fetch_request",
-            })
-            ax_attrs = set(ax_attrs)
-            required = []
-            if "name" in ax_attrs:
-                ax_attrs -= set(["name", "firstname", "fullname", "lastname"])
-                required += ["firstname", "fullname", "lastname"]
-                args.update({
-                    "openid.ax.type.firstname":
-                    "http://axschema.org/namePerson/first",
-                    "openid.ax.type.fullname":
-                    "http://axschema.org/namePerson",
-                    "openid.ax.type.lastname":
-                    "http://axschema.org/namePerson/last",
-                })
-            known_attrs = {
-                "email": "http://axschema.org/contact/email",
-                "language": "http://axschema.org/pref/language",
-                "username": "http://axschema.org/namePerson/friendly",
-            }
-            for name in ax_attrs:
-                args["openid.ax.type." + name] = known_attrs[name]
-                required.append(name)
-            args["openid.ax.required"] = ",".join(required)
-        if oauth_scope:
-            args.update({
-                "openid.ns.oauth":
-                "http://specs.openid.net/extensions/oauth/1.0",
-                "openid.oauth.consumer": self.request.host.split(":")[0],
-                "openid.oauth.scope": oauth_scope,
-            })
-        return args
-
-    def _on_authentication_verified(self, future, response):
-        if response.error or b"is_valid:true" not in response.body:
-            future.set_exception(AuthError(
-                "Invalid OpenID response: %s" % (response.error or
-                                                 response.body)))
-            return
-
-        # Make sure we got back at least an email from attribute exchange
-        ax_ns = None
-        for name in self.request.arguments:
-            if name.startswith("openid.ns.") and \
-                    self.get_argument(name) == u("http://openid.net/srv/ax/1.0"):
-                ax_ns = name[10:]
-                break
-
-        def get_ax_arg(uri):
-            if not ax_ns:
-                return u("")
-            prefix = "openid." + ax_ns + ".type."
-            ax_name = None
-            for name in self.request.arguments.keys():
-                if self.get_argument(name) == uri and name.startswith(prefix):
-                    part = name[len(prefix):]
-                    ax_name = "openid." + ax_ns + ".value." + part
-                    break
-            if not ax_name:
-                return u("")
-            return self.get_argument(ax_name, u(""))
-
-        email = get_ax_arg("http://axschema.org/contact/email")
-        name = get_ax_arg("http://axschema.org/namePerson")
-        first_name = get_ax_arg("http://axschema.org/namePerson/first")
-        last_name = get_ax_arg("http://axschema.org/namePerson/last")
-        username = get_ax_arg("http://axschema.org/namePerson/friendly")
-        locale = get_ax_arg("http://axschema.org/pref/language").lower()
-        user = dict()
-        name_parts = []
-        if first_name:
-            user["first_name"] = first_name
-            name_parts.append(first_name)
-        if last_name:
-            user["last_name"] = last_name
-            name_parts.append(last_name)
-        if name:
-            user["name"] = name
-        elif name_parts:
-            user["name"] = u(" ").join(name_parts)
-        elif email:
-            user["name"] = email.split("@")[0]
-        if email:
-            user["email"] = email
-        if locale:
-            user["locale"] = locale
-        if username:
-            user["username"] = username
-        claimed_id = self.get_argument("openid.claimed_id", None)
-        if claimed_id:
-            user["claimed_id"] = claimed_id
-        future.set_result(user)
-
-    def get_auth_http_client(self):
-        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
-
-        May be overridden by subclasses to use an HTTP client other than
-        the default.
-        """
-        return httpclient.AsyncHTTPClient()
-
-
-class OAuthMixin(object):
-    """Abstract implementation of OAuth 1.0 and 1.0a.
-
-    See `TwitterMixin` and `FriendFeedMixin` below for example implementations,
-    or `GoogleMixin` for an OAuth/OpenID hybrid.
-
-    Class attributes:
-
-    * ``_OAUTH_AUTHORIZE_URL``: The service's OAuth authorization url.
-    * ``_OAUTH_ACCESS_TOKEN_URL``: The service's OAuth access token url.
-    * ``_OAUTH_VERSION``: May be either "1.0" or "1.0a".
-    * ``_OAUTH_NO_CALLBACKS``: Set this to True if the service requires
-      advance registration of callbacks.
-
-    Subclasses must also override the `_oauth_get_user_future` and
-    `_oauth_consumer_token` methods.
-    """
-    @return_future
-    def authorize_redirect(self, callback_uri=None, extra_params=None,
-                           http_client=None, callback=None):
-        """Redirects the user to obtain OAuth authorization for this service.
-
-        The ``callback_uri`` may be omitted if you have previously
-        registered a callback URI with the third-party service.  For
-        some sevices (including Friendfeed), you must use a
-        previously-registered callback URI and cannot specify a
-        callback via this method.
-
-        This method sets a cookie called ``_oauth_request_token`` which is
-        subsequently used (and cleared) in `get_authenticated_user` for
-        security purposes.
-
-        Note that this method is asynchronous, although it calls
-        `.RequestHandler.finish` for you so it may not be necessary
-        to pass a callback or use the `.Future` it returns.  However,
-        if this method is called from a function decorated with
-        `.gen.coroutine`, you must call it with ``yield`` to keep the
-        response from being closed prematurely.
-
-        .. versionchanged:: 3.1
-           Now returns a `.Future` and takes an optional callback, for
-           compatibility with `.gen.coroutine`.
- """ - if callback_uri and getattr(self, "_OAUTH_NO_CALLBACKS", False): - raise Exception("This service does not support oauth_callback") - if http_client is None: - http_client = self.get_auth_http_client() - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - http_client.fetch( - self._oauth_request_token_url(callback_uri=callback_uri, - extra_params=extra_params), - functools.partial( - self._on_request_token, - self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback)) - else: - http_client.fetch( - self._oauth_request_token_url(), - functools.partial( - self._on_request_token, self._OAUTH_AUTHORIZE_URL, - callback_uri, - callback)) - - @_auth_return_future - def get_authenticated_user(self, callback, http_client=None): - """Gets the OAuth authorized user and access token. - - This method should be called from the handler for your - OAuth callback URL to complete the registration process. We run the - callback with the authenticated user dictionary. This dictionary - will contain an ``access_key`` which can be used to make authorized - requests to this service on behalf of the user. The dictionary will - also contain other fields such as ``name``, depending on the service - used. - """ - future = callback - request_key = escape.utf8(self.get_argument("oauth_token")) - oauth_verifier = self.get_argument("oauth_verifier", None) - request_cookie = self.get_cookie("_oauth_request_token") - if not request_cookie: - future.set_exception(AuthError( - "Missing OAuth request token cookie")) - return - self.clear_cookie("_oauth_request_token") - cookie_key, cookie_secret = [base64.b64decode(escape.utf8(i)) for i in request_cookie.split("|")] - if cookie_key != request_key: - future.set_exception(AuthError( - "Request token does not match cookie")) - return - token = dict(key=cookie_key, secret=cookie_secret) - if oauth_verifier: - token["verifier"] = oauth_verifier - if http_client is None: - http_client = self.get_auth_http_client() - http_client.fetch(self._oauth_access_token_url(token), - functools.partial(self._on_access_token, callback)) - - def _oauth_request_token_url(self, callback_uri=None, extra_params=None): - consumer_token = self._oauth_consumer_token() - url = self._OAUTH_REQUEST_TOKEN_URL - args = dict( - oauth_consumer_key=escape.to_basestring(consumer_token["key"]), - oauth_signature_method="HMAC-SHA1", - oauth_timestamp=str(int(time.time())), - oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), - oauth_version="1.0", - ) - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - if callback_uri == "oob": - args["oauth_callback"] = "oob" - elif callback_uri: - args["oauth_callback"] = urlparse.urljoin( - self.request.full_url(), callback_uri) - if extra_params: - args.update(extra_params) - signature = _oauth10a_signature(consumer_token, "GET", url, args) - else: - signature = _oauth_signature(consumer_token, "GET", url, args) - - args["oauth_signature"] = signature - return url + "?" + urllib_parse.urlencode(args) - - def _on_request_token(self, authorize_url, callback_uri, callback, - response): - if response.error: - raise Exception("Could not get request token: %s" % response.error) - request_token = _oauth_parse_response(response.body) - data = (base64.b64encode(escape.utf8(request_token["key"])) + b"|" + - base64.b64encode(escape.utf8(request_token["secret"]))) - self.set_cookie("_oauth_request_token", data) - args = dict(oauth_token=request_token["key"]) - if callback_uri == "oob": - self.finish(authorize_url + "?" 
-            callback()
-            return
-        elif callback_uri:
-            args["oauth_callback"] = urlparse.urljoin(
-                self.request.full_url(), callback_uri)
-        self.redirect(authorize_url + "?" + urllib_parse.urlencode(args))
-        callback()
-
-    def _oauth_access_token_url(self, request_token):
-        consumer_token = self._oauth_consumer_token()
-        url = self._OAUTH_ACCESS_TOKEN_URL
-        args = dict(
-            oauth_consumer_key=escape.to_basestring(consumer_token["key"]),
-            oauth_token=escape.to_basestring(request_token["key"]),
-            oauth_signature_method="HMAC-SHA1",
-            oauth_timestamp=str(int(time.time())),
-            oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)),
-            oauth_version="1.0",
-        )
-        if "verifier" in request_token:
-            args["oauth_verifier"] = request_token["verifier"]
-
-        if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a":
-            signature = _oauth10a_signature(consumer_token, "GET", url, args,
-                                            request_token)
-        else:
-            signature = _oauth_signature(consumer_token, "GET", url, args,
-                                         request_token)
-
-        args["oauth_signature"] = signature
-        return url + "?" + urllib_parse.urlencode(args)
-
-    def _on_access_token(self, future, response):
-        if response.error:
-            future.set_exception(AuthError("Could not fetch access token"))
-            return
-
-        access_token = _oauth_parse_response(response.body)
-        self._oauth_get_user_future(access_token).add_done_callback(
-            functools.partial(self._on_oauth_get_user, access_token, future))
-
-    def _oauth_consumer_token(self):
-        """Subclasses must override this to return their OAuth consumer keys.
-
-        The return value should be a `dict` with keys ``key`` and ``secret``.
-        """
-        raise NotImplementedError()
-
-    @return_future
-    def _oauth_get_user_future(self, access_token, callback):
-        """Subclasses must override this to get basic information about the
-        user.
-
-        Should return a `.Future` whose result is a dictionary
-        containing information about the user, which may have been
-        retrieved by using ``access_token`` to make a request to the
-        service.
-
-        The access token will be added to the returned dictionary to make
-        the result of `get_authenticated_user`.
-
-        For backwards compatibility, the callback-based ``_oauth_get_user``
-        method is also supported.
-        """
-        # By default, call the old-style _oauth_get_user, but new code
-        # should override this method instead.
-        self._oauth_get_user(access_token, callback)
-
-    def _oauth_get_user(self, access_token, callback):
-        raise NotImplementedError()
-
-    def _on_oauth_get_user(self, access_token, future, user_future):
-        if user_future.exception() is not None:
-            future.set_exception(user_future.exception())
-            return
-        user = user_future.result()
-        if not user:
-            future.set_exception(AuthError("Error getting user"))
-            return
-        user["access_token"] = access_token
-        future.set_result(user)
-
-    def _oauth_request_parameters(self, url, access_token, parameters={},
-                                  method="GET"):
-        """Returns the OAuth parameters as a dict for the given request.
-
-        parameters should include all POST arguments and query string arguments
-        that will be sent with the request.
- """ - consumer_token = self._oauth_consumer_token() - base_args = dict( - oauth_consumer_key=escape.to_basestring(consumer_token["key"]), - oauth_token=escape.to_basestring(access_token["key"]), - oauth_signature_method="HMAC-SHA1", - oauth_timestamp=str(int(time.time())), - oauth_nonce=escape.to_basestring(binascii.b2a_hex(uuid.uuid4().bytes)), - oauth_version="1.0", - ) - args = {} - args.update(base_args) - args.update(parameters) - if getattr(self, "_OAUTH_VERSION", "1.0a") == "1.0a": - signature = _oauth10a_signature(consumer_token, method, url, args, - access_token) - else: - signature = _oauth_signature(consumer_token, method, url, args, - access_token) - base_args["oauth_signature"] = escape.to_basestring(signature) - return base_args - - def get_auth_http_client(self): - """Returns the `.AsyncHTTPClient` instance to be used for auth requests. - - May be overridden by subclasses to use an HTTP client other than - the default. - """ - return httpclient.AsyncHTTPClient() - - -class OAuth2Mixin(object): - """Abstract implementation of OAuth 2.0. - - See `FacebookGraphMixin` below for an example implementation. - - Class attributes: - - * ``_OAUTH_AUTHORIZE_URL``: The service's authorization url. - * ``_OAUTH_ACCESS_TOKEN_URL``: The service's access token url. - """ - @return_future - def authorize_redirect(self, redirect_uri=None, client_id=None, - client_secret=None, extra_params=None, - callback=None, scope=None, response_type="code"): - """Redirects the user to obtain OAuth authorization for this service. - - Some providers require that you register a redirect URL with - your application instead of passing one via this method. You - should call this method to log the user in, and then call - ``get_authenticated_user`` in the handler for your - redirect URL to complete the authorization process. - - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. - """ - args = { - "redirect_uri": redirect_uri, - "client_id": client_id, - "response_type": response_type - } - if extra_params: - args.update(extra_params) - if scope: - args['scope'] = ' '.join(scope) - self.redirect( - url_concat(self._OAUTH_AUTHORIZE_URL, args)) - callback() - - def _oauth_request_token_url(self, redirect_uri=None, client_id=None, - client_secret=None, code=None, - extra_params=None): - url = self._OAUTH_ACCESS_TOKEN_URL - args = dict( - redirect_uri=redirect_uri, - code=code, - client_id=client_id, - client_secret=client_secret, - ) - if extra_params: - args.update(extra_params) - return url_concat(url, args) - - -class TwitterMixin(OAuthMixin): - """Twitter OAuth authentication. - - To authenticate with Twitter, register your application with - Twitter at http://twitter.com/apps. Then copy your Consumer Key - and Consumer Secret to the application - `~tornado.web.Application.settings` ``twitter_consumer_key`` and - ``twitter_consumer_secret``. Use this mixin on the handler for the - URL you registered as your application's callback URL. - - When your application is set up, you can use this mixin like this - to authenticate the user with Twitter and get access to their stream:: - - class TwitterLoginHandler(tornado.web.RequestHandler, - tornado.auth.TwitterMixin): - @tornado.gen.coroutine - def get(self): - if self.get_argument("oauth_token", None): - user = yield self.get_authenticated_user() - # Save the user using e.g. 
-                else:
-                    yield self.authorize_redirect()
-
-    The user object returned by `~OAuthMixin.get_authenticated_user`
-    includes the attributes ``username``, ``name``, ``access_token``,
-    and all of the custom Twitter user attributes described at
-    https://dev.twitter.com/docs/api/1.1/get/users/show
-    """
-    _OAUTH_REQUEST_TOKEN_URL = "https://api.twitter.com/oauth/request_token"
-    _OAUTH_ACCESS_TOKEN_URL = "https://api.twitter.com/oauth/access_token"
-    _OAUTH_AUTHORIZE_URL = "https://api.twitter.com/oauth/authorize"
-    _OAUTH_AUTHENTICATE_URL = "https://api.twitter.com/oauth/authenticate"
-    _OAUTH_NO_CALLBACKS = False
-    _TWITTER_BASE_URL = "https://api.twitter.com/1.1"
-
-    @return_future
-    def authenticate_redirect(self, callback_uri=None, callback=None):
-        """Just like `~OAuthMixin.authorize_redirect`, but
-        auto-redirects if authorized.
-
-        This is generally the right interface to use if you are using
-        Twitter for single-sign on.
-
-        .. versionchanged:: 3.1
-           Now returns a `.Future` and takes an optional callback, for
-           compatibility with `.gen.coroutine`.
-        """
-        http = self.get_auth_http_client()
-        http.fetch(self._oauth_request_token_url(callback_uri=callback_uri),
-                   functools.partial(
-                       self._on_request_token, self._OAUTH_AUTHENTICATE_URL,
-                       None, callback))
-
-    @_auth_return_future
-    def twitter_request(self, path, callback=None, access_token=None,
-                        post_args=None, **args):
-        """Fetches the given API path, e.g., ``statuses/user_timeline/btaylor``
-
-        The path should not include the format or API version number.
-        (we automatically use JSON format and API version 1).
-
-        If the request is a POST, ``post_args`` should be provided. Query
-        string arguments should be given as keyword arguments.
-
-        All the Twitter methods are documented at http://dev.twitter.com/
-
-        Many methods require an OAuth access token which you can
-        obtain through `~OAuthMixin.authorize_redirect` and
-        `~OAuthMixin.get_authenticated_user`. The user returned through that
-        process includes an 'access_token' attribute that can be used
-        to make authenticated requests via this method. Example
-        usage::
-
-            class MainHandler(tornado.web.RequestHandler,
-                              tornado.auth.TwitterMixin):
-                @tornado.web.authenticated
-                @tornado.gen.coroutine
-                def get(self):
-                    new_entry = yield self.twitter_request(
-                        "/statuses/update",
-                        post_args={"status": "Testing Tornado Web Server"},
-                        access_token=self.current_user["access_token"])
-                    if not new_entry:
-                        # Call failed; perhaps missing permission?
-                        yield self.authorize_redirect()
-                        return
-                    self.finish("Posted a message!")
-
-        """
-        if path.startswith('http:') or path.startswith('https:'):
-            # Raw urls are useful for e.g. search which doesn't follow the
-            # usual pattern: http://search.twitter.com/search.json
-            url = path
-        else:
-            url = self._TWITTER_BASE_URL + path + ".json"
-        # Add the OAuth resource request signature if we have credentials
-        if access_token:
-            all_args = {}
-            all_args.update(args)
-            all_args.update(post_args or {})
-            method = "POST" if post_args is not None else "GET"
-            oauth = self._oauth_request_parameters(
-                url, access_token, all_args, method=method)
-            args.update(oauth)
-        if args:
-            url += "?" + urllib_parse.urlencode(args)
-        http = self.get_auth_http_client()
-        http_callback = functools.partial(self._on_twitter_request, callback)
-        if post_args is not None:
-            http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
-                       callback=http_callback)
-        else:
-            http.fetch(url, callback=http_callback)
-
-    def _on_twitter_request(self, future, response):
-        if response.error:
-            future.set_exception(AuthError(
-                "Error response %s fetching %s" % (response.error,
-                                                   response.request.url)))
-            return
-        future.set_result(escape.json_decode(response.body))
-
-    def _oauth_consumer_token(self):
-        self.require_setting("twitter_consumer_key", "Twitter OAuth")
-        self.require_setting("twitter_consumer_secret", "Twitter OAuth")
-        return dict(
-            key=self.settings["twitter_consumer_key"],
-            secret=self.settings["twitter_consumer_secret"])
-
-    @gen.coroutine
-    def _oauth_get_user_future(self, access_token):
-        user = yield self.twitter_request(
-            "/account/verify_credentials",
-            access_token=access_token)
-        if user:
-            user["username"] = user["screen_name"]
-        raise gen.Return(user)
-
-
-class FriendFeedMixin(OAuthMixin):
-    """FriendFeed OAuth authentication.
-
-    To authenticate with FriendFeed, register your application with
-    FriendFeed at http://friendfeed.com/api/applications. Then copy
-    your Consumer Key and Consumer Secret to the application
-    `~tornado.web.Application.settings` ``friendfeed_consumer_key``
-    and ``friendfeed_consumer_secret``. Use this mixin on the handler
-    for the URL you registered as your application's Callback URL.
-
-    When your application is set up, you can use this mixin like this
-    to authenticate the user with FriendFeed and get access to their feed::
-
-        class FriendFeedLoginHandler(tornado.web.RequestHandler,
-                                     tornado.auth.FriendFeedMixin):
-            @tornado.gen.coroutine
-            def get(self):
-                if self.get_argument("oauth_token", None):
-                    user = yield self.get_authenticated_user()
-                    # Save the user using e.g. set_secure_cookie()
-                else:
-                    yield self.authorize_redirect()
-
-    The user object returned by `~OAuthMixin.get_authenticated_user()` includes the
-    attributes ``username``, ``name``, and ``description`` in addition to
-    ``access_token``. You should save the access token with the user;
-    it is required to make requests on behalf of the user later with
-    `friendfeed_request()`.
-    """
-    _OAUTH_VERSION = "1.0"
-    _OAUTH_REQUEST_TOKEN_URL = "https://friendfeed.com/account/oauth/request_token"
-    _OAUTH_ACCESS_TOKEN_URL = "https://friendfeed.com/account/oauth/access_token"
-    _OAUTH_AUTHORIZE_URL = "https://friendfeed.com/account/oauth/authorize"
-    _OAUTH_NO_CALLBACKS = True
-    _OAUTH_VERSION = "1.0"
-
-    @_auth_return_future
-    def friendfeed_request(self, path, callback, access_token=None,
-                           post_args=None, **args):
-        """Fetches the given relative API path, e.g., "/bret/friends"
-
-        If the request is a POST, ``post_args`` should be provided. Query
-        string arguments should be given as keyword arguments.
-
-        All the FriendFeed methods are documented at
-        http://friendfeed.com/api/documentation.
-
-        Many methods require an OAuth access token which you can
-        obtain through `~OAuthMixin.authorize_redirect` and
-        `~OAuthMixin.get_authenticated_user`. The user returned
-        through that process includes an ``access_token`` attribute that
-        can be used to make authenticated requests via this
-        method.
-
-        Example usage::
-
-            class MainHandler(tornado.web.RequestHandler,
-                              tornado.auth.FriendFeedMixin):
-                @tornado.web.authenticated
-                @tornado.gen.coroutine
-                def get(self):
-                    new_entry = yield self.friendfeed_request(
-                        "/entry",
-                        post_args={"body": "Testing Tornado Web Server"},
-                        access_token=self.current_user["access_token"])
-
-                    if not new_entry:
-                        # Call failed; perhaps missing permission?
-                        yield self.authorize_redirect()
-                        return
-                    self.finish("Posted a message!")
-
-        """
-        # Add the OAuth resource request signature if we have credentials
-        url = "http://friendfeed-api.com/v2" + path
-        if access_token:
-            all_args = {}
-            all_args.update(args)
-            all_args.update(post_args or {})
-            method = "POST" if post_args is not None else "GET"
-            oauth = self._oauth_request_parameters(
-                url, access_token, all_args, method=method)
-            args.update(oauth)
-        if args:
-            url += "?" + urllib_parse.urlencode(args)
-        callback = functools.partial(self._on_friendfeed_request, callback)
-        http = self.get_auth_http_client()
-        if post_args is not None:
-            http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args),
-                       callback=callback)
-        else:
-            http.fetch(url, callback=callback)
-
-    def _on_friendfeed_request(self, future, response):
-        if response.error:
-            future.set_exception(AuthError(
-                "Error response %s fetching %s" % (response.error,
-                                                   response.request.url)))
-            return
-        future.set_result(escape.json_decode(response.body))
-
-    def _oauth_consumer_token(self):
-        self.require_setting("friendfeed_consumer_key", "FriendFeed OAuth")
-        self.require_setting("friendfeed_consumer_secret", "FriendFeed OAuth")
-        return dict(
-            key=self.settings["friendfeed_consumer_key"],
-            secret=self.settings["friendfeed_consumer_secret"])
-
-    @gen.coroutine
-    def _oauth_get_user_future(self, access_token, callback):
-        user = yield self.friendfeed_request(
-            "/feedinfo/" + access_token["username"],
-            include="id,name,description", access_token=access_token)
-        if user:
-            user["username"] = user["id"]
-        callback(user)
-
-    def _parse_user_response(self, callback, user):
-        if user:
-            user["username"] = user["id"]
-        callback(user)
-
-
-class GoogleMixin(OpenIdMixin, OAuthMixin):
-    """Google Open ID / OAuth authentication.
-
-    .. deprecated:: 4.0
-       New applications should use `GoogleOAuth2Mixin`
-       below instead of this class. As of May 19, 2014, Google has stopped
-       supporting registration-free authentication.
-
-    No application registration is necessary to use Google for
-    authentication or to access Google resources on behalf of a user.
-
-    Google implements both OpenID and OAuth in a hybrid mode.  If you
-    just need the user's identity, use
-    `~OpenIdMixin.authenticate_redirect`.  If you need to make
-    requests to Google on behalf of the user, use
-    `authorize_redirect`.  On return, parse the response with
-    `~OpenIdMixin.get_authenticated_user`. We send a dict containing
-    the values for the user, including ``email``, ``name``, and
-    ``locale``.
-
-    Example usage::
-
-        class GoogleLoginHandler(tornado.web.RequestHandler,
-                                 tornado.auth.GoogleMixin):
-            @tornado.gen.coroutine
-            def get(self):
-                if self.get_argument("openid.mode", None):
-                    user = yield self.get_authenticated_user()
-                    # Save the user with e.g. set_secure_cookie()
-                else:
-                    yield self.authenticate_redirect()
-    """
-    _OPENID_ENDPOINT = "https://www.google.com/accounts/o8/ud"
-    _OAUTH_ACCESS_TOKEN_URL = "https://www.google.com/accounts/OAuthGetAccessToken"
-
-    @return_future
-    def authorize_redirect(self, oauth_scope, callback_uri=None,
-                           ax_attrs=["name", "email", "language", "username"],
-                           callback=None):
-        """Authenticates and authorizes for the given Google resource.
-
-        Some of the available resources which can be used in the ``oauth_scope``
-        argument are:
-
-        * Gmail Contacts - http://www.google.com/m8/feeds/
-        * Calendar - http://www.google.com/calendar/feeds/
-        * Finance - http://finance.google.com/finance/feeds/
-
-        You can authorize multiple resources by separating the resource
-        URLs with a space.
-
-        .. versionchanged:: 3.1
-           Returns a `.Future` and takes an optional callback.  These are
-           not strictly necessary as this method is synchronous,
-           but they are supplied for consistency with
-           `OAuthMixin.authorize_redirect`.
-        """
-        callback_uri = callback_uri or self.request.uri
-        args = self._openid_args(callback_uri, ax_attrs=ax_attrs,
-                                 oauth_scope=oauth_scope)
-        self.redirect(self._OPENID_ENDPOINT + "?" + urllib_parse.urlencode(args))
-        callback()
-
-    @_auth_return_future
-    def get_authenticated_user(self, callback):
-        """Fetches the authenticated user data upon redirect."""
-        # Look to see if we are doing combined OpenID/OAuth
-        oauth_ns = ""
-        for name, values in self.request.arguments.items():
-            if name.startswith("openid.ns.") and \
-                    values[-1] == b"http://specs.openid.net/extensions/oauth/1.0":
-                oauth_ns = name[10:]
-                break
-        token = self.get_argument("openid." + oauth_ns + ".request_token", "")
-        if token:
-            http = self.get_auth_http_client()
-            token = dict(key=token, secret="")
-            http.fetch(self._oauth_access_token_url(token),
-                       functools.partial(self._on_access_token, callback))
-        else:
-            chain_future(OpenIdMixin.get_authenticated_user(self),
-                         callback)
-
-    def _oauth_consumer_token(self):
-        self.require_setting("google_consumer_key", "Google OAuth")
-        self.require_setting("google_consumer_secret", "Google OAuth")
-        return dict(
-            key=self.settings["google_consumer_key"],
-            secret=self.settings["google_consumer_secret"])
-
-    def _oauth_get_user_future(self, access_token):
-        return OpenIdMixin.get_authenticated_user(self)
-
-
-class GoogleOAuth2Mixin(OAuth2Mixin):
-    """Google authentication using OAuth2.
-
-    In order to use, register your application with Google and copy the
-    relevant parameters to your application settings.
-
-    * Go to the Google Dev Console at http://console.developers.google.com
-    * Select a project, or create a new one.
-    * In the sidebar on the left, select APIs & Auth.
-    * In the list of APIs, find the Google+ API service and set it to ON.
-    * In the sidebar on the left, select Credentials.
-    * In the OAuth section of the page, select Create New Client ID.
-    * Set the Redirect URI to point to your auth handler
-    * Copy the "Client secret" and "Client ID" to the application settings as
-      {"google_oauth": {"key": CLIENT_ID, "secret": CLIENT_SECRET}}
-
-    .. versionadded:: 3.2
-    """
-    _OAUTH_AUTHORIZE_URL = "https://accounts.google.com/o/oauth2/auth"
-    _OAUTH_ACCESS_TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
-    _OAUTH_NO_CALLBACKS = False
-    _OAUTH_SETTINGS_KEY = 'google_oauth'
-
-    @_auth_return_future
-    def get_authenticated_user(self, redirect_uri, code, callback):
-        """Handles the login for the Google user, returning a user object.
-
-        Example usage::
-
-            class GoogleOAuth2LoginHandler(tornado.web.RequestHandler,
-                                           tornado.auth.GoogleOAuth2Mixin):
-                @tornado.gen.coroutine
-                def get(self):
-                    if self.get_argument('code', False):
-                        user = yield self.get_authenticated_user(
-                            redirect_uri='http://your.site.com/auth/google',
-                            code=self.get_argument('code'))
-                        # Save the user with e.g. set_secure_cookie
-                    else:
-                        yield self.authorize_redirect(
-                            redirect_uri='http://your.site.com/auth/google',
-                            client_id=self.settings['google_oauth']['key'],
-                            scope=['profile', 'email'],
-                            response_type='code',
-                            extra_params={'approval_prompt': 'auto'})
-        """
-        http = self.get_auth_http_client()
-        body = urllib_parse.urlencode({
-            "redirect_uri": redirect_uri,
-            "code": code,
-            "client_id": self.settings[self._OAUTH_SETTINGS_KEY]['key'],
-            "client_secret": self.settings[self._OAUTH_SETTINGS_KEY]['secret'],
-            "grant_type": "authorization_code",
-        })
-
-        http.fetch(self._OAUTH_ACCESS_TOKEN_URL,
-                   functools.partial(self._on_access_token, callback),
-                   method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=body)
-
-    def _on_access_token(self, future, response):
-        """Callback function for the exchange to the access token."""
-        if response.error:
-            future.set_exception(AuthError('Google auth error: %s' % str(response)))
-            return
-
-        args = escape.json_decode(response.body)
-        future.set_result(args)
-
-    def get_auth_http_client(self):
-        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
-
-        May be overridden by subclasses to use an HTTP client other than
-        the default.
-        """
-        return httpclient.AsyncHTTPClient()
-
-
-class FacebookMixin(object):
-    """Facebook Connect authentication.
-
-    .. deprecated:: 1.1
-       New applications should use `FacebookGraphMixin`
-       below instead of this class.  This class does not support the
-       Future-based interface seen on other classes in this module.
-
-    To authenticate with Facebook, register your application with
-    Facebook at http://www.facebook.com/developers/apps.php.  Then
-    copy your API Key and Application Secret to the application settings
-    ``facebook_api_key`` and ``facebook_secret``.
-
-    When your application is set up, you can use this mixin like this
-    to authenticate the user with Facebook::
-
-        class FacebookHandler(tornado.web.RequestHandler,
-                              tornado.auth.FacebookMixin):
-            @tornado.web.asynchronous
-            def get(self):
-                if self.get_argument("session", None):
-                    self.get_authenticated_user(self._on_auth)
-                    return
-                yield self.authenticate_redirect()
-
-            def _on_auth(self, user):
-                if not user:
-                    raise tornado.web.HTTPError(500, "Facebook auth failed")
-                # Save the user using, e.g., set_secure_cookie()
-
-    The user object returned by `get_authenticated_user` includes the
-    attributes ``facebook_uid`` and ``name`` in addition to session attributes
-    like ``session_key``. You should save the session key with the user; it is
-    required to make requests on behalf of the user later with
-    `facebook_request`.
-    """
-    @return_future
-    def authenticate_redirect(self, callback_uri=None, cancel_uri=None,
-                              extended_permissions=None, callback=None):
-        """Authenticates/installs this app for the current user.
-
-        .. versionchanged:: 3.1
-           Returns a `.Future` and takes an optional callback.  These are
-           not strictly necessary as this method is synchronous,
-           but they are supplied for consistency with
-           `OAuthMixin.authorize_redirect`.
- """ - self.require_setting("facebook_api_key", "Facebook Connect") - callback_uri = callback_uri or self.request.uri - args = { - "api_key": self.settings["facebook_api_key"], - "v": "1.0", - "fbconnect": "true", - "display": "page", - "next": urlparse.urljoin(self.request.full_url(), callback_uri), - "return_session": "true", - } - if cancel_uri: - args["cancel_url"] = urlparse.urljoin( - self.request.full_url(), cancel_uri) - if extended_permissions: - if isinstance(extended_permissions, (unicode_type, bytes_type)): - extended_permissions = [extended_permissions] - args["req_perms"] = ",".join(extended_permissions) - self.redirect("http://www.facebook.com/login.php?" + - urllib_parse.urlencode(args)) - callback() - - def authorize_redirect(self, extended_permissions, callback_uri=None, - cancel_uri=None, callback=None): - """Redirects to an authorization request for the given FB resource. - - The available resource names are listed at - http://wiki.developers.facebook.com/index.php/Extended_permission. - The most common resource types include: - - * publish_stream - * read_stream - * email - * sms - - extended_permissions can be a single permission name or a list of - names. To get the session secret and session key, call - get_authenticated_user() just as you would with - authenticate_redirect(). - - .. versionchanged:: 3.1 - Returns a `.Future` and takes an optional callback. These are - not strictly necessary as this method is synchronous, - but they are supplied for consistency with - `OAuthMixin.authorize_redirect`. - """ - return self.authenticate_redirect(callback_uri, cancel_uri, - extended_permissions, - callback=callback) - - def get_authenticated_user(self, callback): - """Fetches the authenticated Facebook user. - - The authenticated user includes the special Facebook attributes - 'session_key' and 'facebook_uid' in addition to the standard - user attributes like 'name'. - """ - self.require_setting("facebook_api_key", "Facebook Connect") - session = escape.json_decode(self.get_argument("session")) - self.facebook_request( - method="facebook.users.getInfo", - callback=functools.partial( - self._on_get_user_info, callback, session), - session_key=session["session_key"], - uids=session["uid"], - fields="uid,first_name,last_name,name,locale,pic_square," - "profile_url,username") - - def facebook_request(self, method, callback, **args): - """Makes a Facebook API REST request. - - We automatically include the Facebook API key and signature, but - it is the callers responsibility to include 'session_key' and any - other required arguments to the method. - - The available Facebook methods are documented here: - http://wiki.developers.facebook.com/index.php/API - - Here is an example for the stream.get() method:: - - class MainHandler(tornado.web.RequestHandler, - tornado.auth.FacebookMixin): - @tornado.web.authenticated - @tornado.web.asynchronous - def get(self): - self.facebook_request( - method="stream.get", - callback=self._on_stream, - session_key=self.current_user["session_key"]) - - def _on_stream(self, stream): - if stream is None: - # Not authorized to read the stream yet? - self.redirect(self.authorize_redirect("read_stream")) - return - self.render("stream.html", stream=stream) - - """ - self.require_setting("facebook_api_key", "Facebook Connect") - self.require_setting("facebook_secret", "Facebook Connect") - if not method.startswith("facebook."): - method = "facebook." 
-        args["api_key"] = self.settings["facebook_api_key"]
-        args["v"] = "1.0"
-        args["method"] = method
-        args["call_id"] = str(long(time.time() * 1e6))
-        args["format"] = "json"
-        args["sig"] = self._signature(args)
-        url = "http://api.facebook.com/restserver.php?" + \
-            urllib_parse.urlencode(args)
-        http = self.get_auth_http_client()
-        http.fetch(url, callback=functools.partial(
-            self._parse_response, callback))
-
-    def _on_get_user_info(self, callback, session, users):
-        if users is None:
-            callback(None)
-            return
-        callback({
-            "name": users[0]["name"],
-            "first_name": users[0]["first_name"],
-            "last_name": users[0]["last_name"],
-            "uid": users[0]["uid"],
-            "locale": users[0]["locale"],
-            "pic_square": users[0]["pic_square"],
-            "profile_url": users[0]["profile_url"],
-            "username": users[0].get("username"),
-            "session_key": session["session_key"],
-            "session_expires": session.get("expires"),
-        })
-
-    def _parse_response(self, callback, response):
-        if response.error:
-            gen_log.warning("HTTP error from Facebook: %s", response.error)
-            callback(None)
-            return
-        try:
-            json = escape.json_decode(response.body)
-        except Exception:
-            gen_log.warning("Invalid JSON from Facebook: %r", response.body)
-            callback(None)
-            return
-        if isinstance(json, dict) and json.get("error_code"):
-            gen_log.warning("Facebook error: %d: %r", json["error_code"],
-                            json.get("error_msg"))
-            callback(None)
-            return
-        callback(json)
-
-    def _signature(self, args):
-        parts = ["%s=%s" % (n, args[n]) for n in sorted(args.keys())]
-        body = "".join(parts) + self.settings["facebook_secret"]
-        if isinstance(body, unicode_type):
-            body = body.encode("utf-8")
-        return hashlib.md5(body).hexdigest()
-
-    def get_auth_http_client(self):
-        """Returns the `.AsyncHTTPClient` instance to be used for auth requests.
-
-        May be overridden by subclasses to use an HTTP client other than
-        the default.
-        """
-        return httpclient.AsyncHTTPClient()
-
-
-class FacebookGraphMixin(OAuth2Mixin):
-    """Facebook authentication using the new Graph API and OAuth2."""
-    _OAUTH_ACCESS_TOKEN_URL = "https://graph.facebook.com/oauth/access_token?"
-    _OAUTH_AUTHORIZE_URL = "https://www.facebook.com/dialog/oauth?"
-    _OAUTH_NO_CALLBACKS = False
-    _FACEBOOK_BASE_URL = "https://graph.facebook.com"
-
-    @_auth_return_future
-    def get_authenticated_user(self, redirect_uri, client_id, client_secret,
-                               code, callback, extra_fields=None):
-        """Handles the login for the Facebook user, returning a user object.
-
-        Example usage::
-
-            class FacebookGraphLoginHandler(LoginHandler, tornado.auth.FacebookGraphMixin):
-                @tornado.gen.coroutine
-                def get(self):
-                    if self.get_argument("code", False):
-                        user = yield self.get_authenticated_user(
-                            redirect_uri='/auth/facebookgraph/',
-                            client_id=self.settings["facebook_api_key"],
-                            client_secret=self.settings["facebook_secret"],
-                            code=self.get_argument("code"))
-                        # Save the user with e.g. set_secure_cookie
set_secure_cookie - else: - yield self.authorize_redirect( - redirect_uri='/auth/facebookgraph/', - client_id=self.settings["facebook_api_key"], - extra_params={"scope": "read_stream,offline_access"}) - """ - http = self.get_auth_http_client() - args = { - "redirect_uri": redirect_uri, - "code": code, - "client_id": client_id, - "client_secret": client_secret, - } - - fields = set(['id', 'name', 'first_name', 'last_name', - 'locale', 'picture', 'link']) - if extra_fields: - fields.update(extra_fields) - - http.fetch(self._oauth_request_token_url(**args), - functools.partial(self._on_access_token, redirect_uri, client_id, - client_secret, callback, fields)) - - def _on_access_token(self, redirect_uri, client_id, client_secret, - future, fields, response): - if response.error: - future.set_exception(AuthError('Facebook auth error: %s' % str(response))) - return - - args = escape.parse_qs_bytes(escape.native_str(response.body)) - session = { - "access_token": args["access_token"][-1], - "expires": args.get("expires") - } - - self.facebook_request( - path="/me", - callback=functools.partial( - self._on_get_user_info, future, session, fields), - access_token=session["access_token"], - fields=",".join(fields) - ) - - def _on_get_user_info(self, future, session, fields, user): - if user is None: - future.set_result(None) - return - - fieldmap = {} - for field in fields: - fieldmap[field] = user.get(field) - - fieldmap.update({"access_token": session["access_token"], "session_expires": session.get("expires")}) - future.set_result(fieldmap) - - @_auth_return_future - def facebook_request(self, path, callback, access_token=None, - post_args=None, **args): - """Fetches the given relative API path, e.g., "/btaylor/picture" - - If the request is a POST, ``post_args`` should be provided. Query - string arguments should be given as keyword arguments. - - An introduction to the Facebook Graph API can be found at - http://developers.facebook.com/docs/api - - Many methods require an OAuth access token which you can - obtain through `~OAuth2Mixin.authorize_redirect` and - `get_authenticated_user`. The user returned through that - process includes an ``access_token`` attribute that can be - used to make authenticated requests via this method. - - Example usage:: - - class MainHandler(tornado.web.RequestHandler, - tornado.auth.FacebookGraphMixin): - @tornado.web.authenticated - @tornado.gen.coroutine - def get(self): - new_entry = yield self.facebook_request( - "/me/feed", - post_args={"message": "I am posting from my Tornado application!"}, - access_token=self.current_user["access_token"]) - - if not new_entry: - # Call failed; perhaps missing permission? - yield self.authorize_redirect() - return - self.finish("Posted a message!") - - The given path is relative to ``self._FACEBOOK_BASE_URL``, - by default "https://graph.facebook.com". - - .. versionchanged:: 3.1 - Added the ability to override ``self._FACEBOOK_BASE_URL``. - """ - url = self._FACEBOOK_BASE_URL + path - all_args = {} - if access_token: - all_args["access_token"] = access_token - all_args.update(args) - - if all_args: - url += "?" 
+ urllib_parse.urlencode(all_args) - callback = functools.partial(self._on_facebook_request, callback) - http = self.get_auth_http_client() - if post_args is not None: - http.fetch(url, method="POST", body=urllib_parse.urlencode(post_args), - callback=callback) - else: - http.fetch(url, callback=callback) - - def _on_facebook_request(self, future, response): - if response.error: - future.set_exception(AuthError("Error response %s fetching %s" % - (response.error, response.request.url))) - return - - future.set_result(escape.json_decode(response.body)) - - def get_auth_http_client(self): - """Returns the `.AsyncHTTPClient` instance to be used for auth requests. - - May be overridden by subclasses to use an HTTP client other than - the default. - """ - return httpclient.AsyncHTTPClient() - - -def _oauth_signature(consumer_token, method, url, parameters={}, token=None): - """Calculates the HMAC-SHA1 OAuth signature for the given request. - - See http://oauth.net/core/1.0/#signing_process - """ - parts = urlparse.urlparse(url) - scheme, netloc, path = parts[:3] - normalized_url = scheme.lower() + "://" + netloc.lower() + path - - base_elems = [] - base_elems.append(method.upper()) - base_elems.append(normalized_url) - base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) - for k, v in sorted(parameters.items()))) - base_string = "&".join(_oauth_escape(e) for e in base_elems) - - key_elems = [escape.utf8(consumer_token["secret"])] - key_elems.append(escape.utf8(token["secret"] if token else "")) - key = b"&".join(key_elems) - - hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) - return binascii.b2a_base64(hash.digest())[:-1] - - -def _oauth10a_signature(consumer_token, method, url, parameters={}, token=None): - """Calculates the HMAC-SHA1 OAuth 1.0a signature for the given request. - - See http://oauth.net/core/1.0a/#signing_process - """ - parts = urlparse.urlparse(url) - scheme, netloc, path = parts[:3] - normalized_url = scheme.lower() + "://" + netloc.lower() + path - - base_elems = [] - base_elems.append(method.upper()) - base_elems.append(normalized_url) - base_elems.append("&".join("%s=%s" % (k, _oauth_escape(str(v))) - for k, v in sorted(parameters.items()))) - - base_string = "&".join(_oauth_escape(e) for e in base_elems) - key_elems = [escape.utf8(urllib_parse.quote(consumer_token["secret"], safe='~'))] - key_elems.append(escape.utf8(urllib_parse.quote(token["secret"], safe='~') if token else "")) - key = b"&".join(key_elems) - - hash = hmac.new(key, escape.utf8(base_string), hashlib.sha1) - return binascii.b2a_base64(hash.digest())[:-1] - - -def _oauth_escape(val): - if isinstance(val, unicode_type): - val = val.encode("utf-8") - return urllib_parse.quote(val, safe="~") - - -def _oauth_parse_response(body): - # I can't find an officially-defined encoding for oauth responses and - # have never seen anyone use non-ascii. Leave the response in a byte - # string for python 2, and use utf8 on python 3. 
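[Editor's note: the two signature helpers deleted above implement the standard OAuth 1.0/1.0a HMAC-SHA1 scheme. A minimal standalone sketch of that scheme follows; the ``sign_oauth10a`` name and flat-argument interface are invented for illustration, whereas the real helpers take consumer/token dicts::

    import binascii
    import hashlib
    import hmac
    try:
        from urllib.parse import quote, urlparse  # py3
    except ImportError:
        from urllib import quote                  # py2
        from urlparse import urlparse

    def sign_oauth10a(consumer_secret, token_secret, method, url, params):
        # Signature base string: METHOD & normalized-url & sorted params,
        # each element percent-encoded (with "~" left unescaped).
        parts = urlparse(url)
        normalized = parts.scheme.lower() + "://" + parts.netloc.lower() + parts.path
        esc = lambda v: quote(str(v), safe="~")
        base = "&".join(esc(e) for e in [
            method.upper(),
            normalized,
            "&".join("%s=%s" % (k, esc(v)) for k, v in sorted(params.items()))])
        # 1.0a additionally percent-encodes both secrets before joining
        # them into the HMAC key, which is the only difference from 1.0.
        key = (esc(consumer_secret) + "&" + esc(token_secret)).encode("utf-8")
        digest = hmac.new(key, base.encode("utf-8"), hashlib.sha1).digest()
        return binascii.b2a_base64(digest)[:-1]

]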
- body = escape.native_str(body) - p = urlparse.parse_qs(body, keep_blank_values=False) - token = dict(key=p["oauth_token"][0], secret=p["oauth_token_secret"][0]) - - # Add the extra parameters the Provider included to the token - special = ("oauth_token", "oauth_token_secret") - token.update((k, p[k][0]) for k in p if k not in special) - return token diff --git a/rosbridge_server/src/tornado/autoreload.py b/rosbridge_server/src/tornado/autoreload.py deleted file mode 100644 index 3982579ad..000000000 --- a/rosbridge_server/src/tornado/autoreload.py +++ /dev/null @@ -1,321 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Automatically restart the server when a source file is modified. - -Most applications should not access this module directly. Instead, -pass the keyword argument ``autoreload=True`` to the -`tornado.web.Application` constructor (or ``debug=True``, which -enables this setting and several others). This will enable autoreload -mode as well as checking for changes to templates and static -resources. Note that restarting is a destructive operation and any -requests in progress will be aborted when the process restarts. (If -you want to disable autoreload while using other debug-mode features, -pass both ``debug=True`` and ``autoreload=False``). - -This module can also be used as a command-line wrapper around scripts -such as unit test runners. See the `main` method for details. - -The command-line wrapper and Application debug modes can be used together. -This combination is encouraged as the wrapper catches syntax errors and -other import-time failures, while debug mode catches changes once -the server has started. - -This module depends on `.IOLoop`, so it will not work in WSGI applications -and Google App Engine. It also will not work correctly when `.HTTPServer`'s -multi-process mode is used. - -Reloading loses any Python interpreter command-line arguments (e.g. ``-u``) -because it re-executes Python using ``sys.executable`` and ``sys.argv``. -Additionally, modifying these variables will cause reloading to behave -incorrectly. - -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import os -import sys - -# sys.path handling -# ----------------- -# -# If a module is run with "python -m", the current directory (i.e. "") -# is automatically prepended to sys.path, but not if it is run as -# "path/to/file.py". The processing for "-m" rewrites the former to -# the latter, so subsequent executions won't have the same path as the -# original. -# -# Conversely, when run as path/to/file.py, the directory containing -# file.py gets added to the path, which can cause confusion as imports -# may become relative in spite of the future import. -# -# We address the former problem by setting the $PYTHONPATH environment -# variable before re-execution so the new process will see the correct -# path. 
We attempt to address the latter problem when tornado.autoreload -# is run as __main__, although we can't fix the general case because -# we cannot reliably reconstruct the original command line -# (http://bugs.python.org/issue14208). - -if __name__ == "__main__": - # This sys.path manipulation must come before our imports (as much - # as possible - if we introduced a tornado.sys or tornado.os - # module we'd be in trouble), or else our imports would become - # relative again despite the future import. - # - # There is a separate __main__ block at the end of the file to call main(). - if sys.path[0] == os.path.dirname(__file__): - del sys.path[0] - -import functools -import logging -import os -import pkgutil -import sys -import traceback -import types -import subprocess -import weakref - -from tornado import ioloop -from tornado.log import gen_log -from tornado import process -from tornado.util import exec_in - -try: - import signal -except ImportError: - signal = None - - -_watched_files = set() -_reload_hooks = [] -_reload_attempted = False -_io_loops = weakref.WeakKeyDictionary() - - -def start(io_loop=None, check_time=500): - """Begins watching source files for changes using the given `.IOLoop`. """ - io_loop = io_loop or ioloop.IOLoop.current() - if io_loop in _io_loops: - return - _io_loops[io_loop] = True - if len(_io_loops) > 1: - gen_log.warning("tornado.autoreload started more than once in the same process") - add_reload_hook(functools.partial(io_loop.close, all_fds=True)) - modify_times = {} - callback = functools.partial(_reload_on_update, modify_times) - scheduler = ioloop.PeriodicCallback(callback, check_time, io_loop=io_loop) - scheduler.start() - - -def wait(): - """Wait for a watched file to change, then restart the process. - - Intended to be used at the end of scripts like unit test runners, - to run the tests again after any source file changes (but see also - the command-line interface in `main`) - """ - io_loop = ioloop.IOLoop() - start(io_loop) - io_loop.start() - - -def watch(filename): - """Add a file to the watch list. - - All imported modules are watched by default. - """ - _watched_files.add(filename) - - -def add_reload_hook(fn): - """Add a function to be called before reloading the process. - - Note that for open file and socket handles it is generally - preferable to set the ``FD_CLOEXEC`` flag (using `fcntl` or - ``tornado.platform.auto.set_close_exec``) instead - of using a reload hook to close them. - """ - _reload_hooks.append(fn) - - -def _reload_on_update(modify_times): - if _reload_attempted: - # We already tried to reload and it didn't work, so don't try again. - return - if process.task_id() is not None: - # We're in a child process created by fork_processes. If child - # processes restarted themselves, they'd all restart and then - # all call fork_processes again. - return - for module in sys.modules.values(): - # Some modules play games with sys.modules (e.g. email/__init__.py - # in the standard library), and occasionally this can cause strange - # failures in getattr. Just ignore anything that's not an ordinary - # module. 
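[Editor's note: a usage sketch of the public hooks above, against the system tornado this package now depends on; the watched file name is hypothetical, and ``debug=True`` on an ``Application`` enables the same machinery::

    from tornado import autoreload, ioloop, web

    class MainHandler(web.RequestHandler):
        def get(self):
            self.write("hello")

    app = web.Application([(r"/", MainHandler)], debug=True)
    app.listen(8888)

    autoreload.watch("settings.yaml")   # also watch a non-Python file

    def before_reload():
        print("restarting")             # runs just before re-exec

    autoreload.add_reload_hook(before_reload)
    ioloop.IOLoop.instance().start()

]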
- if not isinstance(module, types.ModuleType): - continue - path = getattr(module, "__file__", None) - if not path: - continue - if path.endswith(".pyc") or path.endswith(".pyo"): - path = path[:-1] - _check_file(modify_times, path) - for path in _watched_files: - _check_file(modify_times, path) - - -def _check_file(modify_times, path): - try: - modified = os.stat(path).st_mtime - except Exception: - return - if path not in modify_times: - modify_times[path] = modified - return - if modify_times[path] != modified: - gen_log.info("%s modified; restarting server", path) - _reload() - - -def _reload(): - global _reload_attempted - _reload_attempted = True - for fn in _reload_hooks: - fn() - if hasattr(signal, "setitimer"): - # Clear the alarm signal set by - # ioloop.set_blocking_log_threshold so it doesn't fire - # after the exec. - signal.setitimer(signal.ITIMER_REAL, 0, 0) - # sys.path fixes: see comments at top of file. If sys.path[0] is an empty - # string, we were (probably) invoked with -m and the effective path - # is about to change on re-exec. Add the current directory to $PYTHONPATH - # to ensure that the new process sees the same path we did. - path_prefix = '.' + os.pathsep - if (sys.path[0] == '' and - not os.environ.get("PYTHONPATH", "").startswith(path_prefix)): - os.environ["PYTHONPATH"] = (path_prefix + - os.environ.get("PYTHONPATH", "")) - if sys.platform == 'win32': - # os.execv is broken on Windows and can't properly parse command line - # arguments and executable name if they contain whitespaces. subprocess - # fixes that behavior. - subprocess.Popen([sys.executable] + sys.argv) - sys.exit(0) - else: - try: - os.execv(sys.executable, [sys.executable] + sys.argv) - except OSError: - # Mac OS X versions prior to 10.6 do not support execv in - # a process that contains multiple threads. Instead of - # re-executing in the current process, start a new one - # and cause the current process to exit. This isn't - # ideal since the new process is detached from the parent - # terminal and thus cannot easily be killed with ctrl-C, - # but it's better than not being able to autoreload at - # all. - # Unfortunately the errno returned in this case does not - # appear to be consistent, so we can't easily check for - # this error specifically. - os.spawnv(os.P_NOWAIT, sys.executable, - [sys.executable] + sys.argv) - sys.exit(0) - -_USAGE = """\ -Usage: - python -m tornado.autoreload -m module.to.run [args...] - python -m tornado.autoreload path/to/script.py [args...] -""" - - -def main(): - """Command-line wrapper to re-run a script whenever its source changes. - - Scripts may be specified by filename or module name:: - - python -m tornado.autoreload -m tornado.test.runtests - python -m tornado.autoreload tornado/test/runtests.py - - Running a script with this wrapper is similar to calling - `tornado.autoreload.wait` at the end of the script, but this wrapper - can catch import-time problems like syntax errors that would otherwise - prevent the script from reaching its call to `wait`. 
- """ - original_argv = sys.argv - sys.argv = sys.argv[:] - if len(sys.argv) >= 3 and sys.argv[1] == "-m": - mode = "module" - module = sys.argv[2] - del sys.argv[1:3] - elif len(sys.argv) >= 2: - mode = "script" - script = sys.argv[1] - sys.argv = sys.argv[1:] - else: - print(_USAGE, file=sys.stderr) - sys.exit(1) - - try: - if mode == "module": - import runpy - runpy.run_module(module, run_name="__main__", alter_sys=True) - elif mode == "script": - with open(script) as f: - global __file__ - __file__ = script - # Use globals as our "locals" dictionary so that - # something that tries to import __main__ (e.g. the unittest - # module) will see the right things. - exec_in(f.read(), globals(), globals()) - except SystemExit as e: - logging.basicConfig() - gen_log.info("Script exited with status %s", e.code) - except Exception as e: - logging.basicConfig() - gen_log.warning("Script exited with uncaught exception", exc_info=True) - # If an exception occurred at import time, the file with the error - # never made it into sys.modules and so we won't know to watch it. - # Just to make sure we've covered everything, walk the stack trace - # from the exception and watch every file. - for (filename, lineno, name, line) in traceback.extract_tb(sys.exc_info()[2]): - watch(filename) - if isinstance(e, SyntaxError): - # SyntaxErrors are special: their innermost stack frame is fake - # so extract_tb won't see it and we have to get the filename - # from the exception object. - watch(e.filename) - else: - logging.basicConfig() - gen_log.info("Script exited normally") - # restore sys.argv so subsequent executions will include autoreload - sys.argv = original_argv - - if mode == 'module': - # runpy did a fake import of the module as __main__, but now it's - # no longer in sys.modules. Figure out where it is and watch it. - loader = pkgutil.get_loader(module) - if loader is not None: - watch(loader.get_filename()) - - wait() - - -if __name__ == "__main__": - # See also the other __main__ block at the top of the file, which modifies - # sys.path before our imports - main() diff --git a/rosbridge_server/src/tornado/concurrent.py b/rosbridge_server/src/tornado/concurrent.py deleted file mode 100644 index 702aa352b..000000000 --- a/rosbridge_server/src/tornado/concurrent.py +++ /dev/null @@ -1,329 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Utilities for working with threads and ``Futures``. - -``Futures`` are a pattern for concurrent programming introduced in -Python 3.2 in the `concurrent.futures` package (this package has also -been backported to older versions of Python and can be installed with -``pip install futures``). Tornado will use `concurrent.futures.Future` if -it is available; otherwise it will use a compatible class defined in this -module. 
-""" -from __future__ import absolute_import, division, print_function, with_statement - -import functools -import sys - -from tornado.stack_context import ExceptionStackContext, wrap -from tornado.util import raise_exc_info, ArgReplacer - -try: - from concurrent import futures -except ImportError: - futures = None - - -class ReturnValueIgnoredError(Exception): - pass - - -class Future(object): - """Placeholder for an asynchronous result. - - A ``Future`` encapsulates the result of an asynchronous - operation. In synchronous applications ``Futures`` are used - to wait for the result from a thread or process pool; in - Tornado they are normally used with `.IOLoop.add_future` or by - yielding them in a `.gen.coroutine`. - - `tornado.concurrent.Future` is similar to - `concurrent.futures.Future`, but not thread-safe (and therefore - faster for use with single-threaded event loops). - - In addition to ``exception`` and ``set_exception``, methods ``exc_info`` - and ``set_exc_info`` are supported to capture tracebacks in Python 2. - The traceback is automatically available in Python 3, but in the - Python 2 futures backport this information is discarded. - This functionality was previously available in a separate class - ``TracebackFuture``, which is now a deprecated alias for this class. - - .. versionchanged:: 4.0 - `tornado.concurrent.Future` is always a thread-unsafe ``Future`` - with support for the ``exc_info`` methods. Previously it would - be an alias for the thread-safe `concurrent.futures.Future` - if that package was available and fall back to the thread-unsafe - implementation if it was not. - - """ - def __init__(self): - self._done = False - self._result = None - self._exception = None - self._exc_info = None - self._callbacks = [] - - def cancel(self): - """Cancel the operation, if possible. - - Tornado ``Futures`` do not support cancellation, so this method always - returns False. - """ - return False - - def cancelled(self): - """Returns True if the operation has been cancelled. - - Tornado ``Futures`` do not support cancellation, so this method - always returns False. - """ - return False - - def running(self): - """Returns True if this operation is currently running.""" - return not self._done - - def done(self): - """Returns True if the future has finished running.""" - return self._done - - def result(self, timeout=None): - """If the operation succeeded, return its result. If it failed, - re-raise its exception. - """ - if self._result is not None: - return self._result - if self._exc_info is not None: - raise_exc_info(self._exc_info) - elif self._exception is not None: - raise self._exception - self._check_done() - return self._result - - def exception(self, timeout=None): - """If the operation raised an exception, return the `Exception` - object. Otherwise returns None. - """ - if self._exception is not None: - return self._exception - else: - self._check_done() - return None - - def add_done_callback(self, fn): - """Attaches the given callback to the `Future`. - - It will be invoked with the `Future` as its argument when the Future - has finished running and its result is available. In Tornado - consider using `.IOLoop.add_future` instead of calling - `add_done_callback` directly. - """ - if self._done: - fn(self) - else: - self._callbacks.append(fn) - - def set_result(self, result): - """Sets the result of a ``Future``. - - It is undefined to call any of the ``set`` methods more than once - on the same object. 
- """ - self._result = result - self._set_done() - - def set_exception(self, exception): - """Sets the exception of a ``Future.``""" - self._exception = exception - self._set_done() - - def exc_info(self): - """Returns a tuple in the same format as `sys.exc_info` or None. - - .. versionadded:: 4.0 - """ - return self._exc_info - - def set_exc_info(self, exc_info): - """Sets the exception information of a ``Future.`` - - Preserves tracebacks on Python 2. - - .. versionadded:: 4.0 - """ - self._exc_info = exc_info - self.set_exception(exc_info[1]) - - def _check_done(self): - if not self._done: - raise Exception("DummyFuture does not support blocking for results") - - def _set_done(self): - self._done = True - for cb in self._callbacks: - # TODO: error handling - cb(self) - self._callbacks = None - -TracebackFuture = Future - -if futures is None: - FUTURES = Future -else: - FUTURES = (futures.Future, Future) - - -def is_future(x): - return isinstance(x, FUTURES) - - -class DummyExecutor(object): - def submit(self, fn, *args, **kwargs): - future = TracebackFuture() - try: - future.set_result(fn(*args, **kwargs)) - except Exception: - future.set_exc_info(sys.exc_info()) - return future - - def shutdown(self, wait=True): - pass - -dummy_executor = DummyExecutor() - - -def run_on_executor(fn): - """Decorator to run a synchronous method asynchronously on an executor. - - The decorated method may be called with a ``callback`` keyword - argument and returns a future. - - This decorator should be used only on methods of objects with attributes - ``executor`` and ``io_loop``. - """ - @functools.wraps(fn) - def wrapper(self, *args, **kwargs): - callback = kwargs.pop("callback", None) - future = self.executor.submit(fn, self, *args, **kwargs) - if callback: - self.io_loop.add_future(future, - lambda future: callback(future.result())) - return future - return wrapper - - -_NO_RESULT = object() - - -def return_future(f): - """Decorator to make a function that returns via callback return a - `Future`. - - The wrapped function should take a ``callback`` keyword argument - and invoke it with one argument when it has finished. To signal failure, - the function can simply raise an exception (which will be - captured by the `.StackContext` and passed along to the ``Future``). - - From the caller's perspective, the callback argument is optional. - If one is given, it will be invoked when the function is complete - with `Future.result()` as an argument. If the function fails, the - callback will not be run and an exception will be raised into the - surrounding `.StackContext`. - - If no callback is given, the caller should use the ``Future`` to - wait for the function to complete (perhaps by yielding it in a - `.gen.engine` function, or passing it to `.IOLoop.add_future`). - - Usage:: - - @return_future - def future_func(arg1, arg2, callback): - # Do stuff (possibly asynchronous) - callback(result) - - @gen.engine - def caller(callback): - yield future_func(arg1, arg2) - callback() - - Note that ``@return_future`` and ``@gen.engine`` can be applied to the - same function, provided ``@return_future`` appears first. However, - consider using ``@gen.coroutine`` instead of this combination. 
- """ - replacer = ArgReplacer(f, 'callback') - - @functools.wraps(f) - def wrapper(*args, **kwargs): - future = TracebackFuture() - callback, args, kwargs = replacer.replace( - lambda value=_NO_RESULT: future.set_result(value), - args, kwargs) - - def handle_error(typ, value, tb): - future.set_exc_info((typ, value, tb)) - return True - exc_info = None - with ExceptionStackContext(handle_error): - try: - result = f(*args, **kwargs) - if result is not None: - raise ReturnValueIgnoredError( - "@return_future should not be used with functions " - "that return values") - except: - exc_info = sys.exc_info() - raise - if exc_info is not None: - # If the initial synchronous part of f() raised an exception, - # go ahead and raise it to the caller directly without waiting - # for them to inspect the Future. - raise_exc_info(exc_info) - - # If the caller passed in a callback, schedule it to be called - # when the future resolves. It is important that this happens - # just before we return the future, or else we risk confusing - # stack contexts with multiple exceptions (one here with the - # immediate exception, and again when the future resolves and - # the callback triggers its exception by calling future.result()). - if callback is not None: - def run_callback(future): - result = future.result() - if result is _NO_RESULT: - callback() - else: - callback(future.result()) - future.add_done_callback(wrap(run_callback)) - return future - return wrapper - - -def chain_future(a, b): - """Chain two futures together so that when one completes, so does the other. - - The result (success or failure) of ``a`` will be copied to ``b``, unless - ``b`` has already been completed or cancelled by the time ``a`` finishes. - """ - def copy(future): - assert future is a - if b.done(): - return - if (isinstance(a, TracebackFuture) and isinstance(b, TracebackFuture) - and a.exc_info() is not None): - b.set_exc_info(a.exc_info()) - elif a.exception() is not None: - b.set_exception(a.exception()) - else: - b.set_result(a.result()) - a.add_done_callback(copy) diff --git a/rosbridge_server/src/tornado/curl_httpclient.py b/rosbridge_server/src/tornado/curl_httpclient.py deleted file mode 100644 index 3da59a4df..000000000 --- a/rosbridge_server/src/tornado/curl_httpclient.py +++ /dev/null @@ -1,477 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Non-blocking HTTP client implementation using pycurl.""" - -from __future__ import absolute_import, division, print_function, with_statement - -import collections -import logging -import pycurl -import threading -import time - -from tornado import httputil -from tornado import ioloop -from tornado.log import gen_log -from tornado import stack_context - -from tornado.escape import utf8, native_str -from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main -from tornado.util import bytes_type - -try: - from io import BytesIO # py3 -except ImportError: - from cStringIO import StringIO as BytesIO # py2 - - -class CurlAsyncHTTPClient(AsyncHTTPClient): - def initialize(self, io_loop, max_clients=10, defaults=None): - super(CurlAsyncHTTPClient, self).initialize(io_loop, defaults=defaults) - self._multi = pycurl.CurlMulti() - self._multi.setopt(pycurl.M_TIMERFUNCTION, self._set_timeout) - self._multi.setopt(pycurl.M_SOCKETFUNCTION, self._handle_socket) - self._curls = [_curl_create() for i in range(max_clients)] - self._free_list = self._curls[:] - self._requests = collections.deque() - self._fds = {} - self._timeout = None - - # libcurl has bugs that sometimes cause it to not report all - # relevant file descriptors and timeouts to TIMERFUNCTION/ - # SOCKETFUNCTION. Mitigate the effects of such bugs by - # forcing a periodic scan of all active requests. - self._force_timeout_callback = ioloop.PeriodicCallback( - self._handle_force_timeout, 1000, io_loop=io_loop) - self._force_timeout_callback.start() - - # Work around a bug in libcurl 7.29.0: Some fields in the curl - # multi object are initialized lazily, and its destructor will - # segfault if it is destroyed without having been used. Add - # and remove a dummy handle to make sure everything is - # initialized. - dummy_curl_handle = pycurl.Curl() - self._multi.add_handle(dummy_curl_handle) - self._multi.remove_handle(dummy_curl_handle) - - def close(self): - self._force_timeout_callback.stop() - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - for curl in self._curls: - curl.close() - self._multi.close() - super(CurlAsyncHTTPClient, self).close() - - def fetch_impl(self, request, callback): - self._requests.append((request, callback)) - self._process_queue() - self._set_timeout(0) - - def _handle_socket(self, event, fd, multi, data): - """Called by libcurl when it wants to change the file descriptors - it cares about. - """ - event_map = { - pycurl.POLL_NONE: ioloop.IOLoop.NONE, - pycurl.POLL_IN: ioloop.IOLoop.READ, - pycurl.POLL_OUT: ioloop.IOLoop.WRITE, - pycurl.POLL_INOUT: ioloop.IOLoop.READ | ioloop.IOLoop.WRITE - } - if event == pycurl.POLL_REMOVE: - if fd in self._fds: - self.io_loop.remove_handler(fd) - del self._fds[fd] - else: - ioloop_event = event_map[event] - # libcurl sometimes closes a socket and then opens a new - # one using the same FD without giving us a POLL_NONE in - # between. This is a problem with the epoll IOLoop, - # because the kernel can tell when a socket is closed and - # removes it from the epoll automatically, causing future - # update_handler calls to fail. Since we can't tell when - # this has happened, always use remove and re-add - # instead of update. 
- if fd in self._fds: - self.io_loop.remove_handler(fd) - self.io_loop.add_handler(fd, self._handle_events, - ioloop_event) - self._fds[fd] = ioloop_event - - def _set_timeout(self, msecs): - """Called by libcurl to schedule a timeout.""" - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - self._timeout = self.io_loop.add_timeout( - self.io_loop.time() + msecs / 1000.0, self._handle_timeout) - - def _handle_events(self, fd, events): - """Called by IOLoop when there is activity on one of our - file descriptors. - """ - action = 0 - if events & ioloop.IOLoop.READ: - action |= pycurl.CSELECT_IN - if events & ioloop.IOLoop.WRITE: - action |= pycurl.CSELECT_OUT - while True: - try: - ret, num_handles = self._multi.socket_action(fd, action) - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() - - def _handle_timeout(self): - """Called by IOLoop when the requested timeout has passed.""" - with stack_context.NullContext(): - self._timeout = None - while True: - try: - ret, num_handles = self._multi.socket_action( - pycurl.SOCKET_TIMEOUT, 0) - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() - - # In theory, we shouldn't have to do this because curl will - # call _set_timeout whenever the timeout changes. However, - # sometimes after _handle_timeout we will need to reschedule - # immediately even though nothing has changed from curl's - # perspective. This is because when socket_action is - # called with SOCKET_TIMEOUT, libcurl decides internally which - # timeouts need to be processed by using a monotonic clock - # (where available) while tornado uses python's time.time() - # to decide when timeouts have occurred. When those clocks - # disagree on elapsed time (as they will whenever there is an - # NTP adjustment), tornado might call _handle_timeout before - # libcurl is ready. After each timeout, resync the scheduled - # timeout with libcurl's current state. - new_timeout = self._multi.timeout() - if new_timeout >= 0: - self._set_timeout(new_timeout) - - def _handle_force_timeout(self): - """Called by IOLoop periodically to ask libcurl to process any - events it may have forgotten about. - """ - with stack_context.NullContext(): - while True: - try: - ret, num_handles = self._multi.socket_all() - except pycurl.error as e: - ret = e.args[0] - if ret != pycurl.E_CALL_MULTI_PERFORM: - break - self._finish_pending_requests() - - def _finish_pending_requests(self): - """Process any requests that were completed by the last - call to multi.socket_action. 
- """ - while True: - num_q, ok_list, err_list = self._multi.info_read() - for curl in ok_list: - self._finish(curl) - for curl, errnum, errmsg in err_list: - self._finish(curl, errnum, errmsg) - if num_q == 0: - break - self._process_queue() - - def _process_queue(self): - with stack_context.NullContext(): - while True: - started = 0 - while self._free_list and self._requests: - started += 1 - curl = self._free_list.pop() - (request, callback) = self._requests.popleft() - curl.info = { - "headers": httputil.HTTPHeaders(), - "buffer": BytesIO(), - "request": request, - "callback": callback, - "curl_start_time": time.time(), - } - _curl_setup_request(curl, request, curl.info["buffer"], - curl.info["headers"]) - self._multi.add_handle(curl) - - if not started: - break - - def _finish(self, curl, curl_error=None, curl_message=None): - info = curl.info - curl.info = None - self._multi.remove_handle(curl) - self._free_list.append(curl) - buffer = info["buffer"] - if curl_error: - error = CurlError(curl_error, curl_message) - code = error.code - effective_url = None - buffer.close() - buffer = None - else: - error = None - code = curl.getinfo(pycurl.HTTP_CODE) - effective_url = curl.getinfo(pycurl.EFFECTIVE_URL) - buffer.seek(0) - # the various curl timings are documented at - # http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html - time_info = dict( - queue=info["curl_start_time"] - info["request"].start_time, - namelookup=curl.getinfo(pycurl.NAMELOOKUP_TIME), - connect=curl.getinfo(pycurl.CONNECT_TIME), - pretransfer=curl.getinfo(pycurl.PRETRANSFER_TIME), - starttransfer=curl.getinfo(pycurl.STARTTRANSFER_TIME), - total=curl.getinfo(pycurl.TOTAL_TIME), - redirect=curl.getinfo(pycurl.REDIRECT_TIME), - ) - try: - info["callback"](HTTPResponse( - request=info["request"], code=code, headers=info["headers"], - buffer=buffer, effective_url=effective_url, error=error, - reason=info['headers'].get("X-Http-Reason", None), - request_time=time.time() - info["curl_start_time"], - time_info=time_info)) - except Exception: - self.handle_callback_exception(info["callback"]) - - def handle_callback_exception(self, callback): - self.io_loop.handle_callback_exception(callback) - - -class CurlError(HTTPError): - def __init__(self, errno, message): - HTTPError.__init__(self, 599, message) - self.errno = errno - - -def _curl_create(): - curl = pycurl.Curl() - if gen_log.isEnabledFor(logging.DEBUG): - curl.setopt(pycurl.VERBOSE, 1) - curl.setopt(pycurl.DEBUGFUNCTION, _curl_debug) - return curl - - -def _curl_setup_request(curl, request, buffer, headers): - curl.setopt(pycurl.URL, native_str(request.url)) - - # libcurl's magic "Expect: 100-continue" behavior causes delays - # with servers that don't support it (which include, among others, - # Google's OpenID endpoint). Additionally, this behavior has - # a bug in conjunction with the curl_multi_socket_action API - # (https://sourceforge.net/tracker/?func=detail&atid=100976&aid=3039744&group_id=976), - # which increases the delays. 
It's more trouble than it's worth, - # so just turn off the feature (yes, setting Expect: to an empty - # value is the official way to disable this) - if "Expect" not in request.headers: - request.headers["Expect"] = "" - - # libcurl adds Pragma: no-cache by default; disable that too - if "Pragma" not in request.headers: - request.headers["Pragma"] = "" - - # Request headers may be either a regular dict or HTTPHeaders object - if isinstance(request.headers, httputil.HTTPHeaders): - curl.setopt(pycurl.HTTPHEADER, - [native_str("%s: %s" % i) for i in request.headers.get_all()]) - else: - curl.setopt(pycurl.HTTPHEADER, - [native_str("%s: %s" % i) for i in request.headers.items()]) - - if request.header_callback: - curl.setopt(pycurl.HEADERFUNCTION, - lambda line: request.header_callback(native_str(line))) - else: - curl.setopt(pycurl.HEADERFUNCTION, - lambda line: _curl_header_callback(headers, - native_str(line))) - if request.streaming_callback: - write_function = request.streaming_callback - else: - write_function = buffer.write - if bytes_type is str: # py2 - curl.setopt(pycurl.WRITEFUNCTION, write_function) - else: # py3 - # Upstream pycurl doesn't support py3, but ubuntu 12.10 includes - # a fork/port. That version has a bug in which it passes unicode - # strings instead of bytes to the WRITEFUNCTION. This means that - # if you use a WRITEFUNCTION (which tornado always does), you cannot - # download arbitrary binary data. This needs to be fixed in the - # ported pycurl package, but in the meantime this lambda will - # make it work for downloading (utf8) text. - curl.setopt(pycurl.WRITEFUNCTION, lambda s: write_function(utf8(s))) - curl.setopt(pycurl.FOLLOWLOCATION, request.follow_redirects) - curl.setopt(pycurl.MAXREDIRS, request.max_redirects) - curl.setopt(pycurl.CONNECTTIMEOUT_MS, int(1000 * request.connect_timeout)) - curl.setopt(pycurl.TIMEOUT_MS, int(1000 * request.request_timeout)) - if request.user_agent: - curl.setopt(pycurl.USERAGENT, native_str(request.user_agent)) - else: - curl.setopt(pycurl.USERAGENT, "Mozilla/5.0 (compatible; pycurl)") - if request.network_interface: - curl.setopt(pycurl.INTERFACE, request.network_interface) - if request.decompress_response: - curl.setopt(pycurl.ENCODING, "gzip,deflate") - else: - curl.setopt(pycurl.ENCODING, "none") - if request.proxy_host and request.proxy_port: - curl.setopt(pycurl.PROXY, request.proxy_host) - curl.setopt(pycurl.PROXYPORT, request.proxy_port) - if request.proxy_username: - credentials = '%s:%s' % (request.proxy_username, - request.proxy_password) - curl.setopt(pycurl.PROXYUSERPWD, credentials) - else: - curl.setopt(pycurl.PROXY, '') - curl.unsetopt(pycurl.PROXYUSERPWD) - if request.validate_cert: - curl.setopt(pycurl.SSL_VERIFYPEER, 1) - curl.setopt(pycurl.SSL_VERIFYHOST, 2) - else: - curl.setopt(pycurl.SSL_VERIFYPEER, 0) - curl.setopt(pycurl.SSL_VERIFYHOST, 0) - if request.ca_certs is not None: - curl.setopt(pycurl.CAINFO, request.ca_certs) - else: - # There is no way to restore pycurl.CAINFO to its default value - # (Using unsetopt makes it reject all certificates). - # I don't see any way to read the default value from python so it - # can be restored later. We'll have to just leave CAINFO untouched - # if no ca_certs file was specified, and require that if any - # request uses a custom ca_certs file, they all must. - pass - - if request.allow_ipv6 is False: - # Curl behaves reasonably when DNS resolution gives an ipv6 address - # that we can't reach, so allow ipv6 unless the user asks to disable. 
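[Editor's note: most of the ``setopt`` calls in this function are driven by fields on ``tornado.httpclient.HTTPRequest``; a sketch of the mapping, with URL and proxy values hypothetical::

    from tornado.httpclient import HTTPRequest

    request = HTTPRequest(
        url="https://example.com/api",   # -> pycurl.URL
        connect_timeout=5.0,             # -> CONNECTTIMEOUT_MS (x1000)
        request_timeout=30.0,            # -> TIMEOUT_MS (x1000)
        follow_redirects=True,           # -> FOLLOWLOCATION
        max_redirects=4,                 # -> MAXREDIRS
        validate_cert=True,              # -> SSL_VERIFYPEER=1, SSL_VERIFYHOST=2
        proxy_host="127.0.0.1",          # -> PROXY (hypothetical proxy)
        proxy_port=3128,                 # -> PROXYPORT
        user_agent="my-app/1.0")         # -> USERAGENT

]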
- curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_V4) - else: - curl.setopt(pycurl.IPRESOLVE, pycurl.IPRESOLVE_WHATEVER) - - # Set the request method through curl's irritating interface which makes - # up names for almost every single method - curl_options = { - "GET": pycurl.HTTPGET, - "POST": pycurl.POST, - "PUT": pycurl.UPLOAD, - "HEAD": pycurl.NOBODY, - } - custom_methods = set(["DELETE", "OPTIONS", "PATCH"]) - for o in curl_options.values(): - curl.setopt(o, False) - if request.method in curl_options: - curl.unsetopt(pycurl.CUSTOMREQUEST) - curl.setopt(curl_options[request.method], True) - elif request.allow_nonstandard_methods or request.method in custom_methods: - curl.setopt(pycurl.CUSTOMREQUEST, request.method) - else: - raise KeyError('unknown method ' + request.method) - - # Handle curl's cryptic options for every individual HTTP method - if request.method in ("POST", "PUT"): - if request.body is None: - raise AssertionError( - 'Body must not be empty for "%s" request' - % request.method) - - request_buffer = BytesIO(utf8(request.body)) - curl.setopt(pycurl.READFUNCTION, request_buffer.read) - if request.method == "POST": - def ioctl(cmd): - if cmd == curl.IOCMD_RESTARTREAD: - request_buffer.seek(0) - curl.setopt(pycurl.IOCTLFUNCTION, ioctl) - curl.setopt(pycurl.POSTFIELDSIZE, len(request.body)) - else: - curl.setopt(pycurl.INFILESIZE, len(request.body)) - elif request.method == "GET": - if request.body is not None: - raise AssertionError('Body must be empty for GET request') - - if request.auth_username is not None: - userpwd = "%s:%s" % (request.auth_username, request.auth_password or '') - - if request.auth_mode is None or request.auth_mode == "basic": - curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_BASIC) - elif request.auth_mode == "digest": - curl.setopt(pycurl.HTTPAUTH, pycurl.HTTPAUTH_DIGEST) - else: - raise ValueError("Unsupported auth_mode %s" % request.auth_mode) - - curl.setopt(pycurl.USERPWD, native_str(userpwd)) - gen_log.debug("%s %s (username: %r)", request.method, request.url, - request.auth_username) - else: - curl.unsetopt(pycurl.USERPWD) - gen_log.debug("%s %s", request.method, request.url) - - if request.client_cert is not None: - curl.setopt(pycurl.SSLCERT, request.client_cert) - - if request.client_key is not None: - curl.setopt(pycurl.SSLKEY, request.client_key) - - if threading.activeCount() > 1: - # libcurl/pycurl is not thread-safe by default. When multiple threads - # are used, signals should be disabled. This has the side effect - # of disabling DNS timeouts in some environments (when libcurl is - # not linked against ares), so we don't do it when there is only one - # thread. Applications that use many short-lived threads may need - # to set NOSIGNAL manually in a prepare_curl_callback since - # there may not be any other threads running at the time we call - # threading.activeCount. - curl.setopt(pycurl.NOSIGNAL, 1) - if request.prepare_curl_callback is not None: - request.prepare_curl_callback(curl) - - -def _curl_header_callback(headers, header_line): - # header_line as returned by curl includes the end-of-line characters. 
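[Editor's note: the header-parsing path below feeds the ``header_callback``/``streaming_callback`` request fields; a sketch using the 4.x-era callback style shown throughout this file, with a hypothetical URL and an IOLoop assumed to be running::

    from tornado.httpclient import AsyncHTTPClient, HTTPRequest

    chunks = []

    def on_header(line):
        # Called once per raw header line, end-of-line characters
        # included, just as _curl_header_callback receives them.
        pass

    request = HTTPRequest("http://example.com/big-file",
                          header_callback=on_header,
                          streaming_callback=chunks.append)

    def on_done(response):
        # The body chunks were delivered to `chunks` instead of
        # being buffered on the response.
        print(response.code, sum(len(c) for c in chunks))

    AsyncHTTPClient().fetch(request, callback=on_done)

]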
- header_line = header_line.strip() - if header_line.startswith("HTTP/"): - headers.clear() - try: - (__, __, reason) = httputil.parse_response_start_line(header_line) - header_line = "X-Http-Reason: %s" % reason - except httputil.HTTPInputError: - return - if not header_line: - return - headers.parse_line(header_line) - - -def _curl_debug(debug_type, debug_msg): - debug_types = ('I', '<', '>', '<', '>') - if debug_type == 0: - gen_log.debug('%s', debug_msg.strip()) - elif debug_type in (1, 2): - for line in debug_msg.splitlines(): - gen_log.debug('%s %s', debug_types[debug_type], line) - elif debug_type == 4: - gen_log.debug('%s %r', debug_types[debug_type], debug_msg) - -if __name__ == "__main__": - AsyncHTTPClient.configure(CurlAsyncHTTPClient) - main() diff --git a/rosbridge_server/src/tornado/escape.py b/rosbridge_server/src/tornado/escape.py deleted file mode 100644 index 48fa673c1..000000000 --- a/rosbridge_server/src/tornado/escape.py +++ /dev/null @@ -1,396 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Escaping/unescaping methods for HTML, JSON, URLs, and others. - -Also includes a few other miscellaneous string manipulation functions that -have crept in over time. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import re -import sys - -from tornado.util import bytes_type, unicode_type, basestring_type, u - -try: - from urllib.parse import parse_qs as _parse_qs # py3 -except ImportError: - from urlparse import parse_qs as _parse_qs # Python 2.6+ - -try: - import htmlentitydefs # py2 -except ImportError: - import html.entities as htmlentitydefs # py3 - -try: - import urllib.parse as urllib_parse # py3 -except ImportError: - import urllib as urllib_parse # py2 - -import json - -try: - unichr -except NameError: - unichr = chr - -_XHTML_ESCAPE_RE = re.compile('[&<>"\']') -_XHTML_ESCAPE_DICT = {'&': '&amp;', '<': '&lt;', '>': '&gt;', '"': '&quot;', - '\'': '&#39;'} - - -def xhtml_escape(value): - """Escapes a string so it is valid within HTML or XML. - - Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``. - When used in attribute values the escaped strings must be enclosed - in quotes. - - .. versionchanged:: 3.2 - - Added the single quote to the list of escaped characters. - """ - return _XHTML_ESCAPE_RE.sub(lambda match: _XHTML_ESCAPE_DICT[match.group(0)], - to_basestring(value)) - - -def xhtml_unescape(value): - """Un-escapes an XML-escaped string.""" - return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value)) - - -# The fact that json_encode wraps json.dumps is an implementation detail. -# Please see /~https://github.com/tornadoweb/tornado/pull/706 -# before sending a pull request that adds **kwargs to this function. -def json_encode(value): - """JSON-encodes the given Python object.""" - # JSON permits but does not require forward slashes to be escaped. - # This is useful when json data is emitted in a <script> tag - # in HTML, as it prevents </script> tags from prematurely terminating - # the javascript.
Some json libraries do this escaping by default, - # although python's standard library does not, so we do it here. - # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped - return json.dumps(value).replace("</", "<\\/") - - -_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&amp;|&quot;)*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&amp;|&quot;)*\)))+)""")) - - -def linkify(text, shorten=False, extra_params="", - require_protocol=False, permitted_protocols=["http", "https"]): - """Converts plain text into HTML with links. - - For example: ``linkify("Hello http://tornadoweb.org!")`` would return - ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!`` - - Parameters: - - * ``shorten``: Long urls will be shortened for display. - - * ``extra_params``: Extra text to include in the link tag, or a callable - taking the link as an argument and returning the extra text - e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``, - or:: - - def extra_params_cb(url): - if url.startswith("http://example.com"): - return 'class="internal"' - else: - return 'class="external" rel="nofollow"' - linkify(text, extra_params=extra_params_cb) - - * ``require_protocol``: Only linkify urls which include a protocol. If - this is False, urls such as www.facebook.com will also be linkified. - - * ``permitted_protocols``: List (or set) of protocols which should be - linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp", - "mailto"])``. It is very unsafe to include protocols such as - ``javascript``. - """ - if extra_params and not callable(extra_params): - extra_params = " " + extra_params.strip() - - def make_link(m): - url = m.group(1) - proto = m.group(2) - if require_protocol and not proto: - return url # not protocol, no linkify - - if proto and proto not in permitted_protocols: - return url # bad protocol, no linkify - - href = m.group(1) - if not proto: - href = "http://" + href # no proto specified, use http - - if callable(extra_params): - params = " " + extra_params(href).strip() - else: - params = extra_params - - # clip long urls. max_len is just an approximation - max_len = 30 - if shorten and len(url) > max_len: - before_clip = url - if proto: - proto_len = len(proto) + 1 + len(m.group(3) or "") # +1 for : - else: - proto_len = 0 - - parts = url[proto_len:].split("/") - if len(parts) > 1: - # Grab the whole host part plus the first bit of the path - # The path is usually not that interesting once shortened - # (no more slug, etc), so it really just provides a little - # extra indication of shortening. - url = url[:proto_len] + parts[0] + "/" + \ - parts[1][:8].split('?')[0].split('.')[0] - - if len(url) > max_len * 1.5: # still too long - url = url[:max_len] - - if url != before_clip: - amp = url.rfind('&') - # avoid splitting html char entities - if amp > max_len - 5: - url = url[:amp] - url += "..." - - if len(url) >= len(before_clip): - url = before_clip - else: - # full url is visible on mouse-over (for those who don't - # have a status bar, such as Safari by default) - params += ' title="%s"' % href - - return u('<a href="%s"%s>%s</a>') % (href, params, url) - - # First HTML-escape so that our strings are all safe. - # The regex is modified to avoid character entities other than &amp; so - # that we won't pick up &quot;, etc.
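[Editor's note: for reference, the behavior being assembled here, as an illustrative session against the system tornado::

    from tornado import escape

    escape.xhtml_escape('a < b & "c"')
    # -> 'a &lt; b &amp; &quot;c&quot;'

    escape.linkify(u"docs at http://www.tornadoweb.org")
    # -> u'docs at <a href="http://www.tornadoweb.org">http://www.tornadoweb.org</a>'

]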
- text = _unicode(xhtml_escape(text)) - return _URL_RE.sub(make_link, text) - - -def _convert_entity(m): - if m.group(1) == "#": - try: - return unichr(int(m.group(2))) - except ValueError: - return "&#%s;" % m.group(2) - try: - return _HTML_UNICODE_MAP[m.group(2)] - except KeyError: - return "&%s;" % m.group(2) - - -def _build_unicode_map(): - unicode_map = {} - for name, value in htmlentitydefs.name2codepoint.items(): - unicode_map[name] = unichr(value) - return unicode_map - -_HTML_UNICODE_MAP = _build_unicode_map() diff --git a/rosbridge_server/src/tornado/gen.py b/rosbridge_server/src/tornado/gen.py deleted file mode 100644 index 4bb82d422..000000000 --- a/rosbridge_server/src/tornado/gen.py +++ /dev/null @@ -1,751 +0,0 @@ -"""``tornado.gen`` is a generator-based interface to make it easier to -work in an asynchronous environment. Code using the ``gen`` module -is technically asynchronous, but it is written as a single generator -instead of a collection of separate functions. - -For example, the following asynchronous handler:: - - class AsyncHandler(RequestHandler): - @asynchronous - def get(self): - http_client = AsyncHTTPClient() - http_client.fetch("http://example.com", - callback=self.on_fetch) - - def on_fetch(self, response): - do_something_with_response(response) - self.render("template.html") - -could be written with ``gen`` as:: - - class GenAsyncHandler(RequestHandler): - @gen.coroutine - def get(self): - http_client = AsyncHTTPClient() - response = yield http_client.fetch("http://example.com") - do_something_with_response(response) - self.render("template.html") - -Most asynchronous functions in Tornado return a `.Future`; -yielding this object returns its `~.Future.result`. - -You can also yield a list or dict of ``Futures``, which will be -started at the same time and run in parallel; a list or dict of results will -be returned when they are all finished:: - - @gen.coroutine - def get(self): - http_client = AsyncHTTPClient() - response1, response2 = yield [http_client.fetch(url1), - http_client.fetch(url2)] - response_dict = yield dict(response3=http_client.fetch(url3), - response4=http_client.fetch(url4)) - response3 = response_dict['response3'] - response4 = response_dict['response4'] - -.. versionchanged:: 3.2 - Dict support added. -""" -from __future__ import absolute_import, division, print_function, with_statement - -import collections -import functools -import itertools -import sys -import types - -from tornado.concurrent import Future, TracebackFuture, is_future, chain_future -from tornado.ioloop import IOLoop -from tornado import stack_context - - -class KeyReuseError(Exception): - pass - - -class UnknownKeyError(Exception): - pass - - -class LeakedCallbackError(Exception): - pass - - -class BadYieldError(Exception): - pass - - -class ReturnValueIgnoredError(Exception): - pass - - -class TimeoutError(Exception): - """Exception raised by ``with_timeout``.""" - - -def engine(func): - """Callback-oriented decorator for asynchronous generators. - - This is an older interface; for new code that does not need to be - compatible with versions of Tornado older than 3.0 the - `coroutine` decorator is recommended instead. - - This decorator is similar to `coroutine`, except it does not - return a `.Future` and the ``callback`` argument is not treated - specially. - - In most cases, functions decorated with `engine` should take - a ``callback`` argument and invoke it with their result when - they are finished. 
One notable exception is the - `~tornado.web.RequestHandler` :ref:`HTTP verb methods <verbs>`, - which use ``self.finish()`` in place of a callback argument. - """ - func = _make_coroutine_wrapper(func, replace_callback=False) - @functools.wraps(func) - def wrapper(*args, **kwargs): - future = func(*args, **kwargs) - def final_callback(future): - if future.result() is not None: - raise ReturnValueIgnoredError( - "@gen.engine functions cannot return values: %r" % - (future.result(),)) - future.add_done_callback(final_callback) - return wrapper - - -def coroutine(func, replace_callback=True): - """Decorator for asynchronous generators. - - Any generator that yields objects from this module must be wrapped - in either this decorator or `engine`. - - Coroutines may "return" by raising the special exception - `Return(value) <Return>`. In Python 3.3+, it is also possible for - the function to simply use the ``return value`` statement (prior to - Python 3.3 generators were not allowed to also return values). - In all versions of Python a coroutine that simply wishes to exit - early may use the ``return`` statement without a value. - - Functions with this decorator return a `.Future`. Additionally, - they may be called with a ``callback`` keyword argument, which - will be invoked with the future's result when it resolves. If the - coroutine fails, the callback will not be run and an exception - will be raised into the surrounding `.StackContext`. The - ``callback`` argument is not visible inside the decorated - function; it is handled by the decorator itself. - - From the caller's perspective, ``@gen.coroutine`` is similar to - the combination of ``@return_future`` and ``@gen.engine``. - """ - return _make_coroutine_wrapper(func, replace_callback=True) - - -def _make_coroutine_wrapper(func, replace_callback): - """The inner workings of ``@gen.coroutine`` and ``@gen.engine``. - - The two decorators differ in their treatment of the ``callback`` - argument, so we cannot simply implement ``@engine`` in terms of - ``@coroutine``. - """ - @functools.wraps(func) - def wrapper(*args, **kwargs): - future = TracebackFuture() - - if replace_callback and 'callback' in kwargs: - callback = kwargs.pop('callback') - IOLoop.current().add_future( - future, lambda future: callback(future.result())) - - try: - result = func(*args, **kwargs) - except (Return, StopIteration) as e: - result = getattr(e, 'value', None) - except Exception: - future.set_exc_info(sys.exc_info()) - return future - else: - if isinstance(result, types.GeneratorType): - # Inline the first iteration of Runner.run. This lets us - # avoid the cost of creating a Runner when the coroutine - # never actually yields, which in turn allows us to - # use "optional" coroutines in critical path code without - # performance penalty for the synchronous case. - try: - orig_stack_contexts = stack_context._state.contexts - yielded = next(result) - if stack_context._state.contexts is not orig_stack_contexts: - yielded = TracebackFuture() - yielded.set_exception( - stack_context.StackContextInconsistentError( - 'stack_context inconsistency (probably caused ' - 'by yield within a "with StackContext" block)')) - except (StopIteration, Return) as e: - future.set_result(getattr(e, 'value', None)) - except Exception: - future.set_exc_info(sys.exc_info()) - else: - Runner(result, future, yielded) - try: - return future - finally: - # Subtle memory optimization: if next() raised an exception, - # the future's exc_info contains a traceback which - # includes this stack frame.
This creates a cycle, - # which will be collected at the next full GC but has - # been shown to greatly increase memory usage of - # benchmarks (relative to the refcount-based scheme - # used in the absence of cycles). We can avoid the - # cycle by clearing the local variable after we return it. - future = None - future.set_result(result) - return future - return wrapper - - -class Return(Exception): - """Special exception to return a value from a `coroutine`. - - If this exception is raised, its value argument is used as the - result of the coroutine:: - - @gen.coroutine - def fetch_json(url): - response = yield AsyncHTTPClient().fetch(url) - raise gen.Return(json_decode(response.body)) - - In Python 3.3, this exception is no longer necessary: the ``return`` - statement can be used directly to return a value (previously - ``yield`` and ``return`` with a value could not be combined in the - same function). - - By analogy with the return statement, the value argument is optional, - but it is never necessary to ``raise gen.Return()``. The ``return`` - statement can be used with no arguments instead. - """ - def __init__(self, value=None): - super(Return, self).__init__() - self.value = value - - -class YieldPoint(object): - """Base class for objects that may be yielded from the generator. - - .. deprecated:: 4.0 - Use `Futures <.Future>` instead. - """ - def start(self, runner): - """Called by the runner after the generator has yielded. - - No other methods will be called on this object before ``start``. - """ - raise NotImplementedError() - - def is_ready(self): - """Called by the runner to determine whether to resume the generator. - - Returns a boolean; may be called more than once. - """ - raise NotImplementedError() - - def get_result(self): - """Returns the value to use as the result of the yield expression. - - This method will only be called once, and only after `is_ready` - has returned true. - """ - raise NotImplementedError() - - -class Callback(YieldPoint): - """Returns a callable object that will allow a matching `Wait` to proceed. - - The key may be any value suitable for use as a dictionary key, and is - used to match ``Callbacks`` to their corresponding ``Waits``. The key - must be unique among outstanding callbacks within a single run of the - generator function, but may be reused across different runs of the same - function (so constants generally work fine). - - The callback may be called with zero or one arguments; if an argument - is given it will be returned by `Wait`. - - .. deprecated:: 4.0 - Use `Futures <.Future>` instead. - """ - def __init__(self, key): - self.key = key - - def start(self, runner): - self.runner = runner - runner.register_callback(self.key) - - def is_ready(self): - return True - - def get_result(self): - return self.runner.result_callback(self.key) - - -class Wait(YieldPoint): - """Returns the argument passed to the result of a previous `Callback`. - - .. deprecated:: 4.0 - Use `Futures <.Future>` instead. - """ - def __init__(self, key): - self.key = key - - def start(self, runner): - self.runner = runner - - def is_ready(self): - return self.runner.is_ready(self.key) - - def get_result(self): - return self.runner.pop_result(self.key) - - -class WaitAll(YieldPoint): - """Returns the results of multiple previous `Callbacks `. - - The argument is a sequence of `Callback` keys, and the result is - a list of results in the same order. - - `WaitAll` is equivalent to yielding a list of `Wait` objects. - - .. 
deprecated:: 4.0 - Use `Futures <.Future>` instead. - """ - def __init__(self, keys): - self.keys = keys - - def start(self, runner): - self.runner = runner - - def is_ready(self): - return all(self.runner.is_ready(key) for key in self.keys) - - def get_result(self): - return [self.runner.pop_result(key) for key in self.keys] - - -def Task(func, *args, **kwargs): - """Adapts a callback-based asynchronous function for use in coroutines. - - Takes a function (and optional additional arguments) and runs it with - those arguments plus a ``callback`` keyword argument. The argument passed - to the callback is returned as the result of the yield expression. - - .. versionchanged:: 4.0 - ``gen.Task`` is now a function that returns a `.Future`, instead of - a subclass of `YieldPoint`. It still behaves the same way when - yielded. - """ - future = Future() - def handle_exception(typ, value, tb): - if future.done(): - return False - future.set_exc_info((typ, value, tb)) - return True - def set_result(result): - if future.done(): - return - future.set_result(result) - with stack_context.ExceptionStackContext(handle_exception): - func(*args, callback=_argument_adapter(set_result), **kwargs) - return future - - -class YieldFuture(YieldPoint): - def __init__(self, future, io_loop=None): - self.future = future - self.io_loop = io_loop or IOLoop.current() - - def start(self, runner): - if not self.future.done(): - self.runner = runner - self.key = object() - runner.register_callback(self.key) - self.io_loop.add_future(self.future, runner.result_callback(self.key)) - else: - self.runner = None - self.result = self.future.result() - - def is_ready(self): - if self.runner is not None: - return self.runner.is_ready(self.key) - else: - return True - - def get_result(self): - if self.runner is not None: - return self.runner.pop_result(self.key).result() - else: - return self.result - - -class Multi(YieldPoint): - """Runs multiple asynchronous operations in parallel. - - Takes a list of ``YieldPoints`` or ``Futures`` and returns a list of - their responses. It is not necessary to call `Multi` explicitly, - since the engine will do so automatically when the generator yields - a list of ``YieldPoints`` or a mixture of ``YieldPoints`` and ``Futures``. - - Instead of a list, the argument may also be a dictionary whose values are - Futures, in which case a parallel dictionary is returned mapping the same - keys to their results. - """ - def __init__(self, children): - self.keys = None - if isinstance(children, dict): - self.keys = list(children.keys()) - children = children.values() - self.children = [] - for i in children: - if is_future(i): - i = YieldFuture(i) - self.children.append(i) - assert all(isinstance(i, YieldPoint) for i in self.children) - self.unfinished_children = set(self.children) - - def start(self, runner): - for i in self.children: - i.start(runner) - - def is_ready(self): - finished = list(itertools.takewhile( - lambda i: i.is_ready(), self.unfinished_children)) - self.unfinished_children.difference_update(finished) - return not self.unfinished_children - - def get_result(self): - result = (i.get_result() for i in self.children) - if self.keys is not None: - return dict(zip(self.keys, result)) - else: - return list(result) - - -def multi_future(children): - """Wait for multiple asynchronous futures in parallel. - - Takes a list of ``Futures`` (but *not* other ``YieldPoints``) and returns - a new Future that resolves when all the other Futures are done. 
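A hedged sketch of the ``gen.Task`` adapter documented above; ``slow_add`` is a made-up callback-style function, present only for illustration::

    from tornado import gen, ioloop

    def slow_add(a, b, callback):
        # Callback-style API: delivers its result via the callback argument.
        ioloop.IOLoop.current().call_later(0.01, lambda: callback(a + b))

    @gen.coroutine
    def use_task():
        # gen.Task injects the ``callback`` argument and yields its value.
        total = yield gen.Task(slow_add, 1, 2)
        raise gen.Return(total)

    print(ioloop.IOLoop.current().run_sync(use_task))  # prints 3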
- If all the ``Futures`` succeeded, the returned Future's result is a list - of their results. If any failed, the returned Future raises the exception - of the first one to fail. - - Instead of a list, the argument may also be a dictionary whose values are - Futures, in which case a parallel dictionary is returned mapping the same - keys to their results. - - It is not necessary to call `multi_future` explicitly, since the engine will - do so automatically when the generator yields a list of `Futures`. - This function is faster than the `Multi` `YieldPoint` because it does not - require the creation of a stack context. - - .. versionadded:: 4.0 - """ - if isinstance(children, dict): - keys = list(children.keys()) - children = children.values() - else: - keys = None - assert all(is_future(i) for i in children) - unfinished_children = set(children) - - future = Future() - if not children: - future.set_result({} if keys is not None else []) - def callback(f): - unfinished_children.remove(f) - if not unfinished_children: - try: - result_list = [i.result() for i in children] - except Exception: - future.set_exc_info(sys.exc_info()) - else: - if keys is not None: - future.set_result(dict(zip(keys, result_list))) - else: - future.set_result(result_list) - for f in children: - f.add_done_callback(callback) - return future - - -def maybe_future(x): - """Converts ``x`` into a `.Future`. - - If ``x`` is already a `.Future`, it is simply returned; otherwise - it is wrapped in a new `.Future`. This is suitable for use as - ``result = yield gen.maybe_future(f())`` when you don't know whether - ``f()`` returns a `.Future` or not. - """ - if is_future(x): - return x - else: - fut = Future() - fut.set_result(x) - return fut - - -def with_timeout(timeout, future, io_loop=None): - """Wraps a `.Future` in a timeout. - - Raises `TimeoutError` if the input future does not complete before - ``timeout``, which may be specified in any form allowed by - `.IOLoop.add_timeout` (i.e. a `datetime.timedelta` or an absolute time - relative to `.IOLoop.time`) - - Currently only supports Futures, not other `YieldPoint` classes. - - .. versionadded:: 4.0 - """ - # TODO: allow yield points in addition to futures? - # Tricky to do with stack_context semantics. - # - # It's tempting to optimize this by cancelling the input future on timeout - # instead of creating a new one, but A) we can't know if we are the only - # one waiting on the input future, so cancelling it might disrupt other - # callers and B) concurrent futures can only be cancelled while they are - # in the queue, so cancellation cannot reliably bound our waiting time. - result = Future() - chain_future(future, result) - if io_loop is None: - io_loop = IOLoop.current() - timeout_handle = io_loop.add_timeout( - timeout, - lambda: result.set_exception(TimeoutError("Timeout"))) - if isinstance(future, Future): - # We know this future will resolve on the IOLoop, so we don't - # need the extra thread-safety of IOLoop.add_future (and we also - # don't care about StackContext here). - future.add_done_callback( - lambda future: io_loop.remove_timeout(timeout_handle)) - else: - # concurrent.futures.Futures may resolve on any thread, so we - # need to route them back to the IOLoop. - io_loop.add_future( - future, lambda future: io_loop.remove_timeout(timeout_handle)) - return result - - -_null_future = Future() -_null_future.set_result(None) - -moment = Future() -moment.__doc__ = \ - """A special object which may be yielded to allow the IOLoop to run for -one iteration.
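A minimal sketch of ``with_timeout`` as documented above, assuming a stock Tornado; the unresolved future exists only to force the timeout path::

    from datetime import timedelta
    from tornado import gen, ioloop
    from tornado.concurrent import Future

    @gen.coroutine
    def guarded():
        never = Future()  # never resolves, so the timeout always fires
        try:
            yield gen.with_timeout(timedelta(seconds=0.1), never)
            raise gen.Return("completed")
        except gen.TimeoutError:
            raise gen.Return("timed out")

    print(ioloop.IOLoop.current().run_sync(guarded))  # prints "timed out"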
- -This is not needed in normal use but it can be helpful in long-running -coroutines that are likely to yield Futures that are ready instantly. - -Usage: ``yield gen.moment`` - -.. versionadded:: 4.0 -""" -moment.set_result(None) - - -class Runner(object): - """Internal implementation of `tornado.gen.engine`. - - Maintains information about pending callbacks and their results. - - The results of the generator are stored in ``result_future`` (a - `.TracebackFuture`) - """ - def __init__(self, gen, result_future, first_yielded): - self.gen = gen - self.result_future = result_future - self.future = _null_future - self.yield_point = None - self.pending_callbacks = None - self.results = None - self.running = False - self.finished = False - self.had_exception = False - self.io_loop = IOLoop.current() - # For efficiency, we do not create a stack context until we - # reach a YieldPoint (stack contexts are required for the historical - # semantics of YieldPoints, but not for Futures). When we have - # done so, this field will be set and must be called at the end - # of the coroutine. - self.stack_context_deactivate = None - if self.handle_yield(first_yielded): - self.run() - - def register_callback(self, key): - """Adds ``key`` to the list of callbacks.""" - if self.pending_callbacks is None: - # Lazily initialize the old-style YieldPoint data structures. - self.pending_callbacks = set() - self.results = {} - if key in self.pending_callbacks: - raise KeyReuseError("key %r is already pending" % (key,)) - self.pending_callbacks.add(key) - - def is_ready(self, key): - """Returns true if a result is available for ``key``.""" - if self.pending_callbacks is None or key not in self.pending_callbacks: - raise UnknownKeyError("key %r is not pending" % (key,)) - return key in self.results - - def set_result(self, key, result): - """Sets the result for ``key`` and attempts to resume the generator.""" - self.results[key] = result - if self.yield_point is not None and self.yield_point.is_ready(): - try: - self.future.set_result(self.yield_point.get_result()) - except: - self.future.set_exc_info(sys.exc_info()) - self.yield_point = None - self.run() - - def pop_result(self, key): - """Returns the result for ``key`` and unregisters it.""" - self.pending_callbacks.remove(key) - return self.results.pop(key) - - def run(self): - """Starts or resumes the generator, running until it reaches a - yield point that is not ready. - """ - if self.running or self.finished: - return - try: - self.running = True - while True: - future = self.future - if not future.done(): - return - self.future = None - try: - orig_stack_contexts = stack_context._state.contexts - try: - value = future.result() - except Exception: - self.had_exception = True - yielded = self.gen.throw(*sys.exc_info()) - else: - yielded = self.gen.send(value) - if stack_context._state.contexts is not orig_stack_contexts: - self.gen.throw( - stack_context.StackContextInconsistentError( - 'stack_context inconsistency (probably caused ' - 'by yield within a "with StackContext" block)')) - except (StopIteration, Return) as e: - self.finished = True - self.future = _null_future - if self.pending_callbacks and not self.had_exception: - # If we ran cleanly without waiting on all callbacks - # raise an error (really more of a warning). If we - # had an exception then some callbacks may have been - # orphaned, so skip the check in that case. 
- raise LeakedCallbackError( - "finished without waiting for callbacks %r" % - self.pending_callbacks) - self.result_future.set_result(getattr(e, 'value', None)) - self.result_future = None - self._deactivate_stack_context() - return - except Exception: - self.finished = True - self.future = _null_future - self.result_future.set_exc_info(sys.exc_info()) - self.result_future = None - self._deactivate_stack_context() - return - if not self.handle_yield(yielded): - return - finally: - self.running = False - - def handle_yield(self, yielded): - if isinstance(yielded, list): - if all(is_future(f) for f in yielded): - yielded = multi_future(yielded) - else: - yielded = Multi(yielded) - elif isinstance(yielded, dict): - if all(is_future(f) for f in yielded.values()): - yielded = multi_future(yielded) - else: - yielded = Multi(yielded) - - if isinstance(yielded, YieldPoint): - self.future = TracebackFuture() - def start_yield_point(): - try: - yielded.start(self) - if yielded.is_ready(): - self.future.set_result( - yielded.get_result()) - else: - self.yield_point = yielded - except Exception: - self.future = TracebackFuture() - self.future.set_exc_info(sys.exc_info()) - if self.stack_context_deactivate is None: - # Start a stack context if this is the first - # YieldPoint we've seen. - with stack_context.ExceptionStackContext( - self.handle_exception) as deactivate: - self.stack_context_deactivate = deactivate - def cb(): - start_yield_point() - self.run() - self.io_loop.add_callback(cb) - return False - else: - start_yield_point() - elif is_future(yielded): - self.future = yielded - if not self.future.done() or self.future is moment: - self.io_loop.add_future( - self.future, lambda f: self.run()) - return False - else: - self.future = TracebackFuture() - self.future.set_exception(BadYieldError( - "yielded unknown object %r" % (yielded,))) - return True - - def result_callback(self, key): - return stack_context.wrap(_argument_adapter( - functools.partial(self.set_result, key))) - - def handle_exception(self, typ, value, tb): - if not self.running and not self.finished: - self.future = TracebackFuture() - self.future.set_exc_info((typ, value, tb)) - self.run() - return True - else: - return False - - def _deactivate_stack_context(self): - if self.stack_context_deactivate is not None: - self.stack_context_deactivate() - self.stack_context_deactivate = None - -Arguments = collections.namedtuple('Arguments', ['args', 'kwargs']) - - -def _argument_adapter(callback): - """Returns a function that when invoked runs ``callback`` with one arg. - - If the function returned by this function is called with exactly - one argument, that argument is passed to ``callback``. Otherwise - the args tuple and kwargs dict are wrapped in an `Arguments` object. - """ - def wrapper(*args, **kwargs): - if kwargs or len(args) > 1: - callback(Arguments(args, kwargs)) - elif args: - callback(args[0]) - else: - callback(None) - return wrapper diff --git a/rosbridge_server/src/tornado/http1connection.py b/rosbridge_server/src/tornado/http1connection.py deleted file mode 100644 index 1ac24f520..000000000 --- a/rosbridge_server/src/tornado/http1connection.py +++ /dev/null @@ -1,690 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2014 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Client and server implementations of HTTP/1.x. - -.. versionadded:: 4.0 -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import re - -from tornado.concurrent import Future -from tornado.escape import native_str, utf8 -from tornado import gen -from tornado import httputil -from tornado import iostream -from tornado.log import gen_log, app_log -from tornado import stack_context -from tornado.util import GzipDecompressor - - -class _QuietException(Exception): - def __init__(self): - pass - -class _ExceptionLoggingContext(object): - """Used with the ``with`` statement when calling delegate methods to - log any exceptions with the given logger. Any exceptions caught are - converted to _QuietException - """ - def __init__(self, logger): - self.logger = logger - - def __enter__(self): - pass - - def __exit__(self, typ, value, tb): - if value is not None: - self.logger.error("Uncaught exception", exc_info=(typ, value, tb)) - raise _QuietException - -class HTTP1ConnectionParameters(object): - """Parameters for `.HTTP1Connection` and `.HTTP1ServerConnection`. - """ - def __init__(self, no_keep_alive=False, chunk_size=None, - max_header_size=None, header_timeout=None, max_body_size=None, - body_timeout=None, decompress=False): - """ - :arg bool no_keep_alive: If true, always close the connection after - one request. - :arg int chunk_size: how much data to read into memory at once - :arg int max_header_size: maximum amount of data for HTTP headers - :arg float header_timeout: how long to wait for all headers (seconds) - :arg int max_body_size: maximum amount of data for body - :arg float body_timeout: how long to wait while reading body (seconds) - :arg bool decompress: if true, decode incoming - ``Content-Encoding: gzip`` - """ - self.no_keep_alive = no_keep_alive - self.chunk_size = chunk_size or 65536 - self.max_header_size = max_header_size or 65536 - self.header_timeout = header_timeout - self.max_body_size = max_body_size - self.body_timeout = body_timeout - self.decompress = decompress - - -class HTTP1Connection(httputil.HTTPConnection): - """Implements the HTTP/1.x protocol. - - This class can be on its own for clients, or via `HTTP1ServerConnection` - for servers. - """ - def __init__(self, stream, is_client, params=None, context=None): - """ - :arg stream: an `.IOStream` - :arg bool is_client: client or server - :arg params: a `.HTTP1ConnectionParameters` instance or ``None`` - :arg context: an opaque application-defined object that can be accessed - as ``connection.context``. - """ - self.is_client = is_client - self.stream = stream - if params is None: - params = HTTP1ConnectionParameters() - self.params = params - self.context = context - self.no_keep_alive = params.no_keep_alive - # The body limits can be altered by the delegate, so save them - # here instead of just referencing self.params later. - self._max_body_size = (self.params.max_body_size or - self.stream.max_buffer_size) - self._body_timeout = self.params.body_timeout - # _write_finished is set to True when finish() has been called, - # i.e. 
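A hedged example of constructing the parameter object documented above; the values are illustrative, not recommendations::

    from tornado.http1connection import HTTP1ConnectionParameters

    params = HTTP1ConnectionParameters(
        no_keep_alive=True,        # close the connection after one request
        chunk_size=16 * 1024,      # read 16 KiB of body data at a time
        max_header_size=8 * 1024,  # reject header blocks larger than 8 KiB
        decompress=True,           # transparently decode gzip-encoded bodies
    )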
there will be no more data sent. Data may still be in the - # stream's write buffer. - self._write_finished = False - # True when we have read the entire incoming body. - self._read_finished = False - # _finish_future resolves when all data has been written and flushed - # to the IOStream. - self._finish_future = Future() - # If true, the connection should be closed after this request - # (after the response has been written in the server side, - # and after it has been read in the client) - self._disconnect_on_finish = False - self._clear_callbacks() - # Save the start lines after we read or write them; they - # affect later processing (e.g. 304 responses and HEAD methods - # have content-length but no bodies) - self._request_start_line = None - self._response_start_line = None - self._request_headers = None - # True if we are writing output with chunked encoding. - self._chunking_output = None - # While reading a body with a content-length, this is the - # amount left to read. - self._expected_content_remaining = None - # A Future for our outgoing writes, returned by IOStream.write. - self._pending_write = None - - def read_response(self, delegate): - """Read a single HTTP response. - - Typical client-mode usage is to write a request using `write_headers`, - `write`, and `finish`, and then call ``read_response``. - - :arg delegate: a `.HTTPMessageDelegate` - - Returns a `.Future` that resolves to None after the full response has - been read. - """ - if self.params.decompress: - delegate = _GzipMessageDelegate(delegate, self.params.chunk_size) - return self._read_message(delegate) - - @gen.coroutine - def _read_message(self, delegate): - need_delegate_close = False - try: - header_future = self.stream.read_until_regex( - b"\r?\n\r?\n", - max_bytes=self.params.max_header_size) - if self.params.header_timeout is None: - header_data = yield header_future - else: - try: - header_data = yield gen.with_timeout( - self.stream.io_loop.time() + self.params.header_timeout, - header_future, - io_loop=self.stream.io_loop) - except gen.TimeoutError: - self.close() - raise gen.Return(False) - start_line, headers = self._parse_headers(header_data) - if self.is_client: - start_line = httputil.parse_response_start_line(start_line) - self._response_start_line = start_line - else: - start_line = httputil.parse_request_start_line(start_line) - self._request_start_line = start_line - self._request_headers = headers - - self._disconnect_on_finish = not self._can_keep_alive( - start_line, headers) - need_delegate_close = True - with _ExceptionLoggingContext(app_log): - header_future = delegate.headers_received(start_line, headers) - if header_future is not None: - yield header_future - if self.stream is None: - # We've been detached. - need_delegate_close = False - raise gen.Return(False) - skip_body = False - if self.is_client: - if (self._request_start_line is not None and - self._request_start_line.method == 'HEAD'): - skip_body = True - code = start_line.code - if code == 304: - # 304 responses may include the content-length header - # but do not actually have a body. - # http://tools.ietf.org/html/rfc7230#section-3.3 - skip_body = True - if code >= 100 and code < 200: - # 1xx responses should never indicate the presence of - # a body. - if ('Content-Length' in headers or - 'Transfer-Encoding' in headers): - raise httputil.HTTPInputError( - "Response code %d cannot have body" % code) - # TODO: client delegates will get headers_received twice - # in the case of a 100-continue. Document or change? 
- yield self._read_message(delegate) - else: - if (headers.get("Expect") == "100-continue" and - not self._write_finished): - self.stream.write(b"HTTP/1.1 100 (Continue)\r\n\r\n") - if not skip_body: - body_future = self._read_body( - start_line.code if self.is_client else 0, headers, delegate) - if body_future is not None: - if self._body_timeout is None: - yield body_future - else: - try: - yield gen.with_timeout( - self.stream.io_loop.time() + self._body_timeout, - body_future, self.stream.io_loop) - except gen.TimeoutError: - gen_log.info("Timeout reading body from %s", - self.context) - self.stream.close() - raise gen.Return(False) - self._read_finished = True - if not self._write_finished or self.is_client: - need_delegate_close = False - with _ExceptionLoggingContext(app_log): - delegate.finish() - # If we're waiting for the application to produce an asynchronous - # response, and we're not detached, register a close callback - # on the stream (we didn't need one while we were reading) - if (not self._finish_future.done() and - self.stream is not None and - not self.stream.closed()): - self.stream.set_close_callback(self._on_connection_close) - yield self._finish_future - if self.is_client and self._disconnect_on_finish: - self.close() - if self.stream is None: - raise gen.Return(False) - except httputil.HTTPInputError as e: - gen_log.info("Malformed HTTP message from %s: %s", - self.context, e) - self.close() - raise gen.Return(False) - finally: - if need_delegate_close: - with _ExceptionLoggingContext(app_log): - delegate.on_connection_close() - self._clear_callbacks() - raise gen.Return(True) - - def _clear_callbacks(self): - """Clears the callback attributes. - - This allows the request handler to be garbage collected more - quickly in CPython by breaking up reference cycles. - """ - self._write_callback = None - self._write_future = None - self._close_callback = None - if self.stream is not None: - self.stream.set_close_callback(None) - - def set_close_callback(self, callback): - """Sets a callback that will be run when the connection is closed. - - .. deprecated:: 4.0 - Use `.HTTPMessageDelegate.on_connection_close` instead. - """ - self._close_callback = stack_context.wrap(callback) - - def _on_connection_close(self): - # Note that this callback is only registered on the IOStream - # when we have finished reading the request and are waiting for - # the application to produce its response. - if self._close_callback is not None: - callback = self._close_callback - self._close_callback = None - callback() - if not self._finish_future.done(): - self._finish_future.set_result(None) - self._clear_callbacks() - - def close(self): - if self.stream is not None: - self.stream.close() - self._clear_callbacks() - if not self._finish_future.done(): - self._finish_future.set_result(None) - - def detach(self): - """Take control of the underlying stream. - - Returns the underlying `.IOStream` object and stops all further - HTTP processing. May only be called during - `.HTTPMessageDelegate.headers_received`. Intended for implementing - protocols like websockets that tunnel over an HTTP handshake. - """ - self._clear_callbacks() - stream = self.stream - self.stream = None - return stream - - def set_body_timeout(self, timeout): - """Sets the body timeout for a single request. - - Overrides the value from `.HTTP1ConnectionParameters`. - """ - self._body_timeout = timeout - - def set_max_body_size(self, max_body_size): - """Sets the body size limit for a single request. 
- - Overrides the value from `.HTTP1ConnectionParameters`. - """ - self._max_body_size = max_body_size - - def write_headers(self, start_line, headers, chunk=None, callback=None): - """Implements `.HTTPConnection.write_headers`.""" - if self.is_client: - self._request_start_line = start_line - # Client requests with a non-empty body must have either a - # Content-Length or a Transfer-Encoding. - self._chunking_output = ( - start_line.method in ('POST', 'PUT', 'PATCH') and - 'Content-Length' not in headers and - 'Transfer-Encoding' not in headers) - else: - self._response_start_line = start_line - self._chunking_output = ( - # TODO: should this use - # self._request_start_line.version or - # start_line.version? - self._request_start_line.version == 'HTTP/1.1' and - # 304 responses have no body (not even a zero-length body), and so - # should not have either Content-Length or Transfer-Encoding - # headers. - start_line.code != 304 and - # No need to chunk the output if a Content-Length is specified. - 'Content-Length' not in headers and - # Applications are discouraged from touching Transfer-Encoding, - # but if they do, leave it alone. - 'Transfer-Encoding' not in headers) - # If a 1.0 client asked for keep-alive, add the header. - if (self._request_start_line.version == 'HTTP/1.0' and - (self._request_headers.get('Connection', '').lower() - == 'keep-alive')): - headers['Connection'] = 'Keep-Alive' - if self._chunking_output: - headers['Transfer-Encoding'] = 'chunked' - if (not self.is_client and - (self._request_start_line.method == 'HEAD' or - start_line.code == 304)): - self._expected_content_remaining = 0 - elif 'Content-Length' in headers: - self._expected_content_remaining = int(headers['Content-Length']) - else: - self._expected_content_remaining = None - lines = [utf8("%s %s %s" % start_line)] - lines.extend([utf8(n) + b": " + utf8(v) for n, v in headers.get_all()]) - for line in lines: - if b'\n' in line: - raise ValueError('Newline in header: ' + repr(line)) - future = None - if self.stream.closed(): - future = self._write_future = Future() - future.set_exception(iostream.StreamClosedError()) - else: - if callback is not None: - self._write_callback = stack_context.wrap(callback) - else: - future = self._write_future = Future() - data = b"\r\n".join(lines) + b"\r\n\r\n" - if chunk: - data += self._format_chunk(chunk) - self._pending_write = self.stream.write(data) - self._pending_write.add_done_callback(self._on_write_complete) - return future - - def _format_chunk(self, chunk): - if self._expected_content_remaining is not None: - self._expected_content_remaining -= len(chunk) - if self._expected_content_remaining < 0: - # Close the stream now to stop further framing errors. - self.stream.close() - raise httputil.HTTPOutputError( - "Tried to write more data than Content-Length") - if self._chunking_output and chunk: - # Don't write out empty chunks because that means END-OF-STREAM - # with chunked encoding - return utf8("%x" % len(chunk)) + b"\r\n" + chunk + b"\r\n" - else: - return chunk - - def write(self, chunk, callback=None): - """Implements `.HTTPConnection.write`. - - For backwards compatibility it is allowed but deprecated to - skip `write_headers` and instead call `write()` with a - pre-encoded header block.
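To make the chunked framing in ``_format_chunk`` above concrete, a standalone restatement of the rule (an illustration, not this class's code)::

    def format_chunk(chunk):
        # <hex length>\r\n<payload>\r\n, per the chunked transfer-coding
        return ("%x" % len(chunk)).encode("ascii") + b"\r\n" + chunk + b"\r\n"

    assert format_chunk(b"hello") == b"5\r\nhello\r\n"
    # finish() then terminates the stream with the zero-length chunk b"0\r\n\r\n".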
- """ - future = None - if self.stream.closed(): - future = self._write_future = Future() - self._write_future.set_exception(iostream.StreamClosedError()) - else: - if callback is not None: - self._write_callback = stack_context.wrap(callback) - else: - future = self._write_future = Future() - self._pending_write = self.stream.write(self._format_chunk(chunk)) - self._pending_write.add_done_callback(self._on_write_complete) - return future - - def finish(self): - """Implements `.HTTPConnection.finish`.""" - if (self._expected_content_remaining is not None and - self._expected_content_remaining != 0 and - not self.stream.closed()): - self.stream.close() - raise httputil.HTTPOutputError( - "Tried to write %d bytes less than Content-Length" % - self._expected_content_remaining) - if self._chunking_output: - if not self.stream.closed(): - self._pending_write = self.stream.write(b"0\r\n\r\n") - self._pending_write.add_done_callback(self._on_write_complete) - self._write_finished = True - # If the app finished the request while we're still reading, - # divert any remaining data away from the delegate and - # close the connection when we're done sending our response. - # Closing the connection is the only way to avoid reading the - # whole input body. - if not self._read_finished: - self._disconnect_on_finish = True - # No more data is coming, so instruct TCP to send any remaining - # data immediately instead of waiting for a full packet or ack. - self.stream.set_nodelay(True) - if self._pending_write is None: - self._finish_request(None) - else: - self._pending_write.add_done_callback(self._finish_request) - - def _on_write_complete(self, future): - if self._write_callback is not None: - callback = self._write_callback - self._write_callback = None - self.stream.io_loop.add_callback(callback) - if self._write_future is not None: - future = self._write_future - self._write_future = None - future.set_result(None) - - def _can_keep_alive(self, start_line, headers): - if self.params.no_keep_alive: - return False - connection_header = headers.get("Connection") - if connection_header is not None: - connection_header = connection_header.lower() - if start_line.version == "HTTP/1.1": - return connection_header != "close" - elif ("Content-Length" in headers - or start_line.method in ("HEAD", "GET")): - return connection_header == "keep-alive" - return False - - def _finish_request(self, future): - self._clear_callbacks() - if not self.is_client and self._disconnect_on_finish: - self.close() - return - # Turn Nagle's algorithm back on, leaving the stream in its - # default state for the next request. - self.stream.set_nodelay(False) - if not self._finish_future.done(): - self._finish_future.set_result(None) - - def _parse_headers(self, data): - data = native_str(data.decode('latin1')) - eol = data.find("\r\n") - start_line = data[:eol] - try: - headers = httputil.HTTPHeaders.parse(data[eol:]) - except ValueError: - # probably form split() if there was no ':' in the line - raise httputil.HTTPInputError("Malformed HTTP headers: %r" % - data[eol:100]) - return start_line, headers - - def _read_body(self, code, headers, delegate): - if "Content-Length" in headers: - if "," in headers["Content-Length"]: - # Proxies sometimes cause Content-Length headers to get - # duplicated. If all the values are identical then we can - # use them but if they differ it's an error. 
- pieces = re.split(r',\s*', headers["Content-Length"]) - if any(i != pieces[0] for i in pieces): - raise httputil.HTTPInputError( - "Multiple unequal Content-Lengths: %r" % - headers["Content-Length"]) - headers["Content-Length"] = pieces[0] - content_length = int(headers["Content-Length"]) - - if content_length > self._max_body_size: - raise httputil.HTTPInputError("Content-Length too long") - else: - content_length = None - - if code == 204: - # This response code is not allowed to have a non-empty body, - # and has an implicit length of zero instead of read-until-close. - # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3 - if ("Transfer-Encoding" in headers or - content_length not in (None, 0)): - raise httputil.HTTPInputError( - "Response with code %d should not have body" % code) - content_length = 0 - - if content_length is not None: - return self._read_fixed_body(content_length, delegate) - if headers.get("Transfer-Encoding") == "chunked": - return self._read_chunked_body(delegate) - if self.is_client: - return self._read_body_until_close(delegate) - return None - - @gen.coroutine - def _read_fixed_body(self, content_length, delegate): - while content_length > 0: - body = yield self.stream.read_bytes( - min(self.params.chunk_size, content_length), partial=True) - content_length -= len(body) - if not self._write_finished or self.is_client: - with _ExceptionLoggingContext(app_log): - yield gen.maybe_future(delegate.data_received(body)) - - @gen.coroutine - def _read_chunked_body(self, delegate): - # TODO: "chunk extensions" http://tools.ietf.org/html/rfc2616#section-3.6.1 - total_size = 0 - while True: - chunk_len = yield self.stream.read_until(b"\r\n", max_bytes=64) - chunk_len = int(chunk_len.strip(), 16) - if chunk_len == 0: - return - total_size += chunk_len - if total_size > self._max_body_size: - raise httputil.HTTPInputError("chunked body too large") - bytes_to_read = chunk_len - while bytes_to_read: - chunk = yield self.stream.read_bytes( - min(bytes_to_read, self.params.chunk_size), partial=True) - bytes_to_read -= len(chunk) - if not self._write_finished or self.is_client: - with _ExceptionLoggingContext(app_log): - yield gen.maybe_future(delegate.data_received(chunk)) - # chunk ends with \r\n - crlf = yield self.stream.read_bytes(2) - assert crlf == b"\r\n" - - @gen.coroutine - def _read_body_until_close(self, delegate): - body = yield self.stream.read_until_close() - if not self._write_finished or self.is_client: - with _ExceptionLoggingContext(app_log): - delegate.data_received(body) - - -class _GzipMessageDelegate(httputil.HTTPMessageDelegate): - """Wraps an `HTTPMessageDelegate` to decode ``Content-Encoding: gzip``. - """ - def __init__(self, delegate, chunk_size): - self._delegate = delegate - self._chunk_size = chunk_size - self._decompressor = None - - def headers_received(self, start_line, headers): - if headers.get("Content-Encoding") == "gzip": - self._decompressor = GzipDecompressor() - # Downstream delegates will only see uncompressed data, - # so rename the content-encoding header. - # (but note that curl_httpclient doesn't do this). 
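A sketch of the incremental decompression pattern this delegate uses, written against ``tornado.util.GzipDecompressor`` and Python 3's ``gzip.compress``::

    import gzip
    from tornado.util import GzipDecompressor

    payload = gzip.compress(b"x" * 1000)
    decompressor = GzipDecompressor()
    out, data = [], payload
    while data:
        # Cap each decompressed chunk, mirroring the delegate's chunk_size.
        out.append(decompressor.decompress(data, 64))
        data = decompressor.unconsumed_tail
    out.append(decompressor.flush())  # flush() also detects truncated input
    assert b"".join(out) == b"x" * 1000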
- headers.add("X-Consumed-Content-Encoding", - headers["Content-Encoding"]) - del headers["Content-Encoding"] - return self._delegate.headers_received(start_line, headers) - - @gen.coroutine - def data_received(self, chunk): - if self._decompressor: - compressed_data = chunk - while compressed_data: - decompressed = self._decompressor.decompress( - compressed_data, self._chunk_size) - if decompressed: - yield gen.maybe_future( - self._delegate.data_received(decompressed)) - compressed_data = self._decompressor.unconsumed_tail - else: - yield gen.maybe_future(self._delegate.data_received(chunk)) - - def finish(self): - if self._decompressor is not None: - tail = self._decompressor.flush() - if tail: - # I believe the tail will always be empty (i.e. - # decompress will return all it can). The purpose - # of the flush call is to detect errors such - # as truncated input. But in case it ever returns - # anything, treat it as an extra chunk - self._delegate.data_received(tail) - return self._delegate.finish() - - def on_connection_close(self): - return self._delegate.on_connection_close() - - -class HTTP1ServerConnection(object): - """An HTTP/1.x server.""" - def __init__(self, stream, params=None, context=None): - """ - :arg stream: an `.IOStream` - :arg params: a `.HTTP1ConnectionParameters` or None - :arg context: an opaque application-defined object that is accessible - as ``connection.context`` - """ - self.stream = stream - if params is None: - params = HTTP1ConnectionParameters() - self.params = params - self.context = context - self._serving_future = None - - @gen.coroutine - def close(self): - """Closes the connection. - - Returns a `.Future` that resolves after the serving loop has exited. - """ - self.stream.close() - # Block until the serving loop is done, but ignore any exceptions - # (start_serving is already responsible for logging them). - try: - yield self._serving_future - except Exception: - pass - - def start_serving(self, delegate): - """Starts serving requests on this connection. - - :arg delegate: a `.HTTPServerConnectionDelegate` - """ - assert isinstance(delegate, httputil.HTTPServerConnectionDelegate) - self._serving_future = self._server_request_loop(delegate) - # Register the future on the IOLoop so its errors get logged. - self.stream.io_loop.add_future(self._serving_future, - lambda f: f.result()) - - @gen.coroutine - def _server_request_loop(self, delegate): - try: - while True: - conn = HTTP1Connection(self.stream, False, - self.params, self.context) - request_delegate = delegate.start_request(self, conn) - try: - ret = yield conn.read_response(request_delegate) - except (iostream.StreamClosedError, - iostream.UnsatisfiableReadError): - return - except _QuietException: - # This exception was already logged. - conn.close() - return - except Exception: - gen_log.error("Uncaught exception", exc_info=True) - conn.close() - return - if not ret: - return - yield gen.moment - finally: - delegate.on_close(self) diff --git a/rosbridge_server/src/tornado/httpclient.py b/rosbridge_server/src/tornado/httpclient.py deleted file mode 100644 index c8ecf47c3..000000000 --- a/rosbridge_server/src/tornado/httpclient.py +++ /dev/null @@ -1,638 +0,0 @@ -"""Blocking and non-blocking HTTP client interfaces. - -This module defines a common interface shared by two implementations, -``simple_httpclient`` and ``curl_httpclient``. 
Applications may either -instantiate their chosen implementation class directly or use the -`AsyncHTTPClient` class from this module, which selects an implementation -that can be overridden with the `AsyncHTTPClient.configure` method. - -The default implementation is ``simple_httpclient``, and this is expected -to be suitable for most users' needs. However, some applications may wish -to switch to ``curl_httpclient`` for reasons such as the following: - -* ``curl_httpclient`` has some features not found in ``simple_httpclient``, - including support for HTTP proxies and the ability to use a specified - network interface. - -* ``curl_httpclient`` is more likely to be compatible with sites that are - not-quite-compliant with the HTTP spec, or sites that use little-exercised - features of HTTP. - -* ``curl_httpclient`` is faster. - -* ``curl_httpclient`` was the default prior to Tornado 2.0. - -Note that if you are using ``curl_httpclient``, it is highly -recommended that you use a recent version of ``libcurl`` and -``pycurl``. Currently the minimum supported version of libcurl is -7.21.1, and the minimum version of pycurl is 7.18.2. It is highly -recommended that your ``libcurl`` installation is built with -an asynchronous DNS resolver (threaded or c-ares), otherwise you may -encounter various problems with request timeouts (for more -information, see -http://curl.haxx.se/libcurl/c/curl_easy_setopt.html#CURLOPTCONNECTTIMEOUTMS -and comments in curl_httpclient.py). - -To select ``curl_httpclient``, call `AsyncHTTPClient.configure` at startup:: - - AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import functools -import time -import weakref - -from tornado.concurrent import TracebackFuture -from tornado.escape import utf8, native_str -from tornado import httputil, stack_context -from tornado.ioloop import IOLoop -from tornado.util import Configurable - - -class HTTPClient(object): - """A blocking HTTP client. - - This interface is provided for convenience and testing; most applications - that are running an IOLoop will want to use `AsyncHTTPClient` instead. - Typical usage looks like this:: - - http_client = httpclient.HTTPClient() - try: - response = http_client.fetch("http://www.google.com/") - print response.body - except httpclient.HTTPError as e: - print "Error:", e - http_client.close() - """ - def __init__(self, async_client_class=None, **kwargs): - self._io_loop = IOLoop() - if async_client_class is None: - async_client_class = AsyncHTTPClient - self._async_client = async_client_class(self._io_loop, **kwargs) - self._closed = False - - def __del__(self): - self.close() - - def close(self): - """Closes the HTTPClient, freeing any resources used.""" - if not self._closed: - self._async_client.close() - self._io_loop.close() - self._closed = True - - def fetch(self, request, **kwargs): - """Executes a request, returning an `HTTPResponse`. - - The request may be either a string URL or an `HTTPRequest` object. - If it is a string, we construct an `HTTPRequest` using any additional - kwargs: ``HTTPRequest(request, **kwargs)`` - - If an error occurs during the fetch, we raise an `HTTPError`. - """ - response = self._io_loop.run_sync(functools.partial( - self._async_client.fetch, request, **kwargs)) - response.rethrow() - return response - - -class AsyncHTTPClient(Configurable): - """A non-blocking HTTP client.
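The blocking-client usage shown in the docstring above is Python 2; the same flow in Python 3 syntax, as a sketch::

    from tornado import httpclient

    http_client = httpclient.HTTPClient()
    try:
        response = http_client.fetch("http://www.google.com/")
        print(response.body)
    except httpclient.HTTPError as e:
        print("Error:", e)
    finally:
        http_client.close()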
- - Example usage:: - - def handle_request(response): - if response.error: - print "Error:", response.error - else: - print response.body - - http_client = AsyncHTTPClient() - http_client.fetch("http://www.google.com/", handle_request) - - The constructor for this class is magic in several respects: It - actually creates an instance of an implementation-specific - subclass, and instances are reused as a kind of pseudo-singleton - (one per `.IOLoop`). The keyword argument ``force_instance=True`` - can be used to suppress this singleton behavior. Unless - ``force_instance=True`` is used, no arguments other than - ``io_loop`` should be passed to the `AsyncHTTPClient` constructor. - The implementation subclass as well as arguments to its - constructor can be set with the static method `configure()` - - All `AsyncHTTPClient` implementations support a ``defaults`` - keyword argument, which can be used to set default values for - `HTTPRequest` attributes. For example:: - - AsyncHTTPClient.configure( - None, defaults=dict(user_agent="MyUserAgent")) - # or with force_instance: - client = AsyncHTTPClient(force_instance=True, - defaults=dict(user_agent="MyUserAgent")) - """ - @classmethod - def configurable_base(cls): - return AsyncHTTPClient - - @classmethod - def configurable_default(cls): - from tornado.simple_httpclient import SimpleAsyncHTTPClient - return SimpleAsyncHTTPClient - - @classmethod - def _async_clients(cls): - attr_name = '_async_client_dict_' + cls.__name__ - if not hasattr(cls, attr_name): - setattr(cls, attr_name, weakref.WeakKeyDictionary()) - return getattr(cls, attr_name) - - def __new__(cls, io_loop=None, force_instance=False, **kwargs): - io_loop = io_loop or IOLoop.current() - if force_instance: - instance_cache = None - else: - instance_cache = cls._async_clients() - if instance_cache is not None and io_loop in instance_cache: - return instance_cache[io_loop] - instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop, - **kwargs) - # Make sure the instance knows which cache to remove itself from. - # It can't simply call _async_clients() because we may be in - # __new__(AsyncHTTPClient) but instance.__class__ may be - # SimpleAsyncHTTPClient. - instance._instance_cache = instance_cache - if instance_cache is not None: - instance_cache[instance.io_loop] = instance - return instance - - def initialize(self, io_loop, defaults=None): - self.io_loop = io_loop - self.defaults = dict(HTTPRequest._DEFAULTS) - if defaults is not None: - self.defaults.update(defaults) - self._closed = False - - def close(self): - """Destroys this HTTP client, freeing any file descriptors used. - - This method is **not needed in normal use** due to the way - that `AsyncHTTPClient` objects are transparently reused. - ``close()`` is generally only necessary when either the - `.IOLoop` is also being closed, or the ``force_instance=True`` - argument was used when creating the `AsyncHTTPClient`. - - No other methods may be called on the `AsyncHTTPClient` after - ``close()``. - - """ - if self._closed: - return - self._closed = True - if self._instance_cache is not None: - if self._instance_cache.get(self.io_loop) is not self: - raise RuntimeError("inconsistent AsyncHTTPClient cache") - del self._instance_cache[self.io_loop] - - def fetch(self, request, callback=None, **kwargs): - """Executes a request, asynchronously returning an `HTTPResponse`. - - The request may be either a string URL or an `HTTPRequest` object. 
- If it is a string, we construct an `HTTPRequest` using any additional - kwargs: ``HTTPRequest(request, **kwargs)`` - - This method returns a `.Future` whose result is an - `HTTPResponse`. The ``Future`` will raise an `HTTPError` if - the request returned a non-200 response code. - - If a ``callback`` is given, it will be invoked with the `HTTPResponse`. - In the callback interface, `HTTPError` is not automatically raised. - Instead, you must check the response's ``error`` attribute or - call its `~HTTPResponse.rethrow` method. - """ - if self._closed: - raise RuntimeError("fetch() called on closed AsyncHTTPClient") - if not isinstance(request, HTTPRequest): - request = HTTPRequest(url=request, **kwargs) - # We may modify this (to add Host, Accept-Encoding, etc), - # so make sure we don't modify the caller's object. This is also - # where normal dicts get converted to HTTPHeaders objects. - request.headers = httputil.HTTPHeaders(request.headers) - request = _RequestProxy(request, self.defaults) - future = TracebackFuture() - if callback is not None: - callback = stack_context.wrap(callback) - - def handle_future(future): - exc = future.exception() - if isinstance(exc, HTTPError) and exc.response is not None: - response = exc.response - elif exc is not None: - response = HTTPResponse( - request, 599, error=exc, - request_time=time.time() - request.start_time) - else: - response = future.result() - self.io_loop.add_callback(callback, response) - future.add_done_callback(handle_future) - - def handle_response(response): - if response.error: - future.set_exception(response.error) - else: - future.set_result(response) - self.fetch_impl(request, handle_response) - return future - - def fetch_impl(self, request, callback): - raise NotImplementedError() - - @classmethod - def configure(cls, impl, **kwargs): - """Configures the `AsyncHTTPClient` subclass to use. - - ``AsyncHTTPClient()`` actually creates an instance of a subclass. - This method may be called with either a class object or the - fully-qualified name of such a class (or ``None`` to use the default, - ``SimpleAsyncHTTPClient``) - - If additional keyword arguments are given, they will be passed - to the constructor of each subclass instance created. The - keyword argument ``max_clients`` determines the maximum number - of simultaneous `~AsyncHTTPClient.fetch()` operations that can - execute in parallel on each `.IOLoop`. Additional arguments - may be supported depending on the implementation class in use. - - Example:: - - AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") - """ - super(AsyncHTTPClient, cls).configure(impl, **kwargs) - - -class HTTPRequest(object): - """HTTP client request object.""" - - # Default values for HTTPRequest parameters. - # Merged with the values on the request object by AsyncHTTPClient - # implementations. 
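A hedged sketch of the Future-based ``fetch`` interface described above, run under a coroutine; the URL is a placeholder::

    from tornado import gen, ioloop
    from tornado.httpclient import AsyncHTTPClient, HTTPError

    @gen.coroutine
    def main():
        client = AsyncHTTPClient()
        try:
            response = yield client.fetch("http://example.com/")
            raise gen.Return(response.code)
        except HTTPError as e:
            # Non-200 responses are raised from the yielded Future.
            raise gen.Return(e.code)

    print(ioloop.IOLoop.current().run_sync(main))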
- _DEFAULTS = dict( - connect_timeout=20.0, - request_timeout=20.0, - follow_redirects=True, - max_redirects=5, - decompress_response=True, - proxy_password='', - allow_nonstandard_methods=False, - validate_cert=True) - - def __init__(self, url, method="GET", headers=None, body=None, - auth_username=None, auth_password=None, auth_mode=None, - connect_timeout=None, request_timeout=None, - if_modified_since=None, follow_redirects=None, - max_redirects=None, user_agent=None, use_gzip=None, - network_interface=None, streaming_callback=None, - header_callback=None, prepare_curl_callback=None, - proxy_host=None, proxy_port=None, proxy_username=None, - proxy_password=None, allow_nonstandard_methods=None, - validate_cert=None, ca_certs=None, - allow_ipv6=None, - client_key=None, client_cert=None, body_producer=None, - expect_100_continue=False, decompress_response=None): - r"""All parameters except ``url`` are optional. - - :arg string url: URL to fetch - :arg string method: HTTP method, e.g. "GET" or "POST" - :arg headers: Additional HTTP headers to pass on the request - :type headers: `~tornado.httputil.HTTPHeaders` or `dict` - :arg body: HTTP request body as a string (byte or unicode; if unicode - the utf-8 encoding will be used) - :arg body_producer: Callable used for lazy/asynchronous request bodies. - It is called with one argument, a ``write`` function, and should - return a `.Future`. It should call the write function with new - data as it becomes available. The write function returns a - `.Future` which can be used for flow control. - Only one of ``body`` and ``body_producer`` may - be specified. ``body_producer`` is not supported on - ``curl_httpclient``. When using ``body_producer`` it is recommended - to pass a ``Content-Length`` in the headers as otherwise chunked - encoding will be used, and many servers do not support chunked - encoding on requests. New in Tornado 4.0 - :arg string auth_username: Username for HTTP authentication - :arg string auth_password: Password for HTTP authentication - :arg string auth_mode: Authentication mode; default is "basic". - Allowed values are implementation-defined; ``curl_httpclient`` - supports "basic" and "digest"; ``simple_httpclient`` only supports - "basic" - :arg float connect_timeout: Timeout for initial connection in seconds - :arg float request_timeout: Timeout for entire request in seconds - :arg if_modified_since: Timestamp for ``If-Modified-Since`` header - :type if_modified_since: `datetime` or `float` - :arg bool follow_redirects: Should redirects be followed automatically - or return the 3xx response? - :arg int max_redirects: Limit for ``follow_redirects`` - :arg string user_agent: String to send as ``User-Agent`` header - :arg bool decompress_response: Request a compressed response from - the server and decompress it after downloading. Default is True. - New in Tornado 4.0. - :arg bool use_gzip: Deprecated alias for ``decompress_response`` - since Tornado 4.0. - :arg string network_interface: Network interface to use for request. - ``curl_httpclient`` only; see note below. - :arg callable streaming_callback: If set, ``streaming_callback`` will - be run with each chunk of data as it is received, and - ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in - the final response. - :arg callable header_callback: If set, ``header_callback`` will - be run with each header line as it is received (including the - first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line - containing only ``\r\n``. 
All lines include the trailing newline - characters). ``HTTPResponse.headers`` will be empty in the final - response. This is most useful in conjunction with - ``streaming_callback``, because it's the only way to get access to - header data while the request is in progress. - :arg callable prepare_curl_callback: If set, will be called with - a ``pycurl.Curl`` object to allow the application to make additional - ``setopt`` calls. - :arg string proxy_host: HTTP proxy hostname. To use proxies, - ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username`` and - ``proxy_password`` are optional. Proxies are currently only supported - with ``curl_httpclient``. - :arg int proxy_port: HTTP proxy port - :arg string proxy_username: HTTP proxy username - :arg string proxy_password: HTTP proxy password - :arg bool allow_nonstandard_methods: Allow unknown values for ``method`` - argument? - :arg bool validate_cert: For HTTPS requests, validate the server's - certificate? - :arg string ca_certs: filename of CA certificates in PEM format, - or None to use defaults. See note below when used with - ``curl_httpclient``. - :arg bool allow_ipv6: Use IPv6 when available? Default is false in - ``simple_httpclient`` and true in ``curl_httpclient`` - :arg string client_key: Filename for client SSL key, if any. See - note below when used with ``curl_httpclient``. - :arg string client_cert: Filename for client SSL certificate, if any. - See note below when used with ``curl_httpclient``. - :arg bool expect_100_continue: If true, send the - ``Expect: 100-continue`` header and wait for a continue response - before sending the request body. Only supported with - simple_httpclient. - - .. note:: - - When using ``curl_httpclient`` certain options may be - inherited by subsequent fetches because ``pycurl`` does - not allow them to be cleanly reset. This applies to the - ``ca_certs``, ``client_key``, ``client_cert``, and - ``network_interface`` arguments. If you use these - options, you should pass them on every request (you don't - have to always use the same values, but it's not possible - to mix requests that specify these options with ones that - use the defaults). - - .. versionadded:: 3.1 - The ``auth_mode`` argument. - - .. versionadded:: 4.0 - The ``body_producer`` and ``expect_100_continue`` arguments. - """ - # Note that some of these attributes go through property setters - # defined below.
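A hedged example of constructing an `HTTPRequest` with a few of the parameters documented above (all values illustrative)::

    from tornado.httpclient import HTTPRequest

    request = HTTPRequest(
        url="https://example.com/api",
        method="POST",
        headers={"Content-Type": "application/json"},
        body='{"ping": true}',
        connect_timeout=5.0,   # seconds to establish the connection
        request_timeout=10.0,  # seconds for the entire request
        validate_cert=True,
    )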
- self.headers = headers - if if_modified_since: - self.headers["If-Modified-Since"] = httputil.format_timestamp( - if_modified_since) - self.proxy_host = proxy_host - self.proxy_port = proxy_port - self.proxy_username = proxy_username - self.proxy_password = proxy_password - self.url = url - self.method = method - self.body = body - self.body_producer = body_producer - self.auth_username = auth_username - self.auth_password = auth_password - self.auth_mode = auth_mode - self.connect_timeout = connect_timeout - self.request_timeout = request_timeout - self.follow_redirects = follow_redirects - self.max_redirects = max_redirects - self.user_agent = user_agent - if decompress_response is not None: - self.decompress_response = decompress_response - else: - self.decompress_response = use_gzip - self.network_interface = network_interface - self.streaming_callback = streaming_callback - self.header_callback = header_callback - self.prepare_curl_callback = prepare_curl_callback - self.allow_nonstandard_methods = allow_nonstandard_methods - self.validate_cert = validate_cert - self.ca_certs = ca_certs - self.allow_ipv6 = allow_ipv6 - self.client_key = client_key - self.client_cert = client_cert - self.expect_100_continue = expect_100_continue - self.start_time = time.time() - - @property - def headers(self): - return self._headers - - @headers.setter - def headers(self, value): - if value is None: - self._headers = httputil.HTTPHeaders() - else: - self._headers = value - - @property - def body(self): - return self._body - - @body.setter - def body(self, value): - self._body = utf8(value) - - @property - def body_producer(self): - return self._body_producer - - @body_producer.setter - def body_producer(self, value): - self._body_producer = stack_context.wrap(value) - - @property - def streaming_callback(self): - return self._streaming_callback - - @streaming_callback.setter - def streaming_callback(self, value): - self._streaming_callback = stack_context.wrap(value) - - @property - def header_callback(self): - return self._header_callback - - @header_callback.setter - def header_callback(self, value): - self._header_callback = stack_context.wrap(value) - - @property - def prepare_curl_callback(self): - return self._prepare_curl_callback - - @prepare_curl_callback.setter - def prepare_curl_callback(self, value): - self._prepare_curl_callback = stack_context.wrap(value) - - -class HTTPResponse(object): - """HTTP Response object. - - Attributes: - - * request: HTTPRequest object - - * code: numeric HTTP status code, e.g. 200 or 404 - - * reason: human-readable reason phrase describing the status code - - * headers: `tornado.httputil.HTTPHeaders` object - - * effective_url: final location of the resource after following any - redirects - - * buffer: ``cStringIO`` object for response body - - * body: response body as string (created on demand from ``self.buffer``) - - * error: Exception object, if any - - * request_time: seconds from request start to finish - - * time_info: dictionary of diagnostic timing information from the request. - Available data are subject to change, but currently uses timings - available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, - plus ``queue``, which is the delay (if any) introduced by waiting for - a slot under `AsyncHTTPClient`'s ``max_clients`` setting. 
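For illustration, a short sketch of reading the attributes listed above through the blocking `HTTPClient` wrapper from earlier in this module (the URL is a placeholder)::

    from tornado.httpclient import HTTPClient

    client = HTTPClient()
    response = client.fetch("http://example.com/")  # placeholder URL
    print(response.code)                      # e.g. 200
    print(response.reason)                    # e.g. 'OK'
    print(response.headers.get("Content-Type"))
    print(len(response.body))                 # built on demand from .buffer
    print(response.request_time)              # seconds from start to finish
    client.close()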
- """ - def __init__(self, request, code, headers=None, buffer=None, - effective_url=None, error=None, request_time=None, - time_info=None, reason=None): - if isinstance(request, _RequestProxy): - self.request = request.request - else: - self.request = request - self.code = code - self.reason = reason or httputil.responses.get(code, "Unknown") - if headers is not None: - self.headers = headers - else: - self.headers = httputil.HTTPHeaders() - self.buffer = buffer - self._body = None - if effective_url is None: - self.effective_url = request.url - else: - self.effective_url = effective_url - if error is None: - if self.code < 200 or self.code >= 300: - self.error = HTTPError(self.code, message=self.reason, - response=self) - else: - self.error = None - else: - self.error = error - self.request_time = request_time - self.time_info = time_info or {} - - def _get_body(self): - if self.buffer is None: - return None - elif self._body is None: - self._body = self.buffer.getvalue() - - return self._body - - body = property(_get_body) - - def rethrow(self): - """If there was an error on the request, raise an `HTTPError`.""" - if self.error: - raise self.error - - def __repr__(self): - args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items())) - return "%s(%s)" % (self.__class__.__name__, args) - - -class HTTPError(Exception): - """Exception thrown for an unsuccessful HTTP request. - - Attributes: - - * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is - used when no HTTP response was received, e.g. for a timeout. - - * ``response`` - `HTTPResponse` object, if any. - - Note that if ``follow_redirects`` is False, redirects become HTTPErrors, - and you can look at ``error.response.headers['Location']`` to see the - destination of the redirect. - """ - def __init__(self, code, message=None, response=None): - self.code = code - message = message or httputil.responses.get(code, "Unknown") - self.response = response - Exception.__init__(self, "HTTP %d: %s" % (self.code, message)) - - -class _RequestProxy(object): - """Combines an object with a dictionary of defaults. - - Used internally by AsyncHTTPClient implementations. 
- """ - def __init__(self, request, defaults): - self.request = request - self.defaults = defaults - - def __getattr__(self, name): - request_attr = getattr(self.request, name) - if request_attr is not None: - return request_attr - elif self.defaults is not None: - return self.defaults.get(name, None) - else: - return None - - -def main(): - from tornado.options import define, options, parse_command_line - define("print_headers", type=bool, default=False) - define("print_body", type=bool, default=True) - define("follow_redirects", type=bool, default=True) - define("validate_cert", type=bool, default=True) - args = parse_command_line() - client = HTTPClient() - for arg in args: - try: - response = client.fetch(arg, - follow_redirects=options.follow_redirects, - validate_cert=options.validate_cert, - ) - except HTTPError as e: - if e.response is not None: - response = e.response - else: - raise - if options.print_headers: - print(response.headers) - if options.print_body: - print(native_str(response.body)) - client.close() - -if __name__ == "__main__": - main() diff --git a/rosbridge_server/src/tornado/httpserver.py b/rosbridge_server/src/tornado/httpserver.py deleted file mode 100644 index 03b5fc737..000000000 --- a/rosbridge_server/src/tornado/httpserver.py +++ /dev/null @@ -1,297 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A non-blocking, single-threaded HTTP server. - -Typical applications have little direct interaction with the `HTTPServer` -class except to start a server at the beginning of the process -(and even that is often done indirectly via `tornado.web.Application.listen`). - -.. versionchanged:: 4.0 - - The ``HTTPRequest`` class that used to live in this module has been moved - to `tornado.httputil.HTTPServerRequest`. The old name remains as an alias. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import socket - -from tornado.escape import native_str -from tornado.http1connection import HTTP1ServerConnection, HTTP1ConnectionParameters -from tornado import gen -from tornado import httputil -from tornado import iostream -from tornado import netutil -from tornado.tcpserver import TCPServer - - -class HTTPServer(TCPServer, httputil.HTTPServerConnectionDelegate): - r"""A non-blocking, single-threaded HTTP server. - - A server is defined by either a request callback that takes a - `.HTTPServerRequest` as an argument or a `.HTTPServerConnectionDelegate` - instance. 
- - A simple example server that echoes back the URI you requested:: - - import tornado.httpserver - import tornado.ioloop - - def handle_request(request): - message = "You requested %s\n" % request.uri - request.connection.write_headers( - httputil.ResponseStartLine('HTTP/1.1', 200, 'OK'), - {"Content-Length": str(len(message))}) - request.connection.write(message) - request.connection.finish() - - http_server = tornado.httpserver.HTTPServer(handle_request) - http_server.listen(8888) - tornado.ioloop.IOLoop.instance().start() - - Applications should use the methods of `.HTTPConnection` to write - their response. - - `HTTPServer` supports keep-alive connections by default - (automatically for HTTP/1.1, or for HTTP/1.0 when the client - requests ``Connection: keep-alive``). - - If ``xheaders`` is ``True``, we support the - ``X-Real-Ip``/``X-Forwarded-For`` and - ``X-Scheme``/``X-Forwarded-Proto`` headers, which override the - remote IP and URI scheme/protocol for all requests. These headers - are useful when running Tornado behind a reverse proxy or load - balancer. The ``protocol`` argument can also be set to ``https`` - if Tornado is run behind an SSL-decoding proxy that does not set one of - the supported ``xheaders``. - - To make this server serve SSL traffic, send the ``ssl_options`` dictionary - argument with the arguments required for the `ssl.wrap_socket` method, - including ``certfile`` and ``keyfile``. (In Python 3.2+ you can pass - an `ssl.SSLContext` object instead of a dict):: - - HTTPServer(application, ssl_options={ - "certfile": os.path.join(data_dir, "mydomain.crt"), - "keyfile": os.path.join(data_dir, "mydomain.key"), - }) - - `HTTPServer` initialization follows one of three patterns (the - initialization methods are defined on `tornado.tcpserver.TCPServer`): - - 1. `~tornado.tcpserver.TCPServer.listen`: simple single-process:: - - server = HTTPServer(app) - server.listen(8888) - IOLoop.instance().start() - - In many cases, `tornado.web.Application.listen` can be used to avoid - the need to explicitly create the `HTTPServer`. - - 2. `~tornado.tcpserver.TCPServer.bind`/`~tornado.tcpserver.TCPServer.start`: - simple multi-process:: - - server = HTTPServer(app) - server.bind(8888) - server.start(0) # Forks multiple sub-processes - IOLoop.instance().start() - - When using this interface, an `.IOLoop` must *not* be passed - to the `HTTPServer` constructor. `~.TCPServer.start` will always start - the server on the default singleton `.IOLoop`. - - 3. `~tornado.tcpserver.TCPServer.add_sockets`: advanced multi-process:: - - sockets = tornado.netutil.bind_sockets(8888) - tornado.process.fork_processes(0) - server = HTTPServer(app) - server.add_sockets(sockets) - IOLoop.instance().start() - - The `~.TCPServer.add_sockets` interface is more complicated, - but it can be used with `tornado.process.fork_processes` to - give you more flexibility in when the fork happens. - `~.TCPServer.add_sockets` can also be used in single-process - servers if you want to create your listening sockets in some - way other than `tornado.netutil.bind_sockets`. - - .. versionchanged:: 4.0 - Added ``decompress_request``, ``chunk_size``, ``max_header_size``, - ``idle_connection_timeout``, ``body_timeout``, ``max_body_size`` - arguments. Added support for `.HTTPServerConnectionDelegate` - instances as ``request_callback``.
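For illustration, a minimal sketch of the ``xheaders`` option described above, assuming a reverse proxy in front of Tornado that sets ``X-Real-Ip`` and ``X-Forwarded-Proto``::

    import tornado.httpserver
    import tornado.ioloop
    import tornado.web

    class WhoAmI(tornado.web.RequestHandler):
        def get(self):
            # With xheaders=True, remote_ip and protocol reflect the
            # proxy-supplied headers rather than the socket peer.
            self.write(self.request.remote_ip)

    app = tornado.web.Application([(r"/", WhoAmI)])
    server = tornado.httpserver.HTTPServer(app, xheaders=True)
    server.listen(8888)
    tornado.ioloop.IOLoop.instance().start()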
- """ - def __init__(self, request_callback, no_keep_alive=False, io_loop=None, - xheaders=False, ssl_options=None, protocol=None, - decompress_request=False, - chunk_size=None, max_header_size=None, - idle_connection_timeout=None, body_timeout=None, - max_body_size=None, max_buffer_size=None): - self.request_callback = request_callback - self.no_keep_alive = no_keep_alive - self.xheaders = xheaders - self.protocol = protocol - self.conn_params = HTTP1ConnectionParameters( - decompress=decompress_request, - chunk_size=chunk_size, - max_header_size=max_header_size, - header_timeout=idle_connection_timeout or 3600, - max_body_size=max_body_size, - body_timeout=body_timeout) - TCPServer.__init__(self, io_loop=io_loop, ssl_options=ssl_options, - max_buffer_size=max_buffer_size, - read_chunk_size=chunk_size) - self._connections = set() - - @gen.coroutine - def close_all_connections(self): - while self._connections: - # Peek at an arbitrary element of the set - conn = next(iter(self._connections)) - yield conn.close() - - def handle_stream(self, stream, address): - context = _HTTPRequestContext(stream, address, - self.protocol) - conn = HTTP1ServerConnection( - stream, self.conn_params, context) - self._connections.add(conn) - conn.start_serving(self) - - def start_request(self, server_conn, request_conn): - return _ServerRequestAdapter(self, request_conn) - - def on_close(self, server_conn): - self._connections.remove(server_conn) - - -class _HTTPRequestContext(object): - def __init__(self, stream, address, protocol): - self.address = address - self.protocol = protocol - # Save the socket's address family now so we know how to - # interpret self.address even after the stream is closed - # and its socket attribute replaced with None. - if stream.socket is not None: - self.address_family = stream.socket.family - else: - self.address_family = None - # In HTTPServerRequest we want an IP, not a full socket address. - if (self.address_family in (socket.AF_INET, socket.AF_INET6) and - address is not None): - self.remote_ip = address[0] - else: - # Unix (or other) socket; fake the remote address. - self.remote_ip = '0.0.0.0' - if protocol: - self.protocol = protocol - elif isinstance(stream, iostream.SSLIOStream): - self.protocol = "https" - else: - self.protocol = "http" - self._orig_remote_ip = self.remote_ip - self._orig_protocol = self.protocol - - def __str__(self): - if self.address_family in (socket.AF_INET, socket.AF_INET6): - return self.remote_ip - elif isinstance(self.address, bytes): - # Python 3 with the -bb option warns about str(bytes), - # so convert it explicitly. - # Unix socket addresses are str on mac but bytes on linux. - return native_str(self.address) - else: - return str(self.address) - - def _apply_xheaders(self, headers): - """Rewrite the ``remote_ip`` and ``protocol`` fields.""" - # Squid uses X-Forwarded-For, others use X-Real-Ip - ip = headers.get("X-Forwarded-For", self.remote_ip) - ip = ip.split(',')[-1].strip() - ip = headers.get("X-Real-Ip", ip) - if netutil.is_valid_ip(ip): - self.remote_ip = ip - # AWS uses X-Forwarded-Proto - proto_header = headers.get( - "X-Scheme", headers.get("X-Forwarded-Proto", - self.protocol)) - if proto_header in ("http", "https"): - self.protocol = proto_header - - def _unapply_xheaders(self): - """Undo changes from `_apply_xheaders`. - - Xheaders are per-request so they should not leak to the next - request on the same connection. 
- """ - self.remote_ip = self._orig_remote_ip - self.protocol = self._orig_protocol - - -class _ServerRequestAdapter(httputil.HTTPMessageDelegate): - """Adapts the `HTTPMessageDelegate` interface to the interface expected - by our clients. - """ - def __init__(self, server, connection): - self.server = server - self.connection = connection - self.request = None - if isinstance(server.request_callback, - httputil.HTTPServerConnectionDelegate): - self.delegate = server.request_callback.start_request(connection) - self._chunks = None - else: - self.delegate = None - self._chunks = [] - - def headers_received(self, start_line, headers): - if self.server.xheaders: - self.connection.context._apply_xheaders(headers) - if self.delegate is None: - self.request = httputil.HTTPServerRequest( - connection=self.connection, start_line=start_line, - headers=headers) - else: - return self.delegate.headers_received(start_line, headers) - - def data_received(self, chunk): - if self.delegate is None: - self._chunks.append(chunk) - else: - return self.delegate.data_received(chunk) - - def finish(self): - if self.delegate is None: - self.request.body = b''.join(self._chunks) - self.request._parse_body() - self.server.request_callback(self.request) - else: - self.delegate.finish() - self._cleanup() - - def on_connection_close(self): - if self.delegate is None: - self._chunks = None - else: - self.delegate.on_connection_close() - self._cleanup() - - def _cleanup(self): - if self.server.xheaders: - self.connection.context._unapply_xheaders() - - -HTTPRequest = httputil.HTTPServerRequest diff --git a/rosbridge_server/src/tornado/httputil.py b/rosbridge_server/src/tornado/httputil.py deleted file mode 100644 index a67489725..000000000 --- a/rosbridge_server/src/tornado/httputil.py +++ /dev/null @@ -1,844 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""HTTP utility code shared by clients and servers. - -This module also defines the `HTTPServerRequest` class which is exposed -via `tornado.web.RequestHandler.request`. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import calendar -import collections -import copy -import datetime -import email.utils -import numbers -import re -import time - -from tornado.escape import native_str, parse_qs_bytes, utf8 -from tornado.log import gen_log -from tornado.util import ObjectDict, bytes_type - -try: - import Cookie # py2 -except ImportError: - import http.cookies as Cookie # py3 - -try: - from httplib import responses # py2 -except ImportError: - from http.client import responses # py3 - -# responses is unused in this file, but we re-export it to other files. -# Reference it so pyflakes doesn't complain. -responses - -try: - from urllib import urlencode # py2 -except ImportError: - from urllib.parse import urlencode # py3 - -try: - from ssl import SSLError -except ImportError: - # ssl is unavailable on app engine. 
- class SSLError(Exception): - pass - - -class _NormalizedHeaderCache(dict): - """Dynamic cached mapping of header names to Http-Header-Case. - - Implemented as a dict subclass so that cache hits are as fast as a - normal dict lookup, without the overhead of a python function - call. - - >>> normalized_headers = _NormalizedHeaderCache(10) - >>> normalized_headers["coNtent-TYPE"] - 'Content-Type' - """ - def __init__(self, size): - super(_NormalizedHeaderCache, self).__init__() - self.size = size - self.queue = collections.deque() - - def __missing__(self, key): - normalized = "-".join([w.capitalize() for w in key.split("-")]) - self[key] = normalized - self.queue.append(key) - if len(self.queue) > self.size: - # Limit the size of the cache. LRU would be better, but this - # simpler approach should be fine. In Python 2.7+ we could - # use OrderedDict (or in 3.2+, @functools.lru_cache). - old_key = self.queue.popleft() - del self[old_key] - return normalized - -_normalized_headers = _NormalizedHeaderCache(1000) - - -class HTTPHeaders(dict): - """A dictionary that maintains ``Http-Header-Case`` for all keys. - - Supports multiple values per key via a pair of new methods, - `add()` and `get_list()`. The regular dictionary interface - returns a single value per key, with multiple values joined by a - comma. - - >>> h = HTTPHeaders({"content-type": "text/html"}) - >>> list(h.keys()) - ['Content-Type'] - >>> h["Content-Type"] - 'text/html' - - >>> h.add("Set-Cookie", "A=B") - >>> h.add("Set-Cookie", "C=D") - >>> h["set-cookie"] - 'A=B,C=D' - >>> h.get_list("set-cookie") - ['A=B', 'C=D'] - - >>> for (k,v) in sorted(h.get_all()): - ... print('%s: %s' % (k,v)) - ... - Content-Type: text/html - Set-Cookie: A=B - Set-Cookie: C=D - """ - def __init__(self, *args, **kwargs): - # Don't pass args or kwargs to dict.__init__, as it will bypass - # our __setitem__ - dict.__init__(self) - self._as_list = {} - self._last_key = None - if (len(args) == 1 and len(kwargs) == 0 and - isinstance(args[0], HTTPHeaders)): - # Copy constructor - for k, v in args[0].get_all(): - self.add(k, v) - else: - # Dict-style initialization - self.update(*args, **kwargs) - - # new public methods - - def add(self, name, value): - """Adds a new value for the given key.""" - norm_name = _normalized_headers[name] - self._last_key = norm_name - if norm_name in self: - # bypass our override of __setitem__ since it modifies _as_list - dict.__setitem__(self, norm_name, - native_str(self[norm_name]) + ',' + - native_str(value)) - self._as_list[norm_name].append(value) - else: - self[norm_name] = value - - def get_list(self, name): - """Returns all values for the given header as a list.""" - norm_name = _normalized_headers[name] - return self._as_list.get(norm_name, []) - - def get_all(self): - """Returns an iterable of all (name, value) pairs. - - If a header has multiple values, multiple pairs will be - returned with the same name. - """ - for name, values in self._as_list.items(): - for value in values: - yield (name, value) - - def parse_line(self, line): - """Updates the dictionary with a single header line. 
- - >>> h = HTTPHeaders() - >>> h.parse_line("Content-Type: text/html") - >>> h.get('content-type') - 'text/html' - """ - if line[0].isspace(): - # continuation of a multi-line header - new_part = ' ' + line.lstrip() - self._as_list[self._last_key][-1] += new_part - dict.__setitem__(self, self._last_key, - self[self._last_key] + new_part) - else: - name, value = line.split(":", 1) - self.add(name, value.strip()) - - @classmethod - def parse(cls, headers): - """Returns a dictionary from HTTP header text. - - >>> h = HTTPHeaders.parse("Content-Type: text/html\\r\\nContent-Length: 42\\r\\n") - >>> sorted(h.items()) - [('Content-Length', '42'), ('Content-Type', 'text/html')] - """ - h = cls() - for line in headers.splitlines(): - if line: - h.parse_line(line) - return h - - # dict implementation overrides - - def __setitem__(self, name, value): - norm_name = _normalized_headers[name] - dict.__setitem__(self, norm_name, value) - self._as_list[norm_name] = [value] - - def __getitem__(self, name): - return dict.__getitem__(self, _normalized_headers[name]) - - def __delitem__(self, name): - norm_name = _normalized_headers[name] - dict.__delitem__(self, norm_name) - del self._as_list[norm_name] - - def __contains__(self, name): - norm_name = _normalized_headers[name] - return dict.__contains__(self, norm_name) - - def get(self, name, default=None): - return dict.get(self, _normalized_headers[name], default) - - def update(self, *args, **kwargs): - # dict.update bypasses our __setitem__ - for k, v in dict(*args, **kwargs).items(): - self[k] = v - - def copy(self): - # default implementation returns dict(self), not the subclass - return HTTPHeaders(self) - - -class HTTPServerRequest(object): - """A single HTTP request. - - All attributes are type `str` unless otherwise noted. - - .. attribute:: method - - HTTP request method, e.g. "GET" or "POST" - - .. attribute:: uri - - The requested uri. - - .. attribute:: path - - The path portion of `uri` - - .. attribute:: query - - The query portion of `uri` - - .. attribute:: version - - HTTP version specified in request, e.g. "HTTP/1.1" - - .. attribute:: headers - - `.HTTPHeaders` dictionary-like object for request headers. Acts like - a case-insensitive dictionary with additional methods for repeated - headers. - - .. attribute:: body - - Request body, if present, as a byte string. - - .. attribute:: remote_ip - - Client's IP address as a string. If ``HTTPServer.xheaders`` is set, - will pass along the real IP address provided by a load balancer - in the ``X-Real-Ip`` or ``X-Forwarded-For`` header. - - .. versionchanged:: 3.1 - The list format of ``X-Forwarded-For`` is now supported. - - .. attribute:: protocol - - The protocol used, either "http" or "https". If ``HTTPServer.xheaders`` - is set, will pass along the protocol used by a load balancer if - reported via an ``X-Scheme`` header. - - .. attribute:: host - - The requested hostname, usually taken from the ``Host`` header. - - .. attribute:: arguments - - GET/POST arguments are available in the arguments property, which - maps arguments names to lists of values (to support multiple values - for individual names). Names are of type `str`, while arguments - are byte strings. Note that this is different from - `.RequestHandler.get_argument`, which returns argument values as - unicode strings. - - .. attribute:: query_arguments - - Same format as ``arguments``, but contains only arguments extracted - from the query string. - - .. versionadded:: 3.2 - - .. 
attribute:: body_arguments - - Same format as ``arguments``, but contains only arguments extracted - from the request body. - - .. versionadded:: 3.2 - - .. attribute:: files - - File uploads are available in the files property, which maps file - names to lists of `.HTTPFile`. - - .. attribute:: connection - - An HTTP request is attached to a single HTTP connection, which can - be accessed through the "connection" attribute. Since connections - are typically kept open in HTTP/1.1, multiple requests can be handled - sequentially on a single connection. - - .. versionchanged:: 4.0 - Moved from ``tornado.httpserver.HTTPRequest``. - """ - def __init__(self, method=None, uri=None, version="HTTP/1.0", headers=None, - body=None, host=None, files=None, connection=None, - start_line=None): - if start_line is not None: - method, uri, version = start_line - self.method = method - self.uri = uri - self.version = version - self.headers = headers or HTTPHeaders() - self.body = body or "" - - # set remote IP and protocol - context = getattr(connection, 'context', None) - self.remote_ip = getattr(context, 'remote_ip', None) - self.protocol = getattr(context, 'protocol', "http") - - self.host = host or self.headers.get("Host") or "127.0.0.1" - self.files = files or {} - self.connection = connection - self._start_time = time.time() - self._finish_time = None - - self.path, sep, self.query = uri.partition('?') - self.arguments = parse_qs_bytes(self.query, keep_blank_values=True) - self.query_arguments = copy.deepcopy(self.arguments) - self.body_arguments = {} - - def supports_http_1_1(self): - """Returns True if this request supports HTTP/1.1 semantics. - - .. deprecated:: 4.0 - Applications are less likely to need this information with the - introduction of `.HTTPConnection`. If you still need it, access - the ``version`` attribute directly. - """ - return self.version == "HTTP/1.1" - - @property - def cookies(self): - """A dictionary of Cookie.Morsel objects.""" - if not hasattr(self, "_cookies"): - self._cookies = Cookie.SimpleCookie() - if "Cookie" in self.headers: - try: - self._cookies.load( - native_str(self.headers["Cookie"])) - except Exception: - self._cookies = {} - return self._cookies - - def write(self, chunk, callback=None): - """Writes the given chunk to the response stream. - - .. deprecated:: 4.0 - Use ``request.connection`` and the `.HTTPConnection` methods - to write the response. - """ - assert isinstance(chunk, bytes_type) - self.connection.write(chunk, callback=callback) - - def finish(self): - """Finishes this HTTP request on the open connection. - - .. deprecated:: 4.0 - Use ``request.connection`` and the `.HTTPConnection` methods - to write the response. - """ - self.connection.finish() - self._finish_time = time.time() - - def full_url(self): - """Reconstructs the full URL for this request.""" - return self.protocol + "://" + self.host + self.uri - - def request_time(self): - """Returns the amount of time it took for this request to execute.""" - if self._finish_time is None: - return time.time() - self._start_time - else: - return self._finish_time - self._start_time - - def get_ssl_certificate(self, binary_form=False): - """Returns the client's SSL certificate, if any.
- - To use client certificates, the HTTPServer must have been constructed - with cert_reqs set in ssl_options, e.g.:: - - server = HTTPServer(app, - ssl_options=dict( - certfile="foo.crt", - keyfile="foo.key", - cert_reqs=ssl.CERT_REQUIRED, - ca_certs="cacert.crt")) - - By default, the return value is a dictionary (or None, if no - client certificate is present). If ``binary_form`` is true, a - DER-encoded form of the certificate is returned instead. See - SSLSocket.getpeercert() in the standard library for more - details. - http://docs.python.org/library/ssl.html#sslsocket-objects - """ - try: - return self.connection.stream.socket.getpeercert( - binary_form=binary_form) - except SSLError: - return None - - def _parse_body(self): - parse_body_arguments( - self.headers.get("Content-Type", ""), self.body, - self.body_arguments, self.files, - self.headers) - - for k, v in self.body_arguments.items(): - self.arguments.setdefault(k, []).extend(v) - - def __repr__(self): - attrs = ("protocol", "host", "method", "uri", "version", "remote_ip") - args = ", ".join(["%s=%r" % (n, getattr(self, n)) for n in attrs]) - return "%s(%s, headers=%s)" % ( - self.__class__.__name__, args, dict(self.headers)) - - -class HTTPInputError(Exception): - """Exception class for malformed HTTP requests or responses - from remote sources. - - .. versionadded:: 4.0 - """ - pass - - -class HTTPOutputError(Exception): - """Exception class for errors in HTTP output. - - .. versionadded:: 4.0 - """ - pass - - -class HTTPServerConnectionDelegate(object): - """Implement this interface to handle requests from `.HTTPServer`. - - .. versionadded:: 4.0 - """ - def start_request(self, server_conn, request_conn): - """This method is called by the server when a new request has started. - - :arg server_conn: is an opaque object representing the long-lived - (e.g. tcp-level) connection. - :arg request_conn: is a `.HTTPConnection` object for a single - request/response exchange. - - This method should return a `.HTTPMessageDelegate`. - """ - raise NotImplementedError() - - def on_close(self, server_conn): - """This method is called when a connection has been closed. - - :arg server_conn: is a server connection that has previously been - passed to ``start_request``. - """ - pass - - -class HTTPMessageDelegate(object): - """Implement this interface to handle an HTTP request or response. - - .. versionadded:: 4.0 - """ - def headers_received(self, start_line, headers): - """Called when the HTTP headers have been received and parsed. - - :arg start_line: a `.RequestStartLine` or `.ResponseStartLine` - depending on whether this is a client or server message. - :arg headers: a `.HTTPHeaders` instance. - - Some `.HTTPConnection` methods can only be called during - ``headers_received``. - - May return a `.Future`; if it does the body will not be read - until it is done. - """ - pass - - def data_received(self, chunk): - """Called when a chunk of data has been received. - - May return a `.Future` for flow control. - """ - pass - - def finish(self): - """Called after the last chunk of data has been received.""" - pass - - def on_connection_close(self): - """Called if the connection is closed without finishing the request. - - If ``headers_received`` is called, either ``finish`` or - ``on_connection_close`` will be called, but not both. - """ - pass - - -class HTTPConnection(object): - """Applications use this interface to write their responses. - - .. 
versionadded:: 4.0 - """ - def write_headers(self, start_line, headers, chunk=None, callback=None): - """Write an HTTP header block. - - :arg start_line: a `.RequestStartLine` or `.ResponseStartLine`. - :arg headers: a `.HTTPHeaders` instance. - :arg chunk: the first (optional) chunk of data. This is an optimization - so that small responses can be written in the same call as their - headers. - :arg callback: a callback to be run when the write is complete. - - Returns a `.Future` if no callback is given. - """ - raise NotImplementedError() - - def write(self, chunk, callback=None): - """Writes a chunk of body data. - - The callback will be run when the write is complete. If no callback - is given, returns a Future. - """ - raise NotImplementedError() - - def finish(self): - """Indicates that the last body data has been written. - """ - raise NotImplementedError() - - -def url_concat(url, args): - """Concatenate url and argument dictionary regardless of whether - url has existing query parameters. - - >>> url_concat("http://example.com/foo?a=b", dict(c="d")) - 'http://example.com/foo?a=b&c=d' - """ - if not args: - return url - if url[-1] not in ('?', '&'): - url += '&' if ('?' in url) else '?' - return url + urlencode(args) - - -class HTTPFile(ObjectDict): - """Represents a file uploaded via a form. - - For backwards compatibility, its instance attributes are also - accessible as dictionary keys. - - * ``filename`` - * ``body`` - * ``content_type`` - """ - pass - - -def _parse_request_range(range_header): - """Parses a Range header. - - Returns either ``None`` or tuple ``(start, end)``. - Note that while the HTTP headers use inclusive byte positions, - this method returns indexes suitable for use in slices. - - >>> start, end = _parse_request_range("bytes=1-2") - >>> start, end - (1, 3) - >>> [0, 1, 2, 3, 4][start:end] - [1, 2] - >>> _parse_request_range("bytes=6-") - (6, None) - >>> _parse_request_range("bytes=-6") - (-6, None) - >>> _parse_request_range("bytes=-0") - (None, 0) - >>> _parse_request_range("bytes=") - (None, None) - >>> _parse_request_range("foo=42") - >>> _parse_request_range("bytes=1-2,6-10") - - Note: only supports one range (ex, ``bytes=1-2,6-10`` is not allowed). - - See [0] for the details of the range header. - - [0]: http://greenbytes.de/tech/webdav/draft-ietf-httpbis-p5-range-latest.html#byte.ranges - """ - unit, _, value = range_header.partition("=") - unit, value = unit.strip(), value.strip() - if unit != "bytes": - return None - start_b, _, end_b = value.partition("-") - try: - start = _int_or_none(start_b) - end = _int_or_none(end_b) - except ValueError: - return None - if end is not None: - if start is None: - if end != 0: - start = -end - end = None - else: - end += 1 - return (start, end) - - -def _get_content_range(start, end, total): - """Returns a suitable Content-Range header: - - >>> print(_get_content_range(None, 1, 4)) - bytes 0-0/4 - >>> print(_get_content_range(1, 3, 4)) - bytes 1-2/4 - >>> print(_get_content_range(None, None, 4)) - bytes 0-3/4 - """ - start = start or 0 - end = (end or total) - 1 - return "bytes %s-%s/%s" % (start, end, total) - - -def _int_or_none(val): - val = val.strip() - if val == "": - return None - return int(val) - - -def parse_body_arguments(content_type, body, arguments, files, headers=None): - """Parses a form request body. - - Supports ``application/x-www-form-urlencoded`` and - ``multipart/form-data``. The ``content_type`` parameter should be - a string and ``body`` should be a byte string. 
The ``arguments`` - and ``files`` parameters are dictionaries that will be updated - with the parsed contents. - """ - if headers and 'Content-Encoding' in headers: - gen_log.warning("Unsupported Content-Encoding: %s", - headers['Content-Encoding']) - return - if content_type.startswith("application/x-www-form-urlencoded"): - try: - uri_arguments = parse_qs_bytes(native_str(body), keep_blank_values=True) - except Exception as e: - gen_log.warning('Invalid x-www-form-urlencoded body: %s', e) - uri_arguments = {} - for name, values in uri_arguments.items(): - if values: - arguments.setdefault(name, []).extend(values) - elif content_type.startswith("multipart/form-data"): - fields = content_type.split(";") - for field in fields: - k, sep, v = field.strip().partition("=") - if k == "boundary" and v: - parse_multipart_form_data(utf8(v), body, arguments, files) - break - else: - gen_log.warning("Invalid multipart/form-data") - - -def parse_multipart_form_data(boundary, data, arguments, files): - """Parses a ``multipart/form-data`` body. - - The ``boundary`` and ``data`` parameters are both byte strings. - The dictionaries given in the arguments and files parameters - will be updated with the contents of the body. - """ - # The standard allows for the boundary to be quoted in the header, - # although it's rare (it happens at least for google app engine - # xmpp). I think we're also supposed to handle backslash-escapes - # here but I'll save that until we see a client that uses them - # in the wild. - if boundary.startswith(b'"') and boundary.endswith(b'"'): - boundary = boundary[1:-1] - final_boundary_index = data.rfind(b"--" + boundary + b"--") - if final_boundary_index == -1: - gen_log.warning("Invalid multipart/form-data: no final boundary") - return - parts = data[:final_boundary_index].split(b"--" + boundary + b"\r\n") - for part in parts: - if not part: - continue - eoh = part.find(b"\r\n\r\n") - if eoh == -1: - gen_log.warning("multipart/form-data missing headers") - continue - headers = HTTPHeaders.parse(part[:eoh].decode("utf-8")) - disp_header = headers.get("Content-Disposition", "") - disposition, disp_params = _parse_header(disp_header) - if disposition != "form-data" or not part.endswith(b"\r\n"): - gen_log.warning("Invalid multipart/form-data") - continue - value = part[eoh + 4:-2] - if not disp_params.get("name"): - gen_log.warning("multipart/form-data value missing name") - continue - name = disp_params["name"] - if disp_params.get("filename"): - ctype = headers.get("Content-Type", "application/unknown") - files.setdefault(name, []).append(HTTPFile( - filename=disp_params["filename"], body=value, - content_type=ctype)) - else: - arguments.setdefault(name, []).append(value) - - -def format_timestamp(ts): - """Formats a timestamp in the format used by HTTP. - - The argument may be a numeric timestamp as returned by `time.time`, - a time tuple as returned by `time.gmtime`, or a `datetime.datetime` - object. 
- - >>> format_timestamp(1359312200) - 'Sun, 27 Jan 2013 18:43:20 GMT' - """ - if isinstance(ts, numbers.Real): - pass - elif isinstance(ts, (tuple, time.struct_time)): - ts = calendar.timegm(ts) - elif isinstance(ts, datetime.datetime): - ts = calendar.timegm(ts.utctimetuple()) - else: - raise TypeError("unknown timestamp type: %r" % ts) - return email.utils.formatdate(ts, usegmt=True) - - -RequestStartLine = collections.namedtuple( - 'RequestStartLine', ['method', 'path', 'version']) - - -def parse_request_start_line(line): - """Returns a (method, path, version) tuple for an HTTP 1.x request line. - - The response is a `collections.namedtuple`. - - >>> parse_request_start_line("GET /foo HTTP/1.1") - RequestStartLine(method='GET', path='/foo', version='HTTP/1.1') - """ - try: - method, path, version = line.split(" ") - except ValueError: - raise HTTPInputError("Malformed HTTP request line") - if not version.startswith("HTTP/"): - raise HTTPInputError( - "Malformed HTTP version in HTTP Request-Line: %r" % version) - return RequestStartLine(method, path, version) - - -ResponseStartLine = collections.namedtuple( - 'ResponseStartLine', ['version', 'code', 'reason']) - - -def parse_response_start_line(line): - """Returns a (version, code, reason) tuple for an HTTP 1.x response line. - - The response is a `collections.namedtuple`. - - >>> parse_response_start_line("HTTP/1.1 200 OK") - ResponseStartLine(version='HTTP/1.1', code=200, reason='OK') - """ - line = native_str(line) - match = re.match("(HTTP/1.[01]) ([0-9]+) ([^\r]*)", line) - if not match: - raise HTTPInputError("Error parsing response start line") - return ResponseStartLine(match.group(1), int(match.group(2)), - match.group(3)) - -# _parseparam and _parse_header are copied and modified from python2.7's cgi.py -# The original 2.7 version of this code did not correctly support some -# combinations of semicolons and double quotes. - - -def _parseparam(s): - while s[:1] == ';': - s = s[1:] - end = s.find(';') - while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: - end = s.find(';', end + 1) - if end < 0: - end = len(s) - f = s[:end] - yield f.strip() - s = s[end:] - - -def _parse_header(line): - """Parse a Content-type like header. - - Return the main content-type and a dictionary of options. - - """ - parts = _parseparam(';' + line) - key = next(parts) - pdict = {} - for p in parts: - i = p.find('=') - if i >= 0: - name = p[:i].strip().lower() - value = p[i + 1:].strip() - if len(value) >= 2 and value[0] == value[-1] == '"': - value = value[1:-1] - value = value.replace('\\\\', '\\').replace('\\"', '"') - pdict[name] = value - return key, pdict - - -def doctests(): - import doctest - return doctest.DocTestSuite() diff --git a/rosbridge_server/src/tornado/ioloop.py b/rosbridge_server/src/tornado/ioloop.py deleted file mode 100644 index a8f662acb..000000000 --- a/rosbridge_server/src/tornado/ioloop.py +++ /dev/null @@ -1,987 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -"""An I/O event loop for non-blocking sockets. - -Typical applications will use a single `IOLoop` object, in the -`IOLoop.instance` singleton. The `IOLoop.start` method should usually -be called at the end of the ``main()`` function. Atypical applications may -use more than one `IOLoop`, such as one `IOLoop` per thread, or per `unittest` -case. - -In addition to I/O events, the `IOLoop` can also schedule time-based events. -`IOLoop.add_timeout` is a non-blocking alternative to `time.sleep`. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import datetime -import errno -import functools -import heapq -import itertools -import logging -import numbers -import os -import select -import sys -import threading -import time -import traceback - -from tornado.concurrent import TracebackFuture, is_future -from tornado.log import app_log, gen_log -from tornado import stack_context -from tornado.util import Configurable, errno_from_exception, timedelta_to_seconds - -try: - import signal -except ImportError: - signal = None - -try: - import thread # py2 -except ImportError: - import _thread as thread # py3 - -from tornado.platform.auto import set_close_exec, Waker - - -_POLL_TIMEOUT = 3600.0 - - -class TimeoutError(Exception): - pass - - -class IOLoop(Configurable): - """A level-triggered I/O loop. - - We use ``epoll`` (Linux) or ``kqueue`` (BSD and Mac OS X) if they - are available, or else we fall back on select(). If you are - implementing a system that needs to handle thousands of - simultaneous connections, you should use a system that supports - either ``epoll`` or ``kqueue``. - - Example usage for a simple TCP server:: - - import errno - import functools - import ioloop - import socket - - def connection_ready(sock, fd, events): - while True: - try: - connection, address = sock.accept() - except socket.error as e: - if e.args[0] not in (errno.EWOULDBLOCK, errno.EAGAIN): - raise - return - connection.setblocking(0) - handle_connection(connection, address) - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) - sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) - sock.setblocking(0) - sock.bind(("", port)) - sock.listen(128) - - io_loop = ioloop.IOLoop.instance() - callback = functools.partial(connection_ready, sock) - io_loop.add_handler(sock.fileno(), callback, io_loop.READ) - io_loop.start() - - """ - # Constants from the epoll module - _EPOLLIN = 0x001 - _EPOLLPRI = 0x002 - _EPOLLOUT = 0x004 - _EPOLLERR = 0x008 - _EPOLLHUP = 0x010 - _EPOLLRDHUP = 0x2000 - _EPOLLONESHOT = (1 << 30) - _EPOLLET = (1 << 31) - - # Our events map exactly to the epoll events - NONE = 0 - READ = _EPOLLIN - WRITE = _EPOLLOUT - ERROR = _EPOLLERR | _EPOLLHUP - - # Global lock for creating global IOLoop instance - _instance_lock = threading.Lock() - - _current = threading.local() - - @staticmethod - def instance(): - """Returns a global `IOLoop` instance. - - Most applications have a single, global `IOLoop` running on the - main thread. Use this method to get this instance from - another thread. To get the current thread's `IOLoop`, use `current()`.
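For illustration, a minimal sketch of the singleton pattern described above, with a worker thread handing control back through ``add_callback`` (documented later in this class as the one thread-safe entry point)::

    import threading
    from tornado.ioloop import IOLoop

    def on_loop():
        print("running on the IOLoop thread")
        IOLoop.instance().stop()

    def worker():
        # Safe to call from any thread; on_loop runs on the IOLoop.
        IOLoop.instance().add_callback(on_loop)

    threading.Thread(target=worker).start()
    IOLoop.instance().start()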
- """ - if not hasattr(IOLoop, "_instance"): - with IOLoop._instance_lock: - if not hasattr(IOLoop, "_instance"): - # New instance after double check - IOLoop._instance = IOLoop() - return IOLoop._instance - - @staticmethod - def initialized(): - """Returns true if the singleton instance has been created.""" - return hasattr(IOLoop, "_instance") - - def install(self): - """Installs this `IOLoop` object as the singleton instance. - - This is normally not necessary as `instance()` will create - an `IOLoop` on demand, but you may want to call `install` to use - a custom subclass of `IOLoop`. - """ - assert not IOLoop.initialized() - IOLoop._instance = self - - @staticmethod - def clear_instance(): - """Clear the global `IOLoop` instance. - - .. versionadded:: 4.0 - """ - if hasattr(IOLoop, "_instance"): - del IOLoop._instance - - @staticmethod - def current(): - """Returns the current thread's `IOLoop`. - - If an `IOLoop` is currently running or has been marked as current - by `make_current`, returns that instance. Otherwise returns - `IOLoop.instance()`, i.e. the main thread's `IOLoop`. - - A common pattern for classes that depend on ``IOLoops`` is to use - a default argument to enable programs with multiple ``IOLoops`` - but not require the argument for simpler applications:: - - class MyClass(object): - def __init__(self, io_loop=None): - self.io_loop = io_loop or IOLoop.current() - - In general you should use `IOLoop.current` as the default when - constructing an asynchronous object, and use `IOLoop.instance` - when you mean to communicate to the main thread from a different - one. - """ - current = getattr(IOLoop._current, "instance", None) - if current is None: - return IOLoop.instance() - return current - - def make_current(self): - """Makes this the `IOLoop` for the current thread. - - An `IOLoop` automatically becomes current for its thread - when it is started, but it is sometimes useful to call - `make_current` explictly before starting the `IOLoop`, - so that code run at startup time can find the right - instance. - """ - IOLoop._current.instance = self - - @staticmethod - def clear_current(): - IOLoop._current.instance = None - - @classmethod - def configurable_base(cls): - return IOLoop - - @classmethod - def configurable_default(cls): - if hasattr(select, "epoll"): - from tornado.platform.epoll import EPollIOLoop - return EPollIOLoop - if hasattr(select, "kqueue"): - # Python 2.6+ on BSD or Mac - from tornado.platform.kqueue import KQueueIOLoop - return KQueueIOLoop - from tornado.platform.select import SelectIOLoop - return SelectIOLoop - - def initialize(self): - pass - - def close(self, all_fds=False): - """Closes the `IOLoop`, freeing any resources used. - - If ``all_fds`` is true, all file descriptors registered on the - IOLoop will be closed (not just the ones created by the - `IOLoop` itself). - - Many applications will only use a single `IOLoop` that runs for the - entire lifetime of the process. In that case closing the `IOLoop` - is not necessary since everything will be cleaned up when the - process exits. `IOLoop.close` is provided mainly for scenarios - such as unit tests, which create and destroy a large number of - ``IOLoops``. - - An `IOLoop` must be completely stopped before it can be closed. This - means that `IOLoop.stop()` must be called *and* `IOLoop.start()` must - be allowed to return before attempting to call `IOLoop.close()`. - Therefore the call to `close` will usually appear just after - the call to `start` rather than near the call to `stop`. - - .. 
versionchanged:: 3.1 - If the `IOLoop` implementation supports non-integer objects - for "file descriptors", those objects will have their - ``close`` method called when ``all_fds`` is true. - """ - raise NotImplementedError() - - def add_handler(self, fd, handler, events): - """Registers the given handler to receive the given events for ``fd``. - - The ``fd`` argument may either be an integer file descriptor or - a file-like object with a ``fileno()`` method (and optionally a - ``close()`` method, which may be called when the `IOLoop` is shut - down). - - The ``events`` argument is a bitwise or of the constants - ``IOLoop.READ``, ``IOLoop.WRITE``, and ``IOLoop.ERROR``. - - When an event occurs, ``handler(fd, events)`` will be run. - - .. versionchanged:: 4.0 - Added the ability to pass file-like objects in addition to - raw file descriptors. - """ - raise NotImplementedError() - - def update_handler(self, fd, events): - """Changes the events we listen for on ``fd``. - - .. versionchanged:: 4.0 - Added the ability to pass file-like objects in addition to - raw file descriptors. - """ - raise NotImplementedError() - - def remove_handler(self, fd): - """Stop listening for events on ``fd``. - - .. versionchanged:: 4.0 - Added the ability to pass file-like objects in addition to - raw file descriptors. - """ - raise NotImplementedError() - - def set_blocking_signal_threshold(self, seconds, action): - """Sends a signal if the `IOLoop` is blocked for more than - ``seconds`` seconds. - - Pass ``seconds=None`` to disable. Requires Python 2.6 on a unixy - platform. - - The action parameter is a Python signal handler. Read the - documentation for the `signal` module for more information. - If ``action`` is None, the process will be killed if it is - blocked for too long. - """ - raise NotImplementedError() - - def set_blocking_log_threshold(self, seconds): - """Logs a stack trace if the `IOLoop` is blocked for more than - ``seconds`` seconds. - - Equivalent to ``set_blocking_signal_threshold(seconds, - self.log_stack)`` - """ - self.set_blocking_signal_threshold(seconds, self.log_stack) - - def log_stack(self, signal, frame): - """Signal handler to log the stack trace of the current thread. - - For use with `set_blocking_signal_threshold`. - """ - gen_log.warning('IOLoop blocked for %f seconds in\n%s', - self._blocking_signal_threshold, - ''.join(traceback.format_stack(frame))) - - def start(self): - """Starts the I/O loop. - - The loop will run until one of the callbacks calls `stop()`, which - will make the loop stop after the current event iteration completes. - """ - raise NotImplementedError() - - def _setup_logging(self): - """The IOLoop catches and logs exceptions, so it's - important that log output be visible. However, python's - default behavior for non-root loggers (prior to python - 3.2) is to print an unhelpful "no handlers could be - found" message rather than the actual log entry, so we - must explicitly configure logging if we've made it this - far without anything. - - This method should be called from start() in subclasses. - """ - if not any([logging.getLogger().handlers, - logging.getLogger('tornado').handlers, - logging.getLogger('tornado.application').handlers]): - logging.basicConfig() - - def stop(self): - """Stop the I/O loop. - - If the event loop is not currently running, the next call to `start()` - will return immediately.
- - To use asynchronous methods from otherwise-synchronous code (such as - unit tests), you can start and stop the event loop like this:: - - ioloop = IOLoop() - async_method(ioloop=ioloop, callback=ioloop.stop) - ioloop.start() - - ``ioloop.start()`` will return after ``async_method`` has run - its callback, whether that callback was invoked before or - after ``ioloop.start``. - - Note that even after `stop` has been called, the `IOLoop` is not - completely stopped until `IOLoop.start` has also returned. - Some work that was scheduled before the call to `stop` may still - be run before the `IOLoop` shuts down. - """ - raise NotImplementedError() - - def run_sync(self, func, timeout=None): - """Starts the `IOLoop`, runs the given function, and stops the loop. - - If the function returns a `.Future`, the `IOLoop` will run - until the future is resolved. If it raises an exception, the - `IOLoop` will stop and the exception will be re-raised to the - caller. - - The keyword-only argument ``timeout`` may be used to set - a maximum duration for the function. If the timeout expires, - a `TimeoutError` is raised. - - This method is useful in conjunction with `tornado.gen.coroutine` - to allow asynchronous calls in a ``main()`` function:: - - @gen.coroutine - def main(): - # do stuff... - - if __name__ == '__main__': - IOLoop.instance().run_sync(main) - """ - future_cell = [None] - - def run(): - try: - result = func() - except Exception: - future_cell[0] = TracebackFuture() - future_cell[0].set_exc_info(sys.exc_info()) - else: - if is_future(result): - future_cell[0] = result - else: - future_cell[0] = TracebackFuture() - future_cell[0].set_result(result) - self.add_future(future_cell[0], lambda future: self.stop()) - self.add_callback(run) - if timeout is not None: - timeout_handle = self.add_timeout(self.time() + timeout, self.stop) - self.start() - if timeout is not None: - self.remove_timeout(timeout_handle) - if not future_cell[0].done(): - raise TimeoutError('Operation timed out after %s seconds' % timeout) - return future_cell[0].result() - - def time(self): - """Returns the current time according to the `IOLoop`'s clock. - - The return value is a floating-point number relative to an - unspecified time in the past. - - By default, the `IOLoop`'s time function is `time.time`. However, - it may be configured to use e.g. `time.monotonic` instead. - Calls to `add_timeout` that pass a number instead of a - `datetime.timedelta` should use this function to compute the - appropriate time, so they can work no matter what time function - is chosen. - """ - return time.time() - - def add_timeout(self, deadline, callback, *args, **kwargs): - """Runs the ``callback`` at the time ``deadline`` from the I/O loop. - - Returns an opaque handle that may be passed to - `remove_timeout` to cancel. - - ``deadline`` may be a number denoting a time (on the same - scale as `IOLoop.time`, normally `time.time`), or a - `datetime.timedelta` object for a deadline relative to the - current time. Since Tornado 4.0, `call_later` is a more - convenient alternative for the relative case since it does not - require a timedelta object. - - Note that it is not safe to call `add_timeout` from other threads. - Instead, you must use `add_callback` to transfer control to the - `IOLoop`'s thread, and then call `add_timeout` from there. - - Subclasses of IOLoop must implement either `add_timeout` or - `call_at`; the default implementations of each will call - the other. 
`call_at` is usually easier to implement, but - subclasses that wish to maintain compatibility with Tornado - versions prior to 4.0 must use `add_timeout` instead. - - .. versionchanged:: 4.0 - Now passes through ``*args`` and ``**kwargs`` to the callback. - """ - if isinstance(deadline, numbers.Real): - return self.call_at(deadline, callback, *args, **kwargs) - elif isinstance(deadline, datetime.timedelta): - return self.call_at(self.time() + timedelta_to_seconds(deadline), - callback, *args, **kwargs) - else: - raise TypeError("Unsupported deadline %r" % deadline) - - def call_later(self, delay, callback, *args, **kwargs): - """Runs the ``callback`` after ``delay`` seconds have passed. - - Returns an opaque handle that may be passed to `remove_timeout` - to cancel. Note that unlike the `asyncio` method of the same - name, the returned object does not have a ``cancel()`` method. - - See `add_timeout` for comments on thread-safety and subclassing. - - .. versionadded:: 4.0 - """ - return self.call_at(self.time() + delay, callback, *args, **kwargs) - - def call_at(self, when, callback, *args, **kwargs): - """Runs the ``callback`` at the absolute time designated by ``when``. - - ``when`` must be a number using the same reference point as - `IOLoop.time`. - - Returns an opaque handle that may be passed to `remove_timeout` - to cancel. Note that unlike the `asyncio` method of the same - name, the returned object does not have a ``cancel()`` method. - - See `add_timeout` for comments on thread-safety and subclassing. - - .. versionadded:: 4.0 - """ - return self.add_timeout(when, callback, *args, **kwargs) - - def remove_timeout(self, timeout): - """Cancels a pending timeout. - - The argument is a handle as returned by `add_timeout`. It is - safe to call `remove_timeout` even if the callback has already - been run. - """ - raise NotImplementedError() - - def add_callback(self, callback, *args, **kwargs): - """Calls the given callback on the next I/O loop iteration. - - It is safe to call this method from any thread at any time, - except from a signal handler. Note that this is the **only** - method in `IOLoop` that makes this thread-safety guarantee; all - other interaction with the `IOLoop` must be done from that - `IOLoop`'s thread. `add_callback()` may be used to transfer - control from other threads to the `IOLoop`'s thread. - - To add a callback from a signal handler, see - `add_callback_from_signal`. - """ - raise NotImplementedError() - - def add_callback_from_signal(self, callback, *args, **kwargs): - """Calls the given callback on the next I/O loop iteration. - - Safe for use from a Python signal handler; should not be used - otherwise. - - Callbacks added with this method will be run without any - `.stack_context`, to avoid picking up the context of the function - that was interrupted by the signal. - """ - raise NotImplementedError() - - def spawn_callback(self, callback, *args, **kwargs): - """Calls the given callback on the next IOLoop iteration. - - Unlike all other callback-related methods on IOLoop, - ``spawn_callback`` does not associate the callback with its caller's - ``stack_context``, so it is suitable for fire-and-forget callbacks - that should not interfere with the caller. - - .. versionadded:: 4.0 - """ - with stack_context.NullContext(): - self.add_callback(callback, *args, **kwargs) - - def add_future(self, future, callback): - """Schedules a callback on the ``IOLoop`` when the given - `.Future` is finished. 
- - The callback is invoked with one argument, the - `.Future`. - """ - assert is_future(future) - callback = stack_context.wrap(callback) - future.add_done_callback( - lambda future: self.add_callback(callback, future)) - - def _run_callback(self, callback): - """Runs a callback with error handling. - - For use in subclasses. - """ - try: - ret = callback() - if ret is not None and is_future(ret): - # Functions that return Futures typically swallow all - # exceptions and store them in the Future. If a Future - # makes it out to the IOLoop, ensure its exception (if any) - # gets logged too. - self.add_future(ret, lambda f: f.result()) - except Exception: - self.handle_callback_exception(callback) - - def handle_callback_exception(self, callback): - """This method is called whenever a callback run by the `IOLoop` - throws an exception. - - By default simply logs the exception as an error. Subclasses - may override this method to customize reporting of exceptions. - - The exception itself is not passed explicitly, but is available - in `sys.exc_info`. - """ - app_log.error("Exception in callback %r", callback, exc_info=True) - - def split_fd(self, fd): - """Returns an (fd, obj) pair from an ``fd`` parameter. - - We accept both raw file descriptors and file-like objects as - input to `add_handler` and related methods. When a file-like - object is passed, we must retain the object itself so we can - close it correctly when the `IOLoop` shuts down, but the - poller interfaces favor file descriptors (they will accept - file-like objects and call ``fileno()`` for you, but they - always return the descriptor itself). - - This method is provided for use by `IOLoop` subclasses and should - not generally be used by application code. - - .. versionadded:: 4.0 - """ - try: - return fd.fileno(), fd - except AttributeError: - return fd, fd - - def close_fd(self, fd): - """Utility method to close an ``fd``. - - If ``fd`` is a file-like object, we close it directly; otherwise - we use `os.close`. - - This method is provided for use by `IOLoop` subclasses (in - implementations of ``IOLoop.close(all_fds=True)``) and should - not generally be used by application code. - - .. versionadded:: 4.0 - """ - try: - try: - fd.close() - except AttributeError: - os.close(fd) - except OSError: - pass - - -class PollIOLoop(IOLoop): - """Base class for IOLoops built around a select-like function. - - For concrete implementations, see `tornado.platform.epoll.EPollIOLoop` - (Linux), `tornado.platform.kqueue.KQueueIOLoop` (BSD and Mac), or - `tornado.platform.select.SelectIOLoop` (all platforms).
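A sketch of ``add_future`` bridging a `.Future` back into callback style, assuming Tornado 4.x (names are illustrative)::

    from tornado.concurrent import Future
    from tornado.ioloop import IOLoop

    loop = IOLoop.instance()
    fut = Future()

    def on_done(f):
        # invoked on the IOLoop's thread with the finished Future
        print("result: %r" % f.result())
        loop.stop()

    loop.add_future(fut, on_done)
    loop.add_callback(fut.set_result, 42)
    loop.start()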
- """ - def initialize(self, impl, time_func=None): - super(PollIOLoop, self).initialize() - self._impl = impl - if hasattr(self._impl, 'fileno'): - set_close_exec(self._impl.fileno()) - self.time_func = time_func or time.time - self._handlers = {} - self._events = {} - self._callbacks = [] - self._callback_lock = threading.Lock() - self._timeouts = [] - self._cancellations = 0 - self._running = False - self._stopped = False - self._closing = False - self._thread_ident = None - self._blocking_signal_threshold = None - self._timeout_counter = itertools.count() - - # Create a pipe that we send bogus data to when we want to wake - # the I/O loop when it is idle - self._waker = Waker() - self.add_handler(self._waker.fileno(), - lambda fd, events: self._waker.consume(), - self.READ) - - def close(self, all_fds=False): - with self._callback_lock: - self._closing = True - self.remove_handler(self._waker.fileno()) - if all_fds: - for fd, handler in self._handlers.values(): - self.close_fd(fd) - self._waker.close() - self._impl.close() - self._callbacks = None - self._timeouts = None - - def add_handler(self, fd, handler, events): - fd, obj = self.split_fd(fd) - self._handlers[fd] = (obj, stack_context.wrap(handler)) - self._impl.register(fd, events | self.ERROR) - - def update_handler(self, fd, events): - fd, obj = self.split_fd(fd) - self._impl.modify(fd, events | self.ERROR) - - def remove_handler(self, fd): - fd, obj = self.split_fd(fd) - self._handlers.pop(fd, None) - self._events.pop(fd, None) - try: - self._impl.unregister(fd) - except Exception: - gen_log.debug("Error deleting fd from IOLoop", exc_info=True) - - def set_blocking_signal_threshold(self, seconds, action): - if not hasattr(signal, "setitimer"): - gen_log.error("set_blocking_signal_threshold requires a signal module " - "with the setitimer method") - return - self._blocking_signal_threshold = seconds - if seconds is not None: - signal.signal(signal.SIGALRM, - action if action is not None else signal.SIG_DFL) - - def start(self): - if self._running: - raise RuntimeError("IOLoop is already running") - self._setup_logging() - if self._stopped: - self._stopped = False - return - old_current = getattr(IOLoop._current, "instance", None) - IOLoop._current.instance = self - self._thread_ident = thread.get_ident() - self._running = True - - # signal.set_wakeup_fd closes a race condition in event loops: - # a signal may arrive at the beginning of select/poll/etc - # before it goes into its interruptible sleep, so the signal - # will be consumed without waking the select. The solution is - # for the (C, synchronous) signal handler to write to a pipe, - # which will then be seen by select. - # - # In python's signal handling semantics, this only matters on the - # main thread (fortunately, set_wakeup_fd only works on the main - # thread and will raise a ValueError otherwise). - # - # If someone has already set a wakeup fd, we don't want to - # disturb it. This is an issue for twisted, which does its - # SIGCHILD processing in response to its own wakeup fd being - # written to. As long as the wakeup fd is registered on the IOLoop, - # the loop will still wake up and everything should work. - old_wakeup_fd = None - if hasattr(signal, 'set_wakeup_fd') and os.name == 'posix': - # requires python 2.6+, unix. set_wakeup_fd exists but crashes - # the python process on windows. - try: - old_wakeup_fd = signal.set_wakeup_fd(self._waker.write_fileno()) - if old_wakeup_fd != -1: - # Already set, restore previous value. 
This is a little racy, - # but there's no clean get_wakeup_fd and in real use the - # IOLoop is just started once at the beginning. - signal.set_wakeup_fd(old_wakeup_fd) - old_wakeup_fd = None - except ValueError: # non-main thread - pass - - try: - while True: - # Prevent IO event starvation by delaying new callbacks - # to the next iteration of the event loop. - with self._callback_lock: - callbacks = self._callbacks - self._callbacks = [] - - # Add any timeouts that have come due to the callback list. - # Do not run anything until we have determined which ones - # are ready, so timeouts that call add_timeout cannot - # schedule anything in this iteration. - due_timeouts = [] - if self._timeouts: - now = self.time() - while self._timeouts: - if self._timeouts[0].callback is None: - # The timeout was cancelled. Note that the - # cancellation check is repeated below for timeouts - # that are cancelled by another timeout or callback. - heapq.heappop(self._timeouts) - self._cancellations -= 1 - elif self._timeouts[0].deadline <= now: - due_timeouts.append(heapq.heappop(self._timeouts)) - else: - break - if (self._cancellations > 512 - and self._cancellations > (len(self._timeouts) >> 1)): - # Clean up the timeout queue when it gets large and it's - # more than half cancellations. - self._cancellations = 0 - self._timeouts = [x for x in self._timeouts - if x.callback is not None] - heapq.heapify(self._timeouts) - - for callback in callbacks: - self._run_callback(callback) - for timeout in due_timeouts: - if timeout.callback is not None: - self._run_callback(timeout.callback) - # Closures may be holding on to a lot of memory, so allow - # them to be freed before we go into our poll wait. - callbacks = callback = due_timeouts = timeout = None - - if self._callbacks: - # If any callbacks or timeouts called add_callback, - # we don't want to wait in poll() before we run them. - poll_timeout = 0.0 - elif self._timeouts: - # If there are any timeouts, schedule the first one. - # Use self.time() instead of 'now' to account for time - # spent running callbacks. - poll_timeout = self._timeouts[0].deadline - self.time() - poll_timeout = max(0, min(poll_timeout, _POLL_TIMEOUT)) - else: - # No timeouts and no callbacks, so use the default. - poll_timeout = _POLL_TIMEOUT - - if not self._running: - break - - if self._blocking_signal_threshold is not None: - # clear alarm so it doesn't fire while poll is waiting for - # events. - signal.setitimer(signal.ITIMER_REAL, 0, 0) - - try: - event_pairs = self._impl.poll(poll_timeout) - except Exception as e: - # Depending on python version and IOLoop implementation, - # different exception types may be thrown and there are - # two ways EINTR might be signaled: - # * e.errno == errno.EINTR - # * e.args is like (errno.EINTR, 'Interrupted system call') - if errno_from_exception(e) == errno.EINTR: - continue - else: - raise - - if self._blocking_signal_threshold is not None: - signal.setitimer(signal.ITIMER_REAL, - self._blocking_signal_threshold, 0) - - # Pop one fd at a time from the set of pending fds and run - # its handler. 
Since that handler may perform actions on - # other file descriptors, there may be reentrant calls to - # this IOLoop that update self._events - self._events.update(event_pairs) - while self._events: - fd, events = self._events.popitem() - try: - fd_obj, handler_func = self._handlers[fd] - handler_func(fd_obj, events) - except (OSError, IOError) as e: - if errno_from_exception(e) == errno.EPIPE: - # Happens when the client closes the connection - pass - else: - self.handle_callback_exception(self._handlers.get(fd)) - except Exception: - self.handle_callback_exception(self._handlers.get(fd)) - fd_obj = handler_func = None - - finally: - # reset the stopped flag so another start/stop pair can be issued - self._stopped = False - if self._blocking_signal_threshold is not None: - signal.setitimer(signal.ITIMER_REAL, 0, 0) - IOLoop._current.instance = old_current - if old_wakeup_fd is not None: - signal.set_wakeup_fd(old_wakeup_fd) - - def stop(self): - self._running = False - self._stopped = True - self._waker.wake() - - def time(self): - return self.time_func() - - def call_at(self, deadline, callback, *args, **kwargs): - timeout = _Timeout( - deadline, - functools.partial(stack_context.wrap(callback), *args, **kwargs), - self) - heapq.heappush(self._timeouts, timeout) - return timeout - - def remove_timeout(self, timeout): - # Removing from a heap is complicated, so just leave the defunct - # timeout object in the queue (see discussion in - # http://docs.python.org/library/heapq.html). - # If this turns out to be a problem, we could add a garbage - # collection pass whenever there are too many dead timeouts. - timeout.callback = None - self._cancellations += 1 - - def add_callback(self, callback, *args, **kwargs): - with self._callback_lock: - if self._closing: - raise RuntimeError("IOLoop is closing") - list_empty = not self._callbacks - self._callbacks.append(functools.partial( - stack_context.wrap(callback), *args, **kwargs)) - if list_empty and thread.get_ident() != self._thread_ident: - # If we're in the IOLoop's thread, we know it's not currently - # polling. If we're not, and we added the first callback to an - # empty list, we may need to wake it up (it may wake up on its - # own, but an occasional extra wake is harmless). Waking - # up a polling IOLoop is relatively expensive, so we try to - # avoid it when we can. - self._waker.wake() - - def add_callback_from_signal(self, callback, *args, **kwargs): - with stack_context.NullContext(): - if thread.get_ident() != self._thread_ident: - # if the signal is handled on another thread, we can add - # it normally (modulo the NullContext) - self.add_callback(callback, *args, **kwargs) - else: - # If we're on the IOLoop's thread, we cannot use - # the regular add_callback because it may deadlock on - # _callback_lock. Blindly insert into self._callbacks. - # This is safe because the GIL makes list.append atomic. - # One subtlety is that if the signal interrupted the - # _callback_lock block in IOLoop.start, we may modify - # either the old or new version of self._callbacks, - # but either way will work. 
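As the comments above stress, ``add_callback`` is the one thread-safe entry point; a sketch of waking the loop from a worker thread, assuming Tornado 4.x::

    import threading
    from tornado.ioloop import IOLoop

    loop = IOLoop.instance()

    def worker():
        # the only IOLoop method that may be called from another thread
        loop.add_callback(loop.stop)

    threading.Thread(target=worker).start()
    loop.start()  # returns once the worker's callback has run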
- self._callbacks.append(functools.partial( - stack_context.wrap(callback), *args, **kwargs)) - - -class _Timeout(object): - """An IOLoop timeout, a UNIX timestamp and a callback""" - - # Reduce memory overhead when there are lots of pending callbacks - __slots__ = ['deadline', 'callback', 'tiebreaker'] - - def __init__(self, deadline, callback, io_loop): - if not isinstance(deadline, numbers.Real): - raise TypeError("Unsupported deadline %r" % deadline) - self.deadline = deadline - self.callback = callback - self.tiebreaker = next(io_loop._timeout_counter) - - # Comparison methods to sort by deadline, with object id as a tiebreaker - # to guarantee a consistent ordering. The heapq module uses __le__ - # in python2.5, and __lt__ in 2.6+ (sort() and most other comparisons - # use __lt__). - def __lt__(self, other): - return ((self.deadline, self.tiebreaker) < - (other.deadline, other.tiebreaker)) - - def __le__(self, other): - return ((self.deadline, self.tiebreaker) <= - (other.deadline, other.tiebreaker)) - - -class PeriodicCallback(object): - """Schedules the given callback to be called periodically. - - The callback is called every ``callback_time`` milliseconds. - - `start` must be called after the `PeriodicCallback` is created. - """ - def __init__(self, callback, callback_time, io_loop=None): - self.callback = callback - if callback_time <= 0: - raise ValueError("Periodic callback must have a positive callback_time") - self.callback_time = callback_time - self.io_loop = io_loop or IOLoop.current() - self._running = False - self._timeout = None - - def start(self): - """Starts the timer.""" - self._running = True - self._next_timeout = self.io_loop.time() - self._schedule_next() - - def stop(self): - """Stops the timer.""" - self._running = False - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - self._timeout = None - - def _run(self): - if not self._running: - return - try: - return self.callback() - except Exception: - self.io_loop.handle_callback_exception(self.callback) - finally: - self._schedule_next() - - def _schedule_next(self): - if self._running: - current_time = self.io_loop.time() - while self._next_timeout <= current_time: - self._next_timeout += self.callback_time / 1000.0 - self._timeout = self.io_loop.add_timeout(self._next_timeout, self._run) diff --git a/rosbridge_server/src/tornado/iostream.py b/rosbridge_server/src/tornado/iostream.py deleted file mode 100644 index 1cc39d9fa..000000000 --- a/rosbridge_server/src/tornado/iostream.py +++ /dev/null @@ -1,1398 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Utility classes to write to and read from non-blocking files and sockets. - -Contents: - -* `BaseIOStream`: Generic interface for reading and writing. -* `IOStream`: Implementation of BaseIOStream using non-blocking sockets. -* `SSLIOStream`: SSL-aware version of IOStream. -* `PipeIOStream`: Pipe-based IOStream implementation. 
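Before the ``iostream`` module below, a usage sketch for the ``PeriodicCallback`` removed above (``callback_time`` is in milliseconds), assuming Tornado 4.x::

    from tornado.ioloop import IOLoop, PeriodicCallback

    loop = IOLoop.instance()
    ticks = []

    def tick():
        ticks.append(None)
        if len(ticks) == 3:
            pc.stop()
            loop.stop()

    pc = PeriodicCallback(tick, 100)  # fire every 100 milliseconds
    pc.start()
    loop.start()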
-""" - -from __future__ import absolute_import, division, print_function, with_statement - -import collections -import errno -import numbers -import os -import socket -import sys -import re - -from tornado.concurrent import TracebackFuture -from tornado import ioloop -from tornado.log import gen_log, app_log -from tornado.netutil import ssl_wrap_socket, ssl_match_hostname, SSLCertificateError -from tornado import stack_context -from tornado.util import bytes_type, errno_from_exception - -try: - from tornado.platform.posix import _set_nonblocking -except ImportError: - _set_nonblocking = None - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine - ssl = None - -# These errnos indicate that a non-blocking operation must be retried -# at a later time. On most platforms they're the same value, but on -# some they differ. -_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) - -if hasattr(errno, "WSAEWOULDBLOCK"): - _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) - -# These errnos indicate that a connection has been abruptly terminated. -# They should be caught and handled less noisily than other errors. -_ERRNO_CONNRESET = (errno.ECONNRESET, errno.ECONNABORTED, errno.EPIPE, - errno.ETIMEDOUT) - -if hasattr(errno, "WSAECONNRESET"): - _ERRNO_CONNRESET += (errno.WSAECONNRESET, errno.WSAECONNABORTED, errno.WSAETIMEDOUT) - -# More non-portable errnos: -_ERRNO_INPROGRESS = (errno.EINPROGRESS,) - -if hasattr(errno, "WSAEINPROGRESS"): - _ERRNO_INPROGRESS += (errno.WSAEINPROGRESS,) - -####################################################### -class StreamClosedError(IOError): - """Exception raised by `IOStream` methods when the stream is closed. - - Note that the close callback is scheduled to run *after* other - callbacks on the stream (to allow for buffered data to be processed), - so you may see this error before you see the close callback. - """ - pass - - -class UnsatisfiableReadError(Exception): - """Exception raised when a read cannot be satisfied. - - Raised by ``read_until`` and ``read_until_regex`` with a ``max_bytes`` - argument. - """ - pass - - -class StreamBufferFullError(Exception): - """Exception raised by `IOStream` methods when the buffer is full. - """ - - -class BaseIOStream(object): - """A utility class to write to and read from a non-blocking file or socket. - - We support a non-blocking ``write()`` and a family of ``read_*()`` methods. - All of the methods take an optional ``callback`` argument and return a - `.Future` only if no callback is given. When the operation completes, - the callback will be run or the `.Future` will resolve with the data - read (or ``None`` for ``write()``). All outstanding ``Futures`` will - resolve with a `StreamClosedError` when the stream is closed; users - of the callback interface will be notified via - `.BaseIOStream.set_close_callback` instead. - - When a stream is closed due to an error, the IOStream's ``error`` - attribute contains the exception object. - - Subclasses must implement `fileno`, `close_fd`, `write_to_fd`, - `read_from_fd`, and optionally `get_fd_error`. - """ - def __init__(self, io_loop=None, max_buffer_size=None, - read_chunk_size=None, max_write_buffer_size=None): - """`BaseIOStream` constructor. - - :arg io_loop: The `.IOLoop` to use; defaults to `.IOLoop.current`. - :arg max_buffer_size: Maximum amount of incoming data to buffer; - defaults to 100MB. - :arg read_chunk_size: Amount of data to read at one time from the - underlying transport; defaults to 64KB. 
- :arg max_write_buffer_size: Amount of outgoing data to buffer; - defaults to unlimited. - - .. versionchanged:: 4.0 - Add the ``max_write_buffer_size`` parameter. Changed default - ``read_chunk_size`` to 64KB. - """ - self.io_loop = io_loop or ioloop.IOLoop.current() - self.max_buffer_size = max_buffer_size or 104857600 - # A chunk size that is too close to max_buffer_size can cause - # spurious failures. - self.read_chunk_size = min(read_chunk_size or 65536, - self.max_buffer_size // 2) - self.max_write_buffer_size = max_write_buffer_size - self.error = None - self._read_buffer = collections.deque() - self._write_buffer = collections.deque() - self._read_buffer_size = 0 - self._write_buffer_size = 0 - self._write_buffer_frozen = False - self._read_delimiter = None - self._read_regex = None - self._read_max_bytes = None - self._read_bytes = None - self._read_partial = False - self._read_until_close = False - self._read_callback = None - self._read_future = None - self._streaming_callback = None - self._write_callback = None - self._write_future = None - self._close_callback = None - self._connect_callback = None - self._connect_future = None - self._connecting = False - self._state = None - self._pending_callbacks = 0 - self._closed = False - - def fileno(self): - """Returns the file descriptor for this stream.""" - raise NotImplementedError() - - def close_fd(self): - """Closes the file underlying this stream. - - ``close_fd`` is called by `BaseIOStream` and should not be called - elsewhere; other users should call `close` instead. - """ - raise NotImplementedError() - - def write_to_fd(self, data): - """Attempts to write ``data`` to the underlying file. - - Returns the number of bytes written. - """ - raise NotImplementedError() - - def read_from_fd(self): - """Attempts to read from the underlying file. - - Returns ``None`` if there was nothing to read (the socket - returned `~errno.EWOULDBLOCK` or equivalent), otherwise - returns the data. When possible, should return no more than - ``self.read_chunk_size`` bytes at a time. - """ - raise NotImplementedError() - - def get_fd_error(self): - """Returns information about any error on the underlying file. - - This method is called after the `.IOLoop` has signaled an error on the - file descriptor, and should return an Exception (such as `socket.error` - with additional information), or None if no such information is - available. - """ - return None - - def read_until_regex(self, regex, callback=None, max_bytes=None): - """Asynchronously read until we have matched the given regex. - - The result includes the data that matches the regex and anything - that came before it. If a callback is given, it will be run - with the data as an argument; if not, this method returns a - `.Future`. - - If ``max_bytes`` is not None, the connection will be closed - if more than ``max_bytes`` bytes have been read and the regex is - not satisfied. - - .. versionchanged:: 4.0 - Added the ``max_bytes`` argument. The ``callback`` argument is - now optional and a `.Future` will be returned if it is omitted. - """ - future = self._set_read_callback(callback) - self._read_regex = re.compile(regex) - self._read_max_bytes = max_bytes - try: - self._try_inline_read() - except UnsatisfiableReadError as e: - # Handle this the same way as in _handle_events.
- gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=True) - return future - return future - - def read_until(self, delimiter, callback=None, max_bytes=None): - """Asynchronously read until we have found the given delimiter. - - The result includes all the data read including the delimiter. - If a callback is given, it will be run with the data as an argument; - if not, this method returns a `.Future`. - - If ``max_bytes`` is not None, the connection will be closed - if more than ``max_bytes`` bytes have been read and the delimiter - is not found. - - .. versionchanged:: 4.0 - Added the ``max_bytes`` argument. The ``callback`` argument is - now optional and a `.Future` will be returned if it is omitted. - """ - future = self._set_read_callback(callback) - self._read_delimiter = delimiter - self._read_max_bytes = max_bytes - try: - self._try_inline_read() - except UnsatisfiableReadError as e: - # Handle this the same way as in _handle_events. - gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=True) - return future - return future - - def read_bytes(self, num_bytes, callback=None, streaming_callback=None, - partial=False): - """Asynchronously read a number of bytes. - - If a ``streaming_callback`` is given, it will be called with chunks - of data as they become available, and the final result will be empty. - Otherwise, the result is all the data that was read. - If a callback is given, it will be run with the data as an argument; - if not, this method returns a `.Future`. - - If ``partial`` is true, the callback is run as soon as we have - any bytes to return (but never more than ``num_bytes``) - - .. versionchanged:: 4.0 - Added the ``partial`` argument. The callback argument is now - optional and a `.Future` will be returned if it is omitted. - """ - future = self._set_read_callback(callback) - assert isinstance(num_bytes, numbers.Integral) - self._read_bytes = num_bytes - self._read_partial = partial - self._streaming_callback = stack_context.wrap(streaming_callback) - self._try_inline_read() - return future - - def read_until_close(self, callback=None, streaming_callback=None): - """Asynchronously reads all data from the socket until it is closed. - - If a ``streaming_callback`` is given, it will be called with chunks - of data as they become available, and the final result will be empty. - Otherwise, the result is all the data that was read. - If a callback is given, it will be run with the data as an argument; - if not, this method returns a `.Future`. - - .. versionchanged:: 4.0 - The callback argument is now optional and a `.Future` will - be returned if it is omitted. - """ - future = self._set_read_callback(callback) - self._streaming_callback = stack_context.wrap(streaming_callback) - if self.closed(): - if self._streaming_callback is not None: - self._run_read_callback(self._read_buffer_size, True) - self._run_read_callback(self._read_buffer_size, False) - return future - self._read_until_close = True - self._try_inline_read() - return future - - def write(self, data, callback=None): - """Asynchronously write the given data to this stream. - - If ``callback`` is given, we call it when all of the buffered write - data has been successfully written to the stream. If there was - previously buffered write data and an old write callback, that - callback is simply overwritten with this new callback. 
- - If no ``callback`` is given, this method returns a `.Future` that - resolves (with a result of ``None``) when the write has been - completed. If `write` is called again before that `.Future` has - resolved, the previous future will be orphaned and will never resolve. - - .. versionchanged:: 4.0 - Now returns a `.Future` if no callback is given. - """ - assert isinstance(data, bytes_type) - self._check_closed() - # We use bool(_write_buffer) as a proxy for write_buffer_size>0, - # so never put empty strings in the buffer. - if data: - if (self.max_write_buffer_size is not None and - self._write_buffer_size + len(data) > self.max_write_buffer_size): - raise StreamBufferFullError("Reached maximum write buffer size") - # Break up large contiguous strings before inserting them in the - # write buffer, so we don't have to recopy the entire thing - # as we slice off pieces to send to the socket. - WRITE_BUFFER_CHUNK_SIZE = 128 * 1024 - for i in range(0, len(data), WRITE_BUFFER_CHUNK_SIZE): - self._write_buffer.append(data[i:i + WRITE_BUFFER_CHUNK_SIZE]) - self._write_buffer_size += len(data) - if callback is not None: - self._write_callback = stack_context.wrap(callback) - future = None - else: - future = self._write_future = TracebackFuture() - if not self._connecting: - self._handle_write() - if self._write_buffer: - self._add_io_state(self.io_loop.WRITE) - self._maybe_add_error_listener() - return future - - def set_close_callback(self, callback): - """Call the given callback when the stream is closed. - - This is not necessary for applications that use the `.Future` - interface; all outstanding ``Futures`` will resolve with a - `StreamClosedError` when the stream is closed. - """ - self._close_callback = stack_context.wrap(callback) - self._maybe_add_error_listener() - - def close(self, exc_info=False): - """Close this stream. - - If ``exc_info`` is true, set the ``error`` attribute to the current - exception from `sys.exc_info` (or if ``exc_info`` is a tuple, - use that instead of `sys.exc_info`). - """ - if not self.closed(): - if exc_info: - if not isinstance(exc_info, tuple): - exc_info = sys.exc_info() - if any(exc_info): - self.error = exc_info[1] - if self._read_until_close: - if (self._streaming_callback is not None and - self._read_buffer_size): - self._run_read_callback(self._read_buffer_size, True) - self._read_until_close = False - self._run_read_callback(self._read_buffer_size, False) - if self._state is not None: - self.io_loop.remove_handler(self.fileno()) - self._state = None - self.close_fd() - self._closed = True - self._maybe_run_close_callback() - - def _maybe_run_close_callback(self): - # If there are pending callbacks, don't run the close callback - # until they're done (see _maybe_add_error_listener) - if self.closed() and self._pending_callbacks == 0: - futures = [] - if self._read_future is not None: - futures.append(self._read_future) - self._read_future = None - if self._write_future is not None: - futures.append(self._write_future) - self._write_future = None - if self._connect_future is not None: - futures.append(self._connect_future) - self._connect_future = None - for future in futures: - if (isinstance(self.error, (socket.error, IOError)) and - errno_from_exception(self.error) in _ERRNO_CONNRESET): - # Treat connection resets as closed connections so - # clients only have to catch one kind of exception - # to avoid logging.
- future.set_exception(StreamClosedError()) - else: - future.set_exception(self.error or StreamClosedError()) - if self._close_callback is not None: - cb = self._close_callback - self._close_callback = None - self._run_callback(cb) - # Delete any unfinished callbacks to break up reference cycles. - self._read_callback = self._write_callback = None - # Clear the buffers so they can be cleared immediately even - # if the IOStream object is kept alive by a reference cycle. - # TODO: Clear the read buffer too; it currently breaks some tests. - self._write_buffer = None - - def reading(self): - """Returns true if we are currently reading from the stream.""" - return self._read_callback is not None or self._read_future is not None - - def writing(self): - """Returns true if we are currently writing to the stream.""" - return bool(self._write_buffer) - - def closed(self): - """Returns true if the stream has been closed.""" - return self._closed - - def set_nodelay(self, value): - """Sets the no-delay flag for this stream. - - By default, data written to TCP streams may be held for a time - to make the most efficient use of bandwidth (according to - Nagle's algorithm). The no-delay flag requests that data be - written as soon as possible, even if doing so would consume - additional bandwidth. - - This flag is currently defined only for TCP-based ``IOStreams``. - - .. versionadded:: 3.1 - """ - pass - - def _handle_events(self, fd, events): - if self.closed(): - gen_log.warning("Got events for closed stream %s", fd) - return - try: - if self._connecting: - # Most IOLoops will report a write failed connect - # with the WRITE event, but SelectIOLoop reports a - # READ as well so we must check for connecting before - # either. - self._handle_connect() - if self.closed(): - return - if events & self.io_loop.READ: - self._handle_read() - if self.closed(): - return - if events & self.io_loop.WRITE: - self._handle_write() - if self.closed(): - return - if events & self.io_loop.ERROR: - self.error = self.get_fd_error() - # We may have queued up a user callback in _handle_read or - # _handle_write, so don't close the IOStream until those - # callbacks have had a chance to run. - self.io_loop.add_callback(self.close) - return - state = self.io_loop.ERROR - if self.reading(): - state |= self.io_loop.READ - if self.writing(): - state |= self.io_loop.WRITE - if state == self.io_loop.ERROR and self._read_buffer_size == 0: - # If the connection is idle, listen for reads too so - # we can tell if the connection is closed. If there is - # data in the read buffer we won't run the close callback - # yet anyway, so we don't need to listen in this case. 
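As the close handling above implies, users of the `.Future` interface observe closure as an exception; a POSIX-only sketch (``socket.socketpair`` is unavailable on Windows), assuming Tornado 4.x::

    import socket
    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.iostream import IOStream, StreamClosedError

    @gen.coroutine
    def demo():
        a, b = socket.socketpair()  # POSIX-only
        reader, writer = IOStream(a), IOStream(b)
        writer.close()  # peer goes away
        try:
            yield reader.read_until(b"\n")
        except StreamClosedError:
            # every outstanding Future resolves with StreamClosedError
            print("stream closed")

    IOLoop.instance().run_sync(demo)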
- state |= self.io_loop.READ - if state != self._state: - assert self._state is not None, \ - "shouldn't happen: _handle_events without self._state" - self._state = state - self.io_loop.update_handler(self.fileno(), self._state) - except UnsatisfiableReadError as e: - gen_log.info("Unsatisfiable read, closing connection: %s" % e) - self.close(exc_info=True) - except Exception: - gen_log.error("Uncaught exception, closing connection.", - exc_info=True) - self.close(exc_info=True) - raise - - def _run_callback(self, callback, *args): - def wrapper(): - self._pending_callbacks -= 1 - try: - return callback(*args) - except Exception: - app_log.error("Uncaught exception, closing connection.", - exc_info=True) - # Close the socket on an uncaught exception from a user callback - # (It would eventually get closed when the socket object is - # gc'd, but we don't want to rely on gc happening before we - # run out of file descriptors) - self.close(exc_info=True) - # Re-raise the exception so that IOLoop.handle_callback_exception - # can see it and log the error - raise - finally: - self._maybe_add_error_listener() - # We schedule callbacks to be run on the next IOLoop iteration - # rather than running them directly for several reasons: - # * Prevents unbounded stack growth when a callback calls an - # IOLoop operation that immediately runs another callback - # * Provides a predictable execution context for e.g. - # non-reentrant mutexes - # * Ensures that the try/except in wrapper() is run outside - # of the application's StackContexts - with stack_context.NullContext(): - # stack_context was already captured in callback, we don't need to - # capture it again for IOStream's wrapper. This is especially - # important if the callback was pre-wrapped before entry to - # IOStream (as in HTTPConnection._header_callback), as we could - # capture and leak the wrong context here. - self._pending_callbacks += 1 - self.io_loop.add_callback(wrapper) - - def _read_to_buffer_loop(self): - # This method is called from _handle_read and _try_inline_read. - try: - if self._read_bytes is not None: - target_bytes = self._read_bytes - elif self._read_max_bytes is not None: - target_bytes = self._read_max_bytes - elif self.reading(): - # For read_until without max_bytes, or - # read_until_close, read as much as we can before - # scanning for the delimiter. - target_bytes = None - else: - target_bytes = 0 - next_find_pos = 0 - # Pretend to have a pending callback so that an EOF in - # _read_to_buffer doesn't trigger an immediate close - # callback. At the end of this method we'll either - # establish a real pending callback via - # _read_from_buffer or run the close callback. - # - # We need two try statements here so that - # pending_callbacks is decremented before the `except` - # clause below (which calls `close` and does need to - # trigger the callback) - self._pending_callbacks += 1 - while not self.closed(): - # Read from the socket until we get EWOULDBLOCK or equivalent. - # SSL sockets do some internal buffering, and if the data is - # sitting in the SSL object's buffer select() and friends - # can't see it; the only way to find out if it's there is to - # try to read it. - if self._read_to_buffer() == 0: - break - - self._run_streaming_callback() - - # If we've read all the bytes we can use, break out of - # this loop. We can't just call read_from_buffer here - # because of subtle interactions with the - # pending_callback and error_listener mechanisms. - # - # If we've reached target_bytes, we know we're done.
- if (target_bytes is not None and - self._read_buffer_size >= target_bytes): - break - - # Otherwise, we need to call the more expensive find_read_pos. - # It's inefficient to do this on every read, so instead - # do it on the first read and whenever the read buffer - # size has doubled. - if self._read_buffer_size >= next_find_pos: - pos = self._find_read_pos() - if pos is not None: - return pos - next_find_pos = self._read_buffer_size * 2 - return self._find_read_pos() - finally: - self._pending_callbacks -= 1 - - def _handle_read(self): - try: - pos = self._read_to_buffer_loop() - except UnsatisfiableReadError: - raise - except Exception: - gen_log.warning("error on read", exc_info=True) - self.close(exc_info=True) - return - if pos is not None: - self._read_from_buffer(pos) - return - else: - self._maybe_run_close_callback() - - def _set_read_callback(self, callback): - assert self._read_callback is None, "Already reading" - assert self._read_future is None, "Already reading" - if callback is not None: - self._read_callback = stack_context.wrap(callback) - else: - self._read_future = TracebackFuture() - return self._read_future - - def _run_read_callback(self, size, streaming): - if streaming: - callback = self._streaming_callback - else: - callback = self._read_callback - self._read_callback = self._streaming_callback = None - if self._read_future is not None: - assert callback is None - future = self._read_future - self._read_future = None - future.set_result(self._consume(size)) - if callback is not None: - assert self._read_future is None - self._run_callback(callback, self._consume(size)) - else: - # If we scheduled a callback, we will add the error listener - # afterwards. If we didn't, we have to do it now. - self._maybe_add_error_listener() - - def _try_inline_read(self): - """Attempt to complete the current read operation from buffered data. - - If the read can be completed without blocking, schedules the - read callback on the next IOLoop iteration; otherwise starts - listening for reads on the socket. - """ - # See if we've already got the data from a previous read - self._run_streaming_callback() - pos = self._find_read_pos() - if pos is not None: - self._read_from_buffer(pos) - return - self._check_closed() - try: - pos = self._read_to_buffer_loop() - except Exception: - # If there was an error in _read_to_buffer, we called close() already, - # but couldn't run the close callback because of _pending_callbacks. - # Before we escape from this function, run the close callback if - # applicable. - self._maybe_run_close_callback() - raise - if pos is not None: - self._read_from_buffer(pos) - return - # We couldn't satisfy the read inline, so either close the stream - # or listen for new data. - if self.closed(): - self._maybe_run_close_callback() - else: - self._add_io_state(ioloop.IOLoop.READ) - - def _read_to_buffer(self): - """Reads from the socket and appends the result to the read buffer. - - Returns the number of bytes read. Returns 0 if there is nothing - to read (i.e. the read returns EWOULDBLOCK or equivalent). On - error closes the socket and raises an exception. - """ - try: - chunk = self.read_from_fd() - except (socket.error, IOError, OSError) as e: - # ssl.SSLError is a subclass of socket.error - if e.args[0] in _ERRNO_CONNRESET: - # Treat ECONNRESET as a connection close rather than - # an error to minimize log spam (the exception will - # be available on self.error for apps that care).
- self.close(exc_info=True) - return - self.close(exc_info=True) - raise - if chunk is None: - return 0 - self._read_buffer.append(chunk) - self._read_buffer_size += len(chunk) - if self._read_buffer_size > self.max_buffer_size: - gen_log.error("Reached maximum read buffer size") - self.close() - raise StreamBufferFullError("Reached maximum read buffer size") - return len(chunk) - - def _run_streaming_callback(self): - if self._streaming_callback is not None and self._read_buffer_size: - bytes_to_consume = self._read_buffer_size - if self._read_bytes is not None: - bytes_to_consume = min(self._read_bytes, bytes_to_consume) - self._read_bytes -= bytes_to_consume - self._run_read_callback(bytes_to_consume, True) - - def _read_from_buffer(self, pos): - """Attempts to complete the currently-pending read from the buffer. - - The argument is either a position in the read buffer or None, - as returned by _find_read_pos. - """ - self._read_bytes = self._read_delimiter = self._read_regex = None - self._read_partial = False - self._run_read_callback(pos, False) - - def _find_read_pos(self): - """Attempts to find a position in the read buffer that satisfies - the currently-pending read. - - Returns a position in the buffer if the current read can be satisfied, - or None if it cannot. - """ - if (self._read_bytes is not None and - (self._read_buffer_size >= self._read_bytes or - (self._read_partial and self._read_buffer_size > 0))): - num_bytes = min(self._read_bytes, self._read_buffer_size) - return num_bytes - elif self._read_delimiter is not None: - # Multi-byte delimiters (e.g. '\r\n') may straddle two - # chunks in the read buffer, so we can't easily find them - # without collapsing the buffer. However, since protocols - # using delimited reads (as opposed to reads of a known - # length) tend to be "line" oriented, the delimiter is likely - # to be in the first few chunks. Merge the buffer gradually - # since large merges are relatively expensive and get undone in - # _consume(). - if self._read_buffer: - while True: - loc = self._read_buffer[0].find(self._read_delimiter) - if loc != -1: - delimiter_len = len(self._read_delimiter) - self._check_max_bytes(self._read_delimiter, - loc + delimiter_len) - return loc + delimiter_len - if len(self._read_buffer) == 1: - break - _double_prefix(self._read_buffer) - self._check_max_bytes(self._read_delimiter, - len(self._read_buffer[0])) - elif self._read_regex is not None: - if self._read_buffer: - while True: - m = self._read_regex.search(self._read_buffer[0]) - if m is not None: - self._check_max_bytes(self._read_regex, m.end()) - return m.end() - if len(self._read_buffer) == 1: - break - _double_prefix(self._read_buffer) - self._check_max_bytes(self._read_regex, - len(self._read_buffer[0])) - return None - - def _check_max_bytes(self, delimiter, size): - if (self._read_max_bytes is not None and - size > self._read_max_bytes): - raise UnsatisfiableReadError( - "delimiter %r not found within %d bytes" % ( - delimiter, self._read_max_bytes)) - - def _handle_write(self): - while self._write_buffer: - try: - if not self._write_buffer_frozen: - # On windows, socket.send blows up if given a - # write buffer that's too large, instead of just - # returning the number of bytes it was able to - # process. Therefore we must not call socket.send - # with more than 128KB at a time. 
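Relatedly, the ``max_write_buffer_size`` cap enforced in ``write()`` above surfaces as ``StreamBufferFullError``; a POSIX-only sketch, assuming Tornado 4.x::

    import socket
    from tornado.iostream import IOStream, StreamBufferFullError

    a, b = socket.socketpair()  # POSIX-only
    stream = IOStream(a, max_write_buffer_size=64 * 1024)
    try:
        stream.write(b"x" * (1024 * 1024))  # exceeds the cap at once
    except StreamBufferFullError:
        print("apply backpressure; the peer is not draining fast enough")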
- _merge_prefix(self._write_buffer, 128 * 1024) - num_bytes = self.write_to_fd(self._write_buffer[0]) - if num_bytes == 0: - # With OpenSSL, if we couldn't write the entire buffer, - # the very same string object must be used on the - # next call to send. Therefore we suppress - # merging the write buffer after an incomplete send. - # A cleaner solution would be to set - # SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER, but this is - # not yet accessible from python - # (http://bugs.python.org/issue8240) - self._write_buffer_frozen = True - break - self._write_buffer_frozen = False - _merge_prefix(self._write_buffer, num_bytes) - self._write_buffer.popleft() - self._write_buffer_size -= num_bytes - except (socket.error, IOError, OSError) as e: - if e.args[0] in _ERRNO_WOULDBLOCK: - self._write_buffer_frozen = True - break - else: - if e.args[0] not in _ERRNO_CONNRESET: - # Broken pipe errors are usually caused by connection - # reset, and it's better not to log EPIPE errors to - # minimize log spam - gen_log.warning("Write error on %s: %s", - self.fileno(), e) - self.close(exc_info=True) - return - if not self._write_buffer: - if self._write_callback: - callback = self._write_callback - self._write_callback = None - self._run_callback(callback) - if self._write_future: - future = self._write_future - self._write_future = None - future.set_result(None) - - def _consume(self, loc): - if loc == 0: - return b"" - _merge_prefix(self._read_buffer, loc) - self._read_buffer_size -= loc - return self._read_buffer.popleft() - - def _check_closed(self): - if self.closed(): - raise StreamClosedError("Stream is closed") - - def _maybe_add_error_listener(self): - # This method is part of an optimization: to detect a connection that - # is closed when we're not actively reading or writing, we must listen - # for read events. However, it is inefficient to do this when the - # connection is first established because we are going to read or write - # immediately anyway. Instead, we insert checks at various times to - # see if the connection is idle and add the read listener then. - if self._pending_callbacks != 0: - return - if self._state is None or self._state == ioloop.IOLoop.ERROR: - if self.closed(): - self._maybe_run_close_callback() - elif (self._read_buffer_size == 0 and - self._close_callback is not None): - self._add_io_state(ioloop.IOLoop.READ) - - def _add_io_state(self, state): - """Adds `state` (IOLoop.{READ,WRITE} flags) to our event handler. - - Implementation notes: Reads and writes have a fast path and a - slow path. The fast path reads synchronously from socket - buffers, while the slow path uses `_add_io_state` to schedule - an IOLoop callback. Note that in both cases, the callback is - run asynchronously with `_run_callback`. - - To detect closed connections, we must have called - `_add_io_state` at some point, but we want to delay this as - much as possible so we don't have to set an `IOLoop.ERROR` - listener that will be overwritten by the next slow-path - operation. As long as there are callbacks scheduled for - fast-path ops, those callbacks may do more reads. - If a sequence of fast-path ops does not end in a slow-path op - (e.g. for an @asynchronous long-poll request), we must add - the error handler.
This is done in `_run_callback` and `write` - (since the write callback is optional so we can have a - fast-path write with no `_run_callback`) - """ - if self.closed(): - # connection has been closed, so there can be no future events - return - if self._state is None: - self._state = ioloop.IOLoop.ERROR | state - with stack_context.NullContext(): - self.io_loop.add_handler( - self.fileno(), self._handle_events, self._state) - elif not self._state & state: - self._state = self._state | state - self.io_loop.update_handler(self.fileno(), self._state) - - -class IOStream(BaseIOStream): - r"""Socket-based `IOStream` implementation. - - This class supports the read and write methods from `BaseIOStream` - plus a `connect` method. - - The ``socket`` parameter may either be connected or unconnected. - For server operations the socket is the result of calling - `socket.accept `. For client operations the - socket is created with `socket.socket`, and may either be - connected before passing it to the `IOStream` or connected with - `IOStream.connect`. - - A very simple (and broken) HTTP client using this class:: - - import tornado.ioloop - import tornado.iostream - import socket - - def send_request(): - stream.write(b"GET / HTTP/1.0\r\nHost: friendfeed.com\r\n\r\n") - stream.read_until(b"\r\n\r\n", on_headers) - - def on_headers(data): - headers = {} - for line in data.split(b"\r\n"): - parts = line.split(b":") - if len(parts) == 2: - headers[parts[0].strip()] = parts[1].strip() - stream.read_bytes(int(headers[b"Content-Length"]), on_body) - - def on_body(data): - print data - stream.close() - tornado.ioloop.IOLoop.instance().stop() - - s = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0) - stream = tornado.iostream.IOStream(s) - stream.connect(("friendfeed.com", 80), send_request) - tornado.ioloop.IOLoop.instance().start() - """ - def __init__(self, socket, *args, **kwargs): - self.socket = socket - self.socket.setblocking(False) - super(IOStream, self).__init__(*args, **kwargs) - - def fileno(self): - return self.socket - - def close_fd(self): - self.socket.close() - self.socket = None - - def get_fd_error(self): - errno = self.socket.getsockopt(socket.SOL_SOCKET, - socket.SO_ERROR) - return socket.error(errno, os.strerror(errno)) - - def read_from_fd(self): - try: - chunk = self.socket.recv(self.read_chunk_size) - except socket.error as e: - if e.args[0] in _ERRNO_WOULDBLOCK: - return None - else: - raise - if not chunk: - self.close() - return None - return chunk - - def write_to_fd(self, data): - return self.socket.send(data) - - def connect(self, address, callback=None, server_hostname=None): - """Connects the socket to a remote address without blocking. - - May only be called if the socket passed to the constructor was - not previously connected. The address parameter is in the - same format as for `socket.connect ` for - the type of socket passed to the IOStream constructor, - e.g. an ``(ip, port)`` tuple. Hostnames are accepted here, - but will be resolved synchronously and block the IOLoop. - If you have a hostname instead of an IP address, the `.TCPClient` - class is recommended instead of calling this method directly. - `.TCPClient` will do asynchronous DNS resolution and handle - both IPv4 and IPv6. - - If ``callback`` is specified, it will be called with no - arguments when the connection is completed; if not this method - returns a `.Future` (whose result after a successful - connection will be the stream itself). 
- - If specified, the ``server_hostname`` parameter will be used - in SSL connections for certificate validation (if requested in - the ``ssl_options``) and SNI (if supported; requires - Python 3.2+). - - Note that it is safe to call `IOStream.write - ` while the connection is pending, in - which case the data will be written as soon as the connection - is ready. Calling `IOStream` read methods before the socket is - connected works on some platforms but is non-portable. - - .. versionchanged:: 4.0 - If no callback is given, returns a `.Future`. - - """ - self._connecting = True - if callback is not None: - self._connect_callback = stack_context.wrap(callback) - future = None - else: - future = self._connect_future = TracebackFuture() - try: - self.socket.connect(address) - except socket.error as e: - # In non-blocking mode we expect connect() to raise an - # exception with EINPROGRESS or EWOULDBLOCK. - # - # On freebsd, other errors such as ECONNREFUSED may be - # returned immediately when attempting to connect to - # localhost, so handle them the same way as an error - # reported later in _handle_connect. - if (errno_from_exception(e) not in _ERRNO_INPROGRESS and - errno_from_exception(e) not in _ERRNO_WOULDBLOCK): - gen_log.warning("Connect error on fd %s: %s", - self.socket.fileno(), e) - self.close(exc_info=True) - return future - self._add_io_state(self.io_loop.WRITE) - return future - - def start_tls(self, server_side, ssl_options=None, server_hostname=None): - """Convert this `IOStream` to an `SSLIOStream`. - - This enables protocols that begin in clear-text mode and - switch to SSL after some initial negotiation (such as the - ``STARTTLS`` extension to SMTP and IMAP). - - This method cannot be used if there are outstanding reads - or writes on the stream, or if there is any data in the - IOStream's buffer (data in the operating system's socket - buffer is allowed). This means it must generally be used - immediately after reading or writing the last clear-text - data. It can also be used immediately after connecting, - before any reads or writes. - - The ``ssl_options`` argument may be either a dictionary - of options or an `ssl.SSLContext`. If a ``server_hostname`` - is given, it will be used for certificate verification - (as configured in the ``ssl_options``). - - This method returns a `.Future` whose result is the new - `SSLIOStream`. After this method has been called, - any other operation on the original stream is undefined. - - If a close callback is defined on this stream, it will be - transferred to the new stream. - - .. versionadded:: 4.0 - """ - if (self._read_callback or self._read_future or - self._write_callback or self._write_future or - self._connect_callback or self._connect_future or - self._pending_callbacks or self._closed or - self._read_buffer or self._write_buffer): - raise ValueError("IOStream is not idle; cannot convert to SSL") - if ssl_options is None: - ssl_options = {} - - socket = self.socket - self.io_loop.remove_handler(socket) - self.socket = None - socket = ssl_wrap_socket(socket, ssl_options, server_side=server_side, - do_handshake_on_connect=False) - orig_close_callback = self._close_callback - self._close_callback = None - - future = TracebackFuture() - ssl_stream = SSLIOStream(socket, ssl_options=ssl_options, - io_loop=self.io_loop) - # Wrap the original close callback so we can fail our Future as well. 
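``start_tls`` above targets protocols that upgrade mid-stream; a schematic client sketch (not runnable without a cooperating server; the host, port, and abbreviated handshake are placeholders), assuming Tornado 4.x::

    from tornado import gen
    from tornado.tcpclient import TCPClient

    @gen.coroutine
    def upgrade():
        stream = yield TCPClient().connect("mail.example.com", 587)
        yield stream.read_until(b"\r\n")  # plaintext greeting
        yield stream.write(b"STARTTLS\r\n")
        yield stream.read_until(b"\r\n")  # server says go ahead
        # returns a new SSLIOStream; the old stream must not be used again
        stream = yield stream.start_tls(
            server_side=False, server_hostname="mail.example.com")
        raise gen.Return(stream)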
- # If we had an "unwrap" counterpart to this method we would need - # to restore the original callback after our Future resolves - # so that repeated wrap/unwrap calls don't build up layers. - def close_callback(): - if not future.done(): - future.set_exception(ssl_stream.error or StreamClosedError()) - if orig_close_callback is not None: - orig_close_callback() - ssl_stream.set_close_callback(close_callback) - ssl_stream._ssl_connect_callback = lambda: future.set_result(ssl_stream) - ssl_stream.max_buffer_size = self.max_buffer_size - ssl_stream.read_chunk_size = self.read_chunk_size - return future - - def _handle_connect(self): - err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR) - if err != 0: - self.error = socket.error(err, os.strerror(err)) - # IOLoop implementations may vary: some of them return - # an error state before the socket becomes writable, so - # in that case a connection failure would be handled by the - # error path in _handle_events instead of here. - if self._connect_future is None: - gen_log.warning("Connect error on fd %s: %s", - self.socket.fileno(), errno.errorcode[err]) - self.close() - return - if self._connect_callback is not None: - callback = self._connect_callback - self._connect_callback = None - self._run_callback(callback) - if self._connect_future is not None: - future = self._connect_future - self._connect_future = None - future.set_result(self) - self._connecting = False - - def set_nodelay(self, value): - if (self.socket is not None and - self.socket.family in (socket.AF_INET, socket.AF_INET6)): - try: - self.socket.setsockopt(socket.IPPROTO_TCP, - socket.TCP_NODELAY, 1 if value else 0) - except socket.error as e: - # Sometimes setsockopt will fail if the socket is closed - # at the wrong time. This can happen with HTTPServer - # resetting the value to false between requests. - if e.errno not in (errno.EINVAL, errno.ECONNRESET): - raise - - -class SSLIOStream(IOStream): - """A utility class to write to and read from a non-blocking SSL socket. - - If the socket passed to the constructor is already connected, - it should be wrapped with:: - - ssl.wrap_socket(sock, do_handshake_on_connect=False, **kwargs) - - before constructing the `SSLIOStream`. Unconnected sockets will be - wrapped when `IOStream.connect` is finished. - """ - def __init__(self, *args, **kwargs): - """The ``ssl_options`` keyword argument may either be a dictionary - of keyword arguments for `ssl.wrap_socket`, or an `ssl.SSLContext` - object. - """ - self._ssl_options = kwargs.pop('ssl_options', {}) - super(SSLIOStream, self).__init__(*args, **kwargs) - self._ssl_accepting = True - self._handshake_reading = False - self._handshake_writing = False - self._ssl_connect_callback = None - self._server_hostname = None - - # If the socket is already connected, attempt to start the handshake. - try: - self.socket.getpeername() - except socket.error: - pass - else: - # Indirectly start the handshake, which will run on the next - # IOLoop iteration and then the real IO state will be set in - # _handle_events.
- self._add_io_state(self.io_loop.WRITE) - - def reading(self): - return self._handshake_reading or super(SSLIOStream, self).reading() - - def writing(self): - return self._handshake_writing or super(SSLIOStream, self).writing() - - def _do_ssl_handshake(self): - # Based on code from test_ssl.py in the python stdlib - try: - self._handshake_reading = False - self._handshake_writing = False - self.socket.do_handshake() - except ssl.SSLError as err: - if err.args[0] == ssl.SSL_ERROR_WANT_READ: - self._handshake_reading = True - return - elif err.args[0] == ssl.SSL_ERROR_WANT_WRITE: - self._handshake_writing = True - return - elif err.args[0] in (ssl.SSL_ERROR_EOF, - ssl.SSL_ERROR_ZERO_RETURN): - return self.close(exc_info=True) - elif err.args[0] == ssl.SSL_ERROR_SSL: - try: - peer = self.socket.getpeername() - except Exception: - peer = '(not connected)' - gen_log.warning("SSL Error on %s %s: %s", - self.socket.fileno(), peer, err) - return self.close(exc_info=True) - raise - except socket.error as err: - if err.args[0] in _ERRNO_CONNRESET: - return self.close(exc_info=True) - except AttributeError: - # On Linux, if the connection was reset before the call to - # wrap_socket, do_handshake will fail with an - # AttributeError. - return self.close(exc_info=True) - else: - self._ssl_accepting = False - if not self._verify_cert(self.socket.getpeercert()): - self.close() - return - if self._ssl_connect_callback is not None: - callback = self._ssl_connect_callback - self._ssl_connect_callback = None - self._run_callback(callback) - - def _verify_cert(self, peercert): - """Returns True if peercert is valid according to the configured - validation mode and hostname. - - The ssl handshake already tested the certificate for a valid - CA signature; the only thing that remains is to check - the hostname. - """ - if isinstance(self._ssl_options, dict): - verify_mode = self._ssl_options.get('cert_reqs', ssl.CERT_NONE) - elif isinstance(self._ssl_options, ssl.SSLContext): - verify_mode = self._ssl_options.verify_mode - assert verify_mode in (ssl.CERT_NONE, ssl.CERT_REQUIRED, ssl.CERT_OPTIONAL) - if verify_mode == ssl.CERT_NONE or self._server_hostname is None: - return True - cert = self.socket.getpeercert() - if cert is None and verify_mode == ssl.CERT_REQUIRED: - gen_log.warning("No SSL certificate given") - return False - try: - ssl_match_hostname(peercert, self._server_hostname) - except SSLCertificateError: - gen_log.warning("Invalid SSL certificate", exc_info=True) - return False - else: - return True - - def _handle_read(self): - if self._ssl_accepting: - self._do_ssl_handshake() - return - super(SSLIOStream, self)._handle_read() - - def _handle_write(self): - if self._ssl_accepting: - self._do_ssl_handshake() - return - super(SSLIOStream, self)._handle_write() - - def connect(self, address, callback=None, server_hostname=None): - # Save the user's callback and run it after the ssl handshake - # has completed. - self._ssl_connect_callback = stack_context.wrap(callback) - self._server_hostname = server_hostname - # Note: Since we don't pass our callback argument along to - # super.connect(), this will always return a Future. - # This is harmless, but a bit less efficient than it could be. - return super(SSLIOStream, self).connect(address, callback=None) - - def _handle_connect(self): - # Call the superclass method to check for errors. - super(SSLIOStream, self)._handle_connect() - if self.closed(): - return - # When the connection is complete, wrap the socket for SSL - # traffic. 
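Hostname checking via ``_verify_cert`` above only happens when ``cert_reqs`` and a ``server_hostname`` are both supplied; a client sketch, assuming Tornado 4.x (the CA bundle path is a Debian-style placeholder)::

    import socket
    import ssl
    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.iostream import SSLIOStream

    @gen.coroutine
    def secure_fetch():
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        stream = SSLIOStream(s, ssl_options={
            "cert_reqs": ssl.CERT_REQUIRED,
            "ca_certs": "/etc/ssl/certs/ca-certificates.crt"})
        yield stream.connect(("example.com", 443),
                             server_hostname="example.com")
        yield stream.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
        status = yield stream.read_until(b"\r\n")
        stream.close()
        raise gen.Return(status)

    print(IOLoop.instance().run_sync(secure_fetch))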
Note that we do this by overriding _handle_connect - # instead of by passing a callback to super().connect because - # user callbacks are enqueued asynchronously on the IOLoop, - # but since _handle_events calls _handle_connect immediately - # followed by _handle_write we need this to be synchronous. - # - # The IOLoop will get confused if we swap out self.socket while the - # fd is registered, so remove it now and re-register after - # wrap_socket(). - self.io_loop.remove_handler(self.socket) - old_state = self._state - self._state = None - self.socket = ssl_wrap_socket(self.socket, self._ssl_options, - server_hostname=self._server_hostname, - do_handshake_on_connect=False) - self._add_io_state(old_state) - - def read_from_fd(self): - if self._ssl_accepting: - # If the handshake hasn't finished yet, there can't be anything - # to read (attempting to read may or may not raise an exception - # depending on the SSL version) - return None - try: - # SSLSocket objects have both a read() and recv() method, - # while regular sockets only have recv(). - # The recv() method blocks (at least in python 2.6) if it is - # called when there is nothing to read, so we have to use - # read() instead. - chunk = self.socket.read(self.read_chunk_size) - except ssl.SSLError as e: - # SSLError is a subclass of socket.error, so this except - # block must come first. - if e.args[0] == ssl.SSL_ERROR_WANT_READ: - return None - else: - raise - except socket.error as e: - if e.args[0] in _ERRNO_WOULDBLOCK: - return None - else: - raise - if not chunk: - self.close() - return None - return chunk - - -class PipeIOStream(BaseIOStream): - """Pipe-based `IOStream` implementation. - - The constructor takes an integer file descriptor (such as one returned - by `os.pipe`) rather than an open file object. Pipes are generally - one-way, so a `PipeIOStream` can be used for reading or writing but not - both. - """ - def __init__(self, fd, *args, **kwargs): - self.fd = fd - _set_nonblocking(fd) - super(PipeIOStream, self).__init__(*args, **kwargs) - - def fileno(self): - return self.fd - - def close_fd(self): - os.close(self.fd) - - def write_to_fd(self, data): - return os.write(self.fd, data) - - def read_from_fd(self): - try: - chunk = os.read(self.fd, self.read_chunk_size) - except (IOError, OSError) as e: - if errno_from_exception(e) in _ERRNO_WOULDBLOCK: - return None - elif errno_from_exception(e) == errno.EBADF: - # If the writing half of a pipe is closed, select will - # report it as readable but reads will fail with EBADF. - self.close(exc_info=True) - return None - else: - raise - if not chunk: - self.close() - return None - return chunk - - -def _double_prefix(deque): - """Grow by doubling, but don't split the second chunk just because the - first one is small. - """ - new_len = max(len(deque[0]) * 2, - (len(deque[0]) + len(deque[1]))) - _merge_prefix(deque, new_len) - - -def _merge_prefix(deque, size): - """Replace the first entries in a deque of strings with a single - string of up to size bytes. - - >>> d = collections.deque(['abc', 'de', 'fghi', 'j']) - >>> _merge_prefix(d, 5); print(d) - deque(['abcde', 'fghi', 'j']) - - Strings will be split as necessary to reach the desired size. 
- >>> _merge_prefix(d, 7); print(d) - deque(['abcdefg', 'hi', 'j']) - - >>> _merge_prefix(d, 3); print(d) - deque(['abc', 'defg', 'hi', 'j']) - - >>> _merge_prefix(d, 100); print(d) - deque(['abcdefghij']) - """ - if len(deque) == 1 and len(deque[0]) <= size: - return - prefix = [] - remaining = size - while deque and remaining > 0: - chunk = deque.popleft() - if len(chunk) > remaining: - deque.appendleft(chunk[remaining:]) - chunk = chunk[:remaining] - prefix.append(chunk) - remaining -= len(chunk) - # This data structure normally just contains byte strings, but - # the unittest gets messy if it doesn't use the default str() type, - # so do the merge based on the type of data that's actually present. - if prefix: - deque.appendleft(type(prefix[0])().join(prefix)) - if not deque: - deque.appendleft(b"") - - -def doctests(): - import doctest - return doctest.DocTestSuite() diff --git a/rosbridge_server/src/tornado/locale.py b/rosbridge_server/src/tornado/locale.py deleted file mode 100644 index 07c6d582b..000000000 --- a/rosbridge_server/src/tornado/locale.py +++ /dev/null @@ -1,511 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Translation methods for generating localized strings. - -To load a locale and generate a translated string:: - - user_locale = tornado.locale.get("es_LA") - print user_locale.translate("Sign out") - -`tornado.locale.get()` returns the closest matching locale, not necessarily the -specific locale you requested. You can support pluralization with -additional arguments to `~Locale.translate()`, e.g.:: - - people = [...] - message = user_locale.translate( - "%(list)s is online", "%(list)s are online", len(people)) - print message % {"list": user_locale.list(people)} - -The first string is chosen if ``len(people) == 1``, otherwise the second -string is chosen. - -Applications should call one of `load_translations` (which uses a simple -CSV format) or `load_gettext_translations` (which uses the ``.mo`` format -supported by `gettext` and related tools). If neither method is called, -the `Locale.translate` method will simply return the original string. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import csv -import datetime -import numbers -import os -import re - -from tornado import escape -from tornado.log import gen_log -from tornado.util import u - -_default_locale = "en_US" -_translations = {} -_supported_locales = frozenset([_default_locale]) -_use_gettext = False - - -def get(*locale_codes): - """Returns the closest match for the given locale codes. - - We iterate over all given locale codes in order. If we have a tight - or a loose match for the code (e.g., "en" for "en_US"), we return - the locale. Otherwise we move to the next code in the list. - - By default we return ``en_US`` if no translations are found for any of - the specified locales. You can change the default locale with - `set_default_locale()`. 
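As a short sketch of the fallback behavior (illustrative; it assumes a ``translations`` directory of CSV files as described under `load_translations` below)::

    import tornado.locale

    tornado.locale.load_translations("translations")  # e.g. contains es.csv

    # "es-gt" is normalized to "es_GT"; if that exact locale was not
    # loaded, get() falls back to the bare language code "es", and
    # finally to the default locale en_US.
    user_locale = tornado.locale.get("es-gt")
    print(user_locale.translate("Sign out"))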
- """ - return Locale.get_closest(*locale_codes) - - -def set_default_locale(code): - """Sets the default locale. - - The default locale is assumed to be the language used for all strings - in the system. The translations loaded from disk are mappings from - the default locale to the destination locale. Consequently, you don't - need to create a translation file for the default locale. - """ - global _default_locale - global _supported_locales - _default_locale = code - _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) - - -def load_translations(directory): - """Loads translations from CSV files in a directory. - - Translations are strings with optional Python-style named placeholders - (e.g., ``My name is %(name)s``) and their associated translations. - - The directory should have translation files of the form ``LOCALE.csv``, - e.g. ``es_GT.csv``. The CSV files should have two or three columns: string, - translation, and an optional plural indicator. Plural indicators should - be one of "plural" or "singular". A given string can have both singular - and plural forms. For example ``%(name)s liked this`` may have a - different verb conjugation depending on whether %(name)s is one - name or a list of names. There should be two rows in the CSV file for - that string, one with plural indicator "singular", and one "plural". - For strings with no verbs that would change on translation, simply - use "unknown" or the empty string (or don't include the column at all). - - The file is read using the `csv` module in the default "excel" dialect. - In this format there should not be spaces after the commas. - - Example translation ``es_LA.csv``:: - - "I love you","Te amo" - "%(name)s liked this","A %(name)s les gustó esto","plural" - "%(name)s liked this","A %(name)s le gustó esto","singular" - - """ - global _translations - global _supported_locales - _translations = {} - for path in os.listdir(directory): - if not path.endswith(".csv"): - continue - locale, extension = path.split(".") - if not re.match("[a-z]+(_[A-Z]+)?$", locale): - gen_log.error("Unrecognized locale %r (path: %s)", locale, - os.path.join(directory, path)) - continue - full_path = os.path.join(directory, path) - try: - # python 3: csv.reader requires a file open in text mode. - # Force utf8 to avoid dependence on $LANG environment variable. - f = open(full_path, "r", encoding="utf-8") - except TypeError: - # python 2: files return byte strings, which are decoded below. - f = open(full_path, "r") - _translations[locale] = {} - for i, row in enumerate(csv.reader(f)): - if not row or len(row) < 2: - continue - row = [escape.to_unicode(c).strip() for c in row] - english, translation = row[:2] - if len(row) > 2: - plural = row[2] or "unknown" - else: - plural = "unknown" - if plural not in ("plural", "singular", "unknown"): - gen_log.error("Unrecognized plural indicator %r in %s line %d", - plural, path, i + 1) - continue - _translations[locale].setdefault(plural, {})[english] = translation - f.close() - _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) - gen_log.debug("Supported locales: %s", sorted(_supported_locales)) - - -def load_gettext_translations(directory, domain): - """Loads translations from `gettext`'s locale tree - - Locale tree is similar to system's ``/usr/share/locale``, like:: - - {directory}/{lang}/LC_MESSAGES/{domain}.mo - - Three steps are required to have you app translated: - - 1. 
Generate POT translation file:: - - xgettext --language=Python --keyword=_:1,2 -d mydomain file1.py file2.html etc - - 2. Merge against existing POT file:: - - msgmerge old.po mydomain.po > new.po - - 3. Compile:: - - msgfmt mydomain.po -o {directory}/pt_BR/LC_MESSAGES/mydomain.mo - """ - import gettext - global _translations - global _supported_locales - global _use_gettext - _translations = {} - for lang in os.listdir(directory): - if lang.startswith('.'): - continue # skip .svn, etc - if os.path.isfile(os.path.join(directory, lang)): - continue - try: - os.stat(os.path.join(directory, lang, "LC_MESSAGES", domain + ".mo")) - _translations[lang] = gettext.translation(domain, directory, - languages=[lang]) - except Exception as e: - gen_log.error("Cannot load translation for '%s': %s", lang, str(e)) - continue - _supported_locales = frozenset(list(_translations.keys()) + [_default_locale]) - _use_gettext = True - gen_log.debug("Supported locales: %s", sorted(_supported_locales)) - - -def get_supported_locales(): - """Returns a list of all the supported locale codes.""" - return _supported_locales - - -class Locale(object): - """Object representing a locale. - - After calling one of `load_translations` or `load_gettext_translations`, - call `get` or `get_closest` to get a Locale object. - """ - @classmethod - def get_closest(cls, *locale_codes): - """Returns the closest match for the given locale code.""" - for code in locale_codes: - if not code: - continue - code = code.replace("-", "_") - parts = code.split("_") - if len(parts) > 2: - continue - elif len(parts) == 2: - code = parts[0].lower() + "_" + parts[1].upper() - if code in _supported_locales: - return cls.get(code) - if parts[0].lower() in _supported_locales: - return cls.get(parts[0].lower()) - return cls.get(_default_locale) - - @classmethod - def get(cls, code): - """Returns the Locale for the given locale code. - - If it is not supported, we raise an exception. - """ - if not hasattr(cls, "_cache"): - cls._cache = {} - if code not in cls._cache: - assert code in _supported_locales - translations = _translations.get(code, None) - if translations is None: - locale = CSVLocale(code, {}) - elif _use_gettext: - locale = GettextLocale(code, translations) - else: - locale = CSVLocale(code, translations) - cls._cache[code] = locale - return cls._cache[code] - - def __init__(self, code, translations): - self.code = code - self.name = LOCALE_NAMES.get(code, {}).get("name", u("Unknown")) - self.rtl = False - for prefix in ["fa", "ar", "he"]: - if self.code.startswith(prefix): - self.rtl = True - break - self.translations = translations - - # Initialize strings for date formatting - _ = self.translate - self._months = [ - _("January"), _("February"), _("March"), _("April"), - _("May"), _("June"), _("July"), _("August"), - _("September"), _("October"), _("November"), _("December")] - self._weekdays = [ - _("Monday"), _("Tuesday"), _("Wednesday"), _("Thursday"), - _("Friday"), _("Saturday"), _("Sunday")] - - def translate(self, message, plural_message=None, count=None): - """Returns the translation for the given message for this locale. - - If ``plural_message`` is given, you must also provide - ``count``. We return ``plural_message`` when ``count != 1``, - and we return the singular form for the given message when - ``count == 1``. - """ - raise NotImplementedError() - - def format_date(self, date, gmt_offset=0, relative=True, shorter=False, - full_format=False): - """Formats the given date (which should be GMT). 
-
-        By default, we return a relative time (e.g., "2 minutes ago"). You
-        can return an absolute date string with ``relative=False``.
-
-        You can force a full format date ("July 10, 1980") with
-        ``full_format=True``.
-
-        This method is primarily intended for dates in the past.
-        For dates in the future, we fall back to full format.
-        """
-        if isinstance(date, numbers.Real):
-            date = datetime.datetime.utcfromtimestamp(date)
-        now = datetime.datetime.utcnow()
-        if date > now:
-            if relative and (date - now).seconds < 60:
-                # Due to clock skew, some things are slightly
-                # in the future. Round timestamps in the immediate
-                # future down to now in relative mode.
-                date = now
-            else:
-                # Otherwise, future dates always use the full format.
-                full_format = True
-        local_date = date - datetime.timedelta(minutes=gmt_offset)
-        local_now = now - datetime.timedelta(minutes=gmt_offset)
-        local_yesterday = local_now - datetime.timedelta(hours=24)
-        difference = now - date
-        seconds = difference.seconds
-        days = difference.days
-
-        _ = self.translate
-        format = None
-        if not full_format:
-            if relative and days == 0:
-                if seconds < 50:
-                    return _("1 second ago", "%(seconds)d seconds ago",
-                             seconds) % {"seconds": seconds}
-
-                if seconds < 50 * 60:
-                    minutes = round(seconds / 60.0)
-                    return _("1 minute ago", "%(minutes)d minutes ago",
-                             minutes) % {"minutes": minutes}
-
-                hours = round(seconds / (60.0 * 60))
-                return _("1 hour ago", "%(hours)d hours ago",
-                         hours) % {"hours": hours}
-
-            if days == 0:
-                format = _("%(time)s")
-            elif days == 1 and local_date.day == local_yesterday.day and \
-                    relative:
-                format = _("yesterday") if shorter else \
-                    _("yesterday at %(time)s")
-            elif days < 5:
-                format = _("%(weekday)s") if shorter else \
-                    _("%(weekday)s at %(time)s")
-            elif days < 334:  # 11mo, since confusing for same month last year
-                format = _("%(month_name)s %(day)s") if shorter else \
-                    _("%(month_name)s %(day)s at %(time)s")
-
-        if format is None:
-            format = _("%(month_name)s %(day)s, %(year)s") if shorter else \
-                _("%(month_name)s %(day)s, %(year)s at %(time)s")
-
-        tfhour_clock = self.code not in ("en", "en_US", "zh_CN")
-        if tfhour_clock:
-            str_time = "%d:%02d" % (local_date.hour, local_date.minute)
-        elif self.code == "zh_CN":
-            str_time = "%s%d:%02d" % (
-                (u('\u4e0a\u5348'), u('\u4e0b\u5348'))[local_date.hour >= 12],
-                local_date.hour % 12 or 12, local_date.minute)
-        else:
-            str_time = "%d:%02d %s" % (
-                local_date.hour % 12 or 12, local_date.minute,
-                ("am", "pm")[local_date.hour >= 12])
-
-        return format % {
-            "month_name": self._months[local_date.month - 1],
-            "weekday": self._weekdays[local_date.weekday()],
-            "day": str(local_date.day),
-            "year": str(local_date.year),
-            "time": str_time
-        }
-
-    def format_day(self, date, gmt_offset=0, dow=True):
-        """Formats the given date as a day of week.
-
-        Example: "Monday, January 22". You can remove the day of week with
-        ``dow=False``.
-        """
-        local_date = date - datetime.timedelta(minutes=gmt_offset)
-        _ = self.translate
-        if dow:
-            return _("%(weekday)s, %(month_name)s %(day)s") % {
-                "month_name": self._months[local_date.month - 1],
-                "weekday": self._weekdays[local_date.weekday()],
-                "day": str(local_date.day),
-            }
-        else:
-            return _("%(month_name)s %(day)s") % {
-                "month_name": self._months[local_date.month - 1],
-                "day": str(local_date.day),
-            }
-
-    def list(self, parts):
-        """Returns a comma-separated list for the given list of parts.
-
-        The format is, e.g., "A, B and C", "A and B" or just "A" for lists
-        of size 1.
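A brief usage sketch for the formatting helpers above (illustrative only; the exact strings depend on the loaded translations and the current time)::

    import datetime
    import tornado.locale

    locale = tornado.locale.get("en_US")
    now = datetime.datetime.utcnow()

    print(locale.format_date(now))                  # e.g. "0 seconds ago"
    print(locale.format_date(now, relative=False))  # e.g. "4:30 pm"
    print(locale.format_day(now))                   # e.g. "Monday, January 22"
    print(locale.list(["A", "B", "C"]))             # "A, B and C"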
- """ - _ = self.translate - if len(parts) == 0: - return "" - if len(parts) == 1: - return parts[0] - comma = u(' \u0648 ') if self.code.startswith("fa") else u(", ") - return _("%(commas)s and %(last)s") % { - "commas": comma.join(parts[:-1]), - "last": parts[len(parts) - 1], - } - - def friendly_number(self, value): - """Returns a comma-separated number for the given integer.""" - if self.code not in ("en", "en_US"): - return str(value) - value = str(value) - parts = [] - while value: - parts.append(value[-3:]) - value = value[:-3] - return ",".join(reversed(parts)) - - -class CSVLocale(Locale): - """Locale implementation using tornado's CSV translation format.""" - def translate(self, message, plural_message=None, count=None): - if plural_message is not None: - assert count is not None - if count != 1: - message = plural_message - message_dict = self.translations.get("plural", {}) - else: - message_dict = self.translations.get("singular", {}) - else: - message_dict = self.translations.get("unknown", {}) - return message_dict.get(message, message) - - -class GettextLocale(Locale): - """Locale implementation using the `gettext` module.""" - def __init__(self, code, translations): - try: - # python 2 - self.ngettext = translations.ungettext - self.gettext = translations.ugettext - except AttributeError: - # python 3 - self.ngettext = translations.ngettext - self.gettext = translations.gettext - # self.gettext must exist before __init__ is called, since it - # calls into self.translate - super(GettextLocale, self).__init__(code, translations) - - def translate(self, message, plural_message=None, count=None): - if plural_message is not None: - assert count is not None - return self.ngettext(message, plural_message, count) - else: - return self.gettext(message) - -LOCALE_NAMES = { - "af_ZA": {"name_en": u("Afrikaans"), "name": u("Afrikaans")}, - "am_ET": {"name_en": u("Amharic"), "name": u('\u12a0\u121b\u122d\u129b')}, - "ar_AR": {"name_en": u("Arabic"), "name": u("\u0627\u0644\u0639\u0631\u0628\u064a\u0629")}, - "bg_BG": {"name_en": u("Bulgarian"), "name": u("\u0411\u044a\u043b\u0433\u0430\u0440\u0441\u043a\u0438")}, - "bn_IN": {"name_en": u("Bengali"), "name": u("\u09ac\u09be\u0982\u09b2\u09be")}, - "bs_BA": {"name_en": u("Bosnian"), "name": u("Bosanski")}, - "ca_ES": {"name_en": u("Catalan"), "name": u("Catal\xe0")}, - "cs_CZ": {"name_en": u("Czech"), "name": u("\u010ce\u0161tina")}, - "cy_GB": {"name_en": u("Welsh"), "name": u("Cymraeg")}, - "da_DK": {"name_en": u("Danish"), "name": u("Dansk")}, - "de_DE": {"name_en": u("German"), "name": u("Deutsch")}, - "el_GR": {"name_en": u("Greek"), "name": u("\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac")}, - "en_GB": {"name_en": u("English (UK)"), "name": u("English (UK)")}, - "en_US": {"name_en": u("English (US)"), "name": u("English (US)")}, - "es_ES": {"name_en": u("Spanish (Spain)"), "name": u("Espa\xf1ol (Espa\xf1a)")}, - "es_LA": {"name_en": u("Spanish"), "name": u("Espa\xf1ol")}, - "et_EE": {"name_en": u("Estonian"), "name": u("Eesti")}, - "eu_ES": {"name_en": u("Basque"), "name": u("Euskara")}, - "fa_IR": {"name_en": u("Persian"), "name": u("\u0641\u0627\u0631\u0633\u06cc")}, - "fi_FI": {"name_en": u("Finnish"), "name": u("Suomi")}, - "fr_CA": {"name_en": u("French (Canada)"), "name": u("Fran\xe7ais (Canada)")}, - "fr_FR": {"name_en": u("French"), "name": u("Fran\xe7ais")}, - "ga_IE": {"name_en": u("Irish"), "name": u("Gaeilge")}, - "gl_ES": {"name_en": u("Galician"), "name": u("Galego")}, - "he_IL": {"name_en": u("Hebrew"), "name": 
u("\u05e2\u05d1\u05e8\u05d9\u05ea")}, - "hi_IN": {"name_en": u("Hindi"), "name": u("\u0939\u093f\u0928\u094d\u0926\u0940")}, - "hr_HR": {"name_en": u("Croatian"), "name": u("Hrvatski")}, - "hu_HU": {"name_en": u("Hungarian"), "name": u("Magyar")}, - "id_ID": {"name_en": u("Indonesian"), "name": u("Bahasa Indonesia")}, - "is_IS": {"name_en": u("Icelandic"), "name": u("\xcdslenska")}, - "it_IT": {"name_en": u("Italian"), "name": u("Italiano")}, - "ja_JP": {"name_en": u("Japanese"), "name": u("\u65e5\u672c\u8a9e")}, - "ko_KR": {"name_en": u("Korean"), "name": u("\ud55c\uad6d\uc5b4")}, - "lt_LT": {"name_en": u("Lithuanian"), "name": u("Lietuvi\u0173")}, - "lv_LV": {"name_en": u("Latvian"), "name": u("Latvie\u0161u")}, - "mk_MK": {"name_en": u("Macedonian"), "name": u("\u041c\u0430\u043a\u0435\u0434\u043e\u043d\u0441\u043a\u0438")}, - "ml_IN": {"name_en": u("Malayalam"), "name": u("\u0d2e\u0d32\u0d2f\u0d3e\u0d33\u0d02")}, - "ms_MY": {"name_en": u("Malay"), "name": u("Bahasa Melayu")}, - "nb_NO": {"name_en": u("Norwegian (bokmal)"), "name": u("Norsk (bokm\xe5l)")}, - "nl_NL": {"name_en": u("Dutch"), "name": u("Nederlands")}, - "nn_NO": {"name_en": u("Norwegian (nynorsk)"), "name": u("Norsk (nynorsk)")}, - "pa_IN": {"name_en": u("Punjabi"), "name": u("\u0a2a\u0a70\u0a1c\u0a3e\u0a2c\u0a40")}, - "pl_PL": {"name_en": u("Polish"), "name": u("Polski")}, - "pt_BR": {"name_en": u("Portuguese (Brazil)"), "name": u("Portugu\xeas (Brasil)")}, - "pt_PT": {"name_en": u("Portuguese (Portugal)"), "name": u("Portugu\xeas (Portugal)")}, - "ro_RO": {"name_en": u("Romanian"), "name": u("Rom\xe2n\u0103")}, - "ru_RU": {"name_en": u("Russian"), "name": u("\u0420\u0443\u0441\u0441\u043a\u0438\u0439")}, - "sk_SK": {"name_en": u("Slovak"), "name": u("Sloven\u010dina")}, - "sl_SI": {"name_en": u("Slovenian"), "name": u("Sloven\u0161\u010dina")}, - "sq_AL": {"name_en": u("Albanian"), "name": u("Shqip")}, - "sr_RS": {"name_en": u("Serbian"), "name": u("\u0421\u0440\u043f\u0441\u043a\u0438")}, - "sv_SE": {"name_en": u("Swedish"), "name": u("Svenska")}, - "sw_KE": {"name_en": u("Swahili"), "name": u("Kiswahili")}, - "ta_IN": {"name_en": u("Tamil"), "name": u("\u0ba4\u0bae\u0bbf\u0bb4\u0bcd")}, - "te_IN": {"name_en": u("Telugu"), "name": u("\u0c24\u0c46\u0c32\u0c41\u0c17\u0c41")}, - "th_TH": {"name_en": u("Thai"), "name": u("\u0e20\u0e32\u0e29\u0e32\u0e44\u0e17\u0e22")}, - "tl_PH": {"name_en": u("Filipino"), "name": u("Filipino")}, - "tr_TR": {"name_en": u("Turkish"), "name": u("T\xfcrk\xe7e")}, - "uk_UA": {"name_en": u("Ukraini "), "name": u("\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430")}, - "vi_VN": {"name_en": u("Vietnamese"), "name": u("Ti\u1ebfng Vi\u1ec7t")}, - "zh_CN": {"name_en": u("Chinese (Simplified)"), "name": u("\u4e2d\u6587(\u7b80\u4f53)")}, - "zh_TW": {"name_en": u("Chinese (Traditional)"), "name": u("\u4e2d\u6587(\u7e41\u9ad4)")}, -} diff --git a/rosbridge_server/src/tornado/log.py b/rosbridge_server/src/tornado/log.py deleted file mode 100644 index 374071d41..000000000 --- a/rosbridge_server/src/tornado/log.py +++ /dev/null @@ -1,230 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
-# a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-"""Logging support for Tornado.
-
-Tornado uses three logger streams:
-
-* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
-  potentially other servers in the future)
-* ``tornado.application``: Logging of errors from application code (i.e.
-  uncaught exceptions from callbacks)
-* ``tornado.general``: General-purpose logging, including any errors
-  or warnings from Tornado itself.
-
-These streams may be configured independently using the standard library's
-`logging` module. For example, you may wish to send ``tornado.access`` logs
-to a separate file for analysis.
-"""
-from __future__ import absolute_import, division, print_function, with_statement
-
-import logging
-import logging.handlers
-import sys
-
-from tornado.escape import _unicode
-from tornado.util import unicode_type, basestring_type
-
-try:
-    import curses
-except ImportError:
-    curses = None
-
-# Logger objects for internal tornado use
-access_log = logging.getLogger("tornado.access")
-app_log = logging.getLogger("tornado.application")
-gen_log = logging.getLogger("tornado.general")
-
-
-def _stderr_supports_color():
-    color = False
-    if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
-        try:
-            curses.setupterm()
-            if curses.tigetnum("colors") > 0:
-                color = True
-        except Exception:
-            pass
-    return color
-
-
-def _safe_unicode(s):
-    try:
-        return _unicode(s)
-    except UnicodeDecodeError:
-        return repr(s)
-
-
-class LogFormatter(logging.Formatter):
-    """Log formatter used in Tornado.
-
-    Key features of this formatter are:
-
-    * Color support when logging to a terminal that supports it.
-    * Timestamps on every log line.
-    * Robust against str/bytes encoding problems.
-
-    This formatter is enabled automatically by
-    `tornado.options.parse_command_line` (unless ``--logging=none`` is
-    used).
-    """
-    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
-    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
-    DEFAULT_COLORS = {
-        logging.DEBUG: 4,  # Blue
-        logging.INFO: 2,  # Green
-        logging.WARNING: 3,  # Yellow
-        logging.ERROR: 1,  # Red
-    }
-
-    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
-                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
-        r"""
-        :arg bool color: Enables color support.
-        :arg string fmt: Log message format.
-            It will be applied to the attributes dict of log records. The
-            text between ``%(color)s`` and ``%(end_color)s`` will be colored
-            depending on the level if color support is on.
-        :arg dict colors: color mappings from logging level to terminal color
-            code
-        :arg string datefmt: Datetime format.
-            Used for formatting the ``%(asctime)s`` placeholder in ``fmt``.
-
-        .. versionchanged:: 3.2
-
-           Added ``fmt`` and ``datefmt`` arguments.
-        """
-        logging.Formatter.__init__(self, datefmt=datefmt)
-        self._fmt = fmt
-
-        self._colors = {}
-        if color and _stderr_supports_color():
-            # The curses module has some str/bytes confusion in
-            # python3. Until version 3.2.3, most methods return
-            # bytes, but only accept strings. In addition, we want to
-            # output these strings with the logging module, which
-            # works with unicode strings. The explicit calls to
-            # unicode() below are harmless in python2 but will do the
-            # right conversion in python 3.
-            fg_color = (curses.tigetstr("setaf") or
-                        curses.tigetstr("setf") or "")
-            if (3, 0) < sys.version_info < (3, 2, 3):
-                fg_color = unicode_type(fg_color, "ascii")
-
-            for levelno, code in colors.items():
-                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
-            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
-        else:
-            self._normal = ''
-
-    def format(self, record):
-        try:
-            message = record.getMessage()
-            assert isinstance(message, basestring_type)  # guaranteed by logging
-            # Encoding notes: The logging module prefers to work with character
-            # strings, but only enforces that log messages are instances of
-            # basestring. In python 2, non-ascii bytestrings will make
-            # their way through the logging framework until they blow up with
-            # an unhelpful decoding error (with this formatter it happens
-            # when we attach the prefix, but there are other opportunities for
-            # exceptions further along in the framework).
-            #
-            # If a byte string makes it this far, convert it to unicode to
-            # ensure it will make it out to the logs. Use repr() as a fallback
-            # to ensure that all byte strings can be converted successfully,
-            # but don't do it by default so we don't add extra quotes to ascii
-            # bytestrings. This is a bit of a hacky place to do this, but
-            # it's worth it since the encoding errors that would otherwise
-            # result are so useless (and tornado is fond of using utf8-encoded
-            # byte strings wherever possible).
-            record.message = _safe_unicode(message)
-        except Exception as e:
-            record.message = "Bad message (%r): %r" % (e, record.__dict__)
-
-        record.asctime = self.formatTime(record, self.datefmt)
-
-        if record.levelno in self._colors:
-            record.color = self._colors[record.levelno]
-            record.end_color = self._normal
-        else:
-            record.color = record.end_color = ''
-
-        formatted = self._fmt % record.__dict__
-
-        if record.exc_info:
-            if not record.exc_text:
-                record.exc_text = self.formatException(record.exc_info)
-        if record.exc_text:
-            # exc_text contains multiple lines. We need to _safe_unicode
-            # each line separately so that non-utf8 bytes don't cause
-            # all the newlines to turn into '\n'.
-            lines = [formatted.rstrip()]
-            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
-            formatted = '\n'.join(lines)
-        return formatted.replace("\n", "\n    ")
-
-
-def enable_pretty_logging(options=None, logger=None):
-    """Turns on formatted logging output as configured.
-
-    This is called automatically by `tornado.options.parse_command_line`
-    and `tornado.options.parse_config_file`.
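For applications that configure `logging` by hand rather than through `tornado.options`, the formatter can also be attached directly; a minimal sketch::

    import logging

    from tornado.log import LogFormatter, app_log

    channel = logging.StreamHandler()
    channel.setFormatter(LogFormatter())  # colorized if stderr is a tty
    root = logging.getLogger()
    root.setLevel(logging.INFO)
    root.addHandler(channel)

    app_log.warning("handler raised %s", "ValueError")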
- """ - if options is None: - from tornado.options import options - if options.logging is None or options.logging.lower() == 'none': - return - if logger is None: - logger = logging.getLogger() - logger.setLevel(getattr(logging, options.logging.upper())) - if options.log_file_prefix: - channel = logging.handlers.RotatingFileHandler( - filename=options.log_file_prefix, - maxBytes=options.log_file_max_size, - backupCount=options.log_file_num_backups) - channel.setFormatter(LogFormatter(color=False)) - logger.addHandler(channel) - - if (options.log_to_stderr or - (options.log_to_stderr is None and not logger.handlers)): - # Set up color if we are in a tty and curses is installed - channel = logging.StreamHandler() - channel.setFormatter(LogFormatter()) - logger.addHandler(channel) - - -def define_logging_options(options=None): - if options is None: - # late import to prevent cycle - from tornado.options import options - options.define("logging", default="info", - help=("Set the Python log level. If 'none', tornado won't touch the " - "logging configuration."), - metavar="debug|info|warning|error|none") - options.define("log_to_stderr", type=bool, default=None, - help=("Send log output to stderr (colorized if possible). " - "By default use stderr if --log_file_prefix is not set and " - "no other logging is configured.")) - options.define("log_file_prefix", type=str, default=None, metavar="PATH", - help=("Path prefix for log files. " - "Note that if you are running multiple tornado processes, " - "log_file_prefix must be different for each of them (e.g. " - "include the port number)")) - options.define("log_file_max_size", type=int, default=100 * 1000 * 1000, - help="max size of log files before rollover") - options.define("log_file_num_backups", type=int, default=10, - help="number of log files to keep") - - options.add_parse_callback(enable_pretty_logging) diff --git a/rosbridge_server/src/tornado/netutil.py b/rosbridge_server/src/tornado/netutil.py deleted file mode 100644 index f147c974d..000000000 --- a/rosbridge_server/src/tornado/netutil.py +++ /dev/null @@ -1,464 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Miscellaneous network utility code.""" - -from __future__ import absolute_import, division, print_function, with_statement - -import errno -import os -import platform -import socket -import stat - -from tornado.concurrent import dummy_executor, run_on_executor -from tornado.ioloop import IOLoop -from tornado.platform.auto import set_close_exec -from tornado.util import u, Configurable, errno_from_exception - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine - ssl = None - -try: - xrange # py2 -except NameError: - xrange = range # py3 - -if hasattr(ssl, 'match_hostname') and hasattr(ssl, 'CertificateError'): # python 3.2+ - ssl_match_hostname = ssl.match_hostname - SSLCertificateError = ssl.CertificateError -elif ssl is None: - ssl_match_hostname = SSLCertificateError = None -else: - import backports.ssl_match_hostname - ssl_match_hostname = backports.ssl_match_hostname.match_hostname - SSLCertificateError = backports.ssl_match_hostname.CertificateError - -# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode, -# getaddrinfo attempts to import encodings.idna. If this is done at -# module-import time, the import lock is already held by the main thread, -# leading to deadlock. Avoid it by caching the idna encoder on the main -# thread now. -u('foo').encode('idna') - -# These errnos indicate that a non-blocking operation must be retried -# at a later time. On most platforms they're the same value, but on -# some they differ. -_ERRNO_WOULDBLOCK = (errno.EWOULDBLOCK, errno.EAGAIN) - -if hasattr(errno, "WSAEWOULDBLOCK"): - _ERRNO_WOULDBLOCK += (errno.WSAEWOULDBLOCK,) - -# Default backlog used when calling sock.listen() -_DEFAULT_BACKLOG = 128 - -def bind_sockets(port, address=None, family=socket.AF_UNSPEC, - backlog=_DEFAULT_BACKLOG, flags=None): - """Creates listening sockets bound to the given port and address. - - Returns a list of socket objects (multiple sockets are returned if - the given address maps to multiple IP addresses, which is most common - for mixed IPv4 and IPv6 use). - - Address may be either an IP address or hostname. If it's a hostname, - the server will listen on all IP addresses associated with the - name. Address may be an empty string or None to listen on all - available interfaces. Family may be set to either `socket.AF_INET` - or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise - both will be used if available. - - The ``backlog`` argument has the same meaning as for - `socket.listen() `. - - ``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like - ``socket.AI_PASSIVE | socket.AI_NUMERICHOST``. - """ - sockets = [] - if address == "": - address = None - if not socket.has_ipv6 and family == socket.AF_UNSPEC: - # Python can be compiled with --disable-ipv6, which causes - # operations on AF_INET6 sockets to fail, but does not - # automatically exclude those results from getaddrinfo - # results. - # http://bugs.python.org/issue16208 - family = socket.AF_INET - if flags is None: - flags = socket.AI_PASSIVE - bound_port = None - for res in set(socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, - 0, flags)): - af, socktype, proto, canonname, sockaddr = res - if (platform.system() == 'Darwin' and address == 'localhost' and - af == socket.AF_INET6 and sockaddr[3] != 0): - # Mac OS X includes a link-local address fe80::1%lo0 in the - # getaddrinfo results for 'localhost'. 
-            # doesn't understand that this is a local address and will
-            # prompt for access (often repeatedly, due to an apparent
-            # bug in its ability to remember granting access to an
-            # application). Skip these addresses.
-            continue
-        try:
-            sock = socket.socket(af, socktype, proto)
-        except socket.error as e:
-            if errno_from_exception(e) == errno.EAFNOSUPPORT:
-                continue
-            raise
-        set_close_exec(sock.fileno())
-        if os.name != 'nt':
-            sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        if af == socket.AF_INET6:
-            # On linux, ipv6 sockets accept ipv4 too by default,
-            # but this makes it impossible to bind to both
-            # 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
-            # separate sockets *must* be used to listen for both ipv4
-            # and ipv6. For consistency, always disable ipv4 on our
-            # ipv6 sockets and use a separate ipv4 socket when needed.
-            #
-            # Python 2.x on windows doesn't have IPPROTO_IPV6.
-            if hasattr(socket, "IPPROTO_IPV6"):
-                sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
-
-        # automatic port allocation with port=None
-        # should bind on the same port on IPv4 and IPv6
-        host, requested_port = sockaddr[:2]
-        if requested_port == 0 and bound_port is not None:
-            sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
-
-        sock.setblocking(0)
-        sock.bind(sockaddr)
-        bound_port = sock.getsockname()[1]
-        sock.listen(backlog)
-        sockets.append(sock)
-    return sockets
-
-if hasattr(socket, 'AF_UNIX'):
-    def bind_unix_socket(file, mode=0o600, backlog=_DEFAULT_BACKLOG):
-        """Creates a listening unix socket.
-
-        If a socket with the given name already exists, it will be deleted.
-        If any other file with that name exists, an exception will be
-        raised.
-
-        Returns a socket object (not a list of socket objects like
-        `bind_sockets`)
-        """
-        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        set_close_exec(sock.fileno())
-        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
-        sock.setblocking(0)
-        try:
-            st = os.stat(file)
-        except OSError as err:
-            if errno_from_exception(err) != errno.ENOENT:
-                raise
-        else:
-            if stat.S_ISSOCK(st.st_mode):
-                os.remove(file)
-            else:
-                raise ValueError("File %s exists and is not a socket" % file)
-        sock.bind(file)
-        os.chmod(file, mode)
-        sock.listen(backlog)
-        return sock
-
-
-def add_accept_handler(sock, callback, io_loop=None):
-    """Adds an `.IOLoop` event handler to accept new connections on ``sock``.
-
-    When a connection is accepted, ``callback(connection, address)`` will
-    be run (``connection`` is a socket object, and ``address`` is the
-    address of the other end of the connection). Note that this signature
-    is different from the ``callback(fd, events)`` signature used for
-    `.IOLoop` handlers.
-    """
-    if io_loop is None:
-        io_loop = IOLoop.current()
-
-    def accept_handler(fd, events):
-        # More connections may come in while we're handling callbacks;
-        # to prevent starvation of other tasks we must limit the number
-        # of connections we accept at a time. Ideally we would accept
-        # up to the number of connections that were waiting when we
-        # entered this method, but this information is not available
-        # (and rearranging this method to call accept() as many times
-        # as possible before running any callbacks would have adverse
-        # effects on load balancing in multiprocess configurations).
-        # Instead, we use the (default) listen backlog as a rough
-        # heuristic for the number of connections we can reasonably
-        # accept at once.
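Together with `bind_sockets` above, this is enough for a toy accept loop (a sketch only; real servers would normally build on `tornado.tcpserver.TCPServer`, which wraps these helpers)::

    from tornado import ioloop
    from tornado.netutil import add_accept_handler, bind_sockets

    def on_connection(connection, address):
        # ``connection`` is a plain accepted socket object.
        print("connection from %r" % (address,))
        connection.close()

    for sock in bind_sockets(8888, address="127.0.0.1"):
        add_accept_handler(sock, on_connection)
    ioloop.IOLoop.current().start()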
-        for i in xrange(_DEFAULT_BACKLOG):
-            try:
-                connection, address = sock.accept()
-            except socket.error as e:
-                # _ERRNO_WOULDBLOCK indicates we have accepted every
-                # connection that is available.
-                if errno_from_exception(e) in _ERRNO_WOULDBLOCK:
-                    return
-                # ECONNABORTED indicates that there was a connection
-                # but it was closed while still in the accept queue.
-                # (observed on FreeBSD).
-                if errno_from_exception(e) == errno.ECONNABORTED:
-                    continue
-                raise
-            callback(connection, address)
-    io_loop.add_handler(sock, accept_handler, IOLoop.READ)
-
-
-def is_valid_ip(ip):
-    """Returns true if the given string is a well-formed IP address.
-
-    Supports IPv4 and IPv6.
-    """
-    if not ip or '\x00' in ip:
-        # getaddrinfo resolves empty strings to localhost, and truncates
-        # on zero bytes.
-        return False
-    try:
-        res = socket.getaddrinfo(ip, 0, socket.AF_UNSPEC,
-                                 socket.SOCK_STREAM,
-                                 0, socket.AI_NUMERICHOST)
-        return bool(res)
-    except socket.gaierror as e:
-        if e.args[0] == socket.EAI_NONAME:
-            return False
-        raise
-    return True
-
-
-class Resolver(Configurable):
-    """Configurable asynchronous DNS resolver interface.
-
-    By default, a blocking implementation is used (which simply calls
-    `socket.getaddrinfo`). An alternative implementation can be
-    chosen with the `Resolver.configure <.Configurable.configure>`
-    class method::
-
-        Resolver.configure('tornado.netutil.ThreadedResolver')
-
-    The implementations of this interface included with Tornado are
-
-    * `tornado.netutil.BlockingResolver`
-    * `tornado.netutil.ThreadedResolver`
-    * `tornado.netutil.OverrideResolver`
-    * `tornado.platform.twisted.TwistedResolver`
-    * `tornado.platform.caresresolver.CaresResolver`
-    """
-    @classmethod
-    def configurable_base(cls):
-        return Resolver
-
-    @classmethod
-    def configurable_default(cls):
-        return BlockingResolver
-
-    def resolve(self, host, port, family=socket.AF_UNSPEC, callback=None):
-        """Resolves an address.
-
-        The ``host`` argument is a string which may be a hostname or a
-        literal IP address.
-
-        Returns a `.Future` whose result is a list of (family,
-        address) pairs, where address is a tuple suitable to pass to
-        `socket.connect` (i.e. a ``(host, port)`` pair for IPv4;
-        additional fields may be present for IPv6). If a ``callback``
-        is passed, it will be run with the result as an argument when
-        it is complete.
-        """
-        raise NotImplementedError()
-
-    def close(self):
-        """Closes the `Resolver`, freeing any resources used.
-
-        .. versionadded:: 3.1
-
-        """
-        pass
-
-
-class ExecutorResolver(Resolver):
-    """Resolver implementation using a `concurrent.futures.Executor`.
-
-    Use this instead of `ThreadedResolver` when you require additional
-    control over the executor being used.
-
-    The executor will be shut down when the resolver is closed unless
-    ``close_executor=False``; use this if you want to reuse the same
-    executor elsewhere.
-    """
-    def initialize(self, io_loop=None, executor=None, close_executor=True):
-        self.io_loop = io_loop or IOLoop.current()
-        if executor is not None:
-            self.executor = executor
-            self.close_executor = close_executor
-        else:
-            self.executor = dummy_executor
-            self.close_executor = False
-
-    def close(self):
-        if self.close_executor:
-            self.executor.shutdown()
-        self.executor = None
-
-    @run_on_executor
-    def resolve(self, host, port, family=socket.AF_UNSPEC):
-        # On Solaris, getaddrinfo fails if the given port is not found
-        # in /etc/services and no socket type is given, so we must pass
-        # one here. 
The socket type used here doesn't seem to actually - # matter (we discard the one we get back in the results), - # so the addresses we return should still be usable with SOCK_DGRAM. - addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM) - results = [] - for family, socktype, proto, canonname, address in addrinfo: - results.append((family, address)) - return results - - -class BlockingResolver(ExecutorResolver): - """Default `Resolver` implementation, using `socket.getaddrinfo`. - - The `.IOLoop` will be blocked during the resolution, although the - callback will not be run until the next `.IOLoop` iteration. - """ - def initialize(self, io_loop=None): - super(BlockingResolver, self).initialize(io_loop=io_loop) - - -class ThreadedResolver(ExecutorResolver): - """Multithreaded non-blocking `Resolver` implementation. - - Requires the `concurrent.futures` package to be installed - (available in the standard library since Python 3.2, - installable with ``pip install futures`` in older versions). - - The thread pool size can be configured with:: - - Resolver.configure('tornado.netutil.ThreadedResolver', - num_threads=10) - - .. versionchanged:: 3.1 - All ``ThreadedResolvers`` share a single thread pool, whose - size is set by the first one to be created. - """ - _threadpool = None - _threadpool_pid = None - - def initialize(self, io_loop=None, num_threads=10): - threadpool = ThreadedResolver._create_threadpool(num_threads) - super(ThreadedResolver, self).initialize( - io_loop=io_loop, executor=threadpool, close_executor=False) - - @classmethod - def _create_threadpool(cls, num_threads): - pid = os.getpid() - if cls._threadpool_pid != pid: - # Threads cannot survive after a fork, so if our pid isn't what it - # was when we created the pool then delete it. - cls._threadpool = None - if cls._threadpool is None: - from concurrent.futures import ThreadPoolExecutor - cls._threadpool = ThreadPoolExecutor(num_threads) - cls._threadpool_pid = pid - return cls._threadpool - - -class OverrideResolver(Resolver): - """Wraps a resolver with a mapping of overrides. - - This can be used to make local DNS changes (e.g. for testing) - without modifying system-wide settings. - - The mapping can contain either host strings or host-port pairs. - """ - def initialize(self, resolver, mapping): - self.resolver = resolver - self.mapping = mapping - - def close(self): - self.resolver.close() - - def resolve(self, host, port, *args, **kwargs): - if (host, port) in self.mapping: - host, port = self.mapping[(host, port)] - elif host in self.mapping: - host = self.mapping[host] - return self.resolver.resolve(host, port, *args, **kwargs) - - -# These are the keyword arguments to ssl.wrap_socket that must be translated -# to their SSLContext equivalents (the other arguments are still passed -# to SSLContext.wrap_socket). -_SSL_CONTEXT_KEYWORDS = frozenset(['ssl_version', 'certfile', 'keyfile', - 'cert_reqs', 'ca_certs', 'ciphers']) - - -def ssl_options_to_context(ssl_options): - """Try to convert an ``ssl_options`` dictionary to an - `~ssl.SSLContext` object. - - The ``ssl_options`` dictionary contains keywords to be passed to - `ssl.wrap_socket`. In Python 3.2+, `ssl.SSLContext` objects can - be used instead. This function converts the dict form to its - `~ssl.SSLContext` equivalent, and may be used when a component which - accepts both forms needs to upgrade to the `~ssl.SSLContext` version - to use features like SNI or NPN. 
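For example (a sketch; the certificate paths are hypothetical placeholders)::

    import ssl

    from tornado.netutil import ssl_options_to_context

    ctx = ssl_options_to_context({
        'certfile': 'server.crt',  # hypothetical paths
        'keyfile': 'server.key',
        'cert_reqs': ssl.CERT_NONE,
    })
    # ctx is an ssl.SSLContext on Pythons that provide one; on older
    # versions the dict is returned unchanged for use with
    # ssl.wrap_socket.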
- """ - if isinstance(ssl_options, dict): - assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options - if (not hasattr(ssl, 'SSLContext') or - isinstance(ssl_options, ssl.SSLContext)): - return ssl_options - context = ssl.SSLContext( - ssl_options.get('ssl_version', ssl.PROTOCOL_SSLv23)) - if 'certfile' in ssl_options: - context.load_cert_chain(ssl_options['certfile'], ssl_options.get('keyfile', None)) - if 'cert_reqs' in ssl_options: - context.verify_mode = ssl_options['cert_reqs'] - if 'ca_certs' in ssl_options: - context.load_verify_locations(ssl_options['ca_certs']) - if 'ciphers' in ssl_options: - context.set_ciphers(ssl_options['ciphers']) - if hasattr(ssl, 'OP_NO_COMPRESSION'): - # Disable TLS compression to avoid CRIME and related attacks. - # This constant wasn't added until python 3.3. - context.options |= ssl.OP_NO_COMPRESSION - return context - - -def ssl_wrap_socket(socket, ssl_options, server_hostname=None, **kwargs): - """Returns an ``ssl.SSLSocket`` wrapping the given socket. - - ``ssl_options`` may be either a dictionary (as accepted by - `ssl_options_to_context`) or an `ssl.SSLContext` object. - Additional keyword arguments are passed to ``wrap_socket`` - (either the `~ssl.SSLContext` method or the `ssl` module function - as appropriate). - """ - context = ssl_options_to_context(ssl_options) - if hasattr(ssl, 'SSLContext') and isinstance(context, ssl.SSLContext): - if server_hostname is not None and getattr(ssl, 'HAS_SNI'): - # Python doesn't have server-side SNI support so we can't - # really unittest this, but it can be manually tested with - # python3.2 -m tornado.httpclient https://sni.velox.ch - return context.wrap_socket(socket, server_hostname=server_hostname, - **kwargs) - else: - return context.wrap_socket(socket, **kwargs) - else: - return ssl.wrap_socket(socket, **dict(context, **kwargs)) diff --git a/rosbridge_server/src/tornado/options.py b/rosbridge_server/src/tornado/options.py deleted file mode 100644 index fa9c269ea..000000000 --- a/rosbridge_server/src/tornado/options.py +++ /dev/null @@ -1,553 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A command line parsing module that lets modules define their own options. - -Each module defines its own options which are added to the global -option namespace, e.g.:: - - from tornado.options import define, options - - define("mysql_host", default="127.0.0.1:3306", help="Main user DB") - define("memcache_hosts", default="127.0.0.1:11011", multiple=True, - help="Main user memcache servers") - - def connect(): - db = database.Connection(options.mysql_host) - ... - -The ``main()`` method of your application does not need to be aware of all of -the options used throughout your program; they are all automatically loaded -when the modules are loaded. However, all modules that define options -must have been imported before the command line is parsed. 
-
-Your ``main()`` method can parse the command line or parse a config file with
-either::
-
-    tornado.options.parse_command_line()
-    # or
-    tornado.options.parse_config_file("/etc/server.conf")
-
-Command line formats are what you would expect (``--myoption=myvalue``).
-Config files are just Python files. Global names become options, e.g.::
-
-    myoption = "myvalue"
-    myotheroption = "myothervalue"
-
-We support `datetimes`, `timedeltas`, ints, and floats (just pass a
-``type`` kwarg to `define`). We also accept multi-value options. See the
-documentation for `define()` below.
-
-`tornado.options.options` is a singleton instance of `OptionParser`, and
-the top-level functions in this module (`define`, `parse_command_line`, etc)
-simply call methods on it. You may create additional `OptionParser`
-instances to define isolated sets of options, such as for subcommands.
-
-.. note::
-
-   By default, several options are defined that will configure the
-   standard `logging` module when `parse_command_line` or `parse_config_file`
-   are called. If you want Tornado to leave the logging configuration
-   alone so you can manage it yourself, either pass ``--logging=none``
-   on the command line or do the following to disable it in code::
-
-       from tornado.options import options, parse_command_line
-       options.logging = None
-       parse_command_line()
-"""
-
-from __future__ import absolute_import, division, print_function, with_statement
-
-import datetime
-import numbers
-import re
-import sys
-import os
-import textwrap
-
-from tornado.escape import _unicode
-from tornado.log import define_logging_options
-from tornado import stack_context
-from tornado.util import basestring_type, exec_in
-
-
-class Error(Exception):
-    """Exception raised by errors in the options module."""
-    pass
-
-
-class OptionParser(object):
-    """A collection of options, a dictionary with object-like access.
-
-    Normally accessed via static functions in the `tornado.options` module,
-    which reference a global instance.
-    """
-    def __init__(self):
-        # we have to use self.__dict__ because we override setattr.
-        self.__dict__['_options'] = {}
-        self.__dict__['_parse_callbacks'] = []
-        self.define("help", type=bool, help="show this help information",
-                    callback=self._help_callback)
-
-    def __getattr__(self, name):
-        if isinstance(self._options.get(name), _Option):
-            return self._options[name].value()
-        raise AttributeError("Unrecognized option %r" % name)
-
-    def __setattr__(self, name, value):
-        if isinstance(self._options.get(name), _Option):
-            return self._options[name].set(value)
-        raise AttributeError("Unrecognized option %r" % name)
-
-    def __iter__(self):
-        return iter(self._options)
-
-    def __getitem__(self, item):
-        return self._options[item].value()
-
-    def items(self):
-        """A sequence of (name, value) pairs.
-
-        .. versionadded:: 3.1
-        """
-        return [(name, opt.value()) for name, opt in self._options.items()]
-
-    def groups(self):
-        """The set of option-groups created by ``define``.
-
-        .. versionadded:: 3.1
-        """
-        return set(opt.group_name for opt in self._options.values())
-
-    def group_dict(self, group):
-        """The names and values of options in a group.
-
-        Useful for copying options into Application settings::
-
-            from tornado.options import define, parse_command_line, options
-
-            define('template_path', group='application')
-            define('static_path', group='application')
-
-            parse_command_line()
-
-            application = Application(
-                handlers, **options.group_dict('application'))
-
-        .. versionadded:: 3.1
-        """
-        return dict(
-            (name, opt.value()) for name, opt in self._options.items()
-            if not group or group == opt.group_name)
-
-    def as_dict(self):
-        """The names and values of all options.
-
-        .. versionadded:: 3.1
-        """
-        return dict(
-            (name, opt.value()) for name, opt in self._options.items())
-
-    def define(self, name, default=None, type=None, help=None, metavar=None,
-               multiple=False, group=None, callback=None):
-        """Defines a new command line option.
-
-        If ``type`` is given (one of str, float, int, datetime, or timedelta)
-        or can be inferred from the ``default``, we parse the command line
-        arguments based on the given type. If ``multiple`` is True, we accept
-        comma-separated values, and the option value is always a list.
-
-        For multi-value integers, we also accept the syntax ``x:y``, which
-        expands to the integer range from ``x`` to ``y``, inclusive at both
-        ends - very useful for long integer ranges.
-
-        ``help`` and ``metavar`` are used to construct the
-        automatically generated command line help string. The help
-        message is formatted like::
-
-           --name=METAVAR                      help string
-
-        ``group`` is used to group the defined options in logical
-        groups. By default, command line options are grouped by the
-        file in which they are defined.
-
-        Command line option names must be unique globally. They can be parsed
-        from the command line with `parse_command_line` or parsed from a
-        config file with `parse_config_file`.
-
-        If a ``callback`` is given, it will be run with the new value whenever
-        the option is changed. This can be used to combine command-line
-        and file-based options::
-
-            define("config", type=str, help="path to config file",
-                   callback=lambda path: parse_config_file(path, final=False))
-
-        With this definition, options in the file specified by ``--config`` will
-        override options set earlier on the command line, but can be overridden
-        by later flags.
-        """
-        if name in self._options:
-            raise Error("Option %r already defined in %s" %
-                        (name, self._options[name].file_name))
-        frame = sys._getframe(0)
-        options_file = frame.f_code.co_filename
-        file_name = frame.f_back.f_code.co_filename
-        if file_name == options_file:
-            file_name = ""
-        if type is None:
-            if not multiple and default is not None:
-                type = default.__class__
-            else:
-                type = str
-        if group:
-            group_name = group
-        else:
-            group_name = file_name
-        self._options[name] = _Option(name, file_name=file_name,
-                                      default=default, type=type, help=help,
-                                      metavar=metavar, multiple=multiple,
-                                      group_name=group_name,
-                                      callback=callback)
-
-    def parse_command_line(self, args=None, final=True):
-        """Parses all options given on the command line (defaults to
-        `sys.argv`).
-
-        Note that ``args[0]`` is ignored since it is the program name
-        in `sys.argv`.
-
-        We return a list of all arguments that are not parsed as options.
-
-        If ``final`` is ``False``, parse callbacks will not be run.
-        This is useful for applications that wish to combine configurations
-        from multiple sources.
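For example (a sketch; the flag values are invented)::

    from tornado.options import define, options, parse_command_line

    define("port", default=8888, type=int, help="listen port")
    define("debug", type=bool, default=False)

    # $ python app.py --port=9000 --debug extra.txt
    remaining = parse_command_line()
    print(options.port, options.debug, remaining)  # 9000 True ['extra.txt']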
- """ - if args is None: - args = sys.argv - remaining = [] - for i in range(1, len(args)): - # All things after the last option are command line arguments - if not args[i].startswith("-"): - remaining = args[i:] - break - if args[i] == "--": - remaining = args[i + 1:] - break - arg = args[i].lstrip("-") - name, equals, value = arg.partition("=") - name = name.replace('-', '_') - if not name in self._options: - self.print_help() - raise Error('Unrecognized command line option: %r' % name) - option = self._options[name] - if not equals: - if option.type == bool: - value = "true" - else: - raise Error('Option %r requires a value' % name) - option.parse(value) - - if final: - self.run_parse_callbacks() - - return remaining - - def parse_config_file(self, path, final=True): - """Parses and loads the Python config file at the given path. - - If ``final`` is ``False``, parse callbacks will not be run. - This is useful for applications that wish to combine configurations - from multiple sources. - """ - config = {} - with open(path) as f: - exec_in(f.read(), config, config) - for name in config: - if name in self._options: - self._options[name].set(config[name]) - - if final: - self.run_parse_callbacks() - - def print_help(self, file=None): - """Prints all the command line options to stderr (or another file).""" - if file is None: - file = sys.stderr - print("Usage: %s [OPTIONS]" % sys.argv[0], file=file) - print("\nOptions:\n", file=file) - by_group = {} - for option in self._options.values(): - by_group.setdefault(option.group_name, []).append(option) - - for filename, o in sorted(by_group.items()): - if filename: - print("\n%s options:\n" % os.path.normpath(filename), file=file) - o.sort(key=lambda option: option.name) - for option in o: - prefix = option.name - if option.metavar: - prefix += "=" + option.metavar - description = option.help or "" - if option.default is not None and option.default != '': - description += " (default %s)" % option.default - lines = textwrap.wrap(description, 79 - 35) - if len(prefix) > 30 or len(lines) == 0: - lines.insert(0, '') - print(" --%-30s %s" % (prefix, lines[0]), file=file) - for line in lines[1:]: - print("%-34s %s" % (' ', line), file=file) - print(file=file) - - def _help_callback(self, value): - if value: - self.print_help() - sys.exit(0) - - def add_parse_callback(self, callback): - """Adds a parse callback, to be invoked when option parsing is done.""" - self._parse_callbacks.append(stack_context.wrap(callback)) - - def run_parse_callbacks(self): - for callback in self._parse_callbacks: - callback() - - def mockable(self): - """Returns a wrapper around self that is compatible with - `mock.patch `. - - The `mock.patch ` function (included in - the standard library `unittest.mock` package since Python 3.3, - or in the third-party ``mock`` package for older versions of - Python) is incompatible with objects like ``options`` that - override ``__getattr__`` and ``__setattr__``. This function - returns an object that can be used with `mock.patch.object - ` to modify option values:: - - with mock.patch.object(options.mockable(), 'name', value): - assert options.name == value - """ - return _Mockable(self) - - -class _Mockable(object): - """`mock.patch` compatible wrapper for `OptionParser`. 
- - As of ``mock`` version 1.0.1, when an object uses ``__getattr__`` - hooks instead of ``__dict__``, ``patch.__exit__`` tries to delete - the attribute it set instead of setting a new one (assuming that - the object does not capture ``__setattr__``, so the patch - created a new attribute in ``__dict__``). - - _Mockable's getattr and setattr pass through to the underlying - OptionParser, and delattr undoes the effect of a previous setattr. - """ - def __init__(self, options): - # Modify __dict__ directly to bypass __setattr__ - self.__dict__['_options'] = options - self.__dict__['_originals'] = {} - - def __getattr__(self, name): - return getattr(self._options, name) - - def __setattr__(self, name, value): - assert name not in self._originals, "don't reuse mockable objects" - self._originals[name] = getattr(self._options, name) - setattr(self._options, name, value) - - def __delattr__(self, name): - setattr(self._options, name, self._originals.pop(name)) - - -class _Option(object): - UNSET = object() - - def __init__(self, name, default=None, type=basestring_type, help=None, - metavar=None, multiple=False, file_name=None, group_name=None, - callback=None): - if default is None and multiple: - default = [] - self.name = name - self.type = type - self.help = help - self.metavar = metavar - self.multiple = multiple - self.file_name = file_name - self.group_name = group_name - self.callback = callback - self.default = default - self._value = _Option.UNSET - - def value(self): - return self.default if self._value is _Option.UNSET else self._value - - def parse(self, value): - _parse = { - datetime.datetime: self._parse_datetime, - datetime.timedelta: self._parse_timedelta, - bool: self._parse_bool, - basestring_type: self._parse_string, - }.get(self.type, self.type) - if self.multiple: - self._value = [] - for part in value.split(","): - if issubclass(self.type, numbers.Integral): - # allow ranges of the form X:Y (inclusive at both ends) - lo, _, hi = part.partition(":") - lo = _parse(lo) - hi = _parse(hi) if hi else lo - self._value.extend(range(lo, hi + 1)) - else: - self._value.append(_parse(part)) - else: - self._value = _parse(value) - if self.callback is not None: - self.callback(self._value) - return self.value() - - def set(self, value): - if self.multiple: - if not isinstance(value, list): - raise Error("Option %r is required to be a list of %s" % - (self.name, self.type.__name__)) - for item in value: - if item is not None and not isinstance(item, self.type): - raise Error("Option %r is required to be a list of %s" % - (self.name, self.type.__name__)) - else: - if value is not None and not isinstance(value, self.type): - raise Error("Option %r is required to be a %s (%s given)" % - (self.name, self.type.__name__, type(value))) - self._value = value - if self.callback is not None: - self.callback(self._value) - - # Supported date/time formats in our options - _DATETIME_FORMATS = [ - "%a %b %d %H:%M:%S %Y", - "%Y-%m-%d %H:%M:%S", - "%Y-%m-%d %H:%M", - "%Y-%m-%dT%H:%M", - "%Y%m%d %H:%M:%S", - "%Y%m%d %H:%M", - "%Y-%m-%d", - "%Y%m%d", - "%H:%M:%S", - "%H:%M", - ] - - def _parse_datetime(self, value): - for format in self._DATETIME_FORMATS: - try: - return datetime.datetime.strptime(value, format) - except ValueError: - pass - raise Error('Unrecognized date/time format: %r' % value) - - _TIMEDELTA_ABBREVS = [ - ('hours', ['h']), - ('minutes', ['m', 'min']), - ('seconds', ['s', 'sec']), - ('milliseconds', ['ms']), - ('microseconds', ['us']), - ('days', ['d']), - ('weeks', ['w']), - ] - - 
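Given the format and abbreviation tables above, option values parse into rich types roughly like this (a sketch; the option names are hypothetical)::

    import datetime
    from tornado.options import define, options, parse_command_line

    define("timeout", type=datetime.timedelta)
    define("start", type=datetime.datetime)

    # args[0] is treated as the program name and ignored
    parse_command_line(["prog", "--timeout=90m", "--start=2014-05-01 12:00"])
    assert options.timeout == datetime.timedelta(minutes=90)
    assert options.start == datetime.datetime(2014, 5, 1, 12, 0)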
_TIMEDELTA_ABBREV_DICT = dict( - (abbrev, full) for full, abbrevs in _TIMEDELTA_ABBREVS - for abbrev in abbrevs) - - _FLOAT_PATTERN = r'[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?' - - _TIMEDELTA_PATTERN = re.compile( - r'\s*(%s)\s*(\w*)\s*' % _FLOAT_PATTERN, re.IGNORECASE) - - def _parse_timedelta(self, value): - try: - sum = datetime.timedelta() - start = 0 - while start < len(value): - m = self._TIMEDELTA_PATTERN.match(value, start) - if not m: - raise Exception() - num = float(m.group(1)) - units = m.group(2) or 'seconds' - units = self._TIMEDELTA_ABBREV_DICT.get(units, units) - sum += datetime.timedelta(**{units: num}) - start = m.end() - return sum - except Exception: - raise - - def _parse_bool(self, value): - return value.lower() not in ("false", "0", "f") - - def _parse_string(self, value): - return _unicode(value) - - -options = OptionParser() -"""Global options object. - -All defined options are available as attributes on this object. -""" - - -def define(name, default=None, type=None, help=None, metavar=None, - multiple=False, group=None, callback=None): - """Defines an option in the global namespace. - - See `OptionParser.define`. - """ - return options.define(name, default=default, type=type, help=help, - metavar=metavar, multiple=multiple, group=group, - callback=callback) - - -def parse_command_line(args=None, final=True): - """Parses global options from the command line. - - See `OptionParser.parse_command_line`. - """ - return options.parse_command_line(args, final=final) - - -def parse_config_file(path, final=True): - """Parses global options from a config file. - - See `OptionParser.parse_config_file`. - """ - return options.parse_config_file(path, final=final) - - -def print_help(file=None): - """Prints all the command line options to stderr (or another file). - - See `OptionParser.print_help`. - """ - return options.print_help(file) - - -def add_parse_callback(callback): - """Adds a parse callback, to be invoked when option parsing is done. - - See `OptionParser.add_parse_callback` - """ - options.add_parse_callback(callback) - - -# Default options -define_logging_options(options) diff --git a/rosbridge_server/src/tornado/platform/__init__.py b/rosbridge_server/src/tornado/platform/__init__.py deleted file mode 100644 index e69de29bb..000000000 diff --git a/rosbridge_server/src/tornado/platform/asyncio.py b/rosbridge_server/src/tornado/platform/asyncio.py deleted file mode 100644 index b40f0141a..000000000 --- a/rosbridge_server/src/tornado/platform/asyncio.py +++ /dev/null @@ -1,142 +0,0 @@ -"""Bridges between the `asyncio` module and Tornado IOLoop. - -This is a work in progress and interfaces are subject to change. - -To test: -python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOLoop -python3.4 -m tornado.test.runtests --ioloop=tornado.platform.asyncio.AsyncIOMainLoop -(the tests log a few warnings with AsyncIOMainLoop because they leave some -unfinished callbacks on the event loop that fail when it resumes) -""" - -from __future__ import absolute_import, division, print_function, with_statement -import datetime -import functools - -from tornado.ioloop import IOLoop -from tornado import stack_context -from tornado.util import timedelta_to_seconds - -try: - # Import the real asyncio module for py33+ first. Older versions of the - # trollius backport also use this name. - import asyncio -except ImportError as e: - # Asyncio itself isn't available; see if trollius is (backport to py26+). 
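For orientation, a minimal usage sketch of this asyncio bridge, assuming Python 3.3+ where ``asyncio`` itself is importable (the port is arbitrary)::

    import asyncio

    import tornado.web
    from tornado.platform.asyncio import AsyncIOMainLoop

    AsyncIOMainLoop().install()        # Tornado callbacks now run on asyncio's loop

    app = tornado.web.Application([])  # hypothetical empty application
    app.listen(8888)
    asyncio.get_event_loop().run_forever()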
- try: - import trollius as asyncio - except ImportError: - # Re-raise the original asyncio error, not the trollius one. - raise e - -class BaseAsyncIOLoop(IOLoop): - def initialize(self, asyncio_loop, close_loop=False): - self.asyncio_loop = asyncio_loop - self.close_loop = close_loop - self.asyncio_loop.call_soon(self.make_current) - # Maps fd to (fileobj, handler function) pair (as in IOLoop.add_handler) - self.handlers = {} - # Set of fds listening for reads/writes - self.readers = set() - self.writers = set() - self.closing = False - - def close(self, all_fds=False): - self.closing = True - for fd in list(self.handlers): - fileobj, handler_func = self.handlers[fd] - self.remove_handler(fd) - if all_fds: - self.close_fd(fileobj) - if self.close_loop: - self.asyncio_loop.close() - - def add_handler(self, fd, handler, events): - fd, fileobj = self.split_fd(fd) - if fd in self.handlers: - raise ValueError("fd %s added twice" % fd) - self.handlers[fd] = (fileobj, stack_context.wrap(handler)) - if events & IOLoop.READ: - self.asyncio_loop.add_reader( - fd, self._handle_events, fd, IOLoop.READ) - self.readers.add(fd) - if events & IOLoop.WRITE: - self.asyncio_loop.add_writer( - fd, self._handle_events, fd, IOLoop.WRITE) - self.writers.add(fd) - - def update_handler(self, fd, events): - fd, fileobj = self.split_fd(fd) - if events & IOLoop.READ: - if fd not in self.readers: - self.asyncio_loop.add_reader( - fd, self._handle_events, fd, IOLoop.READ) - self.readers.add(fd) - else: - if fd in self.readers: - self.asyncio_loop.remove_reader(fd) - self.readers.remove(fd) - if events & IOLoop.WRITE: - if fd not in self.writers: - self.asyncio_loop.add_writer( - fd, self._handle_events, fd, IOLoop.WRITE) - self.writers.add(fd) - else: - if fd in self.writers: - self.asyncio_loop.remove_writer(fd) - self.writers.remove(fd) - - def remove_handler(self, fd): - fd, fileobj = self.split_fd(fd) - if fd not in self.handlers: - return - if fd in self.readers: - self.asyncio_loop.remove_reader(fd) - self.readers.remove(fd) - if fd in self.writers: - self.asyncio_loop.remove_writer(fd) - self.writers.remove(fd) - del self.handlers[fd] - - def _handle_events(self, fd, events): - fileobj, handler_func = self.handlers[fd] - handler_func(fileobj, events) - - def start(self): - self._setup_logging() - self.asyncio_loop.run_forever() - - def stop(self): - self.asyncio_loop.stop() - - def call_at(self, when, callback, *args, **kwargs): - # asyncio.call_at supports *args but not **kwargs, so bind them here. - # We do not synchronize self.time and asyncio_loop.time, so - # convert from absolute to relative. 
- return self.asyncio_loop.call_later( - max(0, when - self.time()), self._run_callback, - functools.partial(stack_context.wrap(callback), *args, **kwargs)) - - def remove_timeout(self, timeout): - timeout.cancel() - - def add_callback(self, callback, *args, **kwargs): - if self.closing: - raise RuntimeError("IOLoop is closing") - self.asyncio_loop.call_soon_threadsafe( - self._run_callback, - functools.partial(stack_context.wrap(callback), *args, **kwargs)) - - add_callback_from_signal = add_callback - - -class AsyncIOMainLoop(BaseAsyncIOLoop): - def initialize(self): - super(AsyncIOMainLoop, self).initialize(asyncio.get_event_loop(), - close_loop=False) - - -class AsyncIOLoop(BaseAsyncIOLoop): - def initialize(self): - super(AsyncIOLoop, self).initialize(asyncio.new_event_loop(), - close_loop=True) diff --git a/rosbridge_server/src/tornado/platform/auto.py b/rosbridge_server/src/tornado/platform/auto.py deleted file mode 100644 index ddfe06b4a..000000000 --- a/rosbridge_server/src/tornado/platform/auto.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Implementation of platform-specific functionality. - -For each function or class described in `tornado.platform.interface`, -the appropriate platform-specific implementation exists in this module. -Most code that needs access to this functionality should do e.g.:: - - from tornado.platform.auto import set_close_exec -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import os - -if os.name == 'nt': - from tornado.platform.common import Waker - from tornado.platform.windows import set_close_exec -elif 'APPENGINE_RUNTIME' in os.environ: - from tornado.platform.common import Waker - def set_close_exec(fd): - pass -else: - from tornado.platform.posix import set_close_exec, Waker - -try: - # monotime monkey-patches the time module to have a monotonic function - # in versions of python before 3.3. - import monotime -except ImportError: - pass -try: - from time import monotonic as monotonic_time -except ImportError: - monotonic_time = None diff --git a/rosbridge_server/src/tornado/platform/caresresolver.py b/rosbridge_server/src/tornado/platform/caresresolver.py deleted file mode 100644 index c4648c222..000000000 --- a/rosbridge_server/src/tornado/platform/caresresolver.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import absolute_import, division, print_function, with_statement -import pycares -import socket - -from tornado import gen -from tornado.ioloop import IOLoop -from tornado.netutil import Resolver, is_valid_ip - - -class CaresResolver(Resolver): - """Name resolver based on the c-ares library. - - This is a non-blocking and non-threaded resolver. It may not produce - the same results as the system resolver, but can be used for non-blocking - resolution when threads cannot be used. - - c-ares fails to resolve some names when ``family`` is ``AF_UNSPEC``, - so it is only recommended for use in ``AF_INET`` (i.e. 
IPv4). This is - the default for ``tornado.simple_httpclient``, but other libraries - may default to ``AF_UNSPEC``. - """ - def initialize(self, io_loop=None): - self.io_loop = io_loop or IOLoop.current() - self.channel = pycares.Channel(sock_state_cb=self._sock_state_cb) - self.fds = {} - - def _sock_state_cb(self, fd, readable, writable): - state = ((IOLoop.READ if readable else 0) | - (IOLoop.WRITE if writable else 0)) - if not state: - self.io_loop.remove_handler(fd) - del self.fds[fd] - elif fd in self.fds: - self.io_loop.update_handler(fd, state) - self.fds[fd] = state - else: - self.io_loop.add_handler(fd, self._handle_events, state) - self.fds[fd] = state - - def _handle_events(self, fd, events): - read_fd = pycares.ARES_SOCKET_BAD - write_fd = pycares.ARES_SOCKET_BAD - if events & IOLoop.READ: - read_fd = fd - if events & IOLoop.WRITE: - write_fd = fd - self.channel.process_fd(read_fd, write_fd) - - @gen.coroutine - def resolve(self, host, port, family=0): - if is_valid_ip(host): - addresses = [host] - else: - # gethostbyname doesn't take callback as a kwarg - self.channel.gethostbyname(host, family, (yield gen.Callback(1))) - callback_args = yield gen.Wait(1) - assert isinstance(callback_args, gen.Arguments) - assert not callback_args.kwargs - result, error = callback_args.args - if error: - raise Exception('C-Ares returned error %s: %s while resolving %s' % - (error, pycares.errno.strerror(error), host)) - addresses = result.addresses - addrinfo = [] - for address in addresses: - if '.' in address: - address_family = socket.AF_INET - elif ':' in address: - address_family = socket.AF_INET6 - else: - address_family = socket.AF_UNSPEC - if family != socket.AF_UNSPEC and family != address_family: - raise Exception('Requested socket family %d but got %d' % - (family, address_family)) - addrinfo.append((address_family, (address, port))) - raise gen.Return(addrinfo) diff --git a/rosbridge_server/src/tornado/platform/common.py b/rosbridge_server/src/tornado/platform/common.py deleted file mode 100644 index b409a903f..000000000 --- a/rosbridge_server/src/tornado/platform/common.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Lowest-common-denominator implementations of platform functionality.""" -from __future__ import absolute_import, division, print_function, with_statement - -import errno -import socket - -from tornado.platform import interface - - -class Waker(interface.Waker): - """Create an OS independent asynchronous pipe. - - For use on platforms that don't have os.pipe() (or where pipes cannot - be passed to select()), but do have sockets. This includes Windows - and Jython. - """ - def __init__(self): - # Based on Zope select_trigger.py: - # /~https://github.com/zopefoundation/Zope/blob/master/src/ZServer/medusa/thread/select_trigger.py - - self.writer = socket.socket() - # Disable buffering -- pulling the trigger sends 1 byte, - # and we want that sent immediately, to wake up ASAP. - self.writer.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1) - - count = 0 - while 1: - count += 1 - # Bind to a local port; for efficiency, let the OS pick - # a free port for us. - # Unfortunately, stress tests showed that we may not - # be able to connect to that port ("Address already in - # use") despite that the OS picked it. This appears - # to be a race bug in the Windows socket implementation. - # So we loop until a connect() succeeds (almost always - # on the first try). See the long thread at - # http://mail.zope.org/pipermail/zope/2005-July/160433.html - # for hideous details. 
- a = socket.socket() - a.bind(("127.0.0.1", 0)) - a.listen(1) - connect_address = a.getsockname() # assigned (host, port) pair - try: - self.writer.connect(connect_address) - break # success - except socket.error as detail: - if (not hasattr(errno, 'WSAEADDRINUSE') or - detail[0] != errno.WSAEADDRINUSE): - # "Address already in use" is the only error - # I've seen on two WinXP Pro SP2 boxes, under - # Pythons 2.3.5 and 2.4.1. - raise - # (10048, 'Address already in use') - # assert count <= 2 # never triggered in Tim's tests - if count >= 10: # I've never seen it go above 2 - a.close() - self.writer.close() - raise socket.error("Cannot bind trigger!") - # Close `a` and try again. Note: I originally put a short - # sleep() here, but it didn't appear to help or hurt. - a.close() - - self.reader, addr = a.accept() - self.reader.setblocking(0) - self.writer.setblocking(0) - a.close() - self.reader_fd = self.reader.fileno() - - def fileno(self): - return self.reader.fileno() - - def write_fileno(self): - return self.writer.fileno() - - def wake(self): - try: - self.writer.send(b"x") - except (IOError, socket.error): - pass - - def consume(self): - try: - while True: - result = self.reader.recv(1024) - if not result: - break - except (IOError, socket.error): - pass - - def close(self): - self.reader.close() - self.writer.close() diff --git a/rosbridge_server/src/tornado/platform/epoll.py b/rosbridge_server/src/tornado/platform/epoll.py deleted file mode 100644 index b08cc6281..000000000 --- a/rosbridge_server/src/tornado/platform/epoll.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""EPoll-based IOLoop implementation for Linux systems.""" -from __future__ import absolute_import, division, print_function, with_statement - -import select - -from tornado.ioloop import PollIOLoop - - -class EPollIOLoop(PollIOLoop): - def initialize(self, **kwargs): - super(EPollIOLoop, self).initialize(impl=select.epoll(), **kwargs) diff --git a/rosbridge_server/src/tornado/platform/interface.py b/rosbridge_server/src/tornado/platform/interface.py deleted file mode 100644 index 07da6babd..000000000 --- a/rosbridge_server/src/tornado/platform/interface.py +++ /dev/null @@ -1,63 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""Interfaces for platform-specific functionality. 
- -This module exists primarily for documentation purposes and as base classes -for other tornado.platform modules. Most code should import the appropriate -implementation from `tornado.platform.auto`. -""" - -from __future__ import absolute_import, division, print_function, with_statement - - -def set_close_exec(fd): - """Sets the close-on-exec bit (``FD_CLOEXEC``) for a file descriptor.""" - raise NotImplementedError() - - -class Waker(object): - """A socket-like object that can wake another thread from ``select()``. - - The `~tornado.ioloop.IOLoop` will add the Waker's `fileno()` to - its ``select`` (or ``epoll`` or ``kqueue``) calls. When another - thread wants to wake up the loop, it calls `wake`. Once it has woken - up, it will call `consume` to do any necessary per-wake cleanup. When - the ``IOLoop`` is closed, it closes its waker too. - """ - def fileno(self): - """Returns the read file descriptor for this waker. - - Must be suitable for use with ``select()`` or equivalent on the - local platform. - """ - raise NotImplementedError() - - def write_fileno(self): - """Returns the write file descriptor for this waker.""" - raise NotImplementedError() - - def wake(self): - """Triggers activity on the waker's file descriptor.""" - raise NotImplementedError() - - def consume(self): - """Called after the listen has woken up to do any necessary cleanup.""" - raise NotImplementedError() - - def close(self): - """Closes the waker's file descriptor(s).""" - raise NotImplementedError() diff --git a/rosbridge_server/src/tornado/platform/kqueue.py b/rosbridge_server/src/tornado/platform/kqueue.py deleted file mode 100644 index de8c046d3..000000000 --- a/rosbridge_server/src/tornado/platform/kqueue.py +++ /dev/null @@ -1,92 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
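Both halves of the ``Waker``/``set_close_exec`` interface above are reached through `tornado.platform.auto` in practice; a short sketch (the ``select`` timeout is arbitrary)::

    import select
    import socket

    from tornado.platform.auto import Waker, set_close_exec

    sock = socket.socket()
    set_close_exec(sock.fileno())   # keep the fd from leaking across exec()

    w = Waker()
    w.wake()                                  # normally called from another thread
    select.select([w.fileno()], [], [], 0.1)  # the fd should be readable promptly
    w.consume()                               # drain the wake-up byte(s)
    w.close()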
-"""KQueue-based IOLoop implementation for BSD/Mac systems.""" -from __future__ import absolute_import, division, print_function, with_statement - -import select - -from tornado.ioloop import IOLoop, PollIOLoop - -assert hasattr(select, 'kqueue'), 'kqueue not supported' - - -class _KQueue(object): - """A kqueue-based event loop for BSD/Mac systems.""" - def __init__(self): - self._kqueue = select.kqueue() - self._active = {} - - def fileno(self): - return self._kqueue.fileno() - - def close(self): - self._kqueue.close() - - def register(self, fd, events): - if fd in self._active: - raise IOError("fd %s already registered" % fd) - self._control(fd, events, select.KQ_EV_ADD) - self._active[fd] = events - - def modify(self, fd, events): - self.unregister(fd) - self.register(fd, events) - - def unregister(self, fd): - events = self._active.pop(fd) - self._control(fd, events, select.KQ_EV_DELETE) - - def _control(self, fd, events, flags): - kevents = [] - if events & IOLoop.WRITE: - kevents.append(select.kevent( - fd, filter=select.KQ_FILTER_WRITE, flags=flags)) - if events & IOLoop.READ or not kevents: - # Always read when there is not a write - kevents.append(select.kevent( - fd, filter=select.KQ_FILTER_READ, flags=flags)) - # Even though control() takes a list, it seems to return EINVAL - # on Mac OS X (10.6) when there is more than one event in the list. - for kevent in kevents: - self._kqueue.control([kevent], 0) - - def poll(self, timeout): - kevents = self._kqueue.control(None, 1000, timeout) - events = {} - for kevent in kevents: - fd = kevent.ident - if kevent.filter == select.KQ_FILTER_READ: - events[fd] = events.get(fd, 0) | IOLoop.READ - if kevent.filter == select.KQ_FILTER_WRITE: - if kevent.flags & select.KQ_EV_EOF: - # If an asynchronous connection is refused, kqueue - # returns a write event with the EOF flag set. - # Turn this into an error for consistency with the - # other IOLoop implementations. - # Note that for read events, EOF may be returned before - # all data has been consumed from the socket buffer, - # so we only check for EOF on write events. - events[fd] = IOLoop.ERROR - else: - events[fd] = events.get(fd, 0) | IOLoop.WRITE - if kevent.flags & select.KQ_EV_ERROR: - events[fd] = events.get(fd, 0) | IOLoop.ERROR - return events.items() - - -class KQueueIOLoop(PollIOLoop): - def initialize(self, **kwargs): - super(KQueueIOLoop, self).initialize(impl=_KQueue(), **kwargs) diff --git a/rosbridge_server/src/tornado/platform/posix.py b/rosbridge_server/src/tornado/platform/posix.py deleted file mode 100644 index 41a5794c6..000000000 --- a/rosbridge_server/src/tornado/platform/posix.py +++ /dev/null @@ -1,70 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -"""Posix implementations of platform-specific functionality.""" - -from __future__ import absolute_import, division, print_function, with_statement - -import fcntl -import os - -from tornado.platform import interface - - -def set_close_exec(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFD) - fcntl.fcntl(fd, fcntl.F_SETFD, flags | fcntl.FD_CLOEXEC) - - -def _set_nonblocking(fd): - flags = fcntl.fcntl(fd, fcntl.F_GETFL) - fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK) - - -class Waker(interface.Waker): - def __init__(self): - r, w = os.pipe() - _set_nonblocking(r) - _set_nonblocking(w) - set_close_exec(r) - set_close_exec(w) - self.reader = os.fdopen(r, "rb", 0) - self.writer = os.fdopen(w, "wb", 0) - - def fileno(self): - return self.reader.fileno() - - def write_fileno(self): - return self.writer.fileno() - - def wake(self): - try: - self.writer.write(b"x") - except IOError: - pass - - def consume(self): - try: - while True: - result = self.reader.read() - if not result: - break - except IOError: - pass - - def close(self): - self.reader.close() - self.writer.close() diff --git a/rosbridge_server/src/tornado/platform/select.py b/rosbridge_server/src/tornado/platform/select.py deleted file mode 100644 index 9a8795626..000000000 --- a/rosbridge_server/src/tornado/platform/select.py +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2012 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -"""Select-based IOLoop implementation. - -Used as a fallback for systems that don't support epoll or kqueue. -""" -from __future__ import absolute_import, division, print_function, with_statement - -import select - -from tornado.ioloop import IOLoop, PollIOLoop - - -class _Select(object): - """A simple, select()-based IOLoop implementation for non-Linux systems""" - def __init__(self): - self.read_fds = set() - self.write_fds = set() - self.error_fds = set() - self.fd_sets = (self.read_fds, self.write_fds, self.error_fds) - - def close(self): - pass - - def register(self, fd, events): - if fd in self.read_fds or fd in self.write_fds or fd in self.error_fds: - raise IOError("fd %s already registered" % fd) - if events & IOLoop.READ: - self.read_fds.add(fd) - if events & IOLoop.WRITE: - self.write_fds.add(fd) - if events & IOLoop.ERROR: - self.error_fds.add(fd) - # Closed connections are reported as errors by epoll and kqueue, - # but as zero-byte reads by select, so when errors are requested - # we need to listen for both read and error. 
- self.read_fds.add(fd) - - def modify(self, fd, events): - self.unregister(fd) - self.register(fd, events) - - def unregister(self, fd): - self.read_fds.discard(fd) - self.write_fds.discard(fd) - self.error_fds.discard(fd) - - def poll(self, timeout): - readable, writeable, errors = select.select( - self.read_fds, self.write_fds, self.error_fds, timeout) - events = {} - for fd in readable: - events[fd] = events.get(fd, 0) | IOLoop.READ - for fd in writeable: - events[fd] = events.get(fd, 0) | IOLoop.WRITE - for fd in errors: - events[fd] = events.get(fd, 0) | IOLoop.ERROR - return events.items() - - -class SelectIOLoop(PollIOLoop): - def initialize(self, **kwargs): - super(SelectIOLoop, self).initialize(impl=_Select(), **kwargs) diff --git a/rosbridge_server/src/tornado/platform/twisted.py b/rosbridge_server/src/tornado/platform/twisted.py deleted file mode 100644 index b271dfcef..000000000 --- a/rosbridge_server/src/tornado/platform/twisted.py +++ /dev/null @@ -1,556 +0,0 @@ -# Author: Ovidiu Predescu -# Date: July 2011 -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -# Note: This module's docs are not currently extracted automatically, -# so changes must be made manually to twisted.rst -# TODO: refactor doc build process to use an appropriate virtualenv -"""Bridges between the Twisted reactor and Tornado IOLoop. - -This module lets you run applications and libraries written for -Twisted in a Tornado application. It can be used in two modes, -depending on which library's underlying event loop you want to use. - -This module has been tested with Twisted versions 11.0.0 and newer. - -Twisted on Tornado ------------------- - -`TornadoReactor` implements the Twisted reactor interface on top of -the Tornado IOLoop. To use it, simply call `install` at the beginning -of the application:: - - import tornado.platform.twisted - tornado.platform.twisted.install() - from twisted.internet import reactor - -When the app is ready to start, call `IOLoop.instance().start()` -instead of `reactor.run()`. - -It is also possible to create a non-global reactor by calling -`tornado.platform.twisted.TornadoReactor(io_loop)`. However, if -the `IOLoop` and reactor are to be short-lived (such as those used in -unit tests), additional cleanup may be required. Specifically, it is -recommended to call:: - - reactor.fireSystemEvent('shutdown') - reactor.disconnectAll() - -before closing the `IOLoop`. - -Tornado on Twisted ------------------- - -`TwistedIOLoop` implements the Tornado IOLoop interface on top of the Twisted -reactor. Recommended usage:: - - from tornado.platform.twisted import TwistedIOLoop - from twisted.internet import reactor - TwistedIOLoop().install() - # Set up your tornado application as usual using `IOLoop.instance` - reactor.run() - -`TwistedIOLoop` always uses the global Twisted reactor. 
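Filling in the first mode above, a sketch that drives a Twisted-style HTTP fetch from a Tornado ``IOLoop`` (assumes ``twisted.web`` is available; the URL is a placeholder)::

    import tornado.ioloop
    import tornado.platform.twisted

    tornado.platform.twisted.install()   # must happen before importing the reactor
    from twisted.web.client import getPage

    def on_page(data):
        print('fetched %d bytes' % len(data))
        tornado.ioloop.IOLoop.instance().stop()

    getPage(b'http://example.com/').addCallback(on_page)
    tornado.ioloop.IOLoop.instance().start()   # instead of reactor.run()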
-""" - -from __future__ import absolute_import, division, print_function, with_statement - -import datetime -import functools -import numbers -import socket - -import twisted.internet.abstract -from twisted.internet.posixbase import PosixReactorBase -from twisted.internet.interfaces import \ - IReactorFDSet, IDelayedCall, IReactorTime, IReadDescriptor, IWriteDescriptor -from twisted.python import failure, log -from twisted.internet import error -import twisted.names.cache -import twisted.names.client -import twisted.names.hosts -import twisted.names.resolve - -from zope.interface import implementer - -from tornado.escape import utf8 -from tornado import gen -import tornado.ioloop -from tornado.log import app_log -from tornado.netutil import Resolver -from tornado.stack_context import NullContext, wrap -from tornado.ioloop import IOLoop -from tornado.util import timedelta_to_seconds - - -@implementer(IDelayedCall) -class TornadoDelayedCall(object): - """DelayedCall object for Tornado.""" - def __init__(self, reactor, seconds, f, *args, **kw): - self._reactor = reactor - self._func = functools.partial(f, *args, **kw) - self._time = self._reactor.seconds() + seconds - self._timeout = self._reactor._io_loop.add_timeout(self._time, - self._called) - self._active = True - - def _called(self): - self._active = False - self._reactor._removeDelayedCall(self) - try: - self._func() - except: - app_log.error("_called caught exception", exc_info=True) - - def getTime(self): - return self._time - - def cancel(self): - self._active = False - self._reactor._io_loop.remove_timeout(self._timeout) - self._reactor._removeDelayedCall(self) - - def delay(self, seconds): - self._reactor._io_loop.remove_timeout(self._timeout) - self._time += seconds - self._timeout = self._reactor._io_loop.add_timeout(self._time, - self._called) - - def reset(self, seconds): - self._reactor._io_loop.remove_timeout(self._timeout) - self._time = self._reactor.seconds() + seconds - self._timeout = self._reactor._io_loop.add_timeout(self._time, - self._called) - - def active(self): - return self._active - - -@implementer(IReactorTime, IReactorFDSet) -class TornadoReactor(PosixReactorBase): - """Twisted reactor built on the Tornado IOLoop. - - Since it is intented to be used in applications where the top-level - event loop is ``io_loop.start()`` rather than ``reactor.run()``, - it is implemented a little differently than other Twisted reactors. - We override `mainLoop` instead of `doIteration` and must implement - timed call functionality on top of `IOLoop.add_timeout` rather than - using the implementation in `PosixReactorBase`. - """ - def __init__(self, io_loop=None): - if not io_loop: - io_loop = tornado.ioloop.IOLoop.current() - self._io_loop = io_loop - self._readers = {} # map of reader objects to fd - self._writers = {} # map of writer objects to fd - self._fds = {} # a map of fd to a (reader, writer) tuple - self._delayedCalls = {} - PosixReactorBase.__init__(self) - self.addSystemEventTrigger('during', 'shutdown', self.crash) - - # IOLoop.start() bypasses some of the reactor initialization. - # Fire off the necessary events if they weren't already triggered - # by reactor.run(). 
- def start_if_necessary(): - if not self._started: - self.fireSystemEvent('startup') - self._io_loop.add_callback(start_if_necessary) - - # IReactorTime - def seconds(self): - return self._io_loop.time() - - def callLater(self, seconds, f, *args, **kw): - dc = TornadoDelayedCall(self, seconds, f, *args, **kw) - self._delayedCalls[dc] = True - return dc - - def getDelayedCalls(self): - return [x for x in self._delayedCalls if x._active] - - def _removeDelayedCall(self, dc): - if dc in self._delayedCalls: - del self._delayedCalls[dc] - - # IReactorThreads - def callFromThread(self, f, *args, **kw): - """See `twisted.internet.interfaces.IReactorThreads.callFromThread`""" - assert callable(f), "%s is not callable" % f - with NullContext(): - # This NullContext is mainly for an edge case when running - # TwistedIOLoop on top of a TornadoReactor. - # TwistedIOLoop.add_callback uses reactor.callFromThread and - # should not pick up additional StackContexts along the way. - self._io_loop.add_callback(f, *args, **kw) - - # We don't need the waker code from the super class, Tornado uses - # its own waker. - def installWaker(self): - pass - - def wakeUp(self): - pass - - # IReactorFDSet - def _invoke_callback(self, fd, events): - if fd not in self._fds: - return - (reader, writer) = self._fds[fd] - if reader: - err = None - if reader.fileno() == -1: - err = error.ConnectionLost() - elif events & IOLoop.READ: - err = log.callWithLogger(reader, reader.doRead) - if err is None and events & IOLoop.ERROR: - err = error.ConnectionLost() - if err is not None: - self.removeReader(reader) - reader.readConnectionLost(failure.Failure(err)) - if writer: - err = None - if writer.fileno() == -1: - err = error.ConnectionLost() - elif events & IOLoop.WRITE: - err = log.callWithLogger(writer, writer.doWrite) - if err is None and events & IOLoop.ERROR: - err = error.ConnectionLost() - if err is not None: - self.removeWriter(writer) - writer.writeConnectionLost(failure.Failure(err)) - - def addReader(self, reader): - """Add a FileDescriptor for notification of data available to read.""" - if reader in self._readers: - # Don't add the reader if it's already there - return - fd = reader.fileno() - self._readers[reader] = fd - if fd in self._fds: - (_, writer) = self._fds[fd] - self._fds[fd] = (reader, writer) - if writer: - # We already registered this fd for write events, - # update it for read events as well. - self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) - else: - with NullContext(): - self._fds[fd] = (reader, None) - self._io_loop.add_handler(fd, self._invoke_callback, - IOLoop.READ) - - def addWriter(self, writer): - """Add a FileDescriptor for notification of data available to write.""" - if writer in self._writers: - return - fd = writer.fileno() - self._writers[writer] = fd - if fd in self._fds: - (reader, _) = self._fds[fd] - self._fds[fd] = (reader, writer) - if reader: - # We already registered this fd for read events, - # update it for write events as well. - self._io_loop.update_handler(fd, IOLoop.READ | IOLoop.WRITE) - else: - with NullContext(): - self._fds[fd] = (None, writer) - self._io_loop.add_handler(fd, self._invoke_callback, - IOLoop.WRITE) - - def removeReader(self, reader): - """Remove a Selectable for notification of data available to read.""" - if reader in self._readers: - fd = self._readers.pop(reader) - (_, writer) = self._fds[fd] - if writer: - # We have a writer so we need to update the IOLoop for - # write events only. 
- self._fds[fd] = (None, writer) - self._io_loop.update_handler(fd, IOLoop.WRITE) - else: - # Since we have no writer registered, we remove the - # entry from _fds and unregister the handler from the - # IOLoop - del self._fds[fd] - self._io_loop.remove_handler(fd) - - def removeWriter(self, writer): - """Remove a Selectable for notification of data available to write.""" - if writer in self._writers: - fd = self._writers.pop(writer) - (reader, _) = self._fds[fd] - if reader: - # We have a reader so we need to update the IOLoop for - # read events only. - self._fds[fd] = (reader, None) - self._io_loop.update_handler(fd, IOLoop.READ) - else: - # Since we have no reader registered, we remove the - # entry from the _fds and unregister the handler from - # the IOLoop. - del self._fds[fd] - self._io_loop.remove_handler(fd) - - def removeAll(self): - return self._removeAll(self._readers, self._writers) - - def getReaders(self): - return self._readers.keys() - - def getWriters(self): - return self._writers.keys() - - # The following functions are mainly used in twisted-style test cases; - # it is expected that most users of the TornadoReactor will call - # IOLoop.start() instead of Reactor.run(). - def stop(self): - PosixReactorBase.stop(self) - fire_shutdown = functools.partial(self.fireSystemEvent, "shutdown") - self._io_loop.add_callback(fire_shutdown) - - def crash(self): - PosixReactorBase.crash(self) - self._io_loop.stop() - - def doIteration(self, delay): - raise NotImplementedError("doIteration") - - def mainLoop(self): - self._io_loop.start() - - -class _TestReactor(TornadoReactor): - """Subclass of TornadoReactor for use in unittests. - - This can't go in the test.py file because of import-order dependencies - with the Twisted reactor test builder. - """ - def __init__(self): - # always use a new ioloop - super(_TestReactor, self).__init__(IOLoop()) - - def listenTCP(self, port, factory, backlog=50, interface=''): - # default to localhost to avoid firewall prompts on the mac - if not interface: - interface = '127.0.0.1' - return super(_TestReactor, self).listenTCP( - port, factory, backlog=backlog, interface=interface) - - def listenUDP(self, port, protocol, interface='', maxPacketSize=8192): - if not interface: - interface = '127.0.0.1' - return super(_TestReactor, self).listenUDP( - port, protocol, interface=interface, maxPacketSize=maxPacketSize) - - -def install(io_loop=None): - """Install this package as the default Twisted reactor.""" - if not io_loop: - io_loop = tornado.ioloop.IOLoop.current() - reactor = TornadoReactor(io_loop) - from twisted.internet.main import installReactor - installReactor(reactor) - return reactor - - -@implementer(IReadDescriptor, IWriteDescriptor) -class _FD(object): - def __init__(self, fd, fileobj, handler): - self.fd = fd - self.fileobj = fileobj - self.handler = handler - self.reading = False - self.writing = False - self.lost = False - - def fileno(self): - return self.fd - - def doRead(self): - if not self.lost: - self.handler(self.fileobj, tornado.ioloop.IOLoop.READ) - - def doWrite(self): - if not self.lost: - self.handler(self.fileobj, tornado.ioloop.IOLoop.WRITE) - - def connectionLost(self, reason): - if not self.lost: - self.handler(self.fileobj, tornado.ioloop.IOLoop.ERROR) - self.lost = True - - def logPrefix(self): - return '' - - -class TwistedIOLoop(tornado.ioloop.IOLoop): - """IOLoop implementation that runs on Twisted. - - Uses the global Twisted reactor by default. 
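Expanding the recommended usage from the module docstring into a runnable sketch (handler and port are hypothetical)::

    import tornado.web
    from tornado.platform.twisted import TwistedIOLoop
    from twisted.internet import reactor

    TwistedIOLoop().install()   # IOLoop.instance() now wraps the global reactor

    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write("served via the Twisted reactor")

    tornado.web.Application([(r"/", MainHandler)]).listen(8888)
    reactor.run()               # drives both Twisted and Tornado callbacks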
To create multiple - `TwistedIOLoops` in the same process, you must pass a unique reactor - when constructing each one. - - Not compatible with `tornado.process.Subprocess.set_exit_callback` - because the ``SIGCHLD`` handlers used by Tornado and Twisted conflict - with each other. - """ - def initialize(self, reactor=None): - if reactor is None: - import twisted.internet.reactor - reactor = twisted.internet.reactor - self.reactor = reactor - self.fds = {} - self.reactor.callWhenRunning(self.make_current) - - def close(self, all_fds=False): - fds = self.fds - self.reactor.removeAll() - for c in self.reactor.getDelayedCalls(): - c.cancel() - if all_fds: - for fd in fds.values(): - self.close_fd(fd.fileobj) - - def add_handler(self, fd, handler, events): - if fd in self.fds: - raise ValueError('fd %s added twice' % fd) - fd, fileobj = self.split_fd(fd) - self.fds[fd] = _FD(fd, fileobj, wrap(handler)) - if events & tornado.ioloop.IOLoop.READ: - self.fds[fd].reading = True - self.reactor.addReader(self.fds[fd]) - if events & tornado.ioloop.IOLoop.WRITE: - self.fds[fd].writing = True - self.reactor.addWriter(self.fds[fd]) - - def update_handler(self, fd, events): - fd, fileobj = self.split_fd(fd) - if events & tornado.ioloop.IOLoop.READ: - if not self.fds[fd].reading: - self.fds[fd].reading = True - self.reactor.addReader(self.fds[fd]) - else: - if self.fds[fd].reading: - self.fds[fd].reading = False - self.reactor.removeReader(self.fds[fd]) - if events & tornado.ioloop.IOLoop.WRITE: - if not self.fds[fd].writing: - self.fds[fd].writing = True - self.reactor.addWriter(self.fds[fd]) - else: - if self.fds[fd].writing: - self.fds[fd].writing = False - self.reactor.removeWriter(self.fds[fd]) - - def remove_handler(self, fd): - fd, fileobj = self.split_fd(fd) - if fd not in self.fds: - return - self.fds[fd].lost = True - if self.fds[fd].reading: - self.reactor.removeReader(self.fds[fd]) - if self.fds[fd].writing: - self.reactor.removeWriter(self.fds[fd]) - del self.fds[fd] - - def start(self): - self._setup_logging() - self.reactor.run() - - def stop(self): - self.reactor.crash() - - def add_timeout(self, deadline, callback, *args, **kwargs): - # This method could be simplified (since tornado 4.0) by - # overriding call_at instead of add_timeout, but we leave it - # for now as a test of backwards-compatibility. - if isinstance(deadline, numbers.Real): - delay = max(deadline - self.time(), 0) - elif isinstance(deadline, datetime.timedelta): - delay = timedelta_to_seconds(deadline) - else: - raise TypeError("Unsupported deadline %r" % (deadline,)) - return self.reactor.callLater( - delay, self._run_callback, - functools.partial(wrap(callback), *args, **kwargs)) - - def remove_timeout(self, timeout): - if timeout.active(): - timeout.cancel() - - def add_callback(self, callback, *args, **kwargs): - self.reactor.callFromThread( - self._run_callback, - functools.partial(wrap(callback), *args, **kwargs)) - - def add_callback_from_signal(self, callback, *args, **kwargs): - self.add_callback(callback, *args, **kwargs) - - -class TwistedResolver(Resolver): - """Twisted-based asynchronous resolver. - - This is a non-blocking and non-threaded resolver. It is - recommended only when threads cannot be used, since it has - limitations compared to the standard ``getaddrinfo``-based - `~tornado.netutil.Resolver` and - `~tornado.netutil.ThreadedResolver`. Specifically, it returns at - most one result, and arguments other than ``host`` and ``family`` - are ignored. 
It may fail to resolve when ``family`` is not - ``socket.AF_UNSPEC``. - - Requires Twisted 12.1 or newer. - """ - def initialize(self, io_loop=None): - self.io_loop = io_loop or IOLoop.current() - # partial copy of twisted.names.client.createResolver, which doesn't - # allow for a reactor to be passed in. - self.reactor = tornado.platform.twisted.TornadoReactor(io_loop) - - host_resolver = twisted.names.hosts.Resolver('/etc/hosts') - cache_resolver = twisted.names.cache.CacheResolver(reactor=self.reactor) - real_resolver = twisted.names.client.Resolver('/etc/resolv.conf', - reactor=self.reactor) - self.resolver = twisted.names.resolve.ResolverChain( - [host_resolver, cache_resolver, real_resolver]) - - @gen.coroutine - def resolve(self, host, port, family=0): - # getHostByName doesn't accept IP addresses, so if the input - # looks like an IP address just return it immediately. - if twisted.internet.abstract.isIPAddress(host): - resolved = host - resolved_family = socket.AF_INET - elif twisted.internet.abstract.isIPv6Address(host): - resolved = host - resolved_family = socket.AF_INET6 - else: - deferred = self.resolver.getHostByName(utf8(host)) - resolved = yield gen.Task(deferred.addBoth) - if isinstance(resolved, failure.Failure): - resolved.raiseException() - elif twisted.internet.abstract.isIPAddress(resolved): - resolved_family = socket.AF_INET - elif twisted.internet.abstract.isIPv6Address(resolved): - resolved_family = socket.AF_INET6 - else: - resolved_family = socket.AF_UNSPEC - if family != socket.AF_UNSPEC and family != resolved_family: - raise Exception('Requested socket family %d but got %d' % - (family, resolved_family)) - result = [ - (resolved_family, (resolved, port)), - ] - raise gen.Return(result) diff --git a/rosbridge_server/src/tornado/platform/windows.py b/rosbridge_server/src/tornado/platform/windows.py deleted file mode 100644 index 817bdca13..000000000 --- a/rosbridge_server/src/tornado/platform/windows.py +++ /dev/null @@ -1,20 +0,0 @@ -# NOTE: win32 support is currently experimental, and not recommended -# for production use. - - -from __future__ import absolute_import, division, print_function, with_statement -import ctypes -import ctypes.wintypes - -# See: http://msdn.microsoft.com/en-us/library/ms724935(VS.85).aspx -SetHandleInformation = ctypes.windll.kernel32.SetHandleInformation -SetHandleInformation.argtypes = (ctypes.wintypes.HANDLE, ctypes.wintypes.DWORD, ctypes.wintypes.DWORD) -SetHandleInformation.restype = ctypes.wintypes.BOOL - -HANDLE_FLAG_INHERIT = 0x00000001 - - -def set_close_exec(fd): - success = SetHandleInformation(fd, HANDLE_FLAG_INHERIT, 0) - if not success: - # raise a real OSError built from GetLastError (the original code - # raised the bare error number, which is not an exception) - raise ctypes.WinError() diff --git a/rosbridge_server/src/tornado/process.py b/rosbridge_server/src/tornado/process.py deleted file mode 100644 index 0f38b856d..000000000 --- a/rosbridge_server/src/tornado/process.py +++ /dev/null @@ -1,312 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
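A sketch of plugging the Twisted resolver above into Tornado's resolver machinery (hostname and port are placeholders)::

    import socket

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.netutil import Resolver

    Resolver.configure('tornado.platform.twisted.TwistedResolver')

    @gen.coroutine
    def lookup():
        addrinfo = yield Resolver().resolve('localhost', 80, socket.AF_UNSPEC)
        raise gen.Return(addrinfo)

    print(IOLoop.instance().run_sync(lookup))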
- -"""Utilities for working with multiple processes, including both forking -the server into multiple processes and managing subprocesses. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import errno -import os -import signal -import subprocess -import sys -import time - -from binascii import hexlify - -from tornado import ioloop -from tornado.iostream import PipeIOStream -from tornado.log import gen_log -from tornado.platform.auto import set_close_exec -from tornado import stack_context -from tornado.util import errno_from_exception - -try: - import multiprocessing -except ImportError: - # Multiprocessing is not availble on Google App Engine. - multiprocessing = None - -try: - long # py2 -except NameError: - long = int # py3 - - -def cpu_count(): - """Returns the number of processors on this machine.""" - if multiprocessing is None: - return 1 - try: - return multiprocessing.cpu_count() - except NotImplementedError: - pass - try: - return os.sysconf("SC_NPROCESSORS_CONF") - except ValueError: - pass - gen_log.error("Could not detect number of processors; assuming 1") - return 1 - - -def _reseed_random(): - if 'random' not in sys.modules: - return - import random - # If os.urandom is available, this method does the same thing as - # random.seed (at least as of python 2.6). If os.urandom is not - # available, we mix in the pid in addition to a timestamp. - try: - seed = long(hexlify(os.urandom(16)), 16) - except NotImplementedError: - seed = int(time.time() * 1000) ^ os.getpid() - random.seed(seed) - - -def _pipe_cloexec(): - r, w = os.pipe() - set_close_exec(r) - set_close_exec(w) - return r, w - - -_task_id = None - - -def fork_processes(num_processes, max_restarts=100): - """Starts multiple worker processes. - - If ``num_processes`` is None or <= 0, we detect the number of cores - available on this machine and fork that number of child - processes. If ``num_processes`` is given and > 0, we fork that - specific number of sub-processes. - - Since we use processes and not threads, there is no shared memory - between any server code. - - Note that multiple processes are not compatible with the autoreload - module (or the ``autoreload=True`` option to `tornado.web.Application` - which defaults to True when ``debug=True``). - When using multiple processes, no IOLoops can be created or - referenced until after the call to ``fork_processes``. - - In each child process, ``fork_processes`` returns its *task id*, a - number between 0 and ``num_processes``. Processes that exit - abnormally (due to a signal or non-zero exit status) are restarted - with the same id (up to ``max_restarts`` times). In the parent - process, ``fork_processes`` returns None if all child processes - have exited normally, but will otherwise only exit by throwing an - exception. - """ - global _task_id - assert _task_id is None - if num_processes is None or num_processes <= 0: - num_processes = cpu_count() - if ioloop.IOLoop.initialized(): - raise RuntimeError("Cannot run in multiple processes: IOLoop instance " - "has already been initialized. 
You cannot call " - "IOLoop.instance() before calling start_processes()") - gen_log.info("Starting %d processes", num_processes) - children = {} - - def start_child(i): - pid = os.fork() - if pid == 0: - # child process - _reseed_random() - global _task_id - _task_id = i - return i - else: - children[pid] = i - return None - for i in range(num_processes): - id = start_child(i) - if id is not None: - return id - num_restarts = 0 - while children: - try: - pid, status = os.wait() - except OSError as e: - if errno_from_exception(e) == errno.EINTR: - continue - raise - if pid not in children: - continue - id = children.pop(pid) - if os.WIFSIGNALED(status): - gen_log.warning("child %d (pid %d) killed by signal %d, restarting", - id, pid, os.WTERMSIG(status)) - elif os.WEXITSTATUS(status) != 0: - gen_log.warning("child %d (pid %d) exited with status %d, restarting", - id, pid, os.WEXITSTATUS(status)) - else: - gen_log.info("child %d (pid %d) exited normally", id, pid) - continue - num_restarts += 1 - if num_restarts > max_restarts: - raise RuntimeError("Too many child restarts, giving up") - new_id = start_child(id) - if new_id is not None: - return new_id - # All child processes exited cleanly, so exit the master process - # instead of just returning to right after the call to - # fork_processes (which will probably just start up another IOLoop - # unless the caller checks the return value). - sys.exit(0) - - -def task_id(): - """Returns the current task id, if any. - - Returns None if this process was not created by `fork_processes`. - """ - global _task_id - return _task_id - - -class Subprocess(object): - """Wraps ``subprocess.Popen`` with IOStream support. - - The constructor is the same as ``subprocess.Popen`` with the following - additions: - - * ``stdin``, ``stdout``, and ``stderr`` may have the value - ``tornado.process.Subprocess.STREAM``, which will make the corresponding - attribute of the resulting Subprocess a `.PipeIOStream`. - * A new keyword argument ``io_loop`` may be used to pass in an IOLoop. - """ - STREAM = object() - - _initialized = False - _waiting = {} - - def __init__(self, *args, **kwargs): - self.io_loop = kwargs.pop('io_loop', None) or ioloop.IOLoop.current() - # All FDs we create should be closed on error; those in to_close - # should be closed in the parent process on success. - pipe_fds = [] - to_close = [] - if kwargs.get('stdin') is Subprocess.STREAM: - in_r, in_w = _pipe_cloexec() - kwargs['stdin'] = in_r - pipe_fds.extend((in_r, in_w)) - to_close.append(in_r) - self.stdin = PipeIOStream(in_w, io_loop=self.io_loop) - if kwargs.get('stdout') is Subprocess.STREAM: - out_r, out_w = _pipe_cloexec() - kwargs['stdout'] = out_w - pipe_fds.extend((out_r, out_w)) - to_close.append(out_w) - self.stdout = PipeIOStream(out_r, io_loop=self.io_loop) - if kwargs.get('stderr') is Subprocess.STREAM: - err_r, err_w = _pipe_cloexec() - kwargs['stderr'] = err_w - pipe_fds.extend((err_r, err_w)) - to_close.append(err_w) - self.stderr = PipeIOStream(err_r, io_loop=self.io_loop) - try: - self.proc = subprocess.Popen(*args, **kwargs) - except: - for fd in pipe_fds: - os.close(fd) - raise - for fd in to_close: - os.close(fd) - for attr in ['stdin', 'stdout', 'stderr', 'pid']: - if not hasattr(self, attr): # don't clobber streams set above - setattr(self, attr, getattr(self.proc, attr)) - self._exit_callback = None - self.returncode = None - - def set_exit_callback(self, callback): - """Runs ``callback`` when this process exits. 
- - The callback takes one argument, the return code of the process. - - This method uses a ``SIGCHLD`` handler, which is a global setting - and may conflict if you have other libraries trying to handle the - same signal. If you are using more than one ``IOLoop`` it may - be necessary to call `Subprocess.initialize` first to designate - one ``IOLoop`` to run the signal handlers. - - In many cases a close callback on the stdout or stderr streams - can be used as an alternative to an exit callback if the - signal handler is causing a problem. - """ - self._exit_callback = stack_context.wrap(callback) - Subprocess.initialize(self.io_loop) - Subprocess._waiting[self.pid] = self - Subprocess._try_cleanup_process(self.pid) - - @classmethod - def initialize(cls, io_loop=None): - """Initializes the ``SIGCHLD`` handler. - - The signal handler is run on an `.IOLoop` to avoid locking issues. - Note that the `.IOLoop` used for signal handling need not be the - same one used by individual Subprocess objects (as long as the - ``IOLoops`` are each running in separate threads). - """ - if cls._initialized: - return - if io_loop is None: - io_loop = ioloop.IOLoop.current() - cls._old_sigchld = signal.signal( - signal.SIGCHLD, - lambda sig, frame: io_loop.add_callback_from_signal(cls._cleanup)) - cls._initialized = True - - @classmethod - def uninitialize(cls): - """Removes the ``SIGCHLD`` handler.""" - if not cls._initialized: - return - signal.signal(signal.SIGCHLD, cls._old_sigchld) - cls._initialized = False - - @classmethod - def _cleanup(cls): - for pid in list(cls._waiting.keys()): # make a copy - cls._try_cleanup_process(pid) - - @classmethod - def _try_cleanup_process(cls, pid): - try: - ret_pid, status = os.waitpid(pid, os.WNOHANG) - except OSError as e: - if errno_from_exception(e) == errno.ECHILD: - return - if ret_pid == 0: - return - assert ret_pid == pid - subproc = cls._waiting.pop(pid) - subproc.io_loop.add_callback_from_signal( - subproc._set_returncode, status) - - def _set_returncode(self, status): - if os.WIFSIGNALED(status): - self.returncode = -os.WTERMSIG(status) - else: - assert os.WIFEXITED(status) - self.returncode = os.WEXITSTATUS(status) - if self._exit_callback: - callback = self._exit_callback - self._exit_callback = None - callback(self.returncode) diff --git a/rosbridge_server/src/tornado/simple_httpclient.py b/rosbridge_server/src/tornado/simple_httpclient.py deleted file mode 100644 index 679e7e769..000000000 --- a/rosbridge_server/src/tornado/simple_httpclient.py +++ /dev/null @@ -1,509 +0,0 @@ -#!/usr/bin/env python -from __future__ import absolute_import, division, print_function, with_statement - -from tornado.concurrent import is_future -from tornado.escape import utf8, _unicode -from tornado.httpclient import HTTPResponse, HTTPError, AsyncHTTPClient, main, _RequestProxy -from tornado import httputil -from tornado.http1connection import HTTP1Connection, HTTP1ConnectionParameters -from tornado.iostream import StreamClosedError -from tornado.netutil import Resolver, OverrideResolver -from tornado.log import gen_log -from tornado import stack_context -from tornado.tcpclient import TCPClient - -import base64 -import collections -import copy -import functools -import re -import socket -import sys - -try: - from io import BytesIO # python 3 -except ImportError: - from cStringIO import StringIO as BytesIO # python 2 - -try: - import urlparse # py2 -except ImportError: - import urllib.parse as urlparse # py3 - -try: - import ssl -except ImportError: - # ssl is not 
available on Google App Engine. - ssl = None - -try: - import certifi -except ImportError: - certifi = None - - -def _default_ca_certs(): - if certifi is None: - raise Exception("The 'certifi' package is required to use https " - "in simple_httpclient") - return certifi.where() - - -class SimpleAsyncHTTPClient(AsyncHTTPClient): - """Non-blocking HTTP client with no external dependencies. - - This class implements an HTTP 1.1 client on top of Tornado's IOStreams. - It does not currently implement all applicable parts of the HTTP - specification, but it does enough to work with major web service APIs. - - Some features found in the curl-based AsyncHTTPClient are not yet - supported. In particular, proxies are not supported, connections - are not reused, and callers cannot select the network interface to be - used. - """ - def initialize(self, io_loop, max_clients=10, - hostname_mapping=None, max_buffer_size=104857600, - resolver=None, defaults=None, max_header_size=None): - """Creates an AsyncHTTPClient. - - Only a single AsyncHTTPClient instance exists per IOLoop - in order to provide limitations on the number of pending connections. - force_instance=True may be used to suppress this behavior. - - max_clients is the number of concurrent requests that can be - in progress. Note that these arguments are only used when the - client is first created, and will be ignored when an existing - client is reused. - - hostname_mapping is a dictionary mapping hostnames to IP addresses. - It can be used to make local DNS changes when modifying system-wide - settings like /etc/hosts is not possible or desirable (e.g. in - unittests). - - max_buffer_size is the number of bytes that can be read by IOStream. It - defaults to 100 MB. - """ - super(SimpleAsyncHTTPClient, self).initialize(io_loop, - defaults=defaults) - self.max_clients = max_clients - self.queue = collections.deque() - self.active = {} - self.waiting = {} - self.max_buffer_size = max_buffer_size - self.max_header_size = max_header_size - # TCPClient could create a Resolver for us, but we have to do it - # ourselves to support hostname_mapping. - if resolver: - self.resolver = resolver - self.own_resolver = False - else: - self.resolver = Resolver(io_loop=io_loop) - self.own_resolver = True - if hostname_mapping is not None: - self.resolver = OverrideResolver(resolver=self.resolver, - mapping=hostname_mapping) - self.tcp_client = TCPClient(resolver=self.resolver, io_loop=io_loop) - - def close(self): - super(SimpleAsyncHTTPClient, self).close() - if self.own_resolver: - self.resolver.close() - self.tcp_client.close() - - def fetch_impl(self, request, callback): - key = object() - self.queue.append((key, request, callback)) - if not len(self.active) < self.max_clients: - timeout_handle = self.io_loop.add_timeout( - self.io_loop.time() + min(request.connect_timeout, - request.request_timeout), - functools.partial(self._on_timeout, key)) - else: - timeout_handle = None - self.waiting[key] = (request, callback, timeout_handle) - self._process_queue() - if self.queue: - gen_log.debug("max_clients limit reached, request queued. " - "%d active, %d queued requests." 
% ( - len(self.active), len(self.queue))) - - def _process_queue(self): - with stack_context.NullContext(): - while self.queue and len(self.active) < self.max_clients: - key, request, callback = self.queue.popleft() - if key not in self.waiting: - continue - self._remove_timeout(key) - self.active[key] = (request, callback) - release_callback = functools.partial(self._release_fetch, key) - self._handle_request(request, release_callback, callback) - - def _handle_request(self, request, release_callback, final_callback): - _HTTPConnection(self.io_loop, self, request, release_callback, - final_callback, self.max_buffer_size, self.tcp_client, - self.max_header_size) - - def _release_fetch(self, key): - del self.active[key] - self._process_queue() - - def _remove_timeout(self, key): - if key in self.waiting: - request, callback, timeout_handle = self.waiting[key] - if timeout_handle is not None: - self.io_loop.remove_timeout(timeout_handle) - del self.waiting[key] - - def _on_timeout(self, key): - request, callback, timeout_handle = self.waiting[key] - self.queue.remove((key, request, callback)) - timeout_response = HTTPResponse( - request, 599, error=HTTPError(599, "Timeout"), - request_time=self.io_loop.time() - request.start_time) - self.io_loop.add_callback(callback, timeout_response) - del self.waiting[key] - - -class _HTTPConnection(httputil.HTTPMessageDelegate): - _SUPPORTED_METHODS = set(["GET", "HEAD", "POST", "PUT", "DELETE", "PATCH", "OPTIONS"]) - - def __init__(self, io_loop, client, request, release_callback, - final_callback, max_buffer_size, tcp_client, - max_header_size): - self.start_time = io_loop.time() - self.io_loop = io_loop - self.client = client - self.request = request - self.release_callback = release_callback - self.final_callback = final_callback - self.max_buffer_size = max_buffer_size - self.tcp_client = tcp_client - self.max_header_size = max_header_size - self.code = None - self.headers = None - self.chunks = [] - self._decompressor = None - # Timeout handle returned by IOLoop.add_timeout - self._timeout = None - self._sockaddr = None - with stack_context.ExceptionStackContext(self._handle_exception): - self.parsed = urlparse.urlsplit(_unicode(self.request.url)) - if self.parsed.scheme not in ("http", "https"): - raise ValueError("Unsupported url scheme: %s" % - self.request.url) - # urlsplit results have hostname and port results, but they - # didn't support ipv6 literals until python 2.7. 
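As a reading aid (not part of the diff): a minimal sketch of how this deleted client was usually selected and how `max_clients` and `hostname_mapping` come into play. The hostname mapping entry and the example URL are assumptions for illustration only.

```python
from __future__ import print_function

from tornado.httpclient import AsyncHTTPClient
from tornado.ioloop import IOLoop

# Illustrative only: pick the pure-Python implementation shown above and cap
# concurrency; an eleventh simultaneous fetch waits in self.queue (see
# fetch_impl) until a slot in self.active frees up.
AsyncHTTPClient.configure(
    "tornado.simple_httpclient.SimpleAsyncHTTPClient",
    max_clients=10,
    hostname_mapping={"api.internal": "127.0.0.1"},  # hypothetical DNS override
)

def on_response(response):
    print(response.code)
    IOLoop.current().stop()

AsyncHTTPClient().fetch("http://example.com/", on_response)
IOLoop.current().start()
```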
- netloc = self.parsed.netloc - if "@" in netloc: - userpass, _, netloc = netloc.rpartition("@") - match = re.match(r'^(.+):(\d+)$', netloc) - if match: - host = match.group(1) - port = int(match.group(2)) - else: - host = netloc - port = 443 if self.parsed.scheme == "https" else 80 - if re.match(r'^\[.*\]$', host): - # raw ipv6 addresses in urls are enclosed in brackets - host = host[1:-1] - self.parsed_hostname = host # save final host for _on_connect - - if request.allow_ipv6 is False: - af = socket.AF_INET - else: - af = socket.AF_UNSPEC - - ssl_options = self._get_ssl_options(self.parsed.scheme) - - timeout = min(self.request.connect_timeout, self.request.request_timeout) - if timeout: - self._timeout = self.io_loop.add_timeout( - self.start_time + timeout, - stack_context.wrap(self._on_timeout)) - self.tcp_client.connect(host, port, af=af, - ssl_options=ssl_options, - max_buffer_size=self.max_buffer_size, - callback=self._on_connect) - - def _get_ssl_options(self, scheme): - if scheme == "https": - ssl_options = {} - if self.request.validate_cert: - ssl_options["cert_reqs"] = ssl.CERT_REQUIRED - if self.request.ca_certs is not None: - ssl_options["ca_certs"] = self.request.ca_certs - else: - ssl_options["ca_certs"] = _default_ca_certs() - if self.request.client_key is not None: - ssl_options["keyfile"] = self.request.client_key - if self.request.client_cert is not None: - ssl_options["certfile"] = self.request.client_cert - - # SSL interoperability is tricky. We want to disable - # SSLv2 for security reasons; it wasn't disabled by default - # until openssl 1.0. The best way to do this is to use - # the SSL_OP_NO_SSLv2, but that wasn't exposed to python - # until 3.2. Python 2.7 adds the ciphers argument, which - # can also be used to disable SSLv2. As a last resort - # on python 2.6, we set ssl_version to TLSv1. This is - # more narrow than we'd like since it also breaks - # compatibility with servers configured for SSLv3 only, - # but nearly all servers support both SSLv3 and TLSv1: - # http://blog.ivanristic.com/2011/09/ssl-survey-protocol-support.html - if sys.version_info >= (2, 7): - # In addition to disabling SSLv2, we also exclude certain - # classes of insecure ciphers. - ssl_options["ciphers"] = "DEFAULT:!SSLv2:!EXPORT:!DES" - else: - # This is really only necessary for pre-1.0 versions - # of openssl, but python 2.6 doesn't expose version - # information. - ssl_options["ssl_version"] = ssl.PROTOCOL_TLSv1 - return ssl_options - return None - - def _on_timeout(self): - self._timeout = None - if self.final_callback is not None: - raise HTTPError(599, "Timeout") - - def _remove_timeout(self): - if self._timeout is not None: - self.io_loop.remove_timeout(self._timeout) - self._timeout = None - - def _on_connect(self, stream): - if self.final_callback is None: - # final_callback is cleared if we've hit our timeout. 
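Again as a reading aid (not part of the diff): both `connect_timeout` and `request_timeout` above feed the same `_on_timeout` path, which surfaces as a synthetic 599 response rather than an exception at the call site. A small hedged sketch follows; the unroutable address is assumed purely for the demonstration.

```python
from __future__ import print_function

from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from tornado.ioloop import IOLoop

# Sketch of the 599 path: with a callback-style fetch, timeouts arrive as a
# response object whose code is 599 and whose error holds the HTTPError.
request = HTTPRequest("http://10.255.255.1/",   # unroutable, for the demo
                      connect_timeout=0.5,       # seconds to establish TCP
                      request_timeout=2.0)       # seconds for the whole request

def on_response(response):
    if response.code == 599:
        print("timed out:", response.error)
    IOLoop.current().stop()

AsyncHTTPClient().fetch(request, on_response)
IOLoop.current().start()
```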
- stream.close() - return - self.stream = stream - self.stream.set_close_callback(self.on_connection_close) - self._remove_timeout() - if self.final_callback is None: - return - if self.request.request_timeout: - self._timeout = self.io_loop.add_timeout( - self.start_time + self.request.request_timeout, - stack_context.wrap(self._on_timeout)) - if (self.request.method not in self._SUPPORTED_METHODS and - not self.request.allow_nonstandard_methods): - raise KeyError("unknown method %s" % self.request.method) - for key in ('network_interface', - 'proxy_host', 'proxy_port', - 'proxy_username', 'proxy_password'): - if getattr(self.request, key, None): - raise NotImplementedError('%s not supported' % key) - if "Connection" not in self.request.headers: - self.request.headers["Connection"] = "close" - if "Host" not in self.request.headers: - if '@' in self.parsed.netloc: - self.request.headers["Host"] = self.parsed.netloc.rpartition('@')[-1] - else: - self.request.headers["Host"] = self.parsed.netloc - username, password = None, None - if self.parsed.username is not None: - username, password = self.parsed.username, self.parsed.password - elif self.request.auth_username is not None: - username = self.request.auth_username - password = self.request.auth_password or '' - if username is not None: - if self.request.auth_mode not in (None, "basic"): - raise ValueError("unsupported auth_mode %s" % - self.request.auth_mode) - auth = utf8(username) + b":" + utf8(password) - self.request.headers["Authorization"] = (b"Basic " + - base64.b64encode(auth)) - if self.request.user_agent: - self.request.headers["User-Agent"] = self.request.user_agent - if not self.request.allow_nonstandard_methods: - if self.request.method in ("POST", "PATCH", "PUT"): - if (self.request.body is None and - self.request.body_producer is None): - raise AssertionError( - 'Body must not be empty for "%s" request' - % self.request.method) - else: - if (self.request.body is not None or - self.request.body_producer is not None): - raise AssertionError( - 'Body must be empty for "%s" request' - % self.request.method) - if self.request.expect_100_continue: - self.request.headers["Expect"] = "100-continue" - if self.request.body is not None: - # When body_producer is used the caller is responsible for - # setting Content-Length (or else chunked encoding will be used). - self.request.headers["Content-Length"] = str(len( - self.request.body)) - if (self.request.method == "POST" and - "Content-Type" not in self.request.headers): - self.request.headers["Content-Type"] = "application/x-www-form-urlencoded" - if self.request.decompress_response: - self.request.headers["Accept-Encoding"] = "gzip" - req_path = ((self.parsed.path or '/') + - (('?' 
+ self.parsed.query) if self.parsed.query else '')) - self.stream.set_nodelay(True) - self.connection = HTTP1Connection( - self.stream, True, - HTTP1ConnectionParameters( - no_keep_alive=True, - max_header_size=self.max_header_size, - decompress=self.request.decompress_response), - self._sockaddr) - start_line = httputil.RequestStartLine(self.request.method, - req_path, 'HTTP/1.1') - self.connection.write_headers(start_line, self.request.headers) - if self.request.expect_100_continue: - self._read_response() - else: - self._write_body(True) - - def _write_body(self, start_read): - if self.request.body is not None: - self.connection.write(self.request.body) - self.connection.finish() - elif self.request.body_producer is not None: - fut = self.request.body_producer(self.connection.write) - if is_future(fut): - def on_body_written(fut): - fut.result() - self.connection.finish() - if start_read: - self._read_response() - self.io_loop.add_future(fut, on_body_written) - return - self.connection.finish() - if start_read: - self._read_response() - - def _read_response(self): - # Ensure that any exception raised in read_response ends up in our - # stack context. - self.io_loop.add_future( - self.connection.read_response(self), - lambda f: f.result()) - - def _release(self): - if self.release_callback is not None: - release_callback = self.release_callback - self.release_callback = None - release_callback() - - def _run_callback(self, response): - self._release() - if self.final_callback is not None: - final_callback = self.final_callback - self.final_callback = None - self.io_loop.add_callback(final_callback, response) - - def _handle_exception(self, typ, value, tb): - if self.final_callback: - self._remove_timeout() - if isinstance(value, StreamClosedError): - value = HTTPError(599, "Stream closed") - self._run_callback(HTTPResponse(self.request, 599, error=value, - request_time=self.io_loop.time() - self.start_time, - )) - - if hasattr(self, "stream"): - # TODO: this may cause a StreamClosedError to be raised - # by the connection's Future. Should we cancel the - # connection more gracefully? - self.stream.close() - return True - else: - # If our callback has already been called, we are probably - # catching an exception that is not caused by us but rather - # some child of our callback. Rather than drop it on the floor, - # pass it along, unless it's just the stream being closed. - return isinstance(value, StreamClosedError) - - def on_connection_close(self): - if self.final_callback is not None: - message = "Connection closed" - if self.stream.error: - raise self.stream.error - try: - raise HTTPError(599, message) - except HTTPError: - self._handle_exception(*sys.exc_info()) - - def headers_received(self, first_line, headers): - if self.request.expect_100_continue and first_line.code == 100: - self._write_body(False) - return - self.headers = headers - self.code = first_line.code - self.reason = first_line.reason - - if self.request.header_callback is not None: - # Reassemble the start line. 
- self.request.header_callback('%s %s %s\r\n' % first_line) - for k, v in self.headers.get_all(): - self.request.header_callback("%s: %s\r\n" % (k, v)) - self.request.header_callback('\r\n') - - def finish(self): - data = b''.join(self.chunks) - self._remove_timeout() - original_request = getattr(self.request, "original_request", - self.request) - if (self.request.follow_redirects and - self.request.max_redirects > 0 and - self.code in (301, 302, 303, 307)): - assert isinstance(self.request, _RequestProxy) - new_request = copy.copy(self.request.request) - new_request.url = urlparse.urljoin(self.request.url, - self.headers["Location"]) - new_request.max_redirects = self.request.max_redirects - 1 - del new_request.headers["Host"] - # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.4 - # Client SHOULD make a GET request after a 303. - # According to the spec, 302 should be followed by the same - # method as the original request, but in practice browsers - # treat 302 the same as 303, and many servers use 302 for - # compatibility with pre-HTTP/1.1 user agents which don't - # understand the 303 status. - if self.code in (302, 303): - new_request.method = "GET" - new_request.body = None - for h in ["Content-Length", "Content-Type", - "Content-Encoding", "Transfer-Encoding"]: - try: - del self.request.headers[h] - except KeyError: - pass - new_request.original_request = original_request - final_callback = self.final_callback - self.final_callback = None - self._release() - self.client.fetch(new_request, final_callback) - self._on_end_request() - return - if self.request.streaming_callback: - buffer = BytesIO() - else: - buffer = BytesIO(data) # TODO: don't require one big string? - response = HTTPResponse(original_request, - self.code, reason=getattr(self, 'reason', None), - headers=self.headers, - request_time=self.io_loop.time() - self.start_time, - buffer=buffer, - effective_url=self.request.url) - self._run_callback(response) - self._on_end_request() - - def _on_end_request(self): - self.stream.close() - - def data_received(self, chunk): - if self.request.streaming_callback is not None: - self.request.streaming_callback(chunk) - else: - self.chunks.append(chunk) - - -if __name__ == "__main__": - AsyncHTTPClient.configure(SimpleAsyncHTTPClient) - main() diff --git a/rosbridge_server/src/tornado/speedups.c b/rosbridge_server/src/tornado/speedups.c deleted file mode 100644 index 174a6129e..000000000 --- a/rosbridge_server/src/tornado/speedups.c +++ /dev/null @@ -1,52 +0,0 @@ -#define PY_SSIZE_T_CLEAN -#include <Python.h> - -static PyObject* websocket_mask(PyObject* self, PyObject* args) { - const char* mask; - Py_ssize_t mask_len; - const char* data; - Py_ssize_t data_len; - Py_ssize_t i; - PyObject* result; - char* buf; - - if (!PyArg_ParseTuple(args, "s#s#", &mask, &mask_len, &data, &data_len)) { - return NULL; - } - - result = PyBytes_FromStringAndSize(NULL, data_len); - if (!result) { - return NULL; - } - buf = PyBytes_AsString(result); - for (i = 0; i < data_len; i++) { - buf[i] = data[i] ^ mask[i % 4]; - } - - return result; -} - -static PyMethodDef methods[] = { - {"websocket_mask", websocket_mask, METH_VARARGS, ""}, - {NULL, NULL, 0, NULL} -}; - -#if PY_MAJOR_VERSION >= 3 -static struct PyModuleDef speedupsmodule = { - PyModuleDef_HEAD_INIT, - "speedups", - NULL, - -1, - methods -}; - -PyMODINIT_FUNC -PyInit_speedups() { - return PyModule_Create(&speedupsmodule); } -#else // Python 2.x -PyMODINIT_FUNC -initspeedups() { - Py_InitModule("tornado.speedups", methods); -} -#endif diff 
--git a/rosbridge_server/src/tornado/stack_context.py b/rosbridge_server/src/tornado/stack_context.py deleted file mode 100644 index 2e845ab27..000000000 --- a/rosbridge_server/src/tornado/stack_context.py +++ /dev/null @@ -1,388 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2010 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""`StackContext` allows applications to maintain threadlocal-like state -that follows execution as it moves to other execution contexts. - -The motivating examples are to eliminate the need for explicit -``async_callback`` wrappers (as in `tornado.web.RequestHandler`), and to -allow some additional context to be kept for logging. - -This is slightly magic, but it's an extension of the idea that an -exception handler is a kind of stack-local state and when that stack -is suspended and resumed in a new context that state needs to be -preserved. `StackContext` shifts the burden of restoring that state -from each call site (e.g. wrapping each `.AsyncHTTPClient` callback -in ``async_callback``) to the mechanisms that transfer control from -one context to another (e.g. `.AsyncHTTPClient` itself, `.IOLoop`, -thread pools, etc). - -Example usage:: - - @contextlib.contextmanager - def die_on_error(): - try: - yield - except Exception: - logging.error("exception in asynchronous operation", exc_info=True) - sys.exit(1) - - with StackContext(die_on_error): - # Any exception thrown here *or in callback and its descendants* - # will cause the process to exit instead of spinning endlessly - # in the ioloop. - http_client.fetch(url, callback) - ioloop.start() - -Most applications shouldn't have to work with `StackContext` directly. -Here are a few rules of thumb for when it's necessary: - -* If you're writing an asynchronous library that doesn't rely on a - stack_context-aware library like `tornado.ioloop` or `tornado.iostream` - (for example, if you're writing a thread pool), use - `.stack_context.wrap()` before any asynchronous operations to capture the - stack context from where the operation was started. - -* If you're writing an asynchronous library that has some shared - resources (such as a connection pool), create those shared resources - within a ``with stack_context.NullContext():`` block. This will prevent - ``StackContexts`` from leaking from one request to another. - -* If you want to write something like an exception handler that will - persist across asynchronous calls, create a new `StackContext` (or - `ExceptionStackContext`), and make your asynchronous calls in a ``with`` - block that references your `StackContext`. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import sys -import threading - -from tornado.util import raise_exc_info - - -class StackContextInconsistentError(Exception): - pass - - -class _State(threading.local): - def __init__(self): - self.contexts = (tuple(), None) -_state = _State() - - -class StackContext(object): - """Establishes the given context as a StackContext that will be transferred. 
- - Note that the parameter is a callable that returns a context - manager, not the context itself. That is, where for a - non-transferable context manager you would say:: - - with my_context(): - - StackContext takes the function itself rather than its result:: - - with StackContext(my_context): - - The result of ``with StackContext() as cb:`` is a deactivation - callback. Run this callback when the StackContext is no longer - needed to ensure that it is not propagated any further (note that - deactivating a context does not affect any instances of that - context that are currently pending). This is an advanced feature - and not necessary in most applications. - """ - def __init__(self, context_factory): - self.context_factory = context_factory - self.contexts = [] - self.active = True - - def _deactivate(self): - self.active = False - - # StackContext protocol - def enter(self): - context = self.context_factory() - self.contexts.append(context) - context.__enter__() - - def exit(self, type, value, traceback): - context = self.contexts.pop() - context.__exit__(type, value, traceback) - - # Note that some of this code is duplicated in ExceptionStackContext - # below. ExceptionStackContext is more common and doesn't need - # the full generality of this class. - def __enter__(self): - self.old_contexts = _state.contexts - self.new_contexts = (self.old_contexts[0] + (self,), self) - _state.contexts = self.new_contexts - - try: - self.enter() - except: - _state.contexts = self.old_contexts - raise - - return self._deactivate - - def __exit__(self, type, value, traceback): - try: - self.exit(type, value, traceback) - finally: - final_contexts = _state.contexts - _state.contexts = self.old_contexts - - # Generator coroutines and with-statements with non-local - # effects interact badly. Check here for signs of - # the stack getting out of sync. - # Note that this check comes after restoring _state.context - # so that if it fails things are left in a (relatively) - # consistent state. - if final_contexts is not self.new_contexts: - raise StackContextInconsistentError( - 'stack_context inconsistency (may be caused by yield ' - 'within a "with StackContext" block)') - - # Break up a reference to itself to allow for faster GC on CPython. - self.new_contexts = None - - -class ExceptionStackContext(object): - """Specialization of StackContext for exception handling. - - The supplied ``exception_handler`` function will be called in the - event of an uncaught exception in this context. The semantics are - similar to a try/finally clause, and intended use cases are to log - an error, close a socket, or similar cleanup actions. The - ``exc_info`` triple ``(type, value, traceback)`` will be passed to the - exception_handler function. - - If the exception handler returns true, the exception will be - consumed and will not be propagated to other exception handlers. 
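To make the semantics described above concrete (not part of the diff): a small hedged sketch of `ExceptionStackContext` catching an error that is raised on a later turn of the IOLoop. Handler and callback names are illustrative.

```python
from __future__ import print_function

from tornado import stack_context
from tornado.ioloop import IOLoop

# Illustrative use of the deleted module: the handler fires even though the
# error is raised later, inside a callback scheduled on the IOLoop.
def handle(typ, value, tb):
    print("caught asynchronous error:", value)
    IOLoop.current().stop()
    return True  # returning true consumes the exception

def boom():
    raise ValueError("raised inside a callback")

with stack_context.ExceptionStackContext(handle):
    IOLoop.current().add_callback(boom)

IOLoop.current().start()
```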
- """ - def __init__(self, exception_handler): - self.exception_handler = exception_handler - self.active = True - - def _deactivate(self): - self.active = False - - def exit(self, type, value, traceback): - if type is not None: - return self.exception_handler(type, value, traceback) - - def __enter__(self): - self.old_contexts = _state.contexts - self.new_contexts = (self.old_contexts[0], self) - _state.contexts = self.new_contexts - - return self._deactivate - - def __exit__(self, type, value, traceback): - try: - if type is not None: - return self.exception_handler(type, value, traceback) - finally: - final_contexts = _state.contexts - _state.contexts = self.old_contexts - - if final_contexts is not self.new_contexts: - raise StackContextInconsistentError( - 'stack_context inconsistency (may be caused by yield ' - 'within a "with StackContext" block)') - - # Break up a reference to itself to allow for faster GC on CPython. - self.new_contexts = None - - -class NullContext(object): - """Resets the `StackContext`. - - Useful when creating a shared resource on demand (e.g. an - `.AsyncHTTPClient`) where the stack that caused the creating is - not relevant to future operations. - """ - def __enter__(self): - self.old_contexts = _state.contexts - _state.contexts = (tuple(), None) - - def __exit__(self, type, value, traceback): - _state.contexts = self.old_contexts - - -def _remove_deactivated(contexts): - """Remove deactivated handlers from the chain""" - # Clean ctx handlers - stack_contexts = tuple([h for h in contexts[0] if h.active]) - - # Find new head - head = contexts[1] - while head is not None and not head.active: - head = head.old_contexts[1] - - # Process chain - ctx = head - while ctx is not None: - parent = ctx.old_contexts[1] - - while parent is not None: - if parent.active: - break - ctx.old_contexts = parent.old_contexts - parent = parent.old_contexts[1] - - ctx = parent - - return (stack_contexts, head) - - -def wrap(fn): - """Returns a callable object that will restore the current `StackContext` - when executed. - - Use this whenever saving a callback to be executed later in a - different execution context (either in a different thread or - asynchronously in the same thread). - """ - # Check if function is already wrapped - if fn is None or hasattr(fn, '_wrapped'): - return fn - - # Capture current stack head - # TODO: Any other better way to store contexts and update them in wrapped function? - cap_contexts = [_state.contexts] - - if not cap_contexts[0][0] and not cap_contexts[0][1]: - # Fast path when there are no active contexts. - def null_wrapper(*args, **kwargs): - try: - current_state = _state.contexts - _state.contexts = cap_contexts[0] - return fn(*args, **kwargs) - finally: - _state.contexts = current_state - null_wrapper._wrapped = True - return null_wrapper - - def wrapped(*args, **kwargs): - ret = None - try: - # Capture old state - current_state = _state.contexts - - # Remove deactivated items - cap_contexts[0] = contexts = _remove_deactivated(cap_contexts[0]) - - # Force new state - _state.contexts = contexts - - # Current exception - exc = (None, None, None) - top = None - - # Apply stack contexts - last_ctx = 0 - stack = contexts[0] - - # Apply state - for n in stack: - try: - n.enter() - last_ctx += 1 - except: - # Exception happened. 
Record exception info and store top-most handler - exc = sys.exc_info() - top = n.old_contexts[1] - - # Execute callback if no exception happened while restoring state - if top is None: - try: - ret = fn(*args, **kwargs) - except: - exc = sys.exc_info() - top = contexts[1] - - # If there was an exception, try to handle it by going through the exception chain - if top is not None: - exc = _handle_exception(top, exc) - else: - # Otherwise take the shorter path and run stack contexts in reverse order - while last_ctx > 0: - last_ctx -= 1 - c = stack[last_ctx] - - try: - c.exit(*exc) - except: - exc = sys.exc_info() - top = c.old_contexts[1] - break - else: - top = None - - # If an exception happened while unrolling, take the longer exception handler path - if top is not None: - exc = _handle_exception(top, exc) - - # If the exception was not handled, raise it - if exc != (None, None, None): - raise_exc_info(exc) - finally: - _state.contexts = current_state - return ret - - wrapped._wrapped = True - return wrapped - - -def _handle_exception(tail, exc): - while tail is not None: - try: - if tail.exit(*exc): - exc = (None, None, None) - except: - exc = sys.exc_info() - - tail = tail.old_contexts[1] - - return exc - - -def run_with_stack_context(context, func): - """Run a coroutine ``func`` in the given `StackContext`. - - It is not safe to have a ``yield`` statement within a ``with StackContext`` - block, so it is difficult to use stack context with `.gen.coroutine`. - This helper function runs the function in the correct context while - keeping the ``yield`` and ``with`` statements syntactically separate. - - Example:: - - @gen.coroutine - def incorrect(): - with StackContext(ctx): - # ERROR: this will raise StackContextInconsistentError - yield other_coroutine() - - @gen.coroutine - def correct(): - yield run_with_stack_context(StackContext(ctx), other_coroutine) - - .. versionadded:: 3.1 - """ - with context: - return func() diff --git a/rosbridge_server/src/tornado/tcpclient.py b/rosbridge_server/src/tornado/tcpclient.py deleted file mode 100644 index d49eb5cd6..000000000 --- a/rosbridge_server/src/tornado/tcpclient.py +++ /dev/null @@ -1,179 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2014 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A non-blocking TCP connection factory. -""" -from __future__ import absolute_import, division, print_function, with_statement - -import functools -import socket - -from tornado.concurrent import Future -from tornado.ioloop import IOLoop -from tornado.iostream import IOStream -from tornado import gen -from tornado.netutil import Resolver - -_INITIAL_CONNECT_TIMEOUT = 0.3 - - -class _Connector(object): - """A stateless implementation of the "Happy Eyeballs" algorithm. - - "Happy Eyeballs" is documented in RFC6555 as the recommended practice - for when both IPv4 and IPv6 addresses are available. - - In this implementation, we partition the addresses by family, and - make the first connection attempt to whichever address was - returned first by ``getaddrinfo``. 
If that connection fails or - times out, we begin a connection in parallel to the first address - of the other family. If there are additional failures we retry - with other addresses, keeping one connection attempt per family - in flight at a time. - - http://tools.ietf.org/html/rfc6555 - - """ - def __init__(self, addrinfo, io_loop, connect): - self.io_loop = io_loop - self.connect = connect - - self.future = Future() - self.timeout = None - self.last_error = None - self.remaining = len(addrinfo) - self.primary_addrs, self.secondary_addrs = self.split(addrinfo) - - @staticmethod - def split(addrinfo): - """Partition the ``addrinfo`` list by address family. - - Returns two lists. The first list contains the first entry from - ``addrinfo`` and all others with the same family, and the - second list contains all other addresses (normally one list will - be AF_INET and the other AF_INET6, although non-standard resolvers - may return additional families). - """ - primary = [] - secondary = [] - primary_af = addrinfo[0][0] - for af, addr in addrinfo: - if af == primary_af: - primary.append((af, addr)) - else: - secondary.append((af, addr)) - return primary, secondary - - def start(self, timeout=_INITIAL_CONNECT_TIMEOUT): - self.try_connect(iter(self.primary_addrs)) - self.set_timout(timeout) - return self.future - - def try_connect(self, addrs): - try: - af, addr = next(addrs) - except StopIteration: - # We've reached the end of our queue, but the other queue - # might still be working. Send a final error on the future - # only when both queues are finished. - if self.remaining == 0 and not self.future.done(): - self.future.set_exception(self.last_error or - IOError("connection failed")) - return - future = self.connect(af, addr) - future.add_done_callback(functools.partial(self.on_connect_done, - addrs, af, addr)) - - def on_connect_done(self, addrs, af, addr, future): - self.remaining -= 1 - try: - stream = future.result() - except Exception as e: - if self.future.done(): - return - # Error: try again (but remember what happened so we have an - # error to raise in the end) - self.last_error = e - self.try_connect(addrs) - if self.timeout is not None: - # If the first attempt failed, don't wait for the - # timeout to try an address from the secondary queue. - self.on_timeout() - return - self.clear_timeout() - if self.future.done(): - # This is a late arrival; just drop it. - stream.close() - else: - self.future.set_result((af, addr, stream)) - - def set_timout(self, timeout): - self.timeout = self.io_loop.add_timeout(self.io_loop.time() + timeout, - self.on_timeout) - - def on_timeout(self): - self.timeout = None - self.try_connect(iter(self.secondary_addrs)) - - def clear_timeout(self): - if self.timeout is not None: - self.io_loop.remove_timeout(self.timeout) - - -class TCPClient(object): - """A non-blocking TCP connection factory. - """ - def __init__(self, resolver=None, io_loop=None): - self.io_loop = io_loop or IOLoop.current() - if resolver is not None: - self.resolver = resolver - self._own_resolver = False - else: - self.resolver = Resolver(io_loop=io_loop) - self._own_resolver = True - - def close(self): - if self._own_resolver: - self.resolver.close() - - @gen.coroutine - def connect(self, host, port, af=socket.AF_UNSPEC, ssl_options=None, - max_buffer_size=None): - """Connect to the given host and port. - - Asynchronously returns an `.IOStream` (or `.SSLIOStream` if - ``ssl_options`` is not None). 
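For orientation (not part of the diff): a minimal coroutine sketch built on the deleted `TCPClient`, which resolves a host, races address families per the happy-eyeballs logic above, and returns a connected stream. The host, port, and tiny protocol exchange are assumptions for the demo.

```python
from __future__ import print_function

from tornado import gen
from tornado.ioloop import IOLoop
from tornado.tcpclient import TCPClient

# Illustrative only: connect, send one HTTP/1.0 request line, read one line.
@gen.coroutine
def fetch_banner(host, port):
    stream = yield TCPClient().connect(host, port)
    yield stream.write(b"HEAD / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    first_line = yield stream.read_until(b"\r\n")
    stream.close()
    raise gen.Return(first_line)

print(IOLoop.current().run_sync(lambda: fetch_banner("example.com", 80)))
```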
- """ - addrinfo = yield self.resolver.resolve(host, port, af) - connector = _Connector( - addrinfo, self.io_loop, - functools.partial(self._create_stream, max_buffer_size)) - af, addr, stream = yield connector.start() - # TODO: For better performance we could cache the (af, addr) - # information here and re-use it on sbusequent connections to - # the same host. (http://tools.ietf.org/html/rfc6555#section-4.2) - if ssl_options is not None: - stream = yield stream.start_tls(False, ssl_options=ssl_options, - server_hostname=host) - raise gen.Return(stream) - - def _create_stream(self, max_buffer_size, af, addr): - # Always connect in plaintext; we'll convert to ssl if necessary - # after one connection has completed. - stream = IOStream(socket.socket(af), - io_loop=self.io_loop, - max_buffer_size=max_buffer_size) - return stream.connect(addr) diff --git a/rosbridge_server/src/tornado/tcpserver.py b/rosbridge_server/src/tornado/tcpserver.py deleted file mode 100644 index 427acec57..000000000 --- a/rosbridge_server/src/tornado/tcpserver.py +++ /dev/null @@ -1,257 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2011 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A non-blocking, single-threaded TCP server.""" -from __future__ import absolute_import, division, print_function, with_statement - -import errno -import os -import socket - -from tornado.log import app_log -from tornado.ioloop import IOLoop -from tornado.iostream import IOStream, SSLIOStream -from tornado.netutil import bind_sockets, add_accept_handler, ssl_wrap_socket -from tornado import process -from tornado.util import errno_from_exception - -try: - import ssl -except ImportError: - # ssl is not available on Google App Engine. - ssl = None - - -class TCPServer(object): - r"""A non-blocking, single-threaded TCP server. - - To use `TCPServer`, define a subclass which overrides the `handle_stream` - method. - - To make this server serve SSL traffic, send the ssl_options dictionary - argument with the arguments required for the `ssl.wrap_socket` method, - including "certfile" and "keyfile":: - - TCPServer(ssl_options={ - "certfile": os.path.join(data_dir, "mydomain.crt"), - "keyfile": os.path.join(data_dir, "mydomain.key"), - }) - - `TCPServer` initialization follows one of three patterns: - - 1. `listen`: simple single-process:: - - server = TCPServer() - server.listen(8888) - IOLoop.instance().start() - - 2. `bind`/`start`: simple multi-process:: - - server = TCPServer() - server.bind(8888) - server.start(0) # Forks multiple sub-processes - IOLoop.instance().start() - - When using this interface, an `.IOLoop` must *not* be passed - to the `TCPServer` constructor. `start` will always start - the server on the default singleton `.IOLoop`. - - 3. 
`add_sockets`: advanced multi-process:: - - sockets = bind_sockets(8888) - tornado.process.fork_processes(0) - server = TCPServer() - server.add_sockets(sockets) - IOLoop.instance().start() - - The `add_sockets` interface is more complicated, but it can be - used with `tornado.process.fork_processes` to give you more - flexibility in when the fork happens. `add_sockets` can - also be used in single-process servers if you want to create - your listening sockets in some way other than - `~tornado.netutil.bind_sockets`. - - .. versionadded:: 3.1 - The ``max_buffer_size`` argument. - """ - def __init__(self, io_loop=None, ssl_options=None, max_buffer_size=None, - read_chunk_size=None): - self.io_loop = io_loop - self.ssl_options = ssl_options - self._sockets = {} # fd -> socket object - self._pending_sockets = [] - self._started = False - self.max_buffer_size = max_buffer_size - self.read_chunk_size = read_chunk_size - - # Verify the SSL options. Otherwise we don't get errors until clients - # connect. This doesn't verify that the keys are legitimate, but - # the SSL module doesn't do that until there is a connected socket, - # which seems like too much work - if self.ssl_options is not None and isinstance(self.ssl_options, dict): - # Only certfile is required: it can contain both keys - if 'certfile' not in self.ssl_options: - raise KeyError('missing key "certfile" in ssl_options') - - if not os.path.exists(self.ssl_options['certfile']): - raise ValueError('certfile "%s" does not exist' % - self.ssl_options['certfile']) - if ('keyfile' in self.ssl_options and - not os.path.exists(self.ssl_options['keyfile'])): - raise ValueError('keyfile "%s" does not exist' % - self.ssl_options['keyfile']) - - def listen(self, port, address=""): - """Starts accepting connections on the given port. - - This method may be called more than once to listen on multiple ports. - `listen` takes effect immediately; it is not necessary to call - `TCPServer.start` afterwards. It is, however, necessary to start - the `.IOLoop`. - """ - sockets = bind_sockets(port, address=address) - self.add_sockets(sockets) - - def add_sockets(self, sockets): - """Makes this server start accepting connections on the given sockets. - - The ``sockets`` parameter is a list of socket objects such as - those returned by `~tornado.netutil.bind_sockets`. - `add_sockets` is typically used in combination with that - method and `tornado.process.fork_processes` to provide greater - control over the initialization of a multi-process server. - """ - if self.io_loop is None: - self.io_loop = IOLoop.current() - - for sock in sockets: - self._sockets[sock.fileno()] = sock - add_accept_handler(sock, self._handle_connection, - io_loop=self.io_loop) - - def add_socket(self, socket): - """Singular version of `add_sockets`. Takes a single socket object.""" - self.add_sockets([socket]) - - def bind(self, port, address=None, family=socket.AF_UNSPEC, backlog=128): - """Binds this server to the given port on the given address. - - To start the server, call `start`. If you want to run this server - in a single process, you can call `listen` as a shortcut to the - sequence of `bind` and `start` calls. - - Address may be either an IP address or hostname. If it's a hostname, - the server will listen on all IP addresses associated with the - name. Address may be an empty string or None to listen on all - available interfaces. Family may be set to either `socket.AF_INET` - or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise - both will be used if available. 
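As a companion to the initialization patterns listed above (not part of the diff): a minimal hedged sketch of a `TCPServer` subclass. `handle_stream` is the only required override; the echo protocol and port 8888 are assumptions following the docstring's examples.

```python
from tornado.ioloop import IOLoop
from tornado.iostream import StreamClosedError
from tornado.tcpserver import TCPServer

# Illustrative subclass: echo each newline-terminated line back to the client.
class EchoServer(TCPServer):
    def handle_stream(self, stream, address):
        def on_line(data):
            stream.write(data)  # echo the line back
            read_line()         # then wait for the next one

        def read_line():
            try:
                stream.read_until(b"\n", on_line)
            except StreamClosedError:
                pass

        read_line()

server = EchoServer()
server.listen(8888)  # pattern 1 ("listen") from the docstring above
IOLoop.current().start()
```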
- - The ``backlog`` argument has the same meaning as for - `socket.listen <socket.socket.listen>`. - - This method may be called multiple times prior to `start` to listen - on multiple ports or interfaces. - """ - sockets = bind_sockets(port, address=address, family=family, - backlog=backlog) - if self._started: - self.add_sockets(sockets) - else: - self._pending_sockets.extend(sockets) - - def start(self, num_processes=1): - """Starts this server in the `.IOLoop`. - - By default, we run the server in this process and do not fork any - additional child process. - - If num_processes is ``None`` or <= 0, we detect the number of cores - available on this machine and fork that number of child - processes. If num_processes is given and > 1, we fork that - specific number of sub-processes. - - Since we use processes and not threads, there is no shared memory - between any server code. - - Note that multiple processes are not compatible with the autoreload - module (or the ``autoreload=True`` option to `tornado.web.Application` - which defaults to True when ``debug=True``). - When using multiple processes, no IOLoops can be created or - referenced until after the call to ``TCPServer.start(n)``. - """ - assert not self._started - self._started = True - if num_processes != 1: - process.fork_processes(num_processes) - sockets = self._pending_sockets - self._pending_sockets = [] - self.add_sockets(sockets) - - def stop(self): - """Stops listening for new connections. - - Requests currently in progress may still continue after the - server is stopped. - """ - for fd, sock in self._sockets.items(): - self.io_loop.remove_handler(fd) - sock.close() - - def handle_stream(self, stream, address): - """Override to handle a new `.IOStream` from an incoming connection.""" - raise NotImplementedError() - - def _handle_connection(self, connection, address): - if self.ssl_options is not None: - assert ssl, "Python 2.6+ and OpenSSL required for SSL" - try: - connection = ssl_wrap_socket(connection, - self.ssl_options, - server_side=True, - do_handshake_on_connect=False) - except ssl.SSLError as err: - if err.args[0] == ssl.SSL_ERROR_EOF: - return connection.close() - else: - raise - except socket.error as err: - # If the connection is closed immediately after it is created - # (as in a port scan), we can get one of several errors. - # wrap_socket makes an internal call to getpeername, - # which may return either EINVAL (Mac OS X) or ENOTCONN - # (Linux). If it returns ENOTCONN, this error is - # silently swallowed by the ssl module, so we need to - # catch another error later on (AttributeError in - # SSLIOStream._do_ssl_handshake). - # To test this behavior, try nmap with the -sT flag. 
- /~https://github.com/tornadoweb/tornado/pull/750 - if errno_from_exception(err) in (errno.ECONNABORTED, errno.EINVAL): - return connection.close() - else: - raise - try: - if self.ssl_options is not None: - stream = SSLIOStream(connection, io_loop=self.io_loop, - max_buffer_size=self.max_buffer_size, - read_chunk_size=self.read_chunk_size) - else: - stream = IOStream(connection, io_loop=self.io_loop, - max_buffer_size=self.max_buffer_size, - read_chunk_size=self.read_chunk_size) - self.handle_stream(stream, address) - except Exception: - app_log.error("Error in connection callback", exc_info=True) diff --git a/rosbridge_server/src/tornado/template.py b/rosbridge_server/src/tornado/template.py deleted file mode 100644 index 4dcec5d5f..000000000 --- a/rosbridge_server/src/tornado/template.py +++ /dev/null @@ -1,865 +0,0 @@ -#!/usr/bin/env python -# -# Copyright 2009 Facebook -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -"""A simple template system that compiles templates to Python code. - -Basic usage looks like:: - - t = template.Template("{{ myvalue }}") - print t.generate(myvalue="XXX") - -`Loader` is a class that loads templates from a root directory and caches -the compiled templates:: - - loader = template.Loader("/home/btaylor") - print loader.load("test.html").generate(myvalue="XXX") - -We compile all templates to raw Python. Error-reporting is currently... uh, -interesting. Syntax for the templates:: - - ### base.html - <html> - <head> - <title>{% block title %}Default title{% end %}</title> - </head> - <body> - <ul> - {% for student in students %} - {% block student %} - <li>{{ escape(student.name) }}</li> - {% end %} - {% end %} - </ul> - </body> - </html> - - ### bold.html - {% extends "base.html" %} - - {% block title %}A bolder title{% end %} - - {% block student %} - <li><span style="bold">{{ escape(student.name) }}</span></li> - {% end %} - -Unlike most other template systems, we do not put any restrictions on the -expressions you can include in your statements. ``if`` and ``for`` blocks get -translated exactly into Python, so you can do complex expressions like:: - - {% for student in [p for p in people if p.student and p.age > 23] %} - <li>{{ escape(student.name) }}</li> - {% end %} - -Translating directly to Python means you can apply functions to expressions -easily, like the ``escape()`` function in the examples above. You can pass -functions in to your template just like any other variable -(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`):: - - ### Python code - def add(x, y): - return x + y - template.execute(add=add) - - ### The template - {{ add(1, 2) }} - -We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`, -`.json_encode()`, and `.squeeze()` to all templates by default. - -Typical applications do not create `Template` or `Loader` instances by -hand, but instead use the `~.RequestHandler.render` and -`~.RequestHandler.render_string` methods of -`tornado.web.RequestHandler`, which load templates automatically based -on the ``template_path`` `.Application` setting. - -Variable names beginning with ``_tt_`` are reserved by the template -system and should not be used by application code. - -Syntax Reference ----------------- - -Template expressions are surrounded by double curly braces: ``{{ ... }}``. -The contents may be any python expression, which will be escaped according -to the current autoescape setting and inserted into the output. Other -template directives use ``{% %}``. These tags may be escaped as ``{{!`` -and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output. - -To comment out a section so that it is omitted from the output, surround it -with ``{# ... #}``. - -``{% apply *function* %}...{% end %}`` - Applies a function to the output of all template code between ``apply`` - and ``end``:: - - {% apply linkify %}{{name}} said: {{message}}{% end %} - - Note that as an implementation detail apply blocks are implemented - as nested functions and thus may interact strangely with variables - set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}`` - within loops. - -``{% autoescape *function* %}`` - Sets the autoescape mode for the current file. This does not affect - other files, even those referenced by ``{% include %}``. Note that - autoescaping can also be configured globally, at the `.Application` - or `Loader`.:: - - {% autoescape xhtml_escape %} - {% autoescape None %} - -``{% block *name* %}...{% end %}`` - Indicates a named, replaceable block for use with ``{% extends %}``. - Blocks in the parent template will be replaced with the contents of - the same-named block in a child template.:: - - <!-- base.html --> - <title>{% block title %}Default title{% end %}</title> - - <!-- mypage.html --> - {% extends "base.html" %} - {% block title %}My page title{% end %} - -``{% comment ... %}`` - A comment which will be removed from the template output. Note that - there is no ``{% end %}`` tag; the comment goes from the word ``comment`` - to the closing ``%}`` tag. - -``{% extends *filename* %}`` - Inherit from another template. Templates that use ``extends`` should - contain one or more ``block`` tags to replace content from the parent - template. Anything in the child template not contained in a ``block`` - tag will be ignored. For an example, see the ``{% block %}`` tag. - -``{% for *var* in *expr* %}...{% end %}`` - Same as the python ``for`` statement. ``{% break %}`` and - ``{% continue %}`` may be used inside the loop. - -``{% from *x* import *y* %}`` - Same as the python ``import`` statement. - -``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}`` - Conditional statement - outputs the first section whose condition is - true. 
(The ``elif`` and ``else`` sections are optional) - -``{% import *module* %}`` - Same as the python ``import`` statement. - -``{% include *filename* %}`` - Includes another template file. The included file can see all the local - variables as if it were copied directly to the point of the ``include`` - directive (the ``{% autoescape %}`` directive is an exception). - Alternately, ``{% module Template(filename, **kwargs) %}`` may be used - to include another template with an isolated namespace. - -``{% module *expr* %}`` - Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is - not escaped:: - - {% module Template("foo.html", arg=42) %} - - ``UIModules`` are a feature of the `tornado.web.RequestHandler` - class (and specifically its ``render`` method) and will not work - when the template system is used on its own in other contexts. - -``{% raw *expr* %}`` - Outputs the result of the given expression without autoescaping. - -``{% set *x* = *y* %}`` - Sets a local variable. - -``{% try %}...{% except %}...{% else %}...{% finally %}...{% end %}`` - Same as the python ``try`` statement. - -``{% while *condition* %}... {% end %}`` - Same as the python ``while`` statement. ``{% break %}`` and - ``{% continue %}`` may be used inside the loop. -""" - -from __future__ import absolute_import, division, print_function, with_statement - -import datetime -import linecache -import os.path -import posixpath -import re -import threading - -from tornado import escape -from tornado.log import app_log -from tornado.util import bytes_type, ObjectDict, exec_in, unicode_type - -try: - from cStringIO import StringIO # py2 -except ImportError: - from io import StringIO # py3 - -_DEFAULT_AUTOESCAPE = "xhtml_escape" -_UNSET = object() - - -class Template(object): - """A compiled template. - - We compile into Python from the given template_string. You can generate - the template from variables with generate(). - """ - # note that the constructor's signature is not extracted with - # autodoc because _UNSET looks like garbage. When changing - # this signature update website/sphinx/template.rst too. - def __init__(self, template_string, name="", loader=None, - compress_whitespace=None, autoescape=_UNSET): - self.name = name - if compress_whitespace is None: - compress_whitespace = name.endswith(".html") or \ - name.endswith(".js") - if autoescape is not _UNSET: - self.autoescape = autoescape - elif loader: - self.autoescape = loader.autoescape - else: - self.autoescape = _DEFAULT_AUTOESCAPE - self.namespace = loader.namespace if loader else {} - reader = _TemplateReader(name, escape.native_str(template_string)) - self.file = _File(self, _parse(reader, self)) - self.code = self._generate_python(loader, compress_whitespace) - self.loader = loader - try: - # Under python2.5, the fake filename used here must match - # the module name used in __name__ below. - # The dont_inherit flag prevents template.py's future imports - # from being applied to the generated code. 
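For orientation (not part of the diff): a minimal sketch of the compiled-template API deleted here. `Template` compiles the markup to a Python function and `generate()` runs it, returning utf-8 bytes; the template string and arguments are illustrative.

```python
from __future__ import print_function

from tornado import template

# Illustrative only: compile once, render with keyword arguments.
t = template.Template("Hello {{ name }}! You have {{ len(items) }} items.")
print(t.generate(name="world", items=[1, 2, 3]))
```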
- self.compiled = compile( - escape.to_unicode(self.code), - "%s.generated.py" % self.name.replace('.', '_'), - "exec", dont_inherit=True) - except Exception: - formatted_code = _format_code(self.code).rstrip() - app_log.error("%s code:\n%s", self.name, formatted_code) - raise - - def generate(self, **kwargs): - """Generate this template with the given arguments.""" - namespace = { - "escape": escape.xhtml_escape, - "xhtml_escape": escape.xhtml_escape, - "url_escape": escape.url_escape, - "json_encode": escape.json_encode, - "squeeze": escape.squeeze, - "linkify": escape.linkify, - "datetime": datetime, - "_tt_utf8": escape.utf8, # for internal use - "_tt_string_types": (unicode_type, bytes_type), - # __name__ and __loader__ allow the traceback mechanism to find - # the generated source code. - "__name__": self.name.replace('.', '_'), - "__loader__": ObjectDict(get_source=lambda name: self.code), - } - namespace.update(self.namespace) - namespace.update(kwargs) - exec_in(self.compiled, namespace) - execute = namespace["_tt_execute"] - # Clear the traceback module's cache of source data now that - # we've generated a new template (mainly for this module's - # unittests, where different tests reuse the same name). - linecache.clearcache() - return execute() - - def _generate_python(self, loader, compress_whitespace): - buffer = StringIO() - try: - # named_blocks maps from names to _NamedBlock objects - named_blocks = {} - ancestors = self._get_ancestors(loader) - ancestors.reverse() - for ancestor in ancestors: - ancestor.find_named_blocks(loader, named_blocks) - writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template, - compress_whitespace) - ancestors[0].generate(writer) - return buffer.getvalue() - finally: - buffer.close() - - def _get_ancestors(self, loader): - ancestors = [self.file] - for chunk in self.file.body.chunks: - if isinstance(chunk, _ExtendsBlock): - if not loader: - raise ParseError("{% extends %} block found, but no " - "template loader") - template = loader.load(chunk.name, self.name) - ancestors.extend(template._get_ancestors(loader)) - return ancestors - - -class BaseLoader(object): - """Base class for template loaders. - - You must use a template loader to use template constructs like - ``{% extends %}`` and ``{% include %}``. The loader caches all - templates after they are loaded the first time. - """ - def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None): - """``autoescape`` must be either None or a string naming a function - in the template namespace, such as "xhtml_escape". - """ - self.autoescape = autoescape - self.namespace = namespace or {} - self.templates = {} - # self.lock protects self.templates. It's a reentrant lock - # because templates may load other templates via `include` or - # `extends`. Note that thanks to the GIL this code would be safe - # even without the lock, but could lead to wasted work as multiple - # threads tried to compile the same template simultaneously. 
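One more hedged sketch (not part of the diff) of the loader side: `DictLoader` resolves `{% extends %}` between in-memory templates, exercising the named-block override machinery shown above. The template names and contents are assumptions for the demo.

```python
from __future__ import print_function

from tornado.template import DictLoader

# Illustrative only: page.html overrides the title block defined in base.html.
loader = DictLoader({
    "base.html": "<title>{% block title %}Default{% end %}</title>",
    "page.html": '{% extends "base.html" %}'
                 "{% block title %}My page{% end %}",
})
print(loader.load("page.html").generate())
```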
- self.lock = threading.RLock() - - def reset(self): - """Resets the cache of compiled templates.""" - with self.lock: - self.templates = {} - - def resolve_path(self, name, parent_path=None): - """Converts a possibly-relative path to absolute (used internally).""" - raise NotImplementedError() - - def load(self, name, parent_path=None): - """Loads a template.""" - name = self.resolve_path(name, parent_path=parent_path) - with self.lock: - if name not in self.templates: - self.templates[name] = self._create_template(name) - return self.templates[name] - - def _create_template(self, name): - raise NotImplementedError() - - -class Loader(BaseLoader): - """A template loader that loads from a single root directory. - """ - def __init__(self, root_directory, **kwargs): - super(Loader, self).__init__(**kwargs) - self.root = os.path.abspath(root_directory) - - def resolve_path(self, name, parent_path=None): - if parent_path and not parent_path.startswith("<") and \ - not parent_path.startswith("/") and \ - not name.startswith("/"): - current_path = os.path.join(self.root, parent_path) - file_dir = os.path.dirname(os.path.abspath(current_path)) - relative_path = os.path.abspath(os.path.join(file_dir, name)) - if relative_path.startswith(self.root): - name = relative_path[len(self.root) + 1:] - return name - - def _create_template(self, name): - path = os.path.join(self.root, name) - with open(path, "rb") as f: - template = Template(f.read(), name=name, loader=self) - return template - - -class DictLoader(BaseLoader): - """A template loader that loads from a dictionary.""" - def __init__(self, dict, **kwargs): - super(DictLoader, self).__init__(**kwargs) - self.dict = dict - - def resolve_path(self, name, parent_path=None): - if parent_path and not parent_path.startswith("<") and \ - not parent_path.startswith("/") and \ - not name.startswith("/"): - file_dir = posixpath.dirname(parent_path) - name = posixpath.normpath(posixpath.join(file_dir, name)) - return name - - def _create_template(self, name): - return Template(self.dict[name], name=name, loader=self) - - -class _Node(object): - def each_child(self): - return () - - def generate(self, writer): - raise NotImplementedError() - - def find_named_blocks(self, loader, named_blocks): - for child in self.each_child(): - child.find_named_blocks(loader, named_blocks) - - -class _File(_Node): - def __init__(self, template, body): - self.template = template - self.body = body - self.line = 0 - - def generate(self, writer): - writer.write_line("def _tt_execute():", self.line) - with writer.indent(): - writer.write_line("_tt_buffer = []", self.line) - writer.write_line("_tt_append = _tt_buffer.append", self.line) - self.body.generate(writer) - writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) - - def each_child(self): - return (self.body,) - - -class _ChunkList(_Node): - def __init__(self, chunks): - self.chunks = chunks - - def generate(self, writer): - for chunk in self.chunks: - chunk.generate(writer) - - def each_child(self): - return self.chunks - - -class _NamedBlock(_Node): - def __init__(self, name, body, template, line): - self.name = name - self.body = body - self.template = template - self.line = line - - def each_child(self): - return (self.body,) - - def generate(self, writer): - block = writer.named_blocks[self.name] - with writer.include(block.template, self.line): - block.body.generate(writer) - - def find_named_blocks(self, loader, named_blocks): - named_blocks[self.name] = self - _Node.find_named_blocks(self, loader, 
named_blocks) - - -class _ExtendsBlock(_Node): - def __init__(self, name): - self.name = name - - -class _IncludeBlock(_Node): - def __init__(self, name, reader, line): - self.name = name - self.template_name = reader.name - self.line = line - - def find_named_blocks(self, loader, named_blocks): - included = loader.load(self.name, self.template_name) - included.file.find_named_blocks(loader, named_blocks) - - def generate(self, writer): - included = writer.loader.load(self.name, self.template_name) - with writer.include(included, self.line): - included.file.body.generate(writer) - - -class _ApplyBlock(_Node): - def __init__(self, method, line, body=None): - self.method = method - self.line = line - self.body = body - - def each_child(self): - return (self.body,) - - def generate(self, writer): - method_name = "_tt_apply%d" % writer.apply_counter - writer.apply_counter += 1 - writer.write_line("def %s():" % method_name, self.line) - with writer.indent(): - writer.write_line("_tt_buffer = []", self.line) - writer.write_line("_tt_append = _tt_buffer.append", self.line) - self.body.generate(writer) - writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line) - writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % ( - self.method, method_name), self.line) - - -class _ControlBlock(_Node): - def __init__(self, statement, line, body=None): - self.statement = statement - self.line = line - self.body = body - - def each_child(self): - return (self.body,) - - def generate(self, writer): - writer.write_line("%s:" % self.statement, self.line) - with writer.indent(): - self.body.generate(writer) - # Just in case the body was empty - writer.write_line("pass", self.line) - - -class _IntermediateControlBlock(_Node): - def __init__(self, statement, line): - self.statement = statement - self.line = line - - def generate(self, writer): - # In case the previous block was empty - writer.write_line("pass", self.line) - writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1) - - -class _Statement(_Node): - def __init__(self, statement, line): - self.statement = statement - self.line = line - - def generate(self, writer): - writer.write_line(self.statement, self.line) - - -class _Expression(_Node): - def __init__(self, expression, line, raw=False): - self.expression = expression - self.line = line - self.raw = raw - - def generate(self, writer): - writer.write_line("_tt_tmp = %s" % self.expression, self.line) - writer.write_line("if isinstance(_tt_tmp, _tt_string_types):" - " _tt_tmp = _tt_utf8(_tt_tmp)", self.line) - writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line) - if not self.raw and writer.current_template.autoescape is not None: - # In python3 functions like xhtml_escape return unicode, - # so we have to convert to utf8 again. - writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" % - writer.current_template.autoescape, self.line) - writer.write_line("_tt_append(_tt_tmp)", self.line) - - -class _Module(_Expression): - def __init__(self, expression, line): - super(_Module, self).__init__("_tt_modules." + expression, line, - raw=True) - - -class _Text(_Node): - def __init__(self, value, line): - self.value = value - self.line = line - - def generate(self, writer): - value = self.value - - # Compress lots of white space to a single character. If the whitespace - # breaks a line, have it continue to break a line, but just with a - # single \n character - if writer.compress_whitespace and "
    " not in value:
    -            value = re.sub(r"([\t ]+)", " ", value)
    -            value = re.sub(r"(\s*\n\s*)", "\n", value)
    -
    -        if value:
    -            writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
    -
    -
    -class ParseError(Exception):
    -    """Raised for template syntax errors."""
    -    pass
    -
    -
    -class _CodeWriter(object):
    -    def __init__(self, file, named_blocks, loader, current_template,
    -                 compress_whitespace):
    -        self.file = file
    -        self.named_blocks = named_blocks
    -        self.loader = loader
    -        self.current_template = current_template
    -        self.compress_whitespace = compress_whitespace
    -        self.apply_counter = 0
    -        self.include_stack = []
    -        self._indent = 0
    -
    -    def indent_size(self):
    -        return self._indent
    -
    -    def indent(self):
    -        class Indenter(object):
    -            def __enter__(_):
    -                self._indent += 1
    -                return self
    -
    -            def __exit__(_, *args):
    -                assert self._indent > 0
    -                self._indent -= 1
    -
    -        return Indenter()
    -
    -    def include(self, template, line):
    -        self.include_stack.append((self.current_template, line))
    -        self.current_template = template
    -
    -        class IncludeTemplate(object):
    -            def __enter__(_):
    -                return self
    -
    -            def __exit__(_, *args):
    -                self.current_template = self.include_stack.pop()[0]
    -
    -        return IncludeTemplate()
    -
    -    def write_line(self, line, line_number, indent=None):
    -        if indent is None:
    -            indent = self._indent
    -        line_comment = '  # %s:%d' % (self.current_template.name, line_number)
    -        if self.include_stack:
    -            ancestors = ["%s:%d" % (tmpl.name, lineno)
    -                         for (tmpl, lineno) in self.include_stack]
    -            line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
    -        print("    " * indent + line + line_comment, file=self.file)
    -
    -
    -class _TemplateReader(object):
    -    def __init__(self, name, text):
    -        self.name = name
    -        self.text = text
    -        self.line = 1
    -        self.pos = 0
    -
    -    def find(self, needle, start=0, end=None):
    -        assert start >= 0, start
    -        pos = self.pos
    -        start += pos
    -        if end is None:
    -            index = self.text.find(needle, start)
    -        else:
    -            end += pos
    -            assert end >= start
    -            index = self.text.find(needle, start, end)
    -        if index != -1:
    -            index -= pos
    -        return index
    -
    -    def consume(self, count=None):
    -        if count is None:
    -            count = len(self.text) - self.pos
    -        newpos = self.pos + count
    -        self.line += self.text.count("\n", self.pos, newpos)
    -        s = self.text[self.pos:newpos]
    -        self.pos = newpos
    -        return s
    -
    -    def remaining(self):
    -        return len(self.text) - self.pos
    -
    -    def __len__(self):
    -        return self.remaining()
    -
    -    def __getitem__(self, key):
    -        if type(key) is slice:
    -            size = len(self)
    -            start, stop, step = key.indices(size)
    -            if start is None:
    -                start = self.pos
    -            else:
    -                start += self.pos
    -            if stop is not None:
    -                stop += self.pos
    -            return self.text[slice(start, stop, step)]
    -        elif key < 0:
    -            return self.text[key]
    -        else:
    -            return self.text[self.pos + key]
    -
    -    def __str__(self):
    -        return self.text[self.pos:]
    -
    -
    -def _format_code(code):
    -    lines = code.splitlines()
    -    format = "%%%dd  %%s\n" % len(repr(len(lines) + 1))
    -    return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
    -
    -
    -def _parse(reader, template, in_block=None, in_loop=None):
    -    body = _ChunkList([])
    -    while True:
    -        # Find next template directive
    -        curly = 0
    -        while True:
    -            curly = reader.find("{", curly)
    -            if curly == -1 or curly + 1 == reader.remaining():
    -                # EOF
    -                if in_block:
    -                    raise ParseError("Missing {%% end %%} block for %s" %
    -                                     in_block)
    -                body.chunks.append(_Text(reader.consume(), reader.line))
    -                return body
    -            # If the first curly brace is not the start of a special token,
    -            # start searching from the character after it
    -            if reader[curly + 1] not in ("{", "%", "#"):
    -                curly += 1
    -                continue
    -            # When there are more than 2 curlies in a row, use the
    -            # innermost ones.  This is useful when generating languages
    -            # like latex where curlies are also meaningful
    -            if (curly + 2 < reader.remaining() and
    -                    reader[curly + 1] == '{' and reader[curly + 2] == '{'):
    -                curly += 1
    -                continue
    -            break
    -
    -        # Append any text before the special token
    -        if curly > 0:
    -            cons = reader.consume(curly)
    -            body.chunks.append(_Text(cons, reader.line))
    -
    -        start_brace = reader.consume(2)
    -        line = reader.line
    -
    -        # Template directives may be escaped as "{{!" or "{%!".
    -        # In this case output the braces and consume the "!".
    -        # This is especially useful in conjunction with jquery templates,
    -        # which also use double braces.
    -        if reader.remaining() and reader[0] == "!":
    -            reader.consume(1)
    -            body.chunks.append(_Text(start_brace, line))
    -            continue
    -
    -        # Comment
    -        if start_brace == "{#":
    -            end = reader.find("#}")
    -            if end == -1:
    -                raise ParseError("Missing end expression #} on line %d" % line)
    -            contents = reader.consume(end).strip()
    -            reader.consume(2)
    -            continue
    -
    -        # Expression
    -        if start_brace == "{{":
    -            end = reader.find("}}")
    -            if end == -1:
    -                raise ParseError("Missing end expression }} on line %d" % line)
    -            contents = reader.consume(end).strip()
    -            reader.consume(2)
    -            if not contents:
    -                raise ParseError("Empty expression on line %d" % line)
    -            body.chunks.append(_Expression(contents, line))
    -            continue
    -
    -        # Block
    -        assert start_brace == "{%", start_brace
    -        end = reader.find("%}")
    -        if end == -1:
    -            raise ParseError("Missing end block %%} on line %d" % line)
    -        contents = reader.consume(end).strip()
    -        reader.consume(2)
    -        if not contents:
    -            raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
    -
    -        operator, space, suffix = contents.partition(" ")
    -        suffix = suffix.strip()
    -
    -        # Intermediate ("else", "elif", etc) blocks
    -        intermediate_blocks = {
    -            "else": set(["if", "for", "while", "try"]),
    -            "elif": set(["if"]),
    -            "except": set(["try"]),
    -            "finally": set(["try"]),
    -        }
    -        allowed_parents = intermediate_blocks.get(operator)
    -        if allowed_parents is not None:
    -            if not in_block:
    -                raise ParseError("%s outside %s block" %
    -                                 (operator, allowed_parents))
    -            if in_block not in allowed_parents:
    -                raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
    -            body.chunks.append(_IntermediateControlBlock(contents, line))
    -            continue
    -
    -        # End tag
    -        elif operator == "end":
    -            if not in_block:
    -                raise ParseError("Extra {%% end %%} block on line %d" % line)
    -            return body
    -
    -        elif operator in ("extends", "include", "set", "import", "from",
    -                          "comment", "autoescape", "raw", "module"):
    -            if operator == "comment":
    -                continue
    -            if operator == "extends":
    -                suffix = suffix.strip('"').strip("'")
    -                if not suffix:
    -                    raise ParseError("extends missing file path on line %d" % line)
    -                block = _ExtendsBlock(suffix)
    -            elif operator in ("import", "from"):
    -                if not suffix:
    -                    raise ParseError("import missing statement on line %d" % line)
    -                block = _Statement(contents, line)
    -            elif operator == "include":
    -                suffix = suffix.strip('"').strip("'")
    -                if not suffix:
    -                    raise ParseError("include missing file path on line %d" % line)
    -                block = _IncludeBlock(suffix, reader, line)
    -            elif operator == "set":
    -                if not suffix:
    -                    raise ParseError("set missing statement on line %d" % line)
    -                block = _Statement(suffix, line)
    -            elif operator == "autoescape":
    -                fn = suffix.strip()
    -                if fn == "None":
    -                    fn = None
    -                template.autoescape = fn
    -                continue
    -            elif operator == "raw":
    -                block = _Expression(suffix, line, raw=True)
    -            elif operator == "module":
    -                block = _Module(suffix, line)
    -            body.chunks.append(block)
    -            continue
    -
    -        elif operator in ("apply", "block", "try", "if", "for", "while"):
    -            # parse inner body recursively
    -            if operator in ("for", "while"):
    -                block_body = _parse(reader, template, operator, operator)
    -            elif operator == "apply":
    -                # apply creates a nested function so syntactically it's not
    -                # in the loop.
    -                block_body = _parse(reader, template, operator, None)
    -            else:
    -                block_body = _parse(reader, template, operator, in_loop)
    -
    -            if operator == "apply":
    -                if not suffix:
    -                    raise ParseError("apply missing method name on line %d" % line)
    -                block = _ApplyBlock(suffix, line, block_body)
    -            elif operator == "block":
    -                if not suffix:
    -                    raise ParseError("block missing name on line %d" % line)
    -                block = _NamedBlock(suffix, block_body, template, line)
    -            else:
    -                block = _ControlBlock(contents, line, block_body)
    -            body.chunks.append(block)
    -            continue
    -
    -        elif operator in ("break", "continue"):
    -            if not in_loop:
    -                raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
    -            body.chunks.append(_Statement(contents, line))
    -            continue
    -
    -        else:
    -            raise ParseError("unknown operator: %r" % operator)
    diff --git a/rosbridge_server/src/tornado/test/README b/rosbridge_server/src/tornado/test/README
    deleted file mode 100644
    index 2d6195d80..000000000
    --- a/rosbridge_server/src/tornado/test/README
    +++ /dev/null
    @@ -1,4 +0,0 @@
    -Test coverage is almost non-existent, but it's a start.  Be sure to
-set PYTHONPATH appropriately (generally to the root directory of your
    -tornado checkout) when running tests to make sure you're getting the
    -version of the tornado package that you expect.
    \ No newline at end of file
    diff --git a/rosbridge_server/src/tornado/test/__init__.py b/rosbridge_server/src/tornado/test/__init__.py
    deleted file mode 100644
    index e69de29bb..000000000
    diff --git a/rosbridge_server/src/tornado/test/__main__.py b/rosbridge_server/src/tornado/test/__main__.py
    deleted file mode 100644
    index 5953443b1..000000000
    --- a/rosbridge_server/src/tornado/test/__main__.py
    +++ /dev/null
    @@ -1,14 +0,0 @@
    -"""Shim to allow python -m tornado.test.
    -
    -This only works in python 2.7+.
    -"""
    -from __future__ import absolute_import, division, print_function, with_statement
    -
    -from tornado.test.runtests import all, main
    -
    -# tornado.testing.main autodiscovery relies on 'all' being present in
    -# the main module, so import it here even though it is not used directly.
    -# The following line prevents a pyflakes warning.
    -all = all
    -
    -main()
    diff --git a/rosbridge_server/src/tornado/test/auth_test.py b/rosbridge_server/src/tornado/test/auth_test.py
    deleted file mode 100644
    index 254e1ae13..000000000
    --- a/rosbridge_server/src/tornado/test/auth_test.py
    +++ /dev/null
    @@ -1,451 +0,0 @@
-# These tests do not currently do much to verify the correct implementation
-# of the openid/oauth protocols; they just exercise the major code paths
-# and ensure that nothing blows up (e.g. with unicode/bytes issues in
-# python 3)
    -
    -
    -from __future__ import absolute_import, division, print_function, with_statement
    -from tornado.auth import OpenIdMixin, OAuthMixin, OAuth2Mixin, TwitterMixin, GoogleMixin, AuthError
    -from tornado.concurrent import Future
    -from tornado.escape import json_decode
    -from tornado import gen
    -from tornado.log import gen_log
    -from tornado.testing import AsyncHTTPTestCase, ExpectLog
    -from tornado.util import u
    -from tornado.web import RequestHandler, Application, asynchronous, HTTPError
    -
    -
    -class OpenIdClientLoginHandler(RequestHandler, OpenIdMixin):
    -    def initialize(self, test):
    -        self._OPENID_ENDPOINT = test.get_url('/openid/server/authenticate')
    -
    -    @asynchronous
    -    def get(self):
    -        if self.get_argument('openid.mode', None):
    -            self.get_authenticated_user(
    -                self.on_user, http_client=self.settings['http_client'])
    -            return
    -        res = self.authenticate_redirect()
    -        assert isinstance(res, Future)
    -        assert res.done()
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -
    -class OpenIdServerAuthenticateHandler(RequestHandler):
    -    def post(self):
    -        if self.get_argument('openid.mode') != 'check_authentication':
    -            raise Exception("incorrect openid.mode %r")
    -        self.write('is_valid:true')
    -
    -
    -class OAuth1ClientLoginHandler(RequestHandler, OAuthMixin):
    -    def initialize(self, test, version):
    -        self._OAUTH_VERSION = version
    -        self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/oauth1/server/access_token')
    -
    -    def _oauth_consumer_token(self):
    -        return dict(key='asdf', secret='qwer')
    -
    -    @asynchronous
    -    def get(self):
    -        if self.get_argument('oauth_token', None):
    -            self.get_authenticated_user(
    -                self.on_user, http_client=self.settings['http_client'])
    -            return
    -        res = self.authorize_redirect(http_client=self.settings['http_client'])
    -        assert isinstance(res, Future)
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -    def _oauth_get_user(self, access_token, callback):
    -        if self.get_argument('fail_in_get_user', None):
    -            raise Exception("failing in get_user")
    -        if access_token != dict(key='uiop', secret='5678'):
    -            raise Exception("incorrect access token %r" % access_token)
    -        callback(dict(email='foo@example.com'))
    -
    -
    -class OAuth1ClientLoginCoroutineHandler(OAuth1ClientLoginHandler):
    -    """Replaces OAuth1ClientLoginCoroutineHandler's get() with a coroutine."""
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument('oauth_token', None):
    -            # Ensure that any exceptions are set on the returned Future,
    -            # not simply thrown into the surrounding StackContext.
    -            try:
    -                yield self.get_authenticated_user()
    -            except Exception as e:
    -                self.set_status(503)
    -                self.write("got exception: %s" % e)
    -        else:
    -            yield self.authorize_redirect()
    -
    -
    -class OAuth1ClientRequestParametersHandler(RequestHandler, OAuthMixin):
    -    def initialize(self, version):
    -        self._OAUTH_VERSION = version
    -
    -    def _oauth_consumer_token(self):
    -        return dict(key='asdf', secret='qwer')
    -
    -    def get(self):
    -        params = self._oauth_request_parameters(
    -            'http://www.example.com/api/asdf',
    -            dict(key='uiop', secret='5678'),
    -            parameters=dict(foo='bar'))
    -        self.write(params)
    -
    -
    -class OAuth1ServerRequestTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write('oauth_token=zxcv&oauth_token_secret=1234')
    -
    -
    -class OAuth1ServerAccessTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write('oauth_token=uiop&oauth_token_secret=5678')
    -
    -
    -class OAuth2ClientLoginHandler(RequestHandler, OAuth2Mixin):
    -    def initialize(self, test):
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth2/server/authorize')
    -
    -    def get(self):
    -        res = self.authorize_redirect()
    -        assert isinstance(res, Future)
    -        assert res.done()
    -
    -
    -class TwitterClientHandler(RequestHandler, TwitterMixin):
    -    def initialize(self, test):
    -        self._OAUTH_REQUEST_TOKEN_URL = test.get_url('/oauth1/server/request_token')
    -        self._OAUTH_ACCESS_TOKEN_URL = test.get_url('/twitter/server/access_token')
    -        self._OAUTH_AUTHORIZE_URL = test.get_url('/oauth1/server/authorize')
    -        self._TWITTER_BASE_URL = test.get_url('/twitter/api')
    -
    -    def get_auth_http_client(self):
    -        return self.settings['http_client']
    -
    -
    -class TwitterClientLoginHandler(TwitterClientHandler):
    -    @asynchronous
    -    def get(self):
    -        if self.get_argument("oauth_token", None):
    -            self.get_authenticated_user(self.on_user)
    -            return
    -        self.authorize_redirect()
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -
    -class TwitterClientLoginGenEngineHandler(TwitterClientHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        if self.get_argument("oauth_token", None):
    -            user = yield self.get_authenticated_user()
    -            self.finish(user)
    -        else:
    -            # Old style: with @gen.engine we can ignore the Future from
    -            # authorize_redirect.
    -            self.authorize_redirect()
    -
    -
    -class TwitterClientLoginGenCoroutineHandler(TwitterClientHandler):
    -    @gen.coroutine
    -    def get(self):
    -        if self.get_argument("oauth_token", None):
    -            user = yield self.get_authenticated_user()
    -            self.finish(user)
    -        else:
    -            # New style: with @gen.coroutine the result must be yielded
    -            # or else the request will be auto-finished too soon.
    -            yield self.authorize_redirect()
    -
    -
    -class TwitterClientShowUserHandler(TwitterClientHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        # TODO: would be nice to go through the login flow instead of
    -        # cheating with a hard-coded access token.
    -        response = yield gen.Task(self.twitter_request,
    -                                  '/users/show/%s' % self.get_argument('name'),
    -                                  access_token=dict(key='hjkl', secret='vbnm'))
    -        if response is None:
    -            self.set_status(500)
    -            self.finish('error from twitter request')
    -        else:
    -            self.finish(response)
    -
    -
    -class TwitterClientShowUserFutureHandler(TwitterClientHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        try:
    -            response = yield self.twitter_request(
    -                '/users/show/%s' % self.get_argument('name'),
    -                access_token=dict(key='hjkl', secret='vbnm'))
    -        except AuthError as e:
    -            self.set_status(500)
    -            self.finish(str(e))
    -            return
    -        assert response is not None
    -        self.finish(response)
    -
    -
    -class TwitterServerAccessTokenHandler(RequestHandler):
    -    def get(self):
    -        self.write('oauth_token=hjkl&oauth_token_secret=vbnm&screen_name=foo')
    -
    -
    -class TwitterServerShowUserHandler(RequestHandler):
    -    def get(self, screen_name):
    -        if screen_name == 'error':
    -            raise HTTPError(500)
    -        assert 'oauth_nonce' in self.request.arguments
    -        assert 'oauth_timestamp' in self.request.arguments
    -        assert 'oauth_signature' in self.request.arguments
    -        assert self.get_argument('oauth_consumer_key') == 'test_twitter_consumer_key'
    -        assert self.get_argument('oauth_signature_method') == 'HMAC-SHA1'
    -        assert self.get_argument('oauth_version') == '1.0'
    -        assert self.get_argument('oauth_token') == 'hjkl'
    -        self.write(dict(screen_name=screen_name, name=screen_name.capitalize()))
    -
    -
    -class TwitterServerVerifyCredentialsHandler(RequestHandler):
    -    def get(self):
    -        assert 'oauth_nonce' in self.request.arguments
    -        assert 'oauth_timestamp' in self.request.arguments
    -        assert 'oauth_signature' in self.request.arguments
    -        assert self.get_argument('oauth_consumer_key') == 'test_twitter_consumer_key'
    -        assert self.get_argument('oauth_signature_method') == 'HMAC-SHA1'
    -        assert self.get_argument('oauth_version') == '1.0'
    -        assert self.get_argument('oauth_token') == 'hjkl'
    -        self.write(dict(screen_name='foo', name='Foo'))
    -
    -
    -class GoogleOpenIdClientLoginHandler(RequestHandler, GoogleMixin):
    -    def initialize(self, test):
    -        self._OPENID_ENDPOINT = test.get_url('/openid/server/authenticate')
    -
    -    @asynchronous
    -    def get(self):
    -        if self.get_argument("openid.mode", None):
    -            self.get_authenticated_user(self.on_user)
    -            return
    -        res = self.authenticate_redirect()
    -        assert isinstance(res, Future)
    -        assert res.done()
    -
    -    def on_user(self, user):
    -        if user is None:
    -            raise Exception("user is None")
    -        self.finish(user)
    -
    -    def get_auth_http_client(self):
    -        return self.settings['http_client']
    -
    -
    -class AuthTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application(
    -            [
    -                # test endpoints
    -                ('/openid/client/login', OpenIdClientLoginHandler, dict(test=self)),
    -                ('/oauth10/client/login', OAuth1ClientLoginHandler,
    -                 dict(test=self, version='1.0')),
    -                ('/oauth10/client/request_params',
    -                 OAuth1ClientRequestParametersHandler,
    -                 dict(version='1.0')),
    -                ('/oauth10a/client/login', OAuth1ClientLoginHandler,
    -                 dict(test=self, version='1.0a')),
    -                ('/oauth10a/client/login_coroutine',
    -                 OAuth1ClientLoginCoroutineHandler,
    -                 dict(test=self, version='1.0a')),
    -                ('/oauth10a/client/request_params',
    -                 OAuth1ClientRequestParametersHandler,
    -                 dict(version='1.0a')),
    -                ('/oauth2/client/login', OAuth2ClientLoginHandler, dict(test=self)),
    -
    -                ('/twitter/client/login', TwitterClientLoginHandler, dict(test=self)),
    -                ('/twitter/client/login_gen_engine', TwitterClientLoginGenEngineHandler, dict(test=self)),
    -                ('/twitter/client/login_gen_coroutine', TwitterClientLoginGenCoroutineHandler, dict(test=self)),
    -                ('/twitter/client/show_user', TwitterClientShowUserHandler, dict(test=self)),
    -                ('/twitter/client/show_user_future', TwitterClientShowUserFutureHandler, dict(test=self)),
    -                ('/google/client/openid_login', GoogleOpenIdClientLoginHandler, dict(test=self)),
    -
    -                # simulated servers
    -                ('/openid/server/authenticate', OpenIdServerAuthenticateHandler),
    -                ('/oauth1/server/request_token', OAuth1ServerRequestTokenHandler),
    -                ('/oauth1/server/access_token', OAuth1ServerAccessTokenHandler),
    -
    -                ('/twitter/server/access_token', TwitterServerAccessTokenHandler),
    -                (r'/twitter/api/users/show/(.*)\.json', TwitterServerShowUserHandler),
    -                (r'/twitter/api/account/verify_credentials\.json', TwitterServerVerifyCredentialsHandler),
    -            ],
    -            http_client=self.http_client,
    -            twitter_consumer_key='test_twitter_consumer_key',
    -            twitter_consumer_secret='test_twitter_consumer_secret')
    -
    -    def test_openid_redirect(self):
    -        response = self.fetch('/openid/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(
    -            '/openid/server/authenticate?' in response.headers['Location'])
    -
    -    def test_openid_get_user(self):
    -        response = self.fetch('/openid/client/login?openid.mode=blah&openid.ns.ax=http://openid.net/srv/ax/1.0&openid.ax.type.email=http://axschema.org/contact/email&openid.ax.value.email=foo@example.com')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed["email"], "foo@example.com")
    -
    -    def test_oauth10_redirect(self):
    -        response = self.fetch('/oauth10/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_oauth10_get_user(self):
    -        response = self.fetch(
    -            '/oauth10/client/login?oauth_token=zxcv',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['email'], 'foo@example.com')
    -        self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    -
    -    def test_oauth10_request_parameters(self):
    -        response = self.fetch('/oauth10/client/request_params')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
    -        self.assertEqual(parsed['oauth_token'], 'uiop')
    -        self.assertTrue('oauth_nonce' in parsed)
    -        self.assertTrue('oauth_signature' in parsed)
    -
    -    def test_oauth10a_redirect(self):
    -        response = self.fetch('/oauth10a/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_oauth10a_get_user(self):
    -        response = self.fetch(
    -            '/oauth10a/client/login?oauth_token=zxcv',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['email'], 'foo@example.com')
    -        self.assertEqual(parsed['access_token'], dict(key='uiop', secret='5678'))
    -
    -    def test_oauth10a_request_parameters(self):
    -        response = self.fetch('/oauth10a/client/request_params')
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed['oauth_consumer_key'], 'asdf')
    -        self.assertEqual(parsed['oauth_token'], 'uiop')
    -        self.assertTrue('oauth_nonce' in parsed)
    -        self.assertTrue('oauth_signature' in parsed)
    -
    -    def test_oauth10a_get_user_coroutine_exception(self):
    -        response = self.fetch(
    -            '/oauth10a/client/login_coroutine?oauth_token=zxcv&fail_in_get_user=true',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        self.assertEqual(response.code, 503)
    -
    -    def test_oauth2_redirect(self):
    -        response = self.fetch('/oauth2/client/login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue('/oauth2/server/authorize?' in response.headers['Location'])
    -
    -    def base_twitter_redirect(self, url):
    -        # Same as test_oauth10a_redirect
    -        response = self.fetch(url, follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(response.headers['Location'].endswith(
    -            '/oauth1/server/authorize?oauth_token=zxcv'))
    -        # the cookie is base64('zxcv')|base64('1234')
    -        self.assertTrue(
    -            '_oauth_request_token="enhjdg==|MTIzNA=="' in response.headers['Set-Cookie'],
    -            response.headers['Set-Cookie'])
    -
    -    def test_twitter_redirect(self):
    -        self.base_twitter_redirect('/twitter/client/login')
    -
    -    def test_twitter_redirect_gen_engine(self):
    -        self.base_twitter_redirect('/twitter/client/login_gen_engine')
    -
    -    def test_twitter_redirect_gen_coroutine(self):
    -        self.base_twitter_redirect('/twitter/client/login_gen_coroutine')
    -
    -    def test_twitter_get_user(self):
    -        response = self.fetch(
    -            '/twitter/client/login?oauth_token=zxcv',
    -            headers={'Cookie': '_oauth_request_token=enhjdg==|MTIzNA=='})
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed,
    -                         {u('access_token'): {u('key'): u('hjkl'),
    -                                              u('screen_name'): u('foo'),
    -                                              u('secret'): u('vbnm')},
    -                          u('name'): u('Foo'),
    -                          u('screen_name'): u('foo'),
    -                          u('username'): u('foo')})
    -
    -    def test_twitter_show_user(self):
    -        response = self.fetch('/twitter/client/show_user?name=somebody')
    -        response.rethrow()
    -        self.assertEqual(json_decode(response.body),
    -                         {'name': 'Somebody', 'screen_name': 'somebody'})
    -
    -    def test_twitter_show_user_error(self):
    -        with ExpectLog(gen_log, 'Error response HTTP 500'):
    -            response = self.fetch('/twitter/client/show_user?name=error')
    -        self.assertEqual(response.code, 500)
    -        self.assertEqual(response.body, b'error from twitter request')
    -
    -    def test_twitter_show_user_future(self):
    -        response = self.fetch('/twitter/client/show_user_future?name=somebody')
    -        response.rethrow()
    -        self.assertEqual(json_decode(response.body),
    -                         {'name': 'Somebody', 'screen_name': 'somebody'})
    -
    -    def test_twitter_show_user_future_error(self):
    -        response = self.fetch('/twitter/client/show_user_future?name=error')
    -        self.assertEqual(response.code, 500)
    -        self.assertIn(b'Error response HTTP 500', response.body)
    -
    -    def test_google_redirect(self):
    -        # same as test_openid_redirect
    -        response = self.fetch('/google/client/openid_login', follow_redirects=False)
    -        self.assertEqual(response.code, 302)
    -        self.assertTrue(
    -            '/openid/server/authenticate?' in response.headers['Location'])
    -
    -    def test_google_get_user(self):
    -        response = self.fetch('/google/client/openid_login?openid.mode=blah&openid.ns.ax=http://openid.net/srv/ax/1.0&openid.ax.type.email=http://axschema.org/contact/email&openid.ax.value.email=foo@example.com', follow_redirects=False)
    -        response.rethrow()
    -        parsed = json_decode(response.body)
    -        self.assertEqual(parsed["email"], "foo@example.com")
    diff --git a/rosbridge_server/src/tornado/test/concurrent_test.py b/rosbridge_server/src/tornado/test/concurrent_test.py
    deleted file mode 100644
    index 5e93ad6a4..000000000
    --- a/rosbridge_server/src/tornado/test/concurrent_test.py
    +++ /dev/null
    @@ -1,336 +0,0 @@
    -#!/usr/bin/env python
    -#
    -# Copyright 2012 Facebook
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License"); you may
    -# not use this file except in compliance with the License. You may obtain
    -# a copy of the License at
    -#
    -#     http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    -# License for the specific language governing permissions and limitations
    -# under the License.
    -from __future__ import absolute_import, division, print_function, with_statement
    -
    -import logging
    -import re
    -import socket
    -import sys
    -import traceback
    -
    -from tornado.concurrent import Future, return_future, ReturnValueIgnoredError
    -from tornado.escape import utf8, to_unicode
    -from tornado import gen
    -from tornado.iostream import IOStream
    -from tornado import stack_context
    -from tornado.tcpserver import TCPServer
    -from tornado.testing import AsyncTestCase, LogTrapTestCase, bind_unused_port, gen_test
    -
    -
    -try:
    -    from concurrent import futures
    -except ImportError:
    -    futures = None
    -
    -
    -class ReturnFutureTest(AsyncTestCase):
    -    @return_future
    -    def sync_future(self, callback):
    -        callback(42)
    -
    -    @return_future
    -    def async_future(self, callback):
    -        self.io_loop.add_callback(callback, 42)
    -
    -    @return_future
    -    def immediate_failure(self, callback):
    -        1 / 0
    -
    -    @return_future
    -    def delayed_failure(self, callback):
    -        self.io_loop.add_callback(lambda: 1 / 0)
    -
    -    @return_future
    -    def return_value(self, callback):
    -        # Note that the result of both running the callback and returning
    -        # a value (or raising an exception) is unspecified; with current
    -        # implementations the last event prior to callback resolution wins.
    -        return 42
    -
    -    @return_future
    -    def no_result_future(self, callback):
    -        callback()
    -
    -    def test_immediate_failure(self):
    -        with self.assertRaises(ZeroDivisionError):
    -            # The caller sees the error just like a normal function.
    -            self.immediate_failure(callback=self.stop)
    -        # The callback is not run because the function failed synchronously.
    -        self.io_loop.add_timeout(self.io_loop.time() + 0.05, self.stop)
    -        result = self.wait()
    -        self.assertIs(result, None)
    -
    -    def test_return_value(self):
    -        with self.assertRaises(ReturnValueIgnoredError):
    -            self.return_value(callback=self.stop)
    -
    -    def test_callback_kw(self):
    -        future = self.sync_future(callback=self.stop)
    -        result = self.wait()
    -        self.assertEqual(result, 42)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_callback_positional(self):
-        # When the callback is passed in positionally, return_future shouldn't
-        # add another callback in the kwargs.
    -        future = self.sync_future(self.stop)
    -        result = self.wait()
    -        self.assertEqual(result, 42)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_no_callback(self):
    -        future = self.sync_future()
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_none_callback_kw(self):
    -        # explicitly pass None as callback
    -        future = self.sync_future(callback=None)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_none_callback_pos(self):
    -        future = self.sync_future(None)
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_async_future(self):
    -        future = self.async_future()
    -        self.assertFalse(future.done())
    -        self.io_loop.add_future(future, self.stop)
    -        future2 = self.wait()
    -        self.assertIs(future, future2)
    -        self.assertEqual(future.result(), 42)
    -
    -    @gen_test
    -    def test_async_future_gen(self):
    -        result = yield self.async_future()
    -        self.assertEqual(result, 42)
    -
    -    def test_delayed_failure(self):
    -        future = self.delayed_failure()
    -        self.io_loop.add_future(future, self.stop)
    -        future2 = self.wait()
    -        self.assertIs(future, future2)
    -        with self.assertRaises(ZeroDivisionError):
    -            future.result()
    -
    -    def test_kw_only_callback(self):
    -        @return_future
    -        def f(**kwargs):
    -            kwargs['callback'](42)
    -        future = f()
    -        self.assertEqual(future.result(), 42)
    -
    -    def test_error_in_callback(self):
    -        self.sync_future(callback=lambda future: 1 / 0)
    -        # The exception gets caught by our StackContext and will be re-raised
    -        # when we wait.
    -        self.assertRaises(ZeroDivisionError, self.wait)
    -
    -    def test_no_result_future(self):
    -        future = self.no_result_future(self.stop)
    -        result = self.wait()
    -        self.assertIs(result, None)
    -        # result of this future is undefined, but not an error
    -        future.result()
    -
    -    def test_no_result_future_callback(self):
    -        future = self.no_result_future(callback=lambda: self.stop())
    -        result = self.wait()
    -        self.assertIs(result, None)
    -        future.result()
    -
    -    @gen_test
    -    def test_future_traceback(self):
    -        @return_future
    -        @gen.engine
    -        def f(callback):
    -            yield gen.Task(self.io_loop.add_callback)
    -            try:
    -                1 / 0
    -            except ZeroDivisionError:
    -                self.expected_frame = traceback.extract_tb(
    -                    sys.exc_info()[2], limit=1)[0]
    -                raise
    -        try:
    -            yield f()
    -            self.fail("didn't get expected exception")
    -        except ZeroDivisionError:
    -            tb = traceback.extract_tb(sys.exc_info()[2])
    -            self.assertIn(self.expected_frame, tb)
    -
-# The following series of classes demonstrates and tests various styles
-# of use, with and without generators and futures.
    -
    -
    -class CapServer(TCPServer):
    -    def handle_stream(self, stream, address):
    -        logging.info("handle_stream")
    -        self.stream = stream
    -        self.stream.read_until(b"\n", self.handle_read)
    -
    -    def handle_read(self, data):
    -        logging.info("handle_read")
    -        data = to_unicode(data)
    -        if data == data.upper():
    -            self.stream.write(b"error\talready capitalized\n")
    -        else:
    -            # data already has \n
    -            self.stream.write(utf8("ok\t%s" % data.upper()))
    -        self.stream.close()
    -
    -
    -class CapError(Exception):
    -    pass
    -
    -
    -class BaseCapClient(object):
    -    def __init__(self, port, io_loop):
    -        self.port = port
    -        self.io_loop = io_loop
    -
    -    def process_response(self, data):
    -        status, message = re.match('(.*)\t(.*)\n', to_unicode(data)).groups()
    -        if status == 'ok':
    -            return message
    -        else:
    -            raise CapError(message)
    -
    -
    -class ManualCapClient(BaseCapClient):
    -    def capitalize(self, request_data, callback=None):
    -        logging.info("capitalize")
    -        self.request_data = request_data
    -        self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
    -        self.stream.connect(('127.0.0.1', self.port),
    -                            callback=self.handle_connect)
    -        self.future = Future()
    -        if callback is not None:
    -            self.future.add_done_callback(
    -                stack_context.wrap(lambda future: callback(future.result())))
    -        return self.future
    -
    -    def handle_connect(self):
    -        logging.info("handle_connect")
    -        self.stream.write(utf8(self.request_data + "\n"))
    -        self.stream.read_until(b'\n', callback=self.handle_read)
    -
    -    def handle_read(self, data):
    -        logging.info("handle_read")
    -        self.stream.close()
    -        try:
    -            self.future.set_result(self.process_response(data))
    -        except CapError as e:
    -            self.future.set_exception(e)
    -
    -
    -class DecoratorCapClient(BaseCapClient):
    -    @return_future
    -    def capitalize(self, request_data, callback):
    -        logging.info("capitalize")
    -        self.request_data = request_data
    -        self.stream = IOStream(socket.socket(), io_loop=self.io_loop)
    -        self.stream.connect(('127.0.0.1', self.port),
    -                            callback=self.handle_connect)
    -        self.callback = callback
    -
    -    def handle_connect(self):
    -        logging.info("handle_connect")
    -        self.stream.write(utf8(self.request_data + "\n"))
    -        self.stream.read_until(b'\n', callback=self.handle_read)
    -
    -    def handle_read(self, data):
    -        logging.info("handle_read")
    -        self.stream.close()
    -        self.callback(self.process_response(data))
    -
    -
    -class GeneratorCapClient(BaseCapClient):
    -    @return_future
    -    @gen.engine
    -    def capitalize(self, request_data, callback):
    -        logging.info('capitalize')
    -        stream = IOStream(socket.socket(), io_loop=self.io_loop)
    -        logging.info('connecting')
    -        yield gen.Task(stream.connect, ('127.0.0.1', self.port))
    -        stream.write(utf8(request_data + '\n'))
    -        logging.info('reading')
    -        data = yield gen.Task(stream.read_until, b'\n')
    -        logging.info('returning')
    -        stream.close()
    -        callback(self.process_response(data))
    -
    -
    -class ClientTestMixin(object):
    -    def setUp(self):
    -        super(ClientTestMixin, self).setUp()
    -        self.server = CapServer(io_loop=self.io_loop)
    -        sock, port = bind_unused_port()
    -        self.server.add_sockets([sock])
    -        self.client = self.client_class(io_loop=self.io_loop, port=port)
    -
    -    def tearDown(self):
    -        self.server.stop()
    -        super(ClientTestMixin, self).tearDown()
    -
    -    def test_callback(self):
    -        self.client.capitalize("hello", callback=self.stop)
    -        result = self.wait()
    -        self.assertEqual(result, "HELLO")
    -
    -    def test_callback_error(self):
    -        self.client.capitalize("HELLO", callback=self.stop)
    -        self.assertRaisesRegexp(CapError, "already capitalized", self.wait)
    -
    -    def test_future(self):
    -        future = self.client.capitalize("hello")
    -        self.io_loop.add_future(future, self.stop)
    -        self.wait()
    -        self.assertEqual(future.result(), "HELLO")
    -
    -    def test_future_error(self):
    -        future = self.client.capitalize("HELLO")
    -        self.io_loop.add_future(future, self.stop)
    -        self.wait()
    -        self.assertRaisesRegexp(CapError, "already capitalized", future.result)
    -
    -    def test_generator(self):
    -        @gen.engine
    -        def f():
    -            result = yield self.client.capitalize("hello")
    -            self.assertEqual(result, "HELLO")
    -            self.stop()
    -        f()
    -        self.wait()
    -
    -    def test_generator_error(self):
    -        @gen.engine
    -        def f():
    -            with self.assertRaisesRegexp(CapError, "already capitalized"):
    -                yield self.client.capitalize("HELLO")
    -            self.stop()
    -        f()
    -        self.wait()
    -
    -
    -class ManualClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
    -    client_class = ManualCapClient
    -
    -
    -class DecoratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
    -    client_class = DecoratorCapClient
    -
    -
    -class GeneratorClientTest(ClientTestMixin, AsyncTestCase, LogTrapTestCase):
    -    client_class = GeneratorCapClient
    diff --git a/rosbridge_server/src/tornado/test/csv_translations/fr_FR.csv b/rosbridge_server/src/tornado/test/csv_translations/fr_FR.csv
    deleted file mode 100644
    index 6321b6e7c..000000000
    --- a/rosbridge_server/src/tornado/test/csv_translations/fr_FR.csv
    +++ /dev/null
    @@ -1 +0,0 @@
    -"school","école"
    diff --git a/rosbridge_server/src/tornado/test/curl_httpclient_test.py b/rosbridge_server/src/tornado/test/curl_httpclient_test.py
    deleted file mode 100644
    index 3873cf1e3..000000000
    --- a/rosbridge_server/src/tornado/test/curl_httpclient_test.py
    +++ /dev/null
    @@ -1,122 +0,0 @@
    -from __future__ import absolute_import, division, print_function, with_statement
    -
    -from hashlib import md5
    -
    -from tornado.escape import utf8
    -from tornado.httpclient import HTTPRequest
    -from tornado.stack_context import ExceptionStackContext
    -from tornado.testing import AsyncHTTPTestCase
    -from tornado.test import httpclient_test
    -from tornado.test.util import unittest
    -from tornado.web import Application, RequestHandler
    -
    -try:
    -    import pycurl
    -except ImportError:
    -    pycurl = None
    -
    -if pycurl is not None:
    -    from tornado.curl_httpclient import CurlAsyncHTTPClient
    -
    -
    -@unittest.skipIf(pycurl is None, "pycurl module not present")
    -class CurlHTTPClientCommonTestCase(httpclient_test.HTTPClientCommonTestCase):
    -    def get_http_client(self):
    -        client = CurlAsyncHTTPClient(io_loop=self.io_loop,
    -                                     defaults=dict(allow_ipv6=False))
    -        # make sure AsyncHTTPClient magic doesn't give us the wrong class
    -        self.assertTrue(isinstance(client, CurlAsyncHTTPClient))
    -        return client
    -
    -
    -class DigestAuthHandler(RequestHandler):
    -    def get(self):
    -        realm = 'test'
    -        opaque = 'asdf'
    -        # Real implementations would use a random nonce.
    -        nonce = "1234"
    -        username = 'foo'
    -        password = 'bar'
    -
    -        auth_header = self.request.headers.get('Authorization', None)
    -        if auth_header is not None:
    -            auth_mode, params = auth_header.split(' ', 1)
    -            assert auth_mode == 'Digest'
    -            param_dict = {}
    -            for pair in params.split(','):
    -                k, v = pair.strip().split('=', 1)
    -                if v[0] == '"' and v[-1] == '"':
    -                    v = v[1:-1]
    -                param_dict[k] = v
    -            assert param_dict['realm'] == realm
    -            assert param_dict['opaque'] == opaque
    -            assert param_dict['nonce'] == nonce
    -            assert param_dict['username'] == username
    -            assert param_dict['uri'] == self.request.path
    -            h1 = md5(utf8('%s:%s:%s' % (username, realm, password))).hexdigest()
    -            h2 = md5(utf8('%s:%s' % (self.request.method,
    -                                     self.request.path))).hexdigest()
    -            digest = md5(utf8('%s:%s:%s' % (h1, nonce, h2))).hexdigest()
    -            if digest == param_dict['response']:
    -                self.write('ok')
    -            else:
    -                self.write('fail')
    -        else:
    -            self.set_status(401)
    -            self.set_header('WWW-Authenticate',
    -                            'Digest realm="%s", nonce="%s", opaque="%s"' %
    -                            (realm, nonce, opaque))
    -
    -
    -class CustomReasonHandler(RequestHandler):
    -    def get(self):
    -        self.set_status(200, "Custom reason")
    -
    -
    -class CustomFailReasonHandler(RequestHandler):
    -    def get(self):
    -        self.set_status(400, "Custom reason")
    -
    -
    -@unittest.skipIf(pycurl is None, "pycurl module not present")
    -class CurlHTTPClientTestCase(AsyncHTTPTestCase):
    -    def setUp(self):
    -        super(CurlHTTPClientTestCase, self).setUp()
    -        self.http_client = CurlAsyncHTTPClient(self.io_loop,
    -                                               defaults=dict(allow_ipv6=False))
    -
    -    def get_app(self):
    -        return Application([
    -            ('/digest', DigestAuthHandler),
    -            ('/custom_reason', CustomReasonHandler),
    -            ('/custom_fail_reason', CustomFailReasonHandler),
    -        ])
    -
    -    def test_prepare_curl_callback_stack_context(self):
    -        exc_info = []
    -
    -        def error_handler(typ, value, tb):
    -            exc_info.append((typ, value, tb))
    -            self.stop()
    -            return True
    -
    -        with ExceptionStackContext(error_handler):
    -            request = HTTPRequest(self.get_url('/'),
    -                                  prepare_curl_callback=lambda curl: 1 / 0)
    -        self.http_client.fetch(request, callback=self.stop)
    -        self.wait()
    -        self.assertEqual(1, len(exc_info))
    -        self.assertIs(exc_info[0][0], ZeroDivisionError)
    -
    -    def test_digest_auth(self):
    -        response = self.fetch('/digest', auth_mode='digest',
    -                              auth_username='foo', auth_password='bar')
    -        self.assertEqual(response.body, b'ok')
    -
    -    def test_custom_reason(self):
    -        response = self.fetch('/custom_reason')
    -        self.assertEqual(response.reason, "Custom reason")
    -
    -    def test_fail_custom_reason(self):
    -        response = self.fetch('/custom_fail_reason')
    -        self.assertEqual(str(response.error), "HTTP 400: Custom reason")
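
DigestAuthHandler above implements a simplified RFC 2617 check with no qop: the client's response must equal MD5(HA1:nonce:HA2), where HA1 = MD5(username:realm:password) and HA2 = MD5(method:uri). A worked sketch of that computation using the handler's hard-coded values (digest_response is illustrative, not part of the test):

    from hashlib import md5

    def digest_response(username, realm, password, method, uri, nonce):
        # HA1 hashes the credentials, HA2 hashes the request line;
        # the response chains both through the server-supplied nonce.
        ha1 = md5(("%s:%s:%s" % (username, realm, password)).encode()).hexdigest()
        ha2 = md5(("%s:%s" % (method, uri)).encode()).hexdigest()
        return md5(("%s:%s:%s" % (ha1, nonce, ha2)).encode()).hexdigest()

    print(digest_response("foo", "test", "bar", "GET", "/digest", "1234"))
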
    diff --git a/rosbridge_server/src/tornado/test/escape_test.py b/rosbridge_server/src/tornado/test/escape_test.py
    deleted file mode 100644
    index 9abc74803..000000000
    --- a/rosbridge_server/src/tornado/test/escape_test.py
    +++ /dev/null
    @@ -1,217 +0,0 @@
    -#!/usr/bin/env python
    -
    -
    -from __future__ import absolute_import, division, print_function, with_statement
    -import tornado.escape
    -
    -from tornado.escape import utf8, xhtml_escape, xhtml_unescape, url_escape, url_unescape, to_unicode, json_decode, json_encode
    -from tornado.util import u, unicode_type, bytes_type
    -from tornado.test.util import unittest
    -
-linkify_tests = [
-    # (input, linkify_kwargs, expected_output)
-
-    ("hello http://world.com/!", {},
-     u('hello <a href="http://world.com/">http://world.com/</a>!')),
-
-    ("hello http://world.com/with?param=true&stuff=yes", {},
-     u('hello <a href="http://world.com/with?param=true&amp;stuff=yes">http://world.com/with?param=true&amp;stuff=yes</a>')),
-
-    # an opened paren followed by many chars killed Gruber's regex
-    ("http://url.com/w(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", {},
-     u('<a href="http://url.com/w">http://url.com/w</a>(aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa')),
-
-    # as did too many dots at the end
-    ("http://url.com/withmany.......................................", {},
-     u('<a href="http://url.com/withmany">http://url.com/withmany</a>.......................................')),
-
-    ("http://url.com/withmany((((((((((((((((((((((((((((((((((a)", {},
-     u('<a href="http://url.com/withmany">http://url.com/withmany</a>((((((((((((((((((((((((((((((((((a)')),
-
-    # some examples from http://daringfireball.net/2009/11/liberal_regex_for_matching_urls
-    # plus a few extras (such as multiple parentheses).
-    ("http://foo.com/blah_blah", {},
-     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>')),
-
-    ("http://foo.com/blah_blah/", {},
-     u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>')),
-
-    ("(Something like http://foo.com/blah_blah)", {},
-     u('(Something like <a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>)')),
-
-    ("http://foo.com/blah_blah_(wikipedia)", {},
-     u('<a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>')),
-
-    ("http://foo.com/blah_(blah)_(wikipedia)_blah", {},
-     u('<a href="http://foo.com/blah_(blah)_(wikipedia)_blah">http://foo.com/blah_(blah)_(wikipedia)_blah</a>')),
-
-    ("(Something like http://foo.com/blah_blah_(wikipedia))", {},
-     u('(Something like <a href="http://foo.com/blah_blah_(wikipedia)">http://foo.com/blah_blah_(wikipedia)</a>)')),
-
-    ("http://foo.com/blah_blah.", {},
-     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>.')),
-
-    ("http://foo.com/blah_blah/.", {},
-     u('<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>.')),
-
-    ("<http://foo.com/blah_blah>", {},
-     u('&lt;<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>&gt;')),
-
-    ("<http://foo.com/blah_blah/>", {},
-     u('&lt;<a href="http://foo.com/blah_blah/">http://foo.com/blah_blah/</a>&gt;')),
-
-    ("http://foo.com/blah_blah,", {},
-     u('<a href="http://foo.com/blah_blah">http://foo.com/blah_blah</a>,')),
-
-    ("http://www.example.com/wpstyle/?p=364.", {},
-     u('<a href="http://www.example.com/wpstyle/?p=364">http://www.example.com/wpstyle/?p=364</a>.')),
-
-    ("rdar://1234",
-     {"permitted_protocols": ["http", "rdar"]},
-     u('<a href="rdar://1234">rdar://1234</a>')),
-
-    ("rdar:/1234",
-     {"permitted_protocols": ["rdar"]},
-     u('<a href="rdar:/1234">rdar:/1234</a>')),
-
-    ("http://userid:password@example.com:8080", {},
-     u('<a href="http://userid:password@example.com:8080">http://userid:password@example.com:8080</a>')),
-
-    ("http://userid@example.com", {},
-     u('<a href="http://userid@example.com">http://userid@example.com</a>')),
-
-    ("http://userid@example.com:8080", {},
-     u('<a href="http://userid@example.com:8080">http://userid@example.com:8080</a>')),
-
-    ("http://userid:password@example.com", {},
-     u('<a href="http://userid:password@example.com">http://userid:password@example.com</a>')),
-
-    ("message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e",
-     {"permitted_protocols": ["http", "message"]},
-     u('<a href="message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e">message://%3c330e7f8409726r6a4ba78dkf1fd71420c1bf6ff@mail.gmail.com%3e</a>')),
-
-    (u("http://\u27a1.ws/\u4a39"), {},
-     u('<a href="http://\u27a1.ws/\u4a39">http://\u27a1.ws/\u4a39</a>')),
-
-    ("<tag>http://example.com</tag>", {},
-     u('&lt;tag&gt;<a href="http://example.com">http://example.com</a>&lt;/tag&gt;')),
-
-    ("Just a www.example.com link.", {},
-     u('Just a <a href="http://www.example.com">www.example.com</a> link.')),
-
-    ("Just a www.example.com link.",
-     {"require_protocol": True},
-     u('Just a www.example.com link.')),
-
-    ("A http://reallylong.com/link/that/exceedsthelenglimit.html",
-     {"require_protocol": True, "shorten": True},
-     u('A <a href="http://reallylong.com/link/that/exceedsthelenglimit.html" title="http://reallylong.com/link/that/exceedsthelenglimit.html">http://reallylong.com/link...</a>')),
-
-    ("A http://reallylongdomainnamethatwillbetoolong.com/hi!",
-     {"shorten": True},
-     u('A <a href="http://reallylongdomainnamethatwillbetoolong.com/hi" title="http://reallylongdomainnamethatwillbetoolong.com/hi">http://reallylongdomainnametha...</a>!')),
-
-    ("A file:///passwords.txt and http://web.com link", {},
-     u('A file:///passwords.txt and <a href="http://web.com">http://web.com</a> link')),
-
-    ("A file:///passwords.txt and http://web.com link",
-     {"permitted_protocols": ["file"]},
-     u('A <a href="file:///passwords.txt">file:///passwords.txt</a> and http://web.com link')),
-
-    ("www.external-link.com",
-     {"extra_params": 'rel="nofollow" class="external"'},
-     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
-
-    ("www.external-link.com and www.internal-link.com/blogs extra",
-     {"extra_params": lambda href: 'class="internal"' if href.startswith("http://www.internal-link.com") else 'rel="nofollow" class="external"'},
-     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a> and <a href="http://www.internal-link.com/blogs" class="internal">www.internal-link.com/blogs</a> extra')),
-
-    ("www.external-link.com",
-     {"extra_params": lambda href: '    rel="nofollow" class="external"  '},
-     u('<a href="http://www.external-link.com" rel="nofollow" class="external">www.external-link.com</a>')),
-]
    -
    -
    -class EscapeTestCase(unittest.TestCase):
    -    def test_linkify(self):
    -        for text, kwargs, html in linkify_tests:
    -            linked = tornado.escape.linkify(text, **kwargs)
    -            self.assertEqual(linked, html)
    -
    -    def test_xhtml_escape(self):
    -        tests = [
-            ("<foo>", "&lt;foo&gt;"),
-            (u("<foo>"), u("&lt;foo&gt;")),
-            (b"<foo>", b"&lt;foo&gt;"),
-
-            ("<>&\"'", "&lt;&gt;&amp;&quot;&#39;"),
-            ("&amp;", "&amp;amp;"),
-
-            (u("<\u00e9>"), u("&lt;\u00e9&gt;")),
-            (b"<\xc3\xa9>", b"&lt;\xc3\xa9&gt;"),
    -        ]
    -        for unescaped, escaped in tests:
    -            self.assertEqual(utf8(xhtml_escape(unescaped)), utf8(escaped))
    -            self.assertEqual(utf8(unescaped), utf8(xhtml_unescape(escaped)))
    -
    -    def test_url_escape_unicode(self):
    -        tests = [
    -            # byte strings are passed through as-is
    -            (u('\u00e9').encode('utf8'), '%C3%A9'),
    -            (u('\u00e9').encode('latin1'), '%E9'),
    -
    -            # unicode strings become utf8
    -            (u('\u00e9'), '%C3%A9'),
    -        ]
    -        for unescaped, escaped in tests:
    -            self.assertEqual(url_escape(unescaped), escaped)
    -
    -    def test_url_unescape_unicode(self):
    -        tests = [
    -            ('%C3%A9', u('\u00e9'), 'utf8'),
    -            ('%C3%A9', u('\u00c3\u00a9'), 'latin1'),
    -            ('%C3%A9', utf8(u('\u00e9')), None),
    -        ]
    -        for escaped, unescaped, encoding in tests:
    -            # input strings to url_unescape should only contain ascii
    -            # characters, but make sure the function accepts both byte
    -            # and unicode strings.
    -            self.assertEqual(url_unescape(to_unicode(escaped), encoding), unescaped)
    -            self.assertEqual(url_unescape(utf8(escaped), encoding), unescaped)
    -
    -    def test_url_escape_quote_plus(self):
    -        unescaped = '+ #%'
    -        plus_escaped = '%2B+%23%25'
    -        escaped = '%2B%20%23%25'
    -        self.assertEqual(url_escape(unescaped), plus_escaped)
    -        self.assertEqual(url_escape(unescaped, plus=False), escaped)
    -        self.assertEqual(url_unescape(plus_escaped), unescaped)
    -        self.assertEqual(url_unescape(escaped, plus=False), unescaped)
    -        self.assertEqual(url_unescape(plus_escaped, encoding=None),
    -                         utf8(unescaped))
    -        self.assertEqual(url_unescape(escaped, encoding=None, plus=False),
    -                         utf8(unescaped))
    -
    -    def test_escape_return_types(self):
    -        # On python2 the escape methods should generally return the same
    -        # type as their argument
    -        self.assertEqual(type(xhtml_escape("foo")), str)
    -        self.assertEqual(type(xhtml_escape(u("foo"))), unicode_type)
    -
    -    def test_json_decode(self):
    -        # json_decode accepts both bytes and unicode, but strings it returns
    -        # are always unicode.
    -        self.assertEqual(json_decode(b'"foo"'), u("foo"))
    -        self.assertEqual(json_decode(u('"foo"')), u("foo"))
    -
    -        # Non-ascii bytes are interpreted as utf8
    -        self.assertEqual(json_decode(utf8(u('"\u00e9"'))), u("\u00e9"))
    -
    -    def test_json_encode(self):
    -        # json deals with strings, not bytes.  On python 2 byte strings will
    -        # convert automatically if they are utf8; on python 3 byte strings
    -        # are not allowed.
    -        self.assertEqual(json_decode(json_encode(u("\u00e9"))), u("\u00e9"))
    -        if bytes_type is str:
    -            self.assertEqual(json_decode(json_encode(utf8(u("\u00e9")))), u("\u00e9"))
    -            self.assertRaises(UnicodeDecodeError, json_encode, b"\xe9")
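
The helpers exercised above are small pure functions, so their behavior is easy to confirm outside the test harness. A few representative calls matching the assertions in this file (a sketch, not an exhaustive demo):

    from tornado.escape import (json_decode, json_encode, url_escape,
                                url_unescape, xhtml_escape, xhtml_unescape)

    assert xhtml_escape("<foo>") == "&lt;foo&gt;"
    assert xhtml_unescape("&lt;foo&gt;") == "<foo>"
    assert url_escape("+ #%") == "%2B+%23%25"      # space becomes '+' by default
    assert url_unescape("%2B+%23%25") == "+ #%"
    assert json_decode(json_encode(u"\u00e9")) == u"\u00e9"
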
    diff --git a/rosbridge_server/src/tornado/test/gen_test.py b/rosbridge_server/src/tornado/test/gen_test.py
    deleted file mode 100644
    index a15cdf73a..000000000
    --- a/rosbridge_server/src/tornado/test/gen_test.py
    +++ /dev/null
    @@ -1,1071 +0,0 @@
    -from __future__ import absolute_import, division, print_function, with_statement
    -
    -import contextlib
    -import datetime
    -import functools
    -import sys
    -import textwrap
    -import time
    -import platform
    -import weakref
    -
    -from tornado.concurrent import return_future, Future
    -from tornado.escape import url_escape
    -from tornado.httpclient import AsyncHTTPClient
    -from tornado.ioloop import IOLoop
    -from tornado.log import app_log
    -from tornado import stack_context
    -from tornado.testing import AsyncHTTPTestCase, AsyncTestCase, ExpectLog, gen_test
    -from tornado.test.util import unittest, skipOnTravis
    -from tornado.web import Application, RequestHandler, asynchronous, HTTPError
    -
    -from tornado import gen
    -
    -try:
    -    from concurrent import futures
    -except ImportError:
    -    futures = None
    -
    -skipBefore33 = unittest.skipIf(sys.version_info < (3, 3), 'PEP 380 not available')
    -skipNotCPython = unittest.skipIf(platform.python_implementation() != 'CPython',
    -                                 'Not CPython implementation')
    -
    -
    -class GenEngineTest(AsyncTestCase):
    -    def setUp(self):
    -        super(GenEngineTest, self).setUp()
    -        self.named_contexts = []
    -
    -    def named_context(self, name):
    -        @contextlib.contextmanager
    -        def context():
    -            self.named_contexts.append(name)
    -            try:
    -                yield
    -            finally:
    -                self.assertEqual(self.named_contexts.pop(), name)
    -        return context
    -
    -    def run_gen(self, f):
    -        f()
    -        return self.wait()
    -
    -    def delay_callback(self, iterations, callback, arg):
    -        """Runs callback(arg) after a number of IOLoop iterations."""
    -        if iterations == 0:
    -            callback(arg)
    -        else:
    -            self.io_loop.add_callback(functools.partial(
    -                self.delay_callback, iterations - 1, callback, arg))
    -
    -    @return_future
    -    def async_future(self, result, callback):
    -        self.io_loop.add_callback(callback, result)
    -
    -    def test_no_yield(self):
    -        @gen.engine
    -        def f():
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_inline_cb(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))()
    -            res = yield gen.Wait("k1")
    -            self.assertTrue(res is None)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_ioloop_cb(self):
    -        @gen.engine
    -        def f():
    -            self.io_loop.add_callback((yield gen.Callback("k1")))
    -            yield gen.Wait("k1")
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_exception_phase1(self):
    -        @gen.engine
    -        def f():
    -            1 / 0
    -        self.assertRaises(ZeroDivisionError, self.run_gen, f)
    -
    -    def test_exception_phase2(self):
    -        @gen.engine
    -        def f():
    -            self.io_loop.add_callback((yield gen.Callback("k1")))
    -            yield gen.Wait("k1")
    -            1 / 0
    -        self.assertRaises(ZeroDivisionError, self.run_gen, f)
    -
    -    def test_exception_in_task_phase1(self):
    -        def fail_task(callback):
    -            1 / 0
    -
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Task(fail_task)
    -                raise Exception("did not get expected exception")
    -            except ZeroDivisionError:
    -                self.stop()
    -        self.run_gen(f)
    -
    -    def test_exception_in_task_phase2(self):
    -        # This is the case that requires the use of stack_context in gen.engine
    -        def fail_task(callback):
    -            self.io_loop.add_callback(lambda: 1 / 0)
    -
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Task(fail_task)
    -                raise Exception("did not get expected exception")
    -            except ZeroDivisionError:
    -                self.stop()
    -        self.run_gen(f)
    -
    -    def test_with_arg(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))(42)
    -            res = yield gen.Wait("k1")
    -            self.assertEqual(42, res)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_with_arg_tuple(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback((1, 2)))((3, 4))
    -            res = yield gen.Wait((1, 2))
    -            self.assertEqual((3, 4), res)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_key_reuse(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback("k1")
    -            yield gen.Callback("k1")
    -            self.stop()
    -        self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    -
    -    def test_key_reuse_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback((1, 2))
    -            yield gen.Callback((1, 2))
    -            self.stop()
    -        self.assertRaises(gen.KeyReuseError, self.run_gen, f)
    -
    -    def test_key_mismatch(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback("k1")
    -            yield gen.Wait("k2")
    -            self.stop()
    -        self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    -
    -    def test_key_mismatch_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback((1, 2))
    -            yield gen.Wait((2, 3))
    -            self.stop()
    -        self.assertRaises(gen.UnknownKeyError, self.run_gen, f)
    -
    -    def test_leaked_callback(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback("k1")
    -            self.stop()
    -        self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    -
    -    def test_leaked_callback_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Callback((1, 2))
    -            self.stop()
    -        self.assertRaises(gen.LeakedCallbackError, self.run_gen, f)
    -
    -    def test_parallel_callback(self):
    -        @gen.engine
    -        def f():
    -            for k in range(3):
    -                self.io_loop.add_callback((yield gen.Callback(k)))
    -            yield gen.Wait(1)
    -            self.io_loop.add_callback((yield gen.Callback(3)))
    -            yield gen.Wait(0)
    -            yield gen.Wait(3)
    -            yield gen.Wait(2)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_bogus_yield(self):
    -        @gen.engine
    -        def f():
    -            yield 42
    -        self.assertRaises(gen.BadYieldError, self.run_gen, f)
    -
    -    def test_bogus_yield_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield (1, 2)
    -        self.assertRaises(gen.BadYieldError, self.run_gen, f)
    -
    -    def test_reuse(self):
    -        @gen.engine
    -        def f():
    -            self.io_loop.add_callback((yield gen.Callback(0)))
    -            yield gen.Wait(0)
    -            self.stop()
    -        self.run_gen(f)
    -        self.run_gen(f)
    -
    -    def test_task(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_wait_all(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))("v1")
    -            (yield gen.Callback("k2"))("v2")
    -            results = yield gen.WaitAll(["k1", "k2"])
    -            self.assertEqual(results, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_exception_in_yield(self):
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Wait("k1")
    -                raise Exception("did not get expected exception")
    -            except gen.UnknownKeyError:
    -                pass
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_resume_after_exception_in_yield(self):
    -        @gen.engine
    -        def f():
    -            try:
    -                yield gen.Wait("k1")
    -                raise Exception("did not get expected exception")
    -            except gen.UnknownKeyError:
    -                pass
    -            (yield gen.Callback("k2"))("v2")
    -            self.assertEqual((yield gen.Wait("k2")), "v2")
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_orphaned_callback(self):
    -        @gen.engine
    -        def f():
    -            self.orphaned_callback = yield gen.Callback(1)
    -        try:
    -            self.run_gen(f)
    -            raise Exception("did not get expected exception")
    -        except gen.LeakedCallbackError:
    -            pass
    -        self.orphaned_callback()
    -
    -    def test_multi(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))("v1")
    -            (yield gen.Callback("k2"))("v2")
    -            results = yield [gen.Wait("k1"), gen.Wait("k2")]
    -            self.assertEqual(results, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_dict(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("k1"))("v1")
    -            (yield gen.Callback("k2"))("v2")
    -            results = yield dict(foo=gen.Wait("k1"), bar=gen.Wait("k2"))
    -            self.assertEqual(results, dict(foo="v1", bar="v2"))
    -            self.stop()
    -        self.run_gen(f)
    -
    -    # The following tests explicitly run with both gen.Multi
    -    # and gen.multi_future (Task returns a Future, so it can be used
    -    # with either).
    -    def test_multi_yieldpoint_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.Multi([
    -                gen.Task(self.delay_callback, 3, arg="v1"),
    -                gen.Task(self.delay_callback, 1, arg="v2"),
    -            ])
    -            self.assertEqual(responses, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_yieldpoint_dict_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.Multi(dict(
    -                foo=gen.Task(self.delay_callback, 3, arg="v1"),
    -                bar=gen.Task(self.delay_callback, 1, arg="v2"),
    -            ))
    -            self.assertEqual(responses, dict(foo="v1", bar="v2"))
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_future_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.multi_future([
    -                gen.Task(self.delay_callback, 3, arg="v1"),
    -                gen.Task(self.delay_callback, 1, arg="v2"),
    -            ])
    -            self.assertEqual(responses, ["v1", "v2"])
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_multi_future_dict_delayed(self):
    -        @gen.engine
    -        def f():
    -            # callbacks run at different times
    -            responses = yield gen.multi_future(dict(
    -                foo=gen.Task(self.delay_callback, 3, arg="v1"),
    -                bar=gen.Task(self.delay_callback, 1, arg="v2"),
    -            ))
    -            self.assertEqual(responses, dict(foo="v1", bar="v2"))
    -            self.stop()
    -        self.run_gen(f)
    -
    -    @skipOnTravis
    -    @gen_test
    -    def test_multi_performance(self):
    -        # Yielding a list used to have quadratic performance; make
    -        # sure a large list stays reasonable.  On my laptop a list of
    -        # 2000 used to take 1.8s, now it takes 0.12.
    -        start = time.time()
    -        yield [gen.Task(self.io_loop.add_callback) for i in range(2000)]
    -        end = time.time()
    -        self.assertLess(end - start, 1.0)
    -
    -    @gen_test
    -    def test_multi_empty(self):
    -        # Empty lists or dicts should return the same type.
    -        x = yield []
    -        self.assertTrue(isinstance(x, list))
    -        y = yield {}
    -        self.assertTrue(isinstance(y, dict))
    -
    -    @gen_test
    -    def test_multi_mixed_types(self):
    -        # A YieldPoint (Wait) and Future (Task) can be combined
    -        # (and use the YieldPoint codepath)
    -        (yield gen.Callback("k1"))("v1")
    -        responses = yield [gen.Wait("k1"),
    -                           gen.Task(self.delay_callback, 3, arg="v2")]
    -        self.assertEqual(responses, ["v1", "v2"])
    -
    -    @gen_test
    -    def test_future(self):
    -        result = yield self.async_future(1)
    -        self.assertEqual(result, 1)
    -
    -    @gen_test
    -    def test_multi_future(self):
    -        results = yield [self.async_future(1), self.async_future(2)]
    -        self.assertEqual(results, [1, 2])
    -
    -    @gen_test
    -    def test_multi_dict_future(self):
    -        results = yield dict(foo=self.async_future(1), bar=self.async_future(2))
    -        self.assertEqual(results, dict(foo=1, bar=2))
    -
    -    def test_arguments(self):
    -        @gen.engine
    -        def f():
    -            (yield gen.Callback("noargs"))()
    -            self.assertEqual((yield gen.Wait("noargs")), None)
    -            (yield gen.Callback("1arg"))(42)
    -            self.assertEqual((yield gen.Wait("1arg")), 42)
    -
    -            (yield gen.Callback("kwargs"))(value=42)
    -            result = yield gen.Wait("kwargs")
    -            self.assertTrue(isinstance(result, gen.Arguments))
    -            self.assertEqual(((), dict(value=42)), result)
    -            self.assertEqual(dict(value=42), result.kwargs)
    -
    -            (yield gen.Callback("2args"))(42, 43)
    -            result = yield gen.Wait("2args")
    -            self.assertTrue(isinstance(result, gen.Arguments))
    -            self.assertEqual(((42, 43), {}), result)
    -            self.assertEqual((42, 43), result.args)
    -
    -            def task_func(callback):
    -                callback(None, error="foo")
    -            result = yield gen.Task(task_func)
    -            self.assertTrue(isinstance(result, gen.Arguments))
    -            self.assertEqual(((None,), dict(error="foo")), result)
    -
    -            self.stop()
    -        self.run_gen(f)
    -
    -    def test_stack_context_leak(self):
    -        # regression test: repeated invocations of a gen-based
    -        # function should not result in accumulated stack_contexts
    -        def _stack_depth():
    -            head = stack_context._state.contexts[1]
    -            length = 0
    -
    -            while head is not None:
    -                length += 1
    -                head = head.old_contexts[1]
    -
    -            return length
    -
    -        @gen.engine
    -        def inner(callback):
    -            yield gen.Task(self.io_loop.add_callback)
    -            callback()
    -
    -        @gen.engine
    -        def outer():
    -            for i in range(10):
    -                yield gen.Task(inner)
    -
    -            stack_increase = _stack_depth() - initial_stack_depth
    -            self.assertTrue(stack_increase <= 2)
    -            self.stop()
    -        initial_stack_depth = _stack_depth()
    -        self.run_gen(outer)
    -
    -    def test_stack_context_leak_exception(self):
    -        # same as previous, but with a function that exits with an exception
    -        @gen.engine
    -        def inner(callback):
    -            yield gen.Task(self.io_loop.add_callback)
    -            1 / 0
    -
    -        @gen.engine
    -        def outer():
    -            for i in range(10):
    -                try:
    -                    yield gen.Task(inner)
    -                except ZeroDivisionError:
    -                    pass
    -            stack_increase = len(stack_context._state.contexts) - initial_stack_depth
    -            self.assertTrue(stack_increase <= 2)
    -            self.stop()
    -        initial_stack_depth = len(stack_context._state.contexts)
    -        self.run_gen(outer)
    -
    -    def function_with_stack_context(self, callback):
    -        # Technically this function should stack_context.wrap its callback
    -        # upon entry.  However, it is very common for this step to be
    -        # omitted.
    -        def step2():
    -            self.assertEqual(self.named_contexts, ['a'])
    -            self.io_loop.add_callback(callback)
    -
    -        with stack_context.StackContext(self.named_context('a')):
    -            self.io_loop.add_callback(step2)
    -
    -    @gen_test
    -    def test_wait_transfer_stack_context(self):
    -        # Wait should not pick up contexts from where callback was invoked,
    -        # even if that function improperly fails to wrap its callback.
    -        cb = yield gen.Callback('k1')
    -        self.function_with_stack_context(cb)
    -        self.assertEqual(self.named_contexts, [])
    -        yield gen.Wait('k1')
    -        self.assertEqual(self.named_contexts, [])
    -
    -    @gen_test
    -    def test_task_transfer_stack_context(self):
    -        yield gen.Task(self.function_with_stack_context)
    -        self.assertEqual(self.named_contexts, [])
    -
    -    def test_raise_after_stop(self):
    -        # This pattern will be used in the following tests so make sure
    -        # the exception propagates as expected.
    -        @gen.engine
    -        def f():
    -            self.stop()
    -            1 / 0
    -
    -        with self.assertRaises(ZeroDivisionError):
    -            self.run_gen(f)
    -
    -    def test_sync_raise_return(self):
    -        # gen.Return is allowed in @gen.engine, but it may not be used
    -        # to return a value.
    -        @gen.engine
    -        def f():
    -            self.stop(42)
    -            raise gen.Return()
    -
    -        result = self.run_gen(f)
    -        self.assertEqual(result, 42)
    -
    -    def test_async_raise_return(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            self.stop(42)
    -            raise gen.Return()
    -
    -        result = self.run_gen(f)
    -        self.assertEqual(result, 42)
    -
    -    def test_sync_raise_return_value(self):
    -        @gen.engine
    -        def f():
    -            raise gen.Return(42)
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_sync_raise_return_value_tuple(self):
    -        @gen.engine
    -        def f():
    -            raise gen.Return((1, 2))
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_async_raise_return_value(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            raise gen.Return(42)
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_async_raise_return_value_tuple(self):
    -        @gen.engine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            raise gen.Return((1, 2))
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_return_value(self):
    -        # It is an error to apply @gen.engine to a function that returns
    -        # a value.
    -        @gen.engine
    -        def f():
    -            return 42
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    def test_return_value_tuple(self):
    -        # It is an error to apply @gen.engine to a function that returns
    -        # a value.
    -        @gen.engine
    -        def f():
    -            return (1, 2)
    -
    -        with self.assertRaises(gen.ReturnValueIgnoredError):
    -            self.run_gen(f)
    -
    -    @skipNotCPython
    -    def test_task_refcounting(self):
    -        # On CPython, tasks and their arguments should be released immediately
    -        # without waiting for garbage collection.
    -        @gen.engine
    -        def f():
    -            class Foo(object):
    -                pass
    -            arg = Foo()
    -            self.arg_ref = weakref.ref(arg)
    -            task = gen.Task(self.io_loop.add_callback, arg=arg)
    -            self.task_ref = weakref.ref(task)
    -            yield task
    -            self.stop()
    -
    -        self.run_gen(f)
    -        self.assertIs(self.arg_ref(), None)
    -        self.assertIs(self.task_ref(), None)
    -
    -
    -class GenCoroutineTest(AsyncTestCase):
    -    def setUp(self):
    -        # Stray StopIteration exceptions can lead to tests exiting prematurely,
    -        # so we need explicit checks here to make sure the tests run all
    -        # the way through.
    -        self.finished = False
    -        super(GenCoroutineTest, self).setUp()
    -
    -    def tearDown(self):
    -        super(GenCoroutineTest, self).tearDown()
    -        assert self.finished
    -
    -    @gen_test
    -    def test_sync_gen_return(self):
    -        @gen.coroutine
    -        def f():
    -            raise gen.Return(42)
    -        result = yield f()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_async_gen_return(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            raise gen.Return(42)
    -        result = yield f()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_sync_return(self):
    -        @gen.coroutine
    -        def f():
    -            return 42
    -        result = yield f()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore33
    -    @gen_test
    -    def test_async_return(self):
    -        # It is a compile-time error to return a value in a generator
    -        # before Python 3.3, so we must test this with exec.
    -        # Flatten the real global and local namespace into our fake globals:
    -        # it's all global from the perspective of f().
    -        global_namespace = dict(globals(), **locals())
    -        local_namespace = {}
    -        exec(textwrap.dedent("""
    -        @gen.coroutine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            return 42
    -        """), global_namespace, local_namespace)
    -        result = yield local_namespace['f']()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @skipBefore33
    -    @gen_test
    -    def test_async_early_return(self):
    -        # A yield statement exists but is not executed, which means
    -        # this function "returns" via an exception.  This exception
    -        # doesn't happen before the exception handling is set up.
    -        global_namespace = dict(globals(), **locals())
    -        local_namespace = {}
    -        exec(textwrap.dedent("""
    -        @gen.coroutine
    -        def f():
    -            if True:
    -                return 42
    -            yield gen.Task(self.io_loop.add_callback)
    -        """), global_namespace, local_namespace)
    -        result = yield local_namespace['f']()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_sync_return_no_value(self):
    -        @gen.coroutine
    -        def f():
    -            return
    -        result = yield f()
    -        self.assertEqual(result, None)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_async_return_no_value(self):
    -        # Without a return value we don't need python 3.3.
    -        @gen.coroutine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            return
    -        result = yield f()
    -        self.assertEqual(result, None)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_sync_raise(self):
    -        @gen.coroutine
    -        def f():
    -            1 / 0
    -        # The exception is raised when the future is yielded
    -        # (or equivalently when its result method is called),
-        # not when the function itself is called.
    -        future = f()
    -        with self.assertRaises(ZeroDivisionError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_async_raise(self):
    -        @gen.coroutine
    -        def f():
    -            yield gen.Task(self.io_loop.add_callback)
    -            1 / 0
    -        future = f()
    -        with self.assertRaises(ZeroDivisionError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_pass_callback(self):
    -        @gen.coroutine
    -        def f():
    -            raise gen.Return(42)
    -        result = yield gen.Task(f)
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_replace_yieldpoint_exception(self):
    -        # Test exception handling: a coroutine can catch one exception
    -        # raised by a yield point and raise a different one.
    -        @gen.coroutine
    -        def f1():
    -            1 / 0
    -
    -        @gen.coroutine
    -        def f2():
    -            try:
    -                yield f1()
    -            except ZeroDivisionError:
    -                raise KeyError()
    -
    -        future = f2()
    -        with self.assertRaises(KeyError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_swallow_yieldpoint_exception(self):
    -        # Test exception handling: a coroutine can catch an exception
    -        # raised by a yield point and not raise a different one.
    -        @gen.coroutine
    -        def f1():
    -            1 / 0
    -
    -        @gen.coroutine
    -        def f2():
    -            try:
    -                yield f1()
    -            except ZeroDivisionError:
    -                raise gen.Return(42)
    -
    -        result = yield f2()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_replace_context_exception(self):
    -        # Test exception handling: exceptions thrown into the stack context
    -        # can be caught and replaced.
    -        # Note that this test and the following are for behavior that is
    -        # not really supported any more:  coroutines no longer create a
    -        # stack context automatically; but one is created after the first
    -        # YieldPoint (i.e. not a Future).
    -        @gen.coroutine
    -        def f2():
    -            (yield gen.Callback(1))()
    -            yield gen.Wait(1)
    -            self.io_loop.add_callback(lambda: 1 / 0)
    -            try:
    -                yield gen.Task(self.io_loop.add_timeout,
    -                               self.io_loop.time() + 10)
    -            except ZeroDivisionError:
    -                raise KeyError()
    -
    -        future = f2()
    -        with self.assertRaises(KeyError):
    -            yield future
    -        self.finished = True
    -
    -    @gen_test
    -    def test_swallow_context_exception(self):
    -        # Test exception handling: exceptions thrown into the stack context
    -        # can be caught and ignored.
    -        @gen.coroutine
    -        def f2():
    -            (yield gen.Callback(1))()
    -            yield gen.Wait(1)
    -            self.io_loop.add_callback(lambda: 1 / 0)
    -            try:
    -                yield gen.Task(self.io_loop.add_timeout,
    -                               self.io_loop.time() + 10)
    -            except ZeroDivisionError:
    -                raise gen.Return(42)
    -
    -        result = yield f2()
    -        self.assertEqual(result, 42)
    -        self.finished = True
    -
    -    @gen_test
    -    def test_moment(self):
    -        calls = []
    -        @gen.coroutine
    -        def f(name, yieldable):
    -            for i in range(5):
    -                calls.append(name)
    -                yield yieldable
    -        # First, confirm the behavior without moment: each coroutine
    -        # monopolizes the event loop until it finishes.
    -        immediate = Future()
    -        immediate.set_result(None)
    -        yield [f('a', immediate), f('b', immediate)]
    -        self.assertEqual(''.join(calls), 'aaaaabbbbb')
    -
    -        # With moment, they take turns.
    -        calls = []
    -        yield [f('a', gen.moment), f('b', gen.moment)]
    -        self.assertEqual(''.join(calls), 'ababababab')
    -        self.finished = True
    -
    -        calls = []
    -        yield [f('a', gen.moment), f('b', immediate)]
    -        self.assertEqual(''.join(calls), 'abbbbbaaaa')
    -
    -
    -class GenSequenceHandler(RequestHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        self.io_loop = self.request.connection.stream.io_loop
    -        self.io_loop.add_callback((yield gen.Callback("k1")))
    -        yield gen.Wait("k1")
    -        self.write("1")
    -        self.io_loop.add_callback((yield gen.Callback("k2")))
    -        yield gen.Wait("k2")
    -        self.write("2")
    -        # reuse an old key
    -        self.io_loop.add_callback((yield gen.Callback("k1")))
    -        yield gen.Wait("k1")
    -        self.finish("3")
    -
    -
    -class GenCoroutineSequenceHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        self.io_loop = self.request.connection.stream.io_loop
    -        self.io_loop.add_callback((yield gen.Callback("k1")))
    -        yield gen.Wait("k1")
    -        self.write("1")
    -        self.io_loop.add_callback((yield gen.Callback("k2")))
    -        yield gen.Wait("k2")
    -        self.write("2")
    -        # reuse an old key
    -        self.io_loop.add_callback((yield gen.Callback("k1")))
    -        yield gen.Wait("k1")
    -        self.finish("3")
    -
    -
    -class GenCoroutineUnfinishedSequenceHandler(RequestHandler):
    -    @asynchronous
    -    @gen.coroutine
    -    def get(self):
    -        self.io_loop = self.request.connection.stream.io_loop
    -        self.io_loop.add_callback((yield gen.Callback("k1")))
    -        yield gen.Wait("k1")
    -        self.write("1")
    -        self.io_loop.add_callback((yield gen.Callback("k2")))
    -        yield gen.Wait("k2")
    -        self.write("2")
    -        # reuse an old key
    -        self.io_loop.add_callback((yield gen.Callback("k1")))
    -        yield gen.Wait("k1")
    -        # just write, don't finish
    -        self.write("3")
    -
    -
    -class GenTaskHandler(RequestHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        io_loop = self.request.connection.stream.io_loop
    -        client = AsyncHTTPClient(io_loop=io_loop)
    -        response = yield gen.Task(client.fetch, self.get_argument('url'))
    -        response.rethrow()
    -        self.finish(b"got response: " + response.body)
    -
    -
    -class GenExceptionHandler(RequestHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        # This test depends on the order of the two decorators.
    -        io_loop = self.request.connection.stream.io_loop
    -        yield gen.Task(io_loop.add_callback)
    -        raise Exception("oops")
    -
    -
    -class GenCoroutineExceptionHandler(RequestHandler):
    -    @gen.coroutine
    -    def get(self):
    -        # This test depends on the order of the two decorators.
    -        io_loop = self.request.connection.stream.io_loop
    -        yield gen.Task(io_loop.add_callback)
    -        raise Exception("oops")
    -
    -
    -class GenYieldExceptionHandler(RequestHandler):
    -    @asynchronous
    -    @gen.engine
    -    def get(self):
    -        io_loop = self.request.connection.stream.io_loop
    -        # Test the interaction of the two stack_contexts.
    -
    -        def fail_task(callback):
    -            io_loop.add_callback(lambda: 1 / 0)
    -        try:
    -            yield gen.Task(fail_task)
    -            raise Exception("did not get expected exception")
    -        except ZeroDivisionError:
    -            self.finish('ok')
    -
    -
    -class UndecoratedCoroutinesHandler(RequestHandler):
    -    @gen.coroutine
    -    def prepare(self):
    -        self.chunks = []
    -        yield gen.Task(IOLoop.current().add_callback)
    -        self.chunks.append('1')
    -
    -    @gen.coroutine
    -    def get(self):
    -        self.chunks.append('2')
    -        yield gen.Task(IOLoop.current().add_callback)
    -        self.chunks.append('3')
    -        yield gen.Task(IOLoop.current().add_callback)
    -        self.write(''.join(self.chunks))
    -
    -
    -class AsyncPrepareErrorHandler(RequestHandler):
    -    @gen.coroutine
    -    def prepare(self):
    -        yield gen.Task(IOLoop.current().add_callback)
    -        raise HTTPError(403)
    -
    -    def get(self):
    -        self.finish('ok')
    -
    -
    -class GenWebTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([
    -            ('/sequence', GenSequenceHandler),
    -            ('/coroutine_sequence', GenCoroutineSequenceHandler),
    -            ('/coroutine_unfinished_sequence',
    -             GenCoroutineUnfinishedSequenceHandler),
    -            ('/task', GenTaskHandler),
    -            ('/exception', GenExceptionHandler),
    -            ('/coroutine_exception', GenCoroutineExceptionHandler),
    -            ('/yield_exception', GenYieldExceptionHandler),
    -            ('/undecorated_coroutine', UndecoratedCoroutinesHandler),
    -            ('/async_prepare_error', AsyncPrepareErrorHandler),
    -        ])
    -
    -    def test_sequence_handler(self):
    -        response = self.fetch('/sequence')
    -        self.assertEqual(response.body, b"123")
    -
    -    def test_coroutine_sequence_handler(self):
    -        response = self.fetch('/coroutine_sequence')
    -        self.assertEqual(response.body, b"123")
    -
    -    def test_coroutine_unfinished_sequence_handler(self):
    -        response = self.fetch('/coroutine_unfinished_sequence')
    -        self.assertEqual(response.body, b"123")
    -
    -    def test_task_handler(self):
    -        response = self.fetch('/task?url=%s' % url_escape(self.get_url('/sequence')))
    -        self.assertEqual(response.body, b"got response: 123")
    -
    -    def test_exception_handler(self):
    -        # Make sure we get an error and not a timeout
    -        with ExpectLog(app_log, "Uncaught exception GET /exception"):
    -            response = self.fetch('/exception')
    -        self.assertEqual(500, response.code)
    -
    -    def test_coroutine_exception_handler(self):
    -        # Make sure we get an error and not a timeout
    -        with ExpectLog(app_log, "Uncaught exception GET /coroutine_exception"):
    -            response = self.fetch('/coroutine_exception')
    -        self.assertEqual(500, response.code)
    -
    -    def test_yield_exception_handler(self):
    -        response = self.fetch('/yield_exception')
    -        self.assertEqual(response.body, b'ok')
    -
    -    def test_undecorated_coroutines(self):
    -        response = self.fetch('/undecorated_coroutine')
    -        self.assertEqual(response.body, b'123')
    -
    -    def test_async_prepare_error_handler(self):
    -        response = self.fetch('/async_prepare_error')
    -        self.assertEqual(response.code, 403)
    -
    -
    -class WithTimeoutTest(AsyncTestCase):
    -    @gen_test
    -    def test_timeout(self):
    -        with self.assertRaises(gen.TimeoutError):
    -            yield gen.with_timeout(datetime.timedelta(seconds=0.1),
    -                                   Future())
    -
    -    @gen_test
    -    def test_completes_before_timeout(self):
    -        future = Future()
    -        self.io_loop.add_timeout(datetime.timedelta(seconds=0.1),
    -                                 lambda: future.set_result('asdf'))
    -        result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                        future)
    -        self.assertEqual(result, 'asdf')
    -
    -    @gen_test
    -    def test_fails_before_timeout(self):
    -        future = Future()
    -        self.io_loop.add_timeout(
    -            datetime.timedelta(seconds=0.1),
    -            lambda: future.set_exception(ZeroDivisionError))
    -        with self.assertRaises(ZeroDivisionError):
    -            yield gen.with_timeout(datetime.timedelta(seconds=3600), future)
    -
    -    @gen_test
    -    def test_already_resolved(self):
    -        future = Future()
    -        future.set_result('asdf')
    -        result = yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                        future)
    -        self.assertEqual(result, 'asdf')
    -
    -    @unittest.skipIf(futures is None, 'futures module not present')
    -    @gen_test
    -    def test_timeout_concurrent_future(self):
    -        with futures.ThreadPoolExecutor(1) as executor:
    -            with self.assertRaises(gen.TimeoutError):
    -                yield gen.with_timeout(self.io_loop.time(),
    -                                       executor.submit(time.sleep, 0.1))
    -
    -    @unittest.skipIf(futures is None, 'futures module not present')
    -    @gen_test
    -    def test_completed_concurrent_future(self):
    -        with futures.ThreadPoolExecutor(1) as executor:
    -            yield gen.with_timeout(datetime.timedelta(seconds=3600),
    -                                   executor.submit(lambda: None))
    -
    -
    -if __name__ == '__main__':
    -    unittest.main()
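
Of the APIs exercised above, gen.with_timeout is the one most likely to show up in application code: it wraps any Future and raises gen.TimeoutError if the wrapped Future has not resolved within the given deadline. A minimal sketch (resolve_with_deadline is illustrative):

    from datetime import timedelta
    from tornado import gen

    @gen.coroutine
    def resolve_with_deadline(future, seconds=0.1):
        # Raises gen.TimeoutError inside the coroutine if the future has not
        # resolved within the deadline; otherwise returns its result.
        result = yield gen.with_timeout(timedelta(seconds=seconds), future)
        raise gen.Return(result)
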
    diff --git a/rosbridge_server/src/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo b/rosbridge_server/src/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo
    deleted file mode 100644
    index 089f6c7ab..000000000
    Binary files a/rosbridge_server/src/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.mo and /dev/null differ
    diff --git a/rosbridge_server/src/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po b/rosbridge_server/src/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po
    deleted file mode 100644
    index 732ee6da8..000000000
    --- a/rosbridge_server/src/tornado/test/gettext_translations/fr_FR/LC_MESSAGES/tornado_test.po
    +++ /dev/null
    @@ -1,22 +0,0 @@
    -# SOME DESCRIPTIVE TITLE.
    -# Copyright (C) YEAR THE PACKAGE'S COPYRIGHT HOLDER
    -# This file is distributed under the same license as the PACKAGE package.
-# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
-#
-#, fuzzy
-msgid ""
-msgstr ""
-"Project-Id-Version: PACKAGE VERSION\n"
-"Report-Msgid-Bugs-To: \n"
-"POT-Creation-Date: 2012-06-14 01:10-0700\n"
-"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
-"Last-Translator: FULL NAME <EMAIL@ADDRESS>\n"
-"Language-Team: LANGUAGE <LL@li.org>\n"
    -"Language: \n"
    -"MIME-Version: 1.0\n"
    -"Content-Type: text/plain; charset=utf-8\n"
    -"Content-Transfer-Encoding: 8bit\n"
    -
    -#: extract_me.py:1
    -msgid "school"
    -msgstr "école"
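
This .po source and its compiled .mo sibling follow the standard gettext layout that tornado.locale expects: <directory>/<lang>/LC_MESSAGES/<domain>.mo. A usage sketch against the deleted fixture paths:

    import tornado.locale

    # load_gettext_translations(directory, domain) reads the compiled .mo files.
    tornado.locale.load_gettext_translations(
        "rosbridge_server/src/tornado/test/gettext_translations", "tornado_test")
    print(tornado.locale.get("fr_FR").translate("school"))  # -> "école"
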
    diff --git a/rosbridge_server/src/tornado/test/httpclient_test.py b/rosbridge_server/src/tornado/test/httpclient_test.py
    deleted file mode 100644
    index 78daa74dc..000000000
    --- a/rosbridge_server/src/tornado/test/httpclient_test.py
    +++ /dev/null
    @@ -1,517 +0,0 @@
    -#!/usr/bin/env python
    -
    -from __future__ import absolute_import, division, print_function, with_statement
    -
    -import base64
    -import binascii
    -from contextlib import closing
    -import functools
    -import sys
    -import threading
    -
    -from tornado.escape import utf8
    -from tornado.httpclient import HTTPRequest, HTTPResponse, _RequestProxy, HTTPError, HTTPClient
    -from tornado.httpserver import HTTPServer
    -from tornado.ioloop import IOLoop
    -from tornado.iostream import IOStream
    -from tornado.log import gen_log
    -from tornado import netutil
    -from tornado.stack_context import ExceptionStackContext, NullContext
    -from tornado.testing import AsyncHTTPTestCase, bind_unused_port, gen_test, ExpectLog
    -from tornado.test.util import unittest, skipOnTravis
    -from tornado.util import u, bytes_type
    -from tornado.web import Application, RequestHandler, url
    -
    -try:
    -    from io import BytesIO  # python 3
    -except ImportError:
    -    from cStringIO import StringIO as BytesIO
    -
    -
    -class HelloWorldHandler(RequestHandler):
    -    def get(self):
    -        name = self.get_argument("name", "world")
    -        self.set_header("Content-Type", "text/plain")
    -        self.finish("Hello %s!" % name)
    -
    -
    -class PostHandler(RequestHandler):
    -    def post(self):
    -        self.finish("Post arg1: %s, arg2: %s" % (
    -            self.get_argument("arg1"), self.get_argument("arg2")))
    -
    -
    -class ChunkHandler(RequestHandler):
    -    def get(self):
    -        self.write("asdf")
    -        self.flush()
    -        self.write("qwer")
    -
    -
    -class AuthHandler(RequestHandler):
    -    def get(self):
    -        self.finish(self.request.headers["Authorization"])
    -
    -
    -class CountdownHandler(RequestHandler):
    -    def get(self, count):
    -        count = int(count)
    -        if count > 0:
    -            self.redirect(self.reverse_url("countdown", count - 1))
    -        else:
    -            self.write("Zero")
    -
    -
    -class EchoPostHandler(RequestHandler):
    -    def post(self):
    -        self.write(self.request.body)
    -
    -
    -class UserAgentHandler(RequestHandler):
    -    def get(self):
    -        self.write(self.request.headers.get('User-Agent', 'User agent not set'))
    -
    -
    -class ContentLength304Handler(RequestHandler):
    -    def get(self):
    -        self.set_status(304)
    -        self.set_header('Content-Length', 42)
    -
    -    def _clear_headers_for_304(self):
    -        # Tornado strips content-length from 304 responses, but here we
    -        # want to simulate servers that include the headers anyway.
    -        pass
    -
    -
    -class AllMethodsHandler(RequestHandler):
    -    SUPPORTED_METHODS = RequestHandler.SUPPORTED_METHODS + ('OTHER',)
    -
    -    def method(self):
    -        self.write(self.request.method)
    -
    -    get = post = put = delete = options = patch = other = method
    -
    -# These tests end up getting run redundantly: once here with the default
    -# HTTPClient implementation, and then again in each implementation's own
    -# test suite.
    -
    -
    -class HTTPClientCommonTestCase(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([
    -            url("/hello", HelloWorldHandler),
    -            url("/post", PostHandler),
    -            url("/chunk", ChunkHandler),
    -            url("/auth", AuthHandler),
    -            url("/countdown/([0-9]+)", CountdownHandler, name="countdown"),
    -            url("/echopost", EchoPostHandler),
    -            url("/user_agent", UserAgentHandler),
    -            url("/304_with_content_length", ContentLength304Handler),
    -            url("/all_methods", AllMethodsHandler),
    -        ], gzip=True)
    -
    -    @skipOnTravis
    -    def test_hello_world(self):
    -        response = self.fetch("/hello")
    -        self.assertEqual(response.code, 200)
    -        self.assertEqual(response.headers["Content-Type"], "text/plain")
    -        self.assertEqual(response.body, b"Hello world!")
    -        self.assertEqual(int(response.request_time), 0)
    -
    -        response = self.fetch("/hello?name=Ben")
    -        self.assertEqual(response.body, b"Hello Ben!")
    -
    -    def test_streaming_callback(self):
    -        # streaming_callback is also tested in test_chunked
    -        chunks = []
    -        response = self.fetch("/hello",
    -                              streaming_callback=chunks.append)
    -        # with streaming_callback, data goes to the callback and not response.body
    -        self.assertEqual(chunks, [b"Hello world!"])
    -        self.assertFalse(response.body)
    -
    -    def test_post(self):
    -        response = self.fetch("/post", method="POST",
    -                              body="arg1=foo&arg2=bar")
    -        self.assertEqual(response.code, 200)
    -        self.assertEqual(response.body, b"Post arg1: foo, arg2: bar")
    -
    -    def test_chunked(self):
    -        response = self.fetch("/chunk")
    -        self.assertEqual(response.body, b"asdfqwer")
    -
    -        chunks = []
    -        response = self.fetch("/chunk",
    -                              streaming_callback=chunks.append)
    -        self.assertEqual(chunks, [b"asdf", b"qwer"])
    -        self.assertFalse(response.body)
    -
    -    def test_chunked_close(self):
    -        # test case in which chunks spread read-callback processing
    -        # over several ioloop iterations, but the connection is already closed.
    -        sock, port = bind_unused_port()
    -        with closing(sock):
    -            def write_response(stream, request_data):
    -                stream.write(b"""\
    -HTTP/1.1 200 OK
    -Transfer-Encoding: chunked
    -
    -1
    -1
    -1
    -2
    -0
    -
    -""".replace(b"\n", b"\r\n"), callback=stream.close)
    -
    -            def accept_callback(conn, address):
    -                # fake an HTTP server using chunked encoding where the final chunks
    -                # and connection close all happen at once
    -                stream = IOStream(conn, io_loop=self.io_loop)
    -                stream.read_until(b"\r\n\r\n",
    -                                  functools.partial(write_response, stream))
    -            netutil.add_accept_handler(sock, accept_callback, self.io_loop)
    -            self.http_client.fetch("http://127.0.0.1:%d/" % port, self.stop)
    -            resp = self.wait()
    -            resp.rethrow()
    -            self.assertEqual(resp.body, b"12")
    -            self.io_loop.remove_handler(sock.fileno())
    -
    -    def test_streaming_stack_context(self):
    -        chunks = []
    -        exc_info = []
    -
    -        def error_handler(typ, value, tb):
    -            exc_info.append((typ, value, tb))
    -            return True
    -
    -        def streaming_cb(chunk):
    -            chunks.append(chunk)
    -            if chunk == b'qwer':
    -                1 / 0
    -
    -        with ExceptionStackContext(error_handler):
    -            self.fetch('/chunk', streaming_callback=streaming_cb)
    -
    -        self.assertEqual(chunks, [b'asdf', b'qwer'])
    -        self.assertEqual(1, len(exc_info))
    -        self.assertIs(exc_info[0][0], ZeroDivisionError)
    -
    -    def test_basic_auth(self):
    -        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
    -                                    auth_password="open sesame").body,
    -                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    -
    -    def test_basic_auth_explicit_mode(self):
    -        self.assertEqual(self.fetch("/auth", auth_username="Aladdin",
    -                                    auth_password="open sesame",
    -                                    auth_mode="basic").body,
    -                         b"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==")
    -
    -    def test_unsupported_auth_mode(self):
    -        # curl and simple clients handle errors a bit differently; the
    -        # important thing is that they don't fall back to basic auth
    -        # on an unknown mode.
    -        with ExpectLog(gen_log, "uncaught exception", required=False):
    -            with self.assertRaises((ValueError, HTTPError)):
    -                response = self.fetch("/auth", auth_username="Aladdin",
    -                                      auth_password="open sesame",
    -                                      auth_mode="asdf")
    -                response.rethrow()
    -
    -    def test_follow_redirect(self):
    -        response = self.fetch("/countdown/2", follow_redirects=False)
    -        self.assertEqual(302, response.code)
    -        self.assertTrue(response.headers["Location"].endswith("/countdown/1"))
    -
    -        response = self.fetch("/countdown/2")
    -        self.assertEqual(200, response.code)
    -        self.assertTrue(response.effective_url.endswith("/countdown/0"))
    -        self.assertEqual(b"Zero", response.body)
    -
    -    def test_credentials_in_url(self):
    -        url = self.get_url("/auth").replace("http://", "http://me:secret@")
    -        self.http_client.fetch(url, self.stop)
    -        response = self.wait()
    -        self.assertEqual(b"Basic " + base64.b64encode(b"me:secret"),
    -                         response.body)
    -
    -    def test_body_encoding(self):
    -        unicode_body = u("\xe9")
    -        byte_body = binascii.a2b_hex(b"e9")
    -
    -        # unicode string in body gets converted to utf8
    -        response = self.fetch("/echopost", method="POST", body=unicode_body,
    -                              headers={"Content-Type": "application/blah"})
    -        self.assertEqual(response.headers["Content-Length"], "2")
    -        self.assertEqual(response.body, utf8(unicode_body))
    -
    -        # byte strings pass through directly
    -        response = self.fetch("/echopost", method="POST",
    -                              body=byte_body,
    -                              headers={"Content-Type": "application/blah"})
    -        self.assertEqual(response.headers["Content-Length"], "1")
    -        self.assertEqual(response.body, byte_body)
    -
    -        # Mixing unicode in headers and byte string bodies shouldn't
    -        # break anything
    -        response = self.fetch("/echopost", method="POST", body=byte_body,
    -                              headers={"Content-Type": "application/blah"},
    -                              user_agent=u("foo"))
    -        self.assertEqual(response.headers["Content-Length"], "1")
    -        self.assertEqual(response.body, byte_body)
    -
    -    def test_types(self):
    -        response = self.fetch("/hello")
    -        self.assertEqual(type(response.body), bytes_type)
    -        self.assertEqual(type(response.headers["Content-Type"]), str)
    -        self.assertEqual(type(response.code), int)
    -        self.assertEqual(type(response.effective_url), str)
    -
    -    def test_header_callback(self):
    -        first_line = []
    -        headers = {}
    -        chunks = []
    -
    -        def header_callback(header_line):
    -            if header_line.startswith('HTTP/'):
    -                first_line.append(header_line)
    -            elif header_line != '\r\n':
    -                k, v = header_line.split(':', 1)
    -                headers[k] = v.strip()
    -
    -        def streaming_callback(chunk):
    -            # All header callbacks are run before any streaming callbacks,
    -            # so the header data is available to process the data as it
    -            # comes in.
    -            self.assertEqual(headers['Content-Type'], 'text/html; charset=UTF-8')
    -            chunks.append(chunk)
    -
    -        self.fetch('/chunk', header_callback=header_callback,
    -                   streaming_callback=streaming_callback)
    -        self.assertEqual(len(first_line), 1)
    -        self.assertRegexpMatches(first_line[0], 'HTTP/1.[01] 200 OK\r\n')
    -        self.assertEqual(chunks, [b'asdf', b'qwer'])
    -
    -    def test_header_callback_stack_context(self):
    -        exc_info = []
    -
    -        def error_handler(typ, value, tb):
    -            exc_info.append((typ, value, tb))
    -            return True
    -
    -        def header_callback(header_line):
    -            if header_line.startswith('Content-Type:'):
    -                1 / 0
    -
    -        with ExceptionStackContext(error_handler):
    -            self.fetch('/chunk', header_callback=header_callback)
    -        self.assertEqual(len(exc_info), 1)
    -        self.assertIs(exc_info[0][0], ZeroDivisionError)
    -
    -    def test_configure_defaults(self):
    -        defaults = dict(user_agent='TestDefaultUserAgent', allow_ipv6=False)
    -        # Construct a new instance of the configured client class
    -        client = self.http_client.__class__(self.io_loop, force_instance=True,
    -                                            defaults=defaults)
    -        client.fetch(self.get_url('/user_agent'), callback=self.stop)
    -        response = self.wait()
    -        self.assertEqual(response.body, b'TestDefaultUserAgent')
    -        client.close()
    -
    -    def test_304_with_content_length(self):
    -        # According to the spec 304 responses SHOULD NOT include
    -        # Content-Length or other entity headers, but some servers do it
    -        # anyway.
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
    -        response = self.fetch('/304_with_content_length')
    -        self.assertEqual(response.code, 304)
    -        self.assertEqual(response.headers['Content-Length'], '42')
    -
    -    def test_final_callback_stack_context(self):
    -        # The final callback should be run outside of the httpclient's
-        # stack_context.  We want to ensure that there is no stack_context
    -        # between the user's callback and the IOLoop, so monkey-patch
    -        # IOLoop.handle_callback_exception and disable the test harness's
    -        # context with a NullContext.
    -        # Note that this does not apply to secondary callbacks (header
    -        # and streaming_callback), as errors there must be seen as errors
    -        # by the http client so it can clean up the connection.
    -        exc_info = []
    -
    -        def handle_callback_exception(callback):
    -            exc_info.append(sys.exc_info())
    -            self.stop()
    -        self.io_loop.handle_callback_exception = handle_callback_exception
    -        with NullContext():
    -            self.http_client.fetch(self.get_url('/hello'),
    -                                   lambda response: 1 / 0)
    -        self.wait()
    -        self.assertEqual(exc_info[0][0], ZeroDivisionError)
    -
    -    @gen_test
    -    def test_future_interface(self):
    -        response = yield self.http_client.fetch(self.get_url('/hello'))
    -        self.assertEqual(response.body, b'Hello world!')
    -
    -    @gen_test
    -    def test_future_http_error(self):
    -        with self.assertRaises(HTTPError) as context:
    -            yield self.http_client.fetch(self.get_url('/notfound'))
    -        self.assertEqual(context.exception.code, 404)
    -        self.assertEqual(context.exception.response.code, 404)
    -
    -    @gen_test
    -    def test_reuse_request_from_response(self):
    -        # The response.request attribute should be an HTTPRequest, not
    -        # a _RequestProxy.
    -        # This test uses self.http_client.fetch because self.fetch calls
    -        # self.get_url on the input unconditionally.
    -        url = self.get_url('/hello')
    -        response = yield self.http_client.fetch(url)
    -        self.assertEqual(response.request.url, url)
    -        self.assertTrue(isinstance(response.request, HTTPRequest))
    -        response2 = yield self.http_client.fetch(response.request)
    -        self.assertEqual(response2.body, b'Hello world!')
    -
    -    def test_all_methods(self):
    -        for method in ['GET', 'DELETE', 'OPTIONS']:
    -            response = self.fetch('/all_methods', method=method)
    -            self.assertEqual(response.body, utf8(method))
    -        for method in ['POST', 'PUT', 'PATCH']:
    -            response = self.fetch('/all_methods', method=method, body=b'')
    -            self.assertEqual(response.body, utf8(method))
    -        response = self.fetch('/all_methods', method='HEAD')
    -        self.assertEqual(response.body, b'')
    -        response = self.fetch('/all_methods', method='OTHER',
    -                              allow_nonstandard_methods=True)
    -        self.assertEqual(response.body, b'OTHER')
    -
    -    @gen_test
    -    def test_body(self):
    -        hello_url = self.get_url('/hello')
    -        with self.assertRaises(AssertionError) as context:
    -            yield self.http_client.fetch(hello_url, body='data')
    -
    -        self.assertTrue('must be empty' in str(context.exception))
    -
    -        with self.assertRaises(AssertionError) as context:
    -            yield self.http_client.fetch(hello_url, method='POST')
    -
    -        self.assertTrue('must not be empty' in str(context.exception))
    -
    -
    -class RequestProxyTest(unittest.TestCase):
    -    def test_request_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/',
    -                                          user_agent='foo'),
    -                              dict())
    -        self.assertEqual(proxy.user_agent, 'foo')
    -
    -    def test_default_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
    -                              dict(network_interface='foo'))
    -        self.assertEqual(proxy.network_interface, 'foo')
    -
    -    def test_both_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/',
    -                                          proxy_host='foo'),
    -                              dict(proxy_host='bar'))
    -        self.assertEqual(proxy.proxy_host, 'foo')
    -
    -    def test_neither_set(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
    -                              dict())
    -        self.assertIs(proxy.auth_username, None)
    -
    -    def test_bad_attribute(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'),
    -                              dict())
    -        with self.assertRaises(AttributeError):
    -            proxy.foo
    -
    -    def test_defaults_none(self):
    -        proxy = _RequestProxy(HTTPRequest('http://example.com/'), None)
    -        self.assertIs(proxy.auth_username, None)
    -
    -
    -class HTTPResponseTestCase(unittest.TestCase):
    -    def test_str(self):
    -        response = HTTPResponse(HTTPRequest('http://example.com'),
    -                                200, headers={}, buffer=BytesIO())
    -        s = str(response)
    -        self.assertTrue(s.startswith('HTTPResponse('))
    -        self.assertIn('code=200', s)
    -
    -
    -class SyncHTTPClientTest(unittest.TestCase):
    -    def setUp(self):
    -        if IOLoop.configured_class().__name__ in ('TwistedIOLoop',
    -                                                  'AsyncIOMainLoop'):
    -            # TwistedIOLoop only supports the global reactor, so we can't have
    -            # separate IOLoops for client and server threads.
    -            # AsyncIOMainLoop doesn't work with the default policy
    -            # (although it could with some tweaks to this test and a
    -            # policy that created loops for non-main threads).
    -            raise unittest.SkipTest(
    -                'Sync HTTPClient not compatible with TwistedIOLoop or '
    -                'AsyncIOMainLoop')
    -        self.server_ioloop = IOLoop()
    -
    -        sock, self.port = bind_unused_port()
    -        app = Application([('/', HelloWorldHandler)])
    -        self.server = HTTPServer(app, io_loop=self.server_ioloop)
    -        self.server.add_socket(sock)
    -
    -        self.server_thread = threading.Thread(target=self.server_ioloop.start)
    -        self.server_thread.start()
    -
    -        self.http_client = HTTPClient()
    -
    -    def tearDown(self):
    -        def stop_server():
    -            self.server.stop()
    -            self.server_ioloop.stop()
    -        self.server_ioloop.add_callback(stop_server)
    -        self.server_thread.join()
    -        self.http_client.close()
    -        self.server_ioloop.close(all_fds=True)
    -
    -    def get_url(self, path):
    -        return 'http://localhost:%d%s' % (self.port, path)
    -
    -    def test_sync_client(self):
    -        response = self.http_client.fetch(self.get_url('/'))
    -        self.assertEqual(b'Hello world!', response.body)
    -
    -    def test_sync_client_error(self):
    -        # Synchronous HTTPClient raises errors directly; no need for
    -        # response.rethrow()
    -        with self.assertRaises(HTTPError) as assertion:
    -            self.http_client.fetch(self.get_url('/notfound'))
    -        self.assertEqual(assertion.exception.code, 404)
    -
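
The comment above is the behavioral contract of the blocking client: fetch() raises HTTPError itself rather than returning a response to rethrow. A minimal usage sketch (URL illustrative):

    from tornado.httpclient import HTTPClient, HTTPError

    client = HTTPClient()
    try:
        response = client.fetch("http://example.com/")  # blocks until complete
        print(response.body)
    except HTTPError as error:
        print("request failed:", error)
    finally:
        client.close()
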
    -
    -class HTTPRequestTestCase(unittest.TestCase):
    -    def test_headers(self):
    -        request = HTTPRequest('http://example.com', headers={'foo': 'bar'})
    -        self.assertEqual(request.headers, {'foo': 'bar'})
    -
    -    def test_headers_setter(self):
    -        request = HTTPRequest('http://example.com')
    -        request.headers = {'bar': 'baz'}
    -        self.assertEqual(request.headers, {'bar': 'baz'})
    -
    -    def test_null_headers_setter(self):
    -        request = HTTPRequest('http://example.com')
    -        request.headers = None
    -        self.assertEqual(request.headers, {})
    -
    -    def test_body(self):
    -        request = HTTPRequest('http://example.com', body='foo')
    -        self.assertEqual(request.body, utf8('foo'))
    -
    -    def test_body_setter(self):
    -        request = HTTPRequest('http://example.com')
    -        request.body = 'foo'
    -        self.assertEqual(request.body, utf8('foo'))
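
The file above follows one pattern throughout: tornado.testing.AsyncHTTPTestCase spins up a real HTTP server around get_app() and drives it with self.fetch(). A self-contained sketch of that pattern (handler and route are illustrative, not part of this diff):

    from tornado.testing import AsyncHTTPTestCase
    from tornado.web import Application, RequestHandler

    class PingHandler(RequestHandler):
        def get(self):
            self.write("pong")

    class PingTest(AsyncHTTPTestCase):
        def get_app(self):
            # The test case binds this app to an unused port for us.
            return Application([("/ping", PingHandler)])

        def test_ping(self):
            response = self.fetch("/ping")  # synchronous wrapper for tests
            self.assertEqual(response.body, b"pong")
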
    diff --git a/rosbridge_server/src/tornado/test/httpserver_test.py b/rosbridge_server/src/tornado/test/httpserver_test.py
    deleted file mode 100644
    index 63a55291b..000000000
    --- a/rosbridge_server/src/tornado/test/httpserver_test.py
    +++ /dev/null
    @@ -1,1035 +0,0 @@
    -#!/usr/bin/env python
    -
    -
    -from __future__ import absolute_import, division, print_function, with_statement
    -from tornado import netutil
    -from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str
    -from tornado import gen
    -from tornado.http1connection import HTTP1Connection
    -from tornado.httpserver import HTTPServer
    -from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine
    -from tornado.iostream import IOStream
    -from tornado.log import gen_log, app_log
    -from tornado.netutil import ssl_options_to_context
    -from tornado.simple_httpclient import SimpleAsyncHTTPClient
    -from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test
    -from tornado.test.util import unittest, skipOnTravis
    -from tornado.util import u, bytes_type
    -from tornado.web import Application, RequestHandler, asynchronous, stream_request_body
    -from contextlib import closing
    -import datetime
    -import gzip
    -import os
    -import shutil
    -import socket
    -import ssl
    -import sys
    -import tempfile
    -
    -try:
    -    from io import BytesIO  # python 3
    -except ImportError:
    -    from cStringIO import StringIO as BytesIO  # python 2
    -
    -
    -def read_stream_body(stream, callback):
    -    """Reads an HTTP response from `stream` and runs callback with its
    -    headers and body."""
    -    chunks = []
    -    class Delegate(HTTPMessageDelegate):
    -        def headers_received(self, start_line, headers):
    -            self.headers = headers
    -
    -        def data_received(self, chunk):
    -            chunks.append(chunk)
    -
    -        def finish(self):
    -            callback((self.headers, b''.join(chunks)))
    -    conn = HTTP1Connection(stream, True)
    -    conn.read_response(Delegate())
    -
    -
    -class HandlerBaseTestCase(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([('/', self.__class__.Handler)])
    -
    -    def fetch_json(self, *args, **kwargs):
    -        response = self.fetch(*args, **kwargs)
    -        response.rethrow()
    -        return json_decode(response.body)
    -
    -
    -class HelloWorldRequestHandler(RequestHandler):
    -    def initialize(self, protocol="http"):
    -        self.expected_protocol = protocol
    -
    -    def get(self):
    -        if self.request.protocol != self.expected_protocol:
    -            raise Exception("unexpected protocol")
    -        self.finish("Hello world")
    -
    -    def post(self):
    -        self.finish("Got %d bytes in POST" % len(self.request.body))
    -
    -
    -# In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2
    -# ClientHello messages, which are rejected by SSLv3 and TLSv1
    -# servers.  Note that while the OPENSSL_VERSION_INFO was formally
    -# introduced in python3.2, it was present but undocumented in
    -# python 2.7
    -skipIfOldSSL = unittest.skipIf(
    -    getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0),
    -    "old version of ssl module and/or openssl")
    -
    -
    -class BaseSSLTest(AsyncHTTPSTestCase):
    -    def get_app(self):
    -        return Application([('/', HelloWorldRequestHandler,
    -                             dict(protocol="https"))])
    -
    -
    -class SSLTestMixin(object):
    -    def get_ssl_options(self):
    -        return dict(ssl_version=self.get_ssl_version(),
    -                    **AsyncHTTPSTestCase.get_ssl_options())
    -
    -    def get_ssl_version(self):
    -        raise NotImplementedError()
    -
    -    def test_ssl(self):
    -        response = self.fetch('/')
    -        self.assertEqual(response.body, b"Hello world")
    -
    -    def test_large_post(self):
    -        response = self.fetch('/',
    -                              method='POST',
    -                              body='A' * 5000)
    -        self.assertEqual(response.body, b"Got 5000 bytes in POST")
    -
    -    def test_non_ssl_request(self):
    -        # Make sure the server closes the connection when it gets a non-ssl
    -        # connection, rather than waiting for a timeout or otherwise
    -        # misbehaving.
    -        with ExpectLog(gen_log, '(SSL Error|uncaught exception)'):
    -            with ExpectLog(gen_log, 'Uncaught exception', required=False):
    -                self.http_client.fetch(
    -                    self.get_url("/").replace('https:', 'http:'),
    -                    self.stop,
    -                    request_timeout=3600,
    -                    connect_timeout=3600)
    -                response = self.wait()
    -        self.assertEqual(response.code, 599)
    -
    -# Python's SSL implementation differs significantly between versions.
    -# For example, SSLv3 and TLSv1 throw an exception if you try to read
    -# from the socket before the handshake is complete, but the default
    -# of SSLv23 allows it.
    -
    -
    -class SSLv23Test(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_version(self):
    -        return ssl.PROTOCOL_SSLv23
    -
    -
    -@skipIfOldSSL
    -class SSLv3Test(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_version(self):
    -        return ssl.PROTOCOL_SSLv3
    -
    -
    -@skipIfOldSSL
    -class TLSv1Test(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_version(self):
    -        return ssl.PROTOCOL_TLSv1
    -
    -
    -@unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present')
    -class SSLContextTest(BaseSSLTest, SSLTestMixin):
    -    def get_ssl_options(self):
    -        context = ssl_options_to_context(
    -            AsyncHTTPSTestCase.get_ssl_options(self))
    -        assert isinstance(context, ssl.SSLContext)
    -        return context
    -
    -
    -class BadSSLOptionsTest(unittest.TestCase):
    -    def test_missing_arguments(self):
    -        application = Application()
    -        self.assertRaises(KeyError, HTTPServer, application, ssl_options={
    -            "keyfile": "/__missing__.crt",
    -        })
    -
    -    def test_missing_key(self):
    -        """A missing SSL key should cause an immediate exception."""
    -
    -        application = Application()
    -        module_dir = os.path.dirname(__file__)
    -        existing_certificate = os.path.join(module_dir, 'test.crt')
    -
    -        self.assertRaises(ValueError, HTTPServer, application, ssl_options={
    -                          "certfile": "/__mising__.crt",
    -                          })
    -        self.assertRaises(ValueError, HTTPServer, application, ssl_options={
    -                          "certfile": existing_certificate,
    -                          "keyfile": "/__missing__.key"
    -                          })
    -
    -        # This actually works because both files exist
    -        HTTPServer(application, ssl_options={
    -                   "certfile": existing_certificate,
    -                   "keyfile": existing_certificate
    -                   })
    -
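
As the two classes above show, HTTPServer accepts ssl_options either as a keyword dict (validated eagerly, hence the KeyError/ValueError assertions) or as a ready-made ssl.SSLContext. A sketch of both forms, with placeholder certificate paths:

    import ssl
    from tornado.httpserver import HTTPServer
    from tornado.web import Application

    app = Application([])

    # Dict form: keys mirror ssl.wrap_socket arguments.
    server = HTTPServer(app, ssl_options={
        "certfile": "server.crt",  # placeholder path
        "keyfile": "server.key",   # placeholder path
    })

    # SSLContext form, as exercised by SSLContextTest.
    ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
    ctx.load_cert_chain("server.crt", "server.key")
    server = HTTPServer(app, ssl_options=ctx)
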
    -
    -class MultipartTestHandler(RequestHandler):
    -    def post(self):
    -        self.finish({"header": self.request.headers["X-Header-Encoding-Test"],
    -                     "argument": self.get_argument("argument"),
    -                     "filename": self.request.files["files"][0].filename,
    -                     "filebody": _unicode(self.request.files["files"][0]["body"]),
    -                     })
    -
    -
    -# This test is also called from wsgi_test
    -class HTTPConnectionTest(AsyncHTTPTestCase):
    -    def get_handlers(self):
    -        return [("/multipart", MultipartTestHandler),
    -                ("/hello", HelloWorldRequestHandler)]
    -
    -    def get_app(self):
    -        return Application(self.get_handlers())
    -
    -    def raw_fetch(self, headers, body):
    -        with closing(IOStream(socket.socket())) as stream:
    -            stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
    -            self.wait()
    -            stream.write(
    -                b"\r\n".join(headers +
    -                             [utf8("Content-Length: %d\r\n" % len(body))]) +
    -                b"\r\n" + body)
    -            read_stream_body(stream, self.stop)
    -            headers, body = self.wait()
    -            return body
    -
    -    def test_multipart_form(self):
    -        # Encodings here are tricky:  Headers are latin1, bodies can be
    -        # anything (we use utf8 by default).
    -        response = self.raw_fetch([
    -            b"POST /multipart HTTP/1.0",
    -            b"Content-Type: multipart/form-data; boundary=1234567890",
    -            b"X-Header-encoding-test: \xe9",
    -        ],
    -            b"\r\n".join([
    -                b"Content-Disposition: form-data; name=argument",
    -                b"",
    -                u("\u00e1").encode("utf-8"),
    -                b"--1234567890",
    -                u('Content-Disposition: form-data; name="files"; filename="\u00f3"').encode("utf8"),
    -                b"",
    -                u("\u00fa").encode("utf-8"),
    -                b"--1234567890--",
    -                b"",
    -            ]))
    -        data = json_decode(response)
    -        self.assertEqual(u("\u00e9"), data["header"])
    -        self.assertEqual(u("\u00e1"), data["argument"])
    -        self.assertEqual(u("\u00f3"), data["filename"])
    -        self.assertEqual(u("\u00fa"), data["filebody"])
    -
    -    def test_100_continue(self):
    -        # Run through a 100-continue interaction by hand:
    -        # When given Expect: 100-continue, we get a 100 response after the
    -        # headers, and then the real response after the body.
    -        stream = IOStream(socket.socket(), io_loop=self.io_loop)
    -        stream.connect(("localhost", self.get_http_port()), callback=self.stop)
    -        self.wait()
    -        stream.write(b"\r\n".join([b"POST /hello HTTP/1.1",
    -                                   b"Content-Length: 1024",
    -                                   b"Expect: 100-continue",
    -                                   b"Connection: close",
    -                                   b"\r\n"]), callback=self.stop)
    -        self.wait()
    -        stream.read_until(b"\r\n\r\n", self.stop)
    -        data = self.wait()
    -        self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data)
    -        stream.write(b"a" * 1024)
    -        stream.read_until(b"\r\n", self.stop)
    -        first_line = self.wait()
    -        self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line)
    -        stream.read_until(b"\r\n\r\n", self.stop)
    -        header_data = self.wait()
    -        headers = HTTPHeaders.parse(native_str(header_data.decode('latin1')))
    -        stream.read_bytes(int(headers["Content-Length"]), self.stop)
    -        body = self.wait()
    -        self.assertEqual(body, b"Got 1024 bytes in POST")
    -        stream.close()
    -
    -
    -class EchoHandler(RequestHandler):
    -    def get(self):
    -        self.write(recursive_unicode(self.request.arguments))
    -
    -    def post(self):
    -        self.write(recursive_unicode(self.request.arguments))
    -
    -
    -class TypeCheckHandler(RequestHandler):
    -    def prepare(self):
    -        self.errors = {}
    -        fields = [
    -            ('method', str),
    -            ('uri', str),
    -            ('version', str),
    -            ('remote_ip', str),
    -            ('protocol', str),
    -            ('host', str),
    -            ('path', str),
    -            ('query', str),
    -        ]
    -        for field, expected_type in fields:
    -            self.check_type(field, getattr(self.request, field), expected_type)
    -
    -        self.check_type('header_key', list(self.request.headers.keys())[0], str)
    -        self.check_type('header_value', list(self.request.headers.values())[0], str)
    -
    -        self.check_type('cookie_key', list(self.request.cookies.keys())[0], str)
    -        self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str)
    -        # secure cookies
    -
    -        self.check_type('arg_key', list(self.request.arguments.keys())[0], str)
    -        self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes_type)
    -
    -    def post(self):
    -        self.check_type('body', self.request.body, bytes_type)
    -        self.write(self.errors)
    -
    -    def get(self):
    -        self.write(self.errors)
    -
    -    def check_type(self, name, obj, expected_type):
    -        actual_type = type(obj)
    -        if expected_type != actual_type:
    -            self.errors[name] = "expected %s, got %s" % (expected_type,
    -                                                         actual_type)
    -
    -
    -class HTTPServerTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([("/echo", EchoHandler),
    -                            ("/typecheck", TypeCheckHandler),
    -                            ("//doubleslash", EchoHandler),
    -                            ])
    -
    -    def test_query_string_encoding(self):
    -        response = self.fetch("/echo?foo=%C3%A9")
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {u("foo"): [u("\u00e9")]})
    -
    -    def test_empty_query_string(self):
    -        response = self.fetch("/echo?foo=&foo=")
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {u("foo"): [u(""), u("")]})
    -
    -    def test_empty_post_parameters(self):
    -        response = self.fetch("/echo", method="POST", body="foo=&bar=")
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {u("foo"): [u("")], u("bar"): [u("")]})
    -
    -    def test_types(self):
    -        headers = {"Cookie": "foo=bar"}
    -        response = self.fetch("/typecheck?foo=bar", headers=headers)
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {})
    -
    -        response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers)
    -        data = json_decode(response.body)
    -        self.assertEqual(data, {})
    -
    -    def test_double_slash(self):
    -        # urlparse.urlsplit (which tornado.httpserver used to use
    -        # incorrectly) would parse paths beginning with "//" as
    -        # protocol-relative urls.
    -        response = self.fetch("//doubleslash")
    -        self.assertEqual(200, response.code)
    -        self.assertEqual(json_decode(response.body), {})
    -
    -    def test_malformed_body(self):
    -        # parse_qs is pretty forgiving, but it will fail on python 3
    -        # if the data is not utf8.  On python 2 parse_qs will work,
    -        # but then the recursive_unicode call in EchoHandler will
    -        # fail.
    -        if str is bytes_type:
    -            return
    -        with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'):
    -            response = self.fetch(
    -                '/echo', method="POST",
    -                headers={'Content-Type': 'application/x-www-form-urlencoded'},
    -                body=b'\xe9')
    -        self.assertEqual(200, response.code)
    -        self.assertEqual(b'{}', response.body)
    -
    -
    -class HTTPServerRawTest(AsyncHTTPTestCase):
    -    def get_app(self):
    -        return Application([
    -            ('/echo', EchoHandler),
    -        ])
    -
    -    def setUp(self):
    -        super(HTTPServerRawTest, self).setUp()
    -        self.stream = IOStream(socket.socket())
    -        self.stream.connect(('localhost', self.get_http_port()), self.stop)
    -        self.wait()
    -
    -    def tearDown(self):
    -        self.stream.close()
    -        super(HTTPServerRawTest, self).tearDown()
    -
    -    def test_empty_request(self):
    -        self.stream.close()
    -        self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop)
    -        self.wait()
    -
    -    def test_malformed_first_line(self):
    -        with ExpectLog(gen_log, '.*Malformed HTTP request line'):
    -            self.stream.write(b'asdf\r\n\r\n')
    -            # TODO: need an async version of ExpectLog so we don't need
    -            # hard-coded timeouts here.
    -            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
    -                                     self.stop)
    -            self.wait()
    -
    -    def test_malformed_headers(self):
    -        with ExpectLog(gen_log, '.*Malformed HTTP headers'):
    -            self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n')
    -            self.io_loop.add_timeout(datetime.timedelta(seconds=0.01),
    -                                     self.stop)
    -            self.wait()
    -
    -    def test_chunked_request_body(self):
    -        # Chunked requests are not widely supported and we don't have a way
    -        # to generate them in AsyncHTTPClient, but HTTPServer will read them.
    -        self.stream.write(b"""\
    -POST /echo HTTP/1.1
    -Transfer-Encoding: chunked
    -Content-Type: application/x-www-form-urlencoded
    -
    -4
    -foo=
    -3
    -bar
    -0
    -
    -""".replace(b"\n", b"\r\n"))
    -        read_stream_body(self.stream, self.stop)
    -        headers, response = self.wait()
    -        self.assertEqual(json_decode(response), {u('foo'): [u('bar')]})
    -
    -
    -class XHeaderTest(HandlerBaseTestCase):
    -    class Handler(RequestHandler):
    -        def get(self):
    -            self.write(dict(remote_ip=self.request.remote_ip,
    -                            remote_protocol=self.request.protocol))
    -
    -    def get_httpserver_options(self):
    -        return dict(xheaders=True)
    -
    -    def test_ip_headers(self):
    -        self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1")
    -
    -        valid_ipv4 = {"X-Real-IP": "4.4.4.4"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv4)["remote_ip"],
    -            "4.4.4.4")
    -
    -        valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"],
    -            "4.4.4.4")
    -
    -        valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv6)["remote_ip"],
    -            "2620:0:1cfe:face:b00c::3")
    -
    -        valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"}
    -        self.assertEqual(
    -            self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"],
    -            "2620:0:1cfe:face:b00c::3")
    -
-        invalid_chars = {"X-Real-IP": "4.4.4.4<script>"}
-        self.assertEqual(
-            self.fetch_json("/", headers=invalid_chars)["remote_ip"],
-            "127.0.0.1")
diff --git a/rosbridge_server/src/tornado/web.py b/rosbridge_server/src/tornado/web.py
deleted file mode 100644
--- a/rosbridge_server/src/tornado/web.py
+++ /dev/null
-        if js_files:
-            # Maintain order of JavaScript files given by modules
-            paths = []
-            unique_paths = set()
-            for path in js_files:
-                if not is_absolute(path):
-                    path = self.static_url(path)
-                if path not in unique_paths:
-                    paths.append(path)
-                    unique_paths.add(path)
-            js = ''.join('<script src="' + escape.xhtml_escape(p) +
-                         '" type="text/javascript"></script>'
-                         for p in paths)
-            sloc = html.rindex(b'</body>')
-            html = html[:sloc] + utf8(js) + b'\n' + html[sloc:]
-        if js_embed:
-            js = b'<script type="text/javascript">\n//<![CDATA[\n' + \
-                b'\n'.join(js_embed) + b'\n//]]>\n</script>'
-            sloc = html.rindex(b'</body>')
    -            html = html[:sloc] + js + b'\n' + html[sloc:]
    -        if css_files:
    -            paths = []
    -            unique_paths = set()
    -            for path in css_files:
    -                if not is_absolute(path):
    -                    path = self.static_url(path)
    -                if path not in unique_paths:
    -                    paths.append(path)
    -                    unique_paths.add(path)
-            css = ''.join('<link href="' + escape.xhtml_escape(p) + '" '
-                          'type="text/css" rel="stylesheet"/>'
    -                          for p in paths)
-            hloc = html.index(b'</head>')
    -            html = html[:hloc] + utf8(css) + b'\n' + html[hloc:]
    -        if css_embed:
-            css = b'<style type="text/css">\n' + \
-                b'\n'.join(css_embed) + b'\n</style>'
-            hloc = html.index(b'</head>')
    -            html = html[:hloc] + css + b'\n' + html[hloc:]
    -        if html_heads:
-            hloc = html.index(b'</head>')
    -            html = html[:hloc] + b''.join(html_heads) + b'\n' + html[hloc:]
    -        if html_bodies:
-            hloc = html.index(b'</body>')
    -            html = html[:hloc] + b''.join(html_bodies) + b'\n' + html[hloc:]
    -        self.finish(html)
    -
    -    def render_string(self, template_name, **kwargs):
    -        """Generate the given template with the given arguments.
    -
    -        We return the generated byte string (in utf8). To generate and
    -        write a template as a response, use render() above.
    -        """
    -        # If no template_path is specified, use the path of the calling file
    -        template_path = self.get_template_path()
    -        if not template_path:
    -            frame = sys._getframe(0)
    -            web_file = frame.f_code.co_filename
    -            while frame.f_code.co_filename == web_file:
    -                frame = frame.f_back
    -            template_path = os.path.dirname(frame.f_code.co_filename)
    -        with RequestHandler._template_loader_lock:
    -            if template_path not in RequestHandler._template_loaders:
    -                loader = self.create_template_loader(template_path)
    -                RequestHandler._template_loaders[template_path] = loader
    -            else:
    -                loader = RequestHandler._template_loaders[template_path]
    -        t = loader.load(template_name)
    -        namespace = self.get_template_namespace()
    -        namespace.update(kwargs)
    -        return t.generate(**namespace)
    -
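
A hedged example of calling render_string directly, e.g. to embed a rendered partial in a JSON response (template name and variables are illustrative):

    from tornado.web import RequestHandler

    class FragmentHandler(RequestHandler):
        def get(self):
            # render_string returns utf8 bytes rather than writing a response.
            html = self.render_string("item.html", name="example")
            self.write({"fragment": html.decode("utf8")})
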
    -    def get_template_namespace(self):
    -        """Returns a dictionary to be used as the default template namespace.
    -
    -        May be overridden by subclasses to add or modify values.
    -
    -        The results of this method will be combined with additional
    -        defaults in the `tornado.template` module and keyword arguments
    -        to `render` or `render_string`.
    -        """
    -        namespace = dict(
    -            handler=self,
    -            request=self.request,
    -            current_user=self.current_user,
    -            locale=self.locale,
    -            _=self.locale.translate,
    -            static_url=self.static_url,
    -            xsrf_form_html=self.xsrf_form_html,
    -            reverse_url=self.reverse_url
    -        )
    -        namespace.update(self.ui)
    -        return namespace
    -
    -    def create_template_loader(self, template_path):
    -        """Returns a new template loader for the given path.
    -
    -        May be overridden by subclasses.  By default returns a
    -        directory-based loader on the given path, using the
    -        ``autoescape`` application setting.  If a ``template_loader``
    -        application setting is supplied, uses that instead.
    -        """
    -        settings = self.application.settings
    -        if "template_loader" in settings:
    -            return settings["template_loader"]
    -        kwargs = {}
    -        if "autoescape" in settings:
    -            # autoescape=None means "no escaping", so we have to be sure
    -            # to only pass this kwarg if the user asked for it.
    -            kwargs["autoescape"] = settings["autoescape"]
    -        return template.Loader(template_path, **kwargs)
    -
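
A sketch of the template_loader escape hatch described above, using tornado.template.DictLoader (the inline template is illustrative):

    from tornado.template import DictLoader
    from tornado.web import Application

    loader = DictLoader({"index.html": "Hello {{ name }}!"})
    # create_template_loader() will return this loader unchanged.
    app = Application(handlers=[], template_loader=loader)
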
    -    def flush(self, include_footers=False, callback=None):
    -        """Flushes the current output buffer to the network.
    -
    -        The ``callback`` argument, if given, can be used for flow control:
    -        it will be run when all flushed data has been written to the socket.
    -        Note that only one flush callback can be outstanding at a time;
    -        if another flush occurs before the previous flush's callback
    -        has been run, the previous callback will be discarded.
    -
    -        .. versionchanged:: 4.0
    -           Now returns a `.Future` if no callback is given.
    -        """
    -        chunk = b"".join(self._write_buffer)
    -        self._write_buffer = []
    -        if not self._headers_written:
    -            self._headers_written = True
    -            for transform in self._transforms:
    -                self._status_code, self._headers, chunk = \
    -                    transform.transform_first_chunk(
    -                        self._status_code, self._headers, chunk, include_footers)
    -            # Ignore the chunk and only write the headers for HEAD requests
    -            if self.request.method == "HEAD":
    -                chunk = None
    -
    -            # Finalize the cookie headers (which have been stored in a side
    -            # object so an outgoing cookie could be overwritten before it
    -            # is sent).
    -            if hasattr(self, "_new_cookie"):
    -                for cookie in self._new_cookie.values():
    -                    self.add_header("Set-Cookie", cookie.OutputString(None))
    -
    -            start_line = httputil.ResponseStartLine(self.request.version,
    -                                                    self._status_code,
    -                                                    self._reason)
    -            return self.request.connection.write_headers(
    -                start_line, self._headers, chunk, callback=callback)
    -        else:
    -            for transform in self._transforms:
    -                chunk = transform.transform_chunk(chunk, include_footers)
    -            # Ignore the chunk and only write the headers for HEAD requests
    -            if self.request.method != "HEAD":
    -                return self.request.connection.write(chunk, callback=callback)
    -            else:
    -                future = Future()
    -                future.set_result(None)
    -                return future
    -
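
Because flush() returns a Future when no callback is given (per the 4.0 note above), a coroutine can use it for backpressure while streaming; the chunk source here is illustrative:

    from tornado import gen
    from tornado.web import RequestHandler

    class StreamHandler(RequestHandler):
        @gen.coroutine
        def get(self):
            for chunk in [b"part-1", b"part-2"]:
                self.write(chunk)
                yield self.flush()  # resumes once the chunk reaches the socket
            self.finish()
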
    -    def finish(self, chunk=None):
    -        """Finishes this response, ending the HTTP request."""
    -        if self._finished:
    -            raise RuntimeError("finish() called twice.  May be caused "
    -                               "by using async operations without the "
    -                               "@asynchronous decorator.")
    -
    -        if chunk is not None:
    -            self.write(chunk)
    -
    -        # Automatically support ETags and add the Content-Length header if
    -        # we have not flushed any content yet.
    -        if not self._headers_written:
    -            if (self._status_code == 200 and
    -                self.request.method in ("GET", "HEAD") and
    -                    "Etag" not in self._headers):
    -                self.set_etag_header()
    -                if self.check_etag_header():
    -                    self._write_buffer = []
    -                    self.set_status(304)
    -            if self._status_code == 304:
    -                assert not self._write_buffer, "Cannot send body with 304"
    -                self._clear_headers_for_304()
    -            elif "Content-Length" not in self._headers:
    -                content_length = sum(len(part) for part in self._write_buffer)
    -                self.set_header("Content-Length", content_length)
    -
    -        if hasattr(self.request, "connection"):
    -            # Now that the request is finished, clear the callback we
    -            # set on the HTTPConnection (which would otherwise prevent the
    -            # garbage collection of the RequestHandler when there
    -            # are keepalive connections)
    -            self.request.connection.set_close_callback(None)
    -
    -        self.flush(include_footers=True)
    -        self.request.finish()
    -        self._log()
    -        self._finished = True
    -        self.on_finish()
    -        # Break up a reference cycle between this handler and the
    -        # _ui_module closures to allow for faster GC on CPython.
    -        self.ui = None
    -
    -    def send_error(self, status_code=500, **kwargs):
    -        """Sends the given HTTP error code to the browser.
    -
    -        If `flush()` has already been called, it is not possible to send
    -        an error, so this method will simply terminate the response.
    -        If output has been written but not yet flushed, it will be discarded
    -        and replaced with the error page.
    -
    -        Override `write_error()` to customize the error page that is returned.
    -        Additional keyword arguments are passed through to `write_error`.
    -        """
    -        if self._headers_written:
    -            gen_log.error("Cannot send error response after headers written")
    -            if not self._finished:
    -                self.finish()
    -            return
    -        self.clear()
    -
    -        reason = None
    -        if 'exc_info' in kwargs:
    -            exception = kwargs['exc_info'][1]
    -            if isinstance(exception, HTTPError) and exception.reason:
    -                reason = exception.reason
    -        self.set_status(status_code, reason=reason)
    -        try:
    -            self.write_error(status_code, **kwargs)
    -        except Exception:
    -            app_log.error("Uncaught exception in write_error", exc_info=True)
    -        if not self._finished:
    -            self.finish()
    -
    -    def write_error(self, status_code, **kwargs):
    -        """Override to implement custom error pages.
    -
    -        ``write_error`` may call `write`, `render`, `set_header`, etc
    -        to produce output as usual.
    -
    -        If this error was caused by an uncaught exception (including
    -        HTTPError), an ``exc_info`` triple will be available as
    -        ``kwargs["exc_info"]``.  Note that this exception may not be
    -        the "current" exception for purposes of methods like
    -        ``sys.exc_info()`` or ``traceback.format_exc``.
    -        """
    -        if self.settings.get("serve_traceback") and "exc_info" in kwargs:
    -            # in debug mode, try to send a traceback
    -            self.set_header('Content-Type', 'text/plain')
    -            for line in traceback.format_exception(*kwargs["exc_info"]):
    -                self.write(line)
    -            self.finish()
    -        else:
    -            self.finish("%(code)d: %(message)s"
    -                        "%(code)d: %(message)s" % {
    -                            "code": status_code,
    -                            "message": self._reason,
    -                        })
    -
    -    @property
    -    def locale(self):
    -        """The local for the current session.
    -
    -        Determined by either `get_user_locale`, which you can override to
    -        set the locale based on, e.g., a user preference stored in a
    -        database, or `get_browser_locale`, which uses the ``Accept-Language``
    -        header.
    -        """
    -        if not hasattr(self, "_locale"):
    -            self._locale = self.get_user_locale()
    -            if not self._locale:
    -                self._locale = self.get_browser_locale()
    -                assert self._locale
    -        return self._locale
    -
    -    def get_user_locale(self):
    -        """Override to determine the locale from the authenticated user.
    -
    -        If None is returned, we fall back to `get_browser_locale()`.
    -
    -        This method should return a `tornado.locale.Locale` object,
    -        most likely obtained via a call like ``tornado.locale.get("en")``
    -        """
    -        return None
    -
    -    def get_browser_locale(self, default="en_US"):
    -        """Determines the user's locale from ``Accept-Language`` header.
    -
    -        See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.4
    -        """
    -        if "Accept-Language" in self.request.headers:
    -            languages = self.request.headers["Accept-Language"].split(",")
    -            locales = []
    -            for language in languages:
    -                parts = language.strip().split(";")
    -                if len(parts) > 1 and parts[1].startswith("q="):
    -                    try:
    -                        score = float(parts[1][2:])
    -                    except (ValueError, TypeError):
    -                        score = 0.0
    -                else:
    -                    score = 1.0
    -                locales.append((parts[0], score))
    -            if locales:
    -                locales.sort(key=lambda pair: pair[1], reverse=True)
    -                codes = [l[0] for l in locales]
    -                return locale.get(*codes)
    -        return locale.get(default)
    -
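
A standalone re-creation of the q-value sort above, to make the ordering concrete (this is not the class method itself):

    def accept_language_codes(header):
        locales = []
        for language in header.split(","):
            parts = language.strip().split(";")
            if len(parts) > 1 and parts[1].startswith("q="):
                try:
                    score = float(parts[1][2:])
                except (ValueError, TypeError):
                    score = 0.0
            else:
                score = 1.0
            locales.append((parts[0], score))
        locales.sort(key=lambda pair: pair[1], reverse=True)
        return [code for code, _ in locales]

    # "da" (implicit q=1.0) outranks the explicit q=0.8 and q=0.7 entries.
    assert accept_language_codes("da, en-gb;q=0.8, en;q=0.7") == ["da", "en-gb", "en"]
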
    -    @property
    -    def current_user(self):
    -        """The authenticated user for this request.
    -
    -        This is a cached version of `get_current_user`, which you can
    -        override to set the user based on, e.g., a cookie. If that
    -        method is not overridden, this method always returns None.
    -
    -        We lazy-load the current user the first time this method is called
    -        and cache the result after that.
    -        """
    -        if not hasattr(self, "_current_user"):
    -            self._current_user = self.get_current_user()
    -        return self._current_user
    -
    -    @current_user.setter
    -    def current_user(self, value):
    -        self._current_user = value
    -
    -    def get_current_user(self):
    -        """Override to determine the current user from, e.g., a cookie."""
    -        return None
    -
    -    def get_login_url(self):
    -        """Override to customize the login URL based on the request.
    -
    -        By default, we use the ``login_url`` application setting.
    -        """
    -        self.require_setting("login_url", "@tornado.web.authenticated")
    -        return self.application.settings["login_url"]
    -
    -    def get_template_path(self):
    -        """Override to customize template path for each handler.
    -
    -        By default, we use the ``template_path`` application setting.
    -        Return None to load templates relative to the calling file.
    -        """
    -        return self.application.settings.get("template_path")
    -
    -    @property
    -    def xsrf_token(self):
    -        """The XSRF-prevention token for the current user/session.
    -
    -        To prevent cross-site request forgery, we set an '_xsrf' cookie
    -        and include the same '_xsrf' value as an argument with all POST
    -        requests. If the two do not match, we reject the form submission
    -        as a potential forgery.
    -
    -        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    -
    -        .. versionchanged:: 3.2.2
-           The xsrf token will now have a random mask applied in every
    -           request, which makes it safe to include the token in pages
    -           that are compressed.  See http://breachattack.com for more
    -           information on the issue fixed by this change.  Old (version 1)
    -           cookies will be converted to version 2 when this method is called
    -           unless the ``xsrf_cookie_version`` `Application` setting is
    -           set to 1.
    -        """
    -        if not hasattr(self, "_xsrf_token"):
    -            version, token, timestamp = self._get_raw_xsrf_token()
    -            output_version = self.settings.get("xsrf_cookie_version", 2)
    -            if output_version == 1:
    -                self._xsrf_token = binascii.b2a_hex(token)
    -            elif output_version == 2:
    -                mask = os.urandom(4)
    -                self._xsrf_token = b"|".join([
    -                    b"2",
    -                    binascii.b2a_hex(mask),
    -                    binascii.b2a_hex(_websocket_mask(mask, token)),
    -                    utf8(str(int(timestamp)))])
    -            else:
    -                raise ValueError("unknown xsrf cookie version %d",
    -                                 output_version)
    -            if version is None:
    -                expires_days = 30 if self.current_user else None
    -                self.set_cookie("_xsrf", self._xsrf_token,
    -                                expires_days=expires_days)
    -        return self._xsrf_token
    -
    -    def _get_raw_xsrf_token(self):
    -        """Read or generate the xsrf token in its raw form.
    -
    -        The raw_xsrf_token is a tuple containing:
    -
    -        * version: the version of the cookie from which this token was read,
    -          or None if we generated a new token in this request.
    -        * token: the raw token data; random (non-ascii) bytes.
    -        * timestamp: the time this token was generated (will not be accurate
    -          for version 1 cookies)
    -        """
    -        if not hasattr(self, '_raw_xsrf_token'):
    -            cookie = self.get_cookie("_xsrf")
    -            if cookie:
    -                version, token, timestamp = self._decode_xsrf_token(cookie)
    -            else:
    -                version, token, timestamp = None, None, None
    -            if token is None:
    -                version = None
    -                token = os.urandom(16)
    -                timestamp = time.time()
    -            self._raw_xsrf_token = (version, token, timestamp)
    -        return self._raw_xsrf_token
    -
    -    def _decode_xsrf_token(self, cookie):
    -        """Convert a cookie string into a the tuple form returned by
    -        _get_raw_xsrf_token.
    -        """
    -        m = _signed_value_version_re.match(utf8(cookie))
    -        if m:
    -            version = int(m.group(1))
    -            if version == 2:
    -                _, mask, masked_token, timestamp = cookie.split("|")
    -                mask = binascii.a2b_hex(utf8(mask))
    -                token = _websocket_mask(
    -                    mask, binascii.a2b_hex(utf8(masked_token)))
    -                timestamp = int(timestamp)
    -                return version, token, timestamp
    -            else:
    -                # Treat unknown versions as not present instead of failing.
    -                return None, None, None
    -        else:
    -            version = 1
    -            try:
    -                token = binascii.a2b_hex(utf8(cookie))
    -            except (binascii.Error, TypeError):
    -                token = utf8(cookie)
    -            # We don't have a usable timestamp in older versions.
    -            timestamp = int(time.time())
    -            return (version, token, timestamp)
    -
    -    def check_xsrf_cookie(self):
    -        """Verifies that the ``_xsrf`` cookie matches the ``_xsrf`` argument.
    -
    -        To prevent cross-site request forgery, we set an ``_xsrf``
    -        cookie and include the same value as a non-cookie
    -        field with all ``POST`` requests. If the two do not match, we
    -        reject the form submission as a potential forgery.
    -
    -        The ``_xsrf`` value may be set as either a form field named ``_xsrf``
    -        or in a custom HTTP header named ``X-XSRFToken`` or ``X-CSRFToken``
    -        (the latter is accepted for compatibility with Django).
    -
    -        See http://en.wikipedia.org/wiki/Cross-site_request_forgery
    -
    -        Prior to release 1.1.1, this check was ignored if the HTTP header
    -        ``X-Requested-With: XMLHTTPRequest`` was present.  This exception
    -        has been shown to be insecure and has been removed.  For more
    -        information please see
    -        http://www.djangoproject.com/weblog/2011/feb/08/security/
    -        http://weblog.rubyonrails.org/2011/2/8/csrf-protection-bypass-in-ruby-on-rails
    -
    -        .. versionchanged:: 3.2.2
    -           Added support for cookie version 2.  Both versions 1 and 2 are
    -           supported.
    -        """
    -        token = (self.get_argument("_xsrf", None) or
    -                 self.request.headers.get("X-Xsrftoken") or
    -                 self.request.headers.get("X-Csrftoken"))
    -        if not token:
    -            raise HTTPError(403, "'_xsrf' argument missing from POST")
    -        _, token, _ = self._decode_xsrf_token(token)
    -        _, expected_token, _ = self._get_raw_xsrf_token()
    -        if not _time_independent_equals(utf8(token), utf8(expected_token)):
    -            raise HTTPError(403, "XSRF cookie does not match POST argument")
    -
    -    def xsrf_form_html(self):
    -        """An HTML ```` element to be included with all POST forms.
    -
    -        It defines the ``_xsrf`` input value, which we check on all POST
    -        requests to prevent cross-site request forgery. If you have set
    -        the ``xsrf_cookies`` application setting, you must include this
    -        HTML within all of your HTML forms.
    -
    -        In a template, this method should be called with ``{% module
    -        xsrf_form_html() %}``
    -
    -        See `check_xsrf_cookie()` above for more information.
    -        """
    -        return '<input type="hidden" name="_xsrf" value="' + \
    -            escape.xhtml_escape(self.xsrf_token) + '"/>'
    -
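# Illustrative sketch (not part of the removed file): wiring up the XSRF
# machinery described above. Handler, route, and cookie_secret value are
# hypothetical.
import tornado.web

class FormHandler(tornado.web.RequestHandler):
    def get(self):
        # xsrf_form_html() emits the hidden ``_xsrf`` input that
        # check_xsrf_cookie() verifies on the POST below.
        self.write('<form method="post" action="/form">' +
                   self.xsrf_form_html() +
                   '<input type="submit"/></form>')

    def post(self):
        self.write("ok")

# With ``xsrf_cookies`` set, check_xsrf_cookie() runs automatically for
# every request whose method is not GET, HEAD, or OPTIONS.
application = tornado.web.Application(
    [(r"/form", FormHandler)],
    cookie_secret="CHANGE_ME",  # hypothetical secret
    xsrf_cookies=True)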
    -    def static_url(self, path, include_host=None, **kwargs):
    -        """Returns a static URL for the given relative static file path.
    -
    -        This method requires you set the ``static_path`` setting in your
    -        application (which specifies the root directory of your static
    -        files).
    -
    -        This method returns a versioned url (by default appending
    -        ``?v=<signature>``), which allows the static files to be
    -        cached indefinitely.  This can be disabled by passing
    -        ``include_version=False`` (in the default implementation;
    -        other static file implementations are not required to support
    -        this, but they may support other options).
    -
    -        By default this method returns URLs relative to the current
    -        host, but if ``include_host`` is true the URL returned will be
    -        absolute.  If this handler has an ``include_host`` attribute,
    -        that value will be used as the default for all `static_url`
    -        calls that do not pass ``include_host`` as a keyword argument.
    -
    -        """
    -        self.require_setting("static_path", "static_url")
    -        get_url = self.settings.get("static_handler_class",
    -                                    StaticFileHandler).make_static_url
    -
    -        if include_host is None:
    -            include_host = getattr(self, "include_host", False)
    -
    -        if include_host:
    -            base = self.request.protocol + "://" + self.request.host
    -        else:
    -            base = ""
    -
    -        return base + get_url(self.settings, path, **kwargs)
    -
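# Illustrative sketch (not part of the removed file): using static_url from
# a handler. The static_path and file name are hypothetical.
import tornado.web

class PageHandler(tornado.web.RequestHandler):
    def get(self):
        # Emits a versioned URL such as /static/style.css?v=<hash>, which
        # lets the browser cache the file indefinitely.
        self.write('<link rel="stylesheet" href="%s"/>'
                   % self.static_url("style.css"))

application = tornado.web.Application(
    [(r"/", PageHandler)],
    static_path="/var/www")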
    -    def require_setting(self, name, feature="this feature"):
    -        """Raises an exception if the given app setting is not defined."""
    -        if not self.application.settings.get(name):
    -            raise Exception("You must define the '%s' setting in your "
    -                            "application to use %s" % (name, feature))
    -
    -    def reverse_url(self, name, *args):
    -        """Alias for `Application.reverse_url`."""
    -        return self.application.reverse_url(name, *args)
    -
    -    def compute_etag(self):
    -        """Computes the etag header to be used for this request.
    -
    -        By default uses a hash of the content written so far.
    -
    -        May be overridden to provide custom etag implementations,
    -        or may return None to disable tornado's default etag support.
    -        """
    -        hasher = hashlib.sha1()
    -        for part in self._write_buffer:
    -            hasher.update(part)
    -        return '"%s"' % hasher.hexdigest()
    -
    -    def set_etag_header(self):
    -        """Sets the response's Etag header using ``self.compute_etag()``.
    -
    -        Note: no header will be set if ``compute_etag()`` returns ``None``.
    -
    -        This method is called automatically when the request is finished.
    -        """
    -        etag = self.compute_etag()
    -        if etag is not None:
    -            self.set_header("Etag", etag)
    -
    -    def check_etag_header(self):
    -        """Checks the ``Etag`` header against requests's ``If-None-Match``.
    -
    -        Returns ``True`` if the request's Etag matches and a 304 should be
    -        returned. For example::
    -
    -            self.set_etag_header()
    -            if self.check_etag_header():
    -                self.set_status(304)
    -                return
    -
    -        This method is called automatically when the request is finished,
    -        but may be called earlier for applications that override
    -        `compute_etag` and want to do an early check for ``If-None-Match``
    -        before completing the request.  The ``Etag`` header should be set
    -        (perhaps with `set_etag_header`) before calling this method.
    -        """
    -        etag = self._headers.get("Etag")
    -        inm = utf8(self.request.headers.get("If-None-Match", ""))
    -        return bool(etag and inm and inm.find(etag) >= 0)
    -
    -    def _stack_context_handle_exception(self, type, value, traceback):
    -        try:
    -            # For historical reasons _handle_request_exception only takes
    -            # the exception value instead of the full triple,
    -            # so re-raise the exception to ensure that it's in
    -            # sys.exc_info()
    -            raise_exc_info((type, value, traceback))
    -        except Exception:
    -            self._handle_request_exception(value)
    -        return True
    -
    -    @gen.coroutine
    -    def _execute(self, transforms, *args, **kwargs):
    -        """Executes this request with the given output transforms."""
    -        self._transforms = transforms
    -        try:
    -            if self.request.method not in self.SUPPORTED_METHODS:
    -                raise HTTPError(405)
    -            self.path_args = [self.decode_argument(arg) for arg in args]
    -            self.path_kwargs = dict((k, self.decode_argument(v, name=k))
    -                                    for (k, v) in kwargs.items())
    -            # If XSRF cookies are turned on, reject form submissions without
    -            # the proper cookie
    -            if self.request.method not in ("GET", "HEAD", "OPTIONS") and \
    -                    self.application.settings.get("xsrf_cookies"):
    -                self.check_xsrf_cookie()
    -
    -            result = self.prepare()
    -            if is_future(result):
    -                result = yield result
    -            if result is not None:
    -                raise TypeError("Expected None, got %r" % result)
    -            if self._prepared_future is not None:
    -                # Tell the Application we've finished with prepare()
    -                # and are ready for the body to arrive.
    -                self._prepared_future.set_result(None)
    -            if self._finished:
    -                return
    -
    -            if _has_stream_request_body(self.__class__):
    -                # In streaming mode request.body is a Future that signals
    -                # the body has been completely received.  The Future has no
    -                # result; the data has been passed to self.data_received
    -                # instead.
    -                try:
    -                    yield self.request.body
    -                except iostream.StreamClosedError:
    -                    return
    -
    -            method = getattr(self, self.request.method.lower())
    -            result = method(*self.path_args, **self.path_kwargs)
    -            if is_future(result):
    -                result = yield result
    -            if result is not None:
    -                raise TypeError("Expected None, got %r" % result)
    -            if self._auto_finish and not self._finished:
    -                self.finish()
    -        except Exception as e:
    -            self._handle_request_exception(e)
    -            if (self._prepared_future is not None and
    -                    not self._prepared_future.done()):
    -                # In case we failed before setting _prepared_future, do it
    -                # now (to unblock the HTTP server).  Note that this is not
    -                # in a finally block to avoid GC issues prior to Python 3.4.
    -                self._prepared_future.set_result(None)
    -
    -    def data_received(self, chunk):
    -        """Implement this method to handle streamed request data.
    -
    -        Requires the `.stream_request_body` decorator.
    -        """
    -        raise NotImplementedError()
    -
    -    def _log(self):
    -        """Logs the current request.
    -
    -        Sort of deprecated since this functionality was moved to the
    -        Application, but left in place for the benefit of existing apps
    -        that have overridden this method.
    -        """
    -        self.application.log_request(self)
    -
    -    def _request_summary(self):
    -        return self.request.method + " " + self.request.uri + \
    -            " (" + self.request.remote_ip + ")"
    -
    -    def _handle_request_exception(self, e):
    -        if isinstance(e, Finish):
    -            # Not an error; just finish the request without logging.
    -            if not self._finished:
    -                self.finish()
    -            return
    -        self.log_exception(*sys.exc_info())
    -        if self._finished:
    -            # Extra errors after the request has been finished should
    -            # be logged, but there is no reason to continue to try and
    -            # send a response.
    -            return
    -        if isinstance(e, HTTPError):
    -            if e.status_code not in httputil.responses and not e.reason:
    -                gen_log.error("Bad HTTP status code: %d", e.status_code)
    -                self.send_error(500, exc_info=sys.exc_info())
    -            else:
    -                self.send_error(e.status_code, exc_info=sys.exc_info())
    -        else:
    -            self.send_error(500, exc_info=sys.exc_info())
    -
    -    def log_exception(self, typ, value, tb):
    -        """Override to customize logging of uncaught exceptions.
    -
    -        By default logs instances of `HTTPError` as warnings without
    -        stack traces (on the ``tornado.general`` logger), and all
    -        other exceptions as errors with stack traces (on the
    -        ``tornado.application`` logger).
    -
    -        .. versionadded:: 3.1
    -        """
    -        if isinstance(value, HTTPError):
    -            if value.log_message:
    -                format = "%d %s: " + value.log_message
    -                args = ([value.status_code, self._request_summary()] +
    -                        list(value.args))
    -                gen_log.warning(format, *args)
    -        else:
    -            app_log.error("Uncaught exception %s\n%r", self._request_summary(),
    -                          self.request, exc_info=(typ, value, tb))
    -
    -    def _ui_module(self, name, module):
    -        def render(*args, **kwargs):
    -            if not hasattr(self, "_active_modules"):
    -                self._active_modules = {}
    -            if name not in self._active_modules:
    -                self._active_modules[name] = module(self)
    -            rendered = self._active_modules[name].render(*args, **kwargs)
    -            return rendered
    -        return render
    -
    -    def _ui_method(self, method):
    -        return lambda *args, **kwargs: method(self, *args, **kwargs)
    -
    -    def _clear_headers_for_304(self):
    -        # 304 responses should not contain entity headers (defined in
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec7.html#sec7.1)
    -        # not explicitly allowed by
    -        # http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.3.5
    -        headers = ["Allow", "Content-Encoding", "Content-Language",
    -                   "Content-Length", "Content-MD5", "Content-Range",
    -                   "Content-Type", "Last-Modified"]
    -        for h in headers:
    -            self.clear_header(h)
    -
    -
    -def asynchronous(method):
    -    """Wrap request handler methods with this if they are asynchronous.
    -
    -    This decorator is unnecessary if the method is also decorated with
    -    ``@gen.coroutine`` (it is legal but unnecessary to use the two
    -    decorators together, in which case ``@asynchronous`` must be
    -    first).
    -
    -    This decorator should only be applied to the :ref:`HTTP verb
    -    methods <verbs>`; its behavior is undefined for any other method.
    -    This decorator does not *make* a method asynchronous; it tells
    -    the framework that the method *is* asynchronous.  For this decorator
    -    to be useful the method must (at least sometimes) do something
    -    asynchronous.
    -
    -    If this decorator is given, the response is not finished when the
    -    method returns. It is up to the request handler to call
    -    `self.finish() <RequestHandler.finish>` to finish the HTTP
    -    request. Without this decorator, the request is automatically
    -    finished when the ``get()`` or ``post()`` method returns. Example::
    -
    -       class MyRequestHandler(web.RequestHandler):
    -           @web.asynchronous
    -           def get(self):
    -              http = httpclient.AsyncHTTPClient()
    -              http.fetch("http://friendfeed.com/", self._on_download)
    -
    -           def _on_download(self, response):
    -              self.write("Downloaded!")
    -              self.finish()
    -
    -    .. versionadded:: 3.1
    -       The ability to use ``@gen.coroutine`` without ``@asynchronous``.
    -    """
    -    # Delay the IOLoop import because it's not available on app engine.
    -    from tornado.ioloop import IOLoop
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        self._auto_finish = False
    -        with stack_context.ExceptionStackContext(
    -                self._stack_context_handle_exception):
    -            result = method(self, *args, **kwargs)
    -            if isinstance(result, Future):
    -                # If @asynchronous is used with @gen.coroutine, (but
    -                # not @gen.engine), we can automatically finish the
    -                # request when the future resolves.  Additionally,
    -                # the Future will swallow any exceptions so we need
    -                # to throw them back out to the stack context to finish
    -                # the request.
    -                def future_complete(f):
    -                    f.result()
    -                    if not self._finished:
    -                        self.finish()
    -                IOLoop.current().add_future(result, future_complete)
    -                # Once we have done this, hide the Future from our
    -                # caller (i.e. RequestHandler._when_complete), which
    -                # would otherwise set up its own callback and
    -                # exception handler (resulting in exceptions being
    -                # logged twice).
    -                return None
    -            return result
    -    return wrapper
    -
    -
    -def stream_request_body(cls):
    -    """Apply to `RequestHandler` subclasses to enable streaming body support.
    -
    -    This decorator implies the following changes:
    -
    -    * `.HTTPServerRequest.body` is undefined, and body arguments will not
    -      be included in `RequestHandler.get_argument`.
    -    * `RequestHandler.prepare` is called when the request headers have been
    -      read instead of after the entire body has been read.
    -    * The subclass must define a method ``data_received(self, data):``, which
    -      will be called zero or more times as data is available.  Note that
    -      if the request has an empty body, ``data_received`` may not be called.
    -    * ``prepare`` and ``data_received`` may return Futures (such as via
    -      ``@gen.coroutine``, in which case the next method will not be called
    -      until those futures have completed.
    -    * The regular HTTP method (``post``, ``put``, etc) will be called after
    -      the entire body has been read.
    -
    -    There is a subtle interaction between ``data_received`` and asynchronous
    -    ``prepare``: The first call to ``data_received`` may occur at any point
    -    after the call to ``prepare`` has returned *or yielded*.
    -    """
    -    if not issubclass(cls, RequestHandler):
    -        raise TypeError("expected subclass of RequestHandler, got %r", cls)
    -    cls._stream_request_body = True
    -    return cls
    -
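# Illustrative sketch (not part of the removed file): a streaming upload
# handler using the decorator above. The route and handler are hypothetical.
import tornado.web

@tornado.web.stream_request_body
class UploadHandler(tornado.web.RequestHandler):
    def prepare(self):
        # Runs as soon as the headers are read, before any body data.
        self.bytes_read = 0

    def data_received(self, chunk):
        # Runs zero or more times as body chunks arrive.
        self.bytes_read += len(chunk)

    def post(self):
        # Runs once the entire body has been received.
        self.write("received %d bytes" % self.bytes_read)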
    -
    -def _has_stream_request_body(cls):
    -    if not issubclass(cls, RequestHandler):
    -        raise TypeError("expected subclass of RequestHandler, got %r", cls)
    -    return getattr(cls, '_stream_request_body', False)
    -
    -
    -def removeslash(method):
    -    """Use this decorator to remove trailing slashes from the request path.
    -
    -    For example, a request to ``/foo/`` would redirect to ``/foo`` with this
    -    decorator. Your request handler mapping should use a regular expression
    -    like ``r'/foo/*'`` in conjunction with using the decorator.
    -    """
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        if self.request.path.endswith("/"):
    -            if self.request.method in ("GET", "HEAD"):
    -                uri = self.request.path.rstrip("/")
    -                if uri:  # don't try to redirect '/' to ''
    -                    if self.request.query:
    -                        uri += "?" + self.request.query
    -                    self.redirect(uri, permanent=True)
    -                    return
    -            else:
    -                raise HTTPError(404)
    -        return method(self, *args, **kwargs)
    -    return wrapper
    -
    -
    -def addslash(method):
    -    """Use this decorator to add a missing trailing slash to the request path.
    -
    -    For example, a request to ``/foo`` would redirect to ``/foo/`` with this
    -    decorator. Your request handler mapping should use a regular expression
    -    like ``r'/foo/?'`` in conjunction with using the decorator.
    -    """
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        if not self.request.path.endswith("/"):
    -            if self.request.method in ("GET", "HEAD"):
    -                uri = self.request.path + "/"
    -                if self.request.query:
    -                    uri += "?" + self.request.query
    -                self.redirect(uri, permanent=True)
    -                return
    -            raise HTTPError(404)
    -        return method(self, *args, **kwargs)
    -    return wrapper
    -
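# Illustrative sketch (not part of the removed file): pairing the two
# decorators above with matching URL regexps, per their docstrings.
import tornado.web

class ArticleHandler(tornado.web.RequestHandler):
    @tornado.web.removeslash
    def get(self):
        self.write("article")  # GET /article/ redirects to /article first

class SectionHandler(tornado.web.RequestHandler):
    @tornado.web.addslash
    def get(self):
        self.write("section")  # GET /section redirects to /section/ first

application = tornado.web.Application([
    (r"/article/*", ArticleHandler),  # tolerates trailing slashes
    (r"/section/?", SectionHandler),  # tolerates a missing trailing slash
])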
    -
    -class Application(httputil.HTTPServerConnectionDelegate):
    -    """A collection of request handlers that make up a web application.
    -
    -    Instances of this class are callable and can be passed directly to
    -    HTTPServer to serve the application::
    -
    -        application = web.Application([
    -            (r"/", MainPageHandler),
    -        ])
    -        http_server = httpserver.HTTPServer(application)
    -        http_server.listen(8080)
    -        ioloop.IOLoop.instance().start()
    -
    -    The constructor for this class takes in a list of `URLSpec` objects
    -    or (regexp, request_class) tuples. When we receive requests, we
    -    iterate over the list in order and instantiate an instance of the
    -    first request class whose regexp matches the request path.
    -    The request class can be specified as either a class object or a
    -    (fully-qualified) name.
    -
    -    Each tuple can contain additional elements, which correspond to the
    -    arguments to the `URLSpec` constructor.  (Prior to Tornado 3.2,
    -    only tuples of two or three elements were allowed).
    -
    -    A dictionary may be passed as the third element of the tuple,
    -    which will be used as keyword arguments to the handler's
    -    constructor and `~RequestHandler.initialize` method.  This pattern
    -    is used for the `StaticFileHandler` in this example (note that a
    -    `StaticFileHandler` can be installed automatically with the
    -    static_path setting described below)::
    -
    -        application = web.Application([
    -            (r"/static/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
    -        ])
    -
    -    We support virtual hosts with the `add_handlers` method, which takes in
    -    a host regular expression as the first argument::
    -
    -        application.add_handlers(r"www\.myhost\.com", [
    -            (r"/article/([0-9]+)", ArticleHandler),
    -        ])
    -
    -    You can serve static files by sending the ``static_path`` setting
    -    as a keyword argument. We will serve those files from the
    -    ``/static/`` URI (this is configurable with the
    -    ``static_url_prefix`` setting), and we will serve ``/favicon.ico``
    -    and ``/robots.txt`` from the same directory.  A custom subclass of
    -    `StaticFileHandler` can be specified with the
    -    ``static_handler_class`` setting.
    -
    -    """
    -    def __init__(self, handlers=None, default_host="", transforms=None,
    -                 **settings):
    -        if transforms is None:
    -            self.transforms = []
    -            if settings.get("compress_response") or settings.get("gzip"):
    -                self.transforms.append(GZipContentEncoding)
    -        else:
    -            self.transforms = transforms
    -        self.handlers = []
    -        self.named_handlers = {}
    -        self.default_host = default_host
    -        self.settings = settings
    -        self.ui_modules = {'linkify': _linkify,
    -                           'xsrf_form_html': _xsrf_form_html,
    -                           'Template': TemplateModule,
    -                           }
    -        self.ui_methods = {}
    -        self._load_ui_modules(settings.get("ui_modules", {}))
    -        self._load_ui_methods(settings.get("ui_methods", {}))
    -        if self.settings.get("static_path"):
    -            path = self.settings["static_path"]
    -            handlers = list(handlers or [])
    -            static_url_prefix = settings.get("static_url_prefix",
    -                                             "/static/")
    -            static_handler_class = settings.get("static_handler_class",
    -                                                StaticFileHandler)
    -            static_handler_args = settings.get("static_handler_args", {})
    -            static_handler_args['path'] = path
    -            for pattern in [re.escape(static_url_prefix) + r"(.*)",
    -                            r"/(favicon\.ico)", r"/(robots\.txt)"]:
    -                handlers.insert(0, (pattern, static_handler_class,
    -                                    static_handler_args))
    -        if handlers:
    -            self.add_handlers(".*$", handlers)
    -
    -        if self.settings.get('debug'):
    -            self.settings.setdefault('autoreload', True)
    -            self.settings.setdefault('compiled_template_cache', False)
    -            self.settings.setdefault('static_hash_cache', False)
    -            self.settings.setdefault('serve_traceback', True)
    -
    -        # Automatically reload modified modules
    -        if self.settings.get('autoreload'):
    -            from tornado import autoreload
    -            autoreload.start()
    -
    -    def listen(self, port, address="", **kwargs):
    -        """Starts an HTTP server for this application on the given port.
    -
    -        This is a convenience alias for creating an `.HTTPServer`
    -        object and calling its listen method.  Keyword arguments not
    -        supported by `HTTPServer.listen <.TCPServer.listen>` are passed to the
    -        `.HTTPServer` constructor.  For advanced uses
    -        (e.g. multi-process mode), do not use this method; create an
    -        `.HTTPServer` and call its
    -        `.TCPServer.bind`/`.TCPServer.start` methods directly.
    -
    -        Note that after calling this method you still need to call
    -        ``IOLoop.instance().start()`` to start the server.
    -        """
    -        # import is here rather than top level because HTTPServer
    -        # is not importable on appengine
    -        from tornado.httpserver import HTTPServer
    -        server = HTTPServer(self, **kwargs)
    -        server.listen(port, address)
    -
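# Illustrative sketch (not part of the removed file): the single-process
# startup pattern the docstring above describes. Port 8888 is hypothetical.
import tornado.ioloop
import tornado.web

class MainHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("Hello, world")

application = tornado.web.Application([(r"/", MainHandler)])
application.listen(8888)  # convenience wrapper around HTTPServer
tornado.ioloop.IOLoop.instance().start()  # listen() does not start the loop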
    -    def add_handlers(self, host_pattern, host_handlers):
    -        """Appends the given handlers to our handler list.
    -
    -        Host patterns are processed sequentially in the order they were
    -        added. All matching patterns will be considered.
    -        """
    -        if not host_pattern.endswith("$"):
    -            host_pattern += "$"
    -        handlers = []
    -        # The handlers with the wildcard host_pattern are a special
    -        # case - they're added in the constructor but should have lower
    -        # precedence than the more-precise handlers added later.
    -        # If a wildcard handler group exists, it should always be last
    -        # in the list, so insert new groups just before it.
    -        if self.handlers and self.handlers[-1][0].pattern == '.*$':
    -            self.handlers.insert(-1, (re.compile(host_pattern), handlers))
    -        else:
    -            self.handlers.append((re.compile(host_pattern), handlers))
    -
    -        for spec in host_handlers:
    -            if isinstance(spec, (tuple, list)):
    -                assert len(spec) in (2, 3, 4)
    -                spec = URLSpec(*spec)
    -            handlers.append(spec)
    -            if spec.name:
    -                if spec.name in self.named_handlers:
    -                    app_log.warning(
    -                        "Multiple handlers named %s; replacing previous value",
    -                        spec.name)
    -                self.named_handlers[spec.name] = spec
    -
    -    def add_transform(self, transform_class):
    -        self.transforms.append(transform_class)
    -
    -    def _get_host_handlers(self, request):
    -        host = request.host.lower().split(':')[0]
    -        matches = []
    -        for pattern, handlers in self.handlers:
    -            if pattern.match(host):
    -                matches.extend(handlers)
    -        # Look for default host if not behind load balancer (for debugging)
    -        if not matches and "X-Real-Ip" not in request.headers:
    -            for pattern, handlers in self.handlers:
    -                if pattern.match(self.default_host):
    -                    matches.extend(handlers)
    -        return matches or None
    -
    -    def _load_ui_methods(self, methods):
    -        if isinstance(methods, types.ModuleType):
    -            self._load_ui_methods(dict((n, getattr(methods, n))
    -                                       for n in dir(methods)))
    -        elif isinstance(methods, list):
    -            for m in methods:
    -                self._load_ui_methods(m)
    -        else:
    -            for name, fn in methods.items():
    -                if not name.startswith("_") and hasattr(fn, "__call__") \
    -                        and name[0].lower() == name[0]:
    -                    self.ui_methods[name] = fn
    -
    -    def _load_ui_modules(self, modules):
    -        if isinstance(modules, types.ModuleType):
    -            self._load_ui_modules(dict((n, getattr(modules, n))
    -                                       for n in dir(modules)))
    -        elif isinstance(modules, list):
    -            for m in modules:
    -                self._load_ui_modules(m)
    -        else:
    -            assert isinstance(modules, dict)
    -            for name, cls in modules.items():
    -                try:
    -                    if issubclass(cls, UIModule):
    -                        self.ui_modules[name] = cls
    -                except TypeError:
    -                    pass
    -
    -    def start_request(self, connection):
    -        # Modern HTTPServer interface
    -        return _RequestDispatcher(self, connection)
    -
    -    def __call__(self, request):
    -        # Legacy HTTPServer interface
    -        dispatcher = _RequestDispatcher(self, None)
    -        dispatcher.set_request(request)
    -        return dispatcher.execute()
    -
    -    def reverse_url(self, name, *args):
    -        """Returns a URL path for handler named ``name``
    -
    -        The handler must be added to the application as a named `URLSpec`.
    -
    -        Args will be substituted for capturing groups in the `URLSpec` regex.
    -        They will be converted to strings if necessary, encoded as utf8,
    -        and url-escaped.
    -        """
    -        if name in self.named_handlers:
    -            return self.named_handlers[name].reverse(*args)
    -        raise KeyError("%s not found in named urls" % name)
    -
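# Illustrative sketch (not part of the removed file): registering a named
# URLSpec and reversing it. Names and routes are hypothetical.
import tornado.web

class ArticleHandler(tornado.web.RequestHandler):
    def get(self, article_id):
        self.write("article %s" % article_id)

application = tornado.web.Application([
    tornado.web.url(r"/article/([0-9]+)", ArticleHandler, name="article"),
])
# 42 is stringified and substituted into the capturing group.
assert application.reverse_url("article", 42) == "/article/42"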
    -    def log_request(self, handler):
    -        """Writes a completed HTTP request to the logs.
    -
    -        By default writes to the python root logger.  To change
    -        this behavior either subclass Application and override this method,
    -        or pass a function in the application settings dictionary as
    -        ``log_function``.
    -        """
    -        if "log_function" in self.settings:
    -            self.settings["log_function"](handler)
    -            return
    -        if handler.get_status() < 400:
    -            log_method = access_log.info
    -        elif handler.get_status() < 500:
    -            log_method = access_log.warning
    -        else:
    -            log_method = access_log.error
    -        request_time = 1000.0 * handler.request.request_time()
    -        log_method("%d %s %.2fms", handler.get_status(),
    -                   handler._request_summary(), request_time)
    -
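# Illustrative sketch (not part of the removed file): replacing the default
# access logging through the ``log_function`` setting. Note it leans on the
# private helper _request_summary, mirroring the default implementation.
import logging
import tornado.web

def log_request(handler):
    # Receives the finished RequestHandler once per completed request.
    logging.info("%d %s %.2fms", handler.get_status(),
                 handler._request_summary(),
                 1000.0 * handler.request.request_time())

application = tornado.web.Application([], log_function=log_request)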
    -
    -class _RequestDispatcher(httputil.HTTPMessageDelegate):
    -    def __init__(self, application, connection):
    -        self.application = application
    -        self.connection = connection
    -        self.request = None
    -        self.chunks = []
    -        self.handler_class = None
    -        self.handler_kwargs = None
    -        self.path_args = []
    -        self.path_kwargs = {}
    -
    -    def headers_received(self, start_line, headers):
    -        self.set_request(httputil.HTTPServerRequest(
    -            connection=self.connection, start_line=start_line, headers=headers))
    -        if self.stream_request_body:
    -            self.request.body = Future()
    -            return self.execute()
    -
    -    def set_request(self, request):
    -        self.request = request
    -        self._find_handler()
    -        self.stream_request_body = _has_stream_request_body(self.handler_class)
    -
    -    def _find_handler(self):
    -        # Identify the handler to use as soon as we have the request.
    -        # Save url path arguments for later.
    -        app = self.application
    -        handlers = app._get_host_handlers(self.request)
    -        if not handlers:
    -            self.handler_class = RedirectHandler
    -            self.handler_kwargs = dict(url="http://" + app.default_host + "/")
    -            return
    -        for spec in handlers:
    -            match = spec.regex.match(self.request.path)
    -            if match:
    -                self.handler_class = spec.handler_class
    -                self.handler_kwargs = spec.kwargs
    -                if spec.regex.groups:
    -                    # Pass matched groups to the handler.  Since
    -                    # match.groups() includes both named and
    -                    # unnamed groups, we want to use either groups
    -                    # or groupdict but not both.
    -                    if spec.regex.groupindex:
    -                        self.path_kwargs = dict(
    -                            (str(k), _unquote_or_none(v))
    -                            for (k, v) in match.groupdict().items())
    -                    else:
    -                        self.path_args = [_unquote_or_none(s)
    -                                          for s in match.groups()]
    -                return
    -        if app.settings.get('default_handler_class'):
    -            self.handler_class = app.settings['default_handler_class']
    -            self.handler_kwargs = app.settings.get(
    -                'default_handler_args', {})
    -        else:
    -            self.handler_class = ErrorHandler
    -            self.handler_kwargs = dict(status_code=404)
    -
    -    def data_received(self, data):
    -        if self.stream_request_body:
    -            return self.handler.data_received(data)
    -        else:
    -            self.chunks.append(data)
    -
    -    def finish(self):
    -        if self.stream_request_body:
    -            self.request.body.set_result(None)
    -        else:
    -            self.request.body = b''.join(self.chunks)
    -            self.request._parse_body()
    -            self.execute()
    -
    -    def on_connection_close(self):
    -        if self.stream_request_body:
    -            self.handler.on_connection_close()
    -        else:
    -            self.chunks = None
    -
    -    def execute(self):
    -        # If template cache is disabled (usually in the debug mode),
    -        # re-compile templates and reload static files on every
    -        # request so you don't need to restart to see changes
    -        if not self.application.settings.get("compiled_template_cache", True):
    -            with RequestHandler._template_loader_lock:
    -                for loader in RequestHandler._template_loaders.values():
    -                    loader.reset()
    -        if not self.application.settings.get('static_hash_cache', True):
    -            StaticFileHandler.reset()
    -
    -        self.handler = self.handler_class(self.application, self.request,
    -                                          **self.handler_kwargs)
    -        transforms = [t(self.request) for t in self.application.transforms]
    -
    -        if self.stream_request_body:
    -            self.handler._prepared_future = Future()
    -        # Note that if an exception escapes handler._execute it will be
    -        # trapped in the Future it returns (which we are ignoring here).
    -        # However, that shouldn't happen because _execute has a blanket
    -        # except handler, and we cannot easily access the IOLoop here to
    -        # call add_future.
    -        self.handler._execute(transforms, *self.path_args, **self.path_kwargs)
    -        # If we are streaming the request body, then execute() is finished
    -        # when the handler has prepared to receive the body.  If not,
    -        # it doesn't matter when execute() finishes (so we return None)
    -        return self.handler._prepared_future
    -
    -
    -class HTTPError(Exception):
    -    """An exception that will turn into an HTTP error response.
    -
    -    Raising an `HTTPError` is a convenient alternative to calling
    -    `RequestHandler.send_error` since it automatically ends the
    -    current function.
    -
    -    To customize the response sent with an `HTTPError`, override
    -    `RequestHandler.write_error`.
    -
    -    :arg int status_code: HTTP status code.  Must be listed in
    -        `httplib.responses <http.client.responses>` unless the ``reason``
    -        keyword argument is given.
    -    :arg string log_message: Message to be written to the log for this error
    -        (will not be shown to the user unless the `Application` is in debug
    -        mode).  May contain ``%s``-style placeholders, which will be filled
    -        in with remaining positional parameters.
    -    :arg string reason: Keyword-only argument.  The HTTP "reason" phrase
    -        to pass in the status line along with ``status_code``.  Normally
    -        determined automatically from ``status_code``, but can be supplied
    -        explicitly when using a non-standard numeric code.
    -    """
    -    def __init__(self, status_code, log_message=None, *args, **kwargs):
    -        self.status_code = status_code
    -        self.log_message = log_message
    -        self.args = args
    -        self.reason = kwargs.get('reason', None)
    -
    -    def __str__(self):
    -        message = "HTTP %d: %s" % (
    -            self.status_code,
    -            self.reason or httputil.responses.get(self.status_code, 'Unknown'))
    -        if self.log_message:
    -            return message + " (" + (self.log_message % self.args) + ")"
    -        else:
    -            return message
    -
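# Illustrative sketch (not part of the removed file): raising HTTPError from
# a handler. The log_message is written to the logs but not shown to the
# user unless the application runs in debug mode.
import tornado.web

class ItemHandler(tornado.web.RequestHandler):
    def get(self, item_id):
        if item_id != "1":  # hypothetical lookup failure
            raise tornado.web.HTTPError(404, "no such item: %s", item_id)
        self.write("found")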
    -
    -class Finish(Exception):
    -    """An exception that ends the request without producing an error response.
    -
    -    When `Finish` is raised in a `RequestHandler`, the request will end
    -    (calling `RequestHandler.finish` if it hasn't already been called),
    -    but the outgoing response will not be modified and the error-handling
    -    methods (including `RequestHandler.write_error`) will not be called.
    -
    -    This can be a more convenient way to implement custom error pages
    -    than overriding ``write_error`` (especially in library code)::
    -
    -        if self.current_user is None:
    -            self.set_status(401)
    -            self.set_header('WWW-Authenticate', 'Basic realm="something"')
    -            raise Finish()
    -    """
    -    pass
    -
    -
    -class MissingArgumentError(HTTPError):
    -    """Exception raised by `RequestHandler.get_argument`.
    -
    -    This is a subclass of `HTTPError`, so if it is uncaught a 400 response
    -    code will be used instead of 500 (and a stack trace will not be logged).
    -
    -    .. versionadded:: 3.1
    -    """
    -    def __init__(self, arg_name):
    -        super(MissingArgumentError, self).__init__(
    -            400, 'Missing argument %s' % arg_name)
    -        self.arg_name = arg_name
    -
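# Illustrative sketch (not part of the removed file): get_argument with no
# default raises MissingArgumentError, which surfaces as a 400 response.
import tornado.web

class SearchHandler(tornado.web.RequestHandler):
    def get(self):
        query = self.get_argument("q")  # missing ``q`` -> 400, not 500
        self.write("searching for %s" % query)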
    -
    -class ErrorHandler(RequestHandler):
    -    """Generates an error response with ``status_code`` for all requests."""
    -    def initialize(self, status_code):
    -        self.set_status(status_code)
    -
    -    def prepare(self):
    -        raise HTTPError(self._status_code)
    -
    -    def check_xsrf_cookie(self):
    -        # POSTs to an ErrorHandler don't actually have side effects,
    -        # so we don't need to check the xsrf token.  This allows POSTs
    -        # to the wrong url to return a 404 instead of 403.
    -        pass
    -
    -
    -class RedirectHandler(RequestHandler):
    -    """Redirects the client to the given URL for all GET requests.
    -
    -    You should provide the keyword argument ``url`` to the handler, e.g.::
    -
    -        application = web.Application([
    -            (r"/oldpath", web.RedirectHandler, {"url": "/newpath"}),
    -        ])
    -    """
    -    def initialize(self, url, permanent=True):
    -        self._url = url
    -        self._permanent = permanent
    -
    -    def get(self):
    -        self.redirect(self._url, permanent=self._permanent)
    -
    -
    -class StaticFileHandler(RequestHandler):
    -    """A simple handler that can serve static content from a directory.
    -
    -    A `StaticFileHandler` is configured automatically if you pass the
    -    ``static_path`` keyword argument to `Application`.  This handler
    -    can be customized with the ``static_url_prefix``, ``static_handler_class``,
    -    and ``static_handler_args`` settings.
    -
    -    To map an additional path to this handler for a static data directory
    -    you would add a line to your application like::
    -
    -        application = web.Application([
    -            (r"/content/(.*)", web.StaticFileHandler, {"path": "/var/www"}),
    -        ])
    -
    -    The handler constructor requires a ``path`` argument, which specifies the
    -    local root directory of the content to be served.
    -
    -    Note that a capture group in the regex is required to parse the value for
    -    the ``path`` argument to the get() method (different than the constructor
    -    argument above); see `URLSpec` for details.
    -
    -    To maximize the effectiveness of browser caching, this class supports
    -    versioned urls (by default using the argument ``?v=``).  If a version
    -    is given, we instruct the browser to cache this file indefinitely.
    -    `make_static_url` (also available as `RequestHandler.static_url`) can
    -    be used to construct a versioned url.
    -
    -    This handler is intended primarily for use in development and light-duty
    -    file serving; for heavy traffic it will be more efficient to use
    -    a dedicated static file server (such as nginx or Apache).  We support
    -    the HTTP ``Accept-Ranges`` mechanism to return partial content (because
    -    some browsers require this functionality to be present to seek in
    -    HTML5 audio or video), but this handler should not be used with
    -    files that are too large to fit comfortably in memory.
    -
    -    **Subclassing notes**
    -
    -    This class is designed to be extensible by subclassing, but because
    -    of the way static urls are generated with class methods rather than
    -    instance methods, the inheritance patterns are somewhat unusual.
    -    Be sure to use the ``@classmethod`` decorator when overriding a
    -    class method.  Instance methods may use the attributes ``self.path``,
    -    ``self.absolute_path``, and ``self.modified``.
    -
    -    Subclasses should only override methods discussed in this section;
    -    overriding other methods is error-prone.  Overriding
    -    ``StaticFileHandler.get`` is particularly problematic due to the
    -    tight coupling with ``compute_etag`` and other methods.
    -
    -    To change the way static urls are generated (e.g. to match the behavior
    -    of another server or CDN), override `make_static_url`, `parse_url_path`,
    -    `get_cache_time`, and/or `get_version`.
    -
    -    To replace all interaction with the filesystem (e.g. to serve
    -    static content from a database), override `get_content`,
    -    `get_content_size`, `get_modified_time`, `get_absolute_path`, and
    -    `validate_absolute_path`.
    -
    -    .. versionchanged:: 3.1
    -       Many of the methods for subclasses were added in Tornado 3.1.
    -    """
    -    CACHE_MAX_AGE = 86400 * 365 * 10  # 10 years
    -
    -    _static_hashes = {}
    -    _lock = threading.Lock()  # protects _static_hashes
    -
    -    def initialize(self, path, default_filename=None):
    -        self.root = path
    -        self.default_filename = default_filename
    -
    -    @classmethod
    -    def reset(cls):
    -        with cls._lock:
    -            cls._static_hashes = {}
    -
    -    def head(self, path):
    -        return self.get(path, include_body=False)
    -
    -    @gen.coroutine
    -    def get(self, path, include_body=True):
    -        # Set up our path instance variables.
    -        self.path = self.parse_url_path(path)
    -        del path  # make sure we don't refer to path instead of self.path again
    -        absolute_path = self.get_absolute_path(self.root, self.path)
    -        self.absolute_path = self.validate_absolute_path(
    -            self.root, absolute_path)
    -        if self.absolute_path is None:
    -            return
    -
    -        self.modified = self.get_modified_time()
    -        self.set_headers()
    -
    -        if self.should_return_304():
    -            self.set_status(304)
    -            return
    -
    -        request_range = None
    -        range_header = self.request.headers.get("Range")
    -        if range_header:
    -            # As per RFC 2616 14.16, if an invalid Range header is specified,
    -            # the request will be treated as if the header didn't exist.
    -            request_range = httputil._parse_request_range(range_header)
    -
    -        size = self.get_content_size()
    -        if request_range:
    -            start, end = request_range
    -            if (start is not None and start >= size) or end == 0:
    -                # As per RFC 2616 14.35.1, a range is not satisfiable only
    -                # if the first requested byte is equal to or greater than
    -                # the content length, or when a suffix with length 0 is
    -                # specified.
    -                self.set_status(416)  # Range Not Satisfiable
    -                self.set_header("Content-Type", "text/plain")
    -                self.set_header("Content-Range", "bytes */%s" % (size, ))
    -                return
    -            if start is not None and start < 0:
    -                start += size
    -            if end is not None and end > size:
    -                # Clients sometimes blindly use a large range to limit their
    -                # download size; cap the endpoint at the actual file size.
    -                end = size
    -            # Note: only return HTTP 206 if less than the entire range has been
    -            # requested. Not only is this semantically correct, but Chrome
    -            # refuses to play audio if it gets an HTTP 206 in response to
    -            # ``Range: bytes=0-``.
    -            if size != (end or size) - (start or 0):
    -                self.set_status(206)  # Partial Content
    -                self.set_header("Content-Range",
    -                                httputil._get_content_range(start, end, size))
    -        else:
    -            start = end = None
    -
    -        if start is not None and end is not None:
    -            content_length = end - start
    -        elif end is not None:
    -            content_length = end
    -        elif start is not None:
    -            content_length = size - start
    -        else:
    -            content_length = size
    -        self.set_header("Content-Length", content_length)
    -
    -        if include_body:
    -            content = self.get_content(self.absolute_path, start, end)
    -            if isinstance(content, bytes_type):
    -                content = [content]
    -            for chunk in content:
    -                self.write(chunk)
    -                yield self.flush()
    -        else:
    -            assert self.request.method == "HEAD"
    -
    -    def compute_etag(self):
    -        """Sets the ``Etag`` header based on static url version.
    -
    -        This allows efficient ``If-None-Match`` checks against cached
    -        versions, and sends the correct ``Etag`` for a partial response
    -        (i.e. the same ``Etag`` as the full file).
    -
    -        .. versionadded:: 3.1
    -        """
    -        version_hash = self._get_cached_version(self.absolute_path)
    -        if not version_hash:
    -            return None
    -        return '"%s"' % (version_hash, )
    -
    -    def set_headers(self):
    -        """Sets the content and caching headers on the response.
    -
    -        .. versionadded:: 3.1
    -        """
    -        self.set_header("Accept-Ranges", "bytes")
    -        self.set_etag_header()
    -
    -        if self.modified is not None:
    -            self.set_header("Last-Modified", self.modified)
    -
    -        content_type = self.get_content_type()
    -        if content_type:
    -            self.set_header("Content-Type", content_type)
    -
    -        cache_time = self.get_cache_time(self.path, self.modified, content_type)
    -        if cache_time > 0:
    -            self.set_header("Expires", datetime.datetime.utcnow() +
    -                            datetime.timedelta(seconds=cache_time))
    -            self.set_header("Cache-Control", "max-age=" + str(cache_time))
    -
    -        self.set_extra_headers(self.path)
    -
    -    def should_return_304(self):
    -        """Returns True if the headers indicate that we should return 304.
    -
    -        .. versionadded:: 3.1
    -        """
    -        if self.check_etag_header():
    -            return True
    -
    -        # Check the If-Modified-Since, and don't send the result if the
    -        # content has not been modified
    -        ims_value = self.request.headers.get("If-Modified-Since")
    -        if ims_value is not None:
    -            date_tuple = email.utils.parsedate(ims_value)
    -            if date_tuple is not None:
    -                if_since = datetime.datetime(*date_tuple[:6])
    -                if if_since >= self.modified:
    -                    return True
    -
    -        return False
    -
    -    @classmethod
    -    def get_absolute_path(cls, root, path):
    -        """Returns the absolute location of ``path`` relative to ``root``.
    -
    -        ``root`` is the path configured for this `StaticFileHandler`
    -        (in most cases the ``static_path`` `Application` setting).
    -
    -        This class method may be overridden in subclasses.  By default
    -        it returns a filesystem path, but other strings may be used
    -        as long as they are unique and understood by the subclass's
    -        overridden `get_content`.
    -
    -        .. versionadded:: 3.1
    -        """
    -        abspath = os.path.abspath(os.path.join(root, path))
    -        return abspath
    -
    -    def validate_absolute_path(self, root, absolute_path):
    -        """Validate and return the absolute path.
    -
    -        ``root`` is the configured path for the `StaticFileHandler`,
    -        and ``path`` is the result of `get_absolute_path`
    -
    -        This is an instance method called during request processing,
    -        so it may raise `HTTPError` or use methods like
    -        `RequestHandler.redirect` (return None after redirecting to
    -        halt further processing).  This is where 404 errors for missing files
    -        are generated.
    -
    -        This method may modify the path before returning it, but note that
    -        any such modifications will not be understood by `make_static_url`.
    -
    -        In instance methods, this method's result is available as
    -        ``self.absolute_path``.
    -
    -        .. versionadded:: 3.1
    -        """
    -        root = os.path.abspath(root)
    -        # os.path.abspath strips a trailing /
    -        # it needs to be temporarily added back for requests to root/
    -        if not (absolute_path + os.path.sep).startswith(root):
    -            raise HTTPError(403, "%s is not in root static directory",
    -                            self.path)
    -        if (os.path.isdir(absolute_path) and
    -                self.default_filename is not None):
    -            # need to look at the request.path here for when path is empty
    -            # but there is some prefix to the path that was already
    -            # trimmed by the routing
    -            if not self.request.path.endswith("/"):
    -                self.redirect(self.request.path + "/", permanent=True)
    -                return
    -            absolute_path = os.path.join(absolute_path, self.default_filename)
    -        if not os.path.exists(absolute_path):
    -            raise HTTPError(404)
    -        if not os.path.isfile(absolute_path):
    -            raise HTTPError(403, "%s is not a file", self.path)
    -        return absolute_path
    -
    -    @classmethod
    -    def get_content(cls, abspath, start=None, end=None):
    -        """Retrieve the content of the requested resource which is located
    -        at the given absolute path.
    -
    -        This class method may be overridden by subclasses.  Note that its
    -        signature is different from other overridable class methods
    -        (no ``settings`` argument); this is deliberate to ensure that
    -        ``abspath`` is able to stand on its own as a cache key.
    -
    -        This method should either return a byte string or an iterator
    -        of byte strings.  The latter is preferred for large files
    -        as it helps reduce memory fragmentation.
    -
    -        .. versionadded:: 3.1
    -        """
    -        with open(abspath, "rb") as file:
    -            if start is not None:
    -                file.seek(start)
    -            if end is not None:
    -                remaining = end - (start or 0)
    -            else:
    -                remaining = None
    -            while True:
    -                chunk_size = 64 * 1024
    -                if remaining is not None and remaining < chunk_size:
    -                    chunk_size = remaining
    -                chunk = file.read(chunk_size)
    -                if chunk:
    -                    if remaining is not None:
    -                        remaining -= len(chunk)
    -                    yield chunk
    -                else:
    -                    if remaining is not None:
    -                        assert remaining == 0
    -                    return
    -
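A subclass can serve content that does not live on the filesystem by
overriding ``get_content`` together with ``validate_absolute_path``,
``get_content_size`` and ``get_modified_time``. A minimal sketch, with a
hypothetical ``InMemoryStaticHandler`` and a ``BLOBS`` dict standing in for
a real backing store::

    class InMemoryStaticHandler(StaticFileHandler):
        BLOBS = {"hello.txt": b"hello world"}  # hypothetical backing store

        @classmethod
        def get_absolute_path(cls, root, path):
            # Any unique string is acceptable here, as long as the
            # overridden get_content understands it.
            return path

        def validate_absolute_path(self, root, absolute_path):
            if absolute_path not in self.BLOBS:
                raise HTTPError(404)
            return absolute_path

        @classmethod
        def get_content(cls, abspath, start=None, end=None):
            # Slicing with None bounds returns the full payload.
            return cls.BLOBS[abspath][start:end]

        def get_content_size(self):
            return len(self.BLOBS[self.absolute_path])

        def get_modified_time(self):
            return None  # disables If-Modified-Since handling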
    -    @classmethod
    -    def get_content_version(cls, abspath):
    -        """Returns a version string for the resource at the given path.
    -
    -        This class method may be overridden by subclasses.  The
    -        default implementation is a hash of the file's contents.
    -
    -        .. versionadded:: 3.1
    -        """
    -        data = cls.get_content(abspath)
    -        hasher = hashlib.md5()
    -        if isinstance(data, bytes_type):
    -            hasher.update(data)
    -        else:
    -            for chunk in data:
    -                hasher.update(chunk)
    -        return hasher.hexdigest()
    -
    -    def _stat(self):
    -        if not hasattr(self, '_stat_result'):
    -            self._stat_result = os.stat(self.absolute_path)
    -        return self._stat_result
    -
    -    def get_content_size(self):
    -        """Retrieve the total size of the resource at the given path.
    -
    -        This method may be overridden by subclasses.
    -
    -        .. versionadded:: 3.1
    -
    -        .. versionchanged:: 4.0
    -           This method is now always called, instead of only when
    -           partial results are requested.
    -        """
    -        stat_result = self._stat()
    -        return stat_result[stat.ST_SIZE]
    -
    -    def get_modified_time(self):
    -        """Returns the time that ``self.absolute_path`` was last modified.
    -
    -        May be overridden in subclasses.  Should return a `~datetime.datetime`
    -        object or None.
    -
    -        .. versionadded:: 3.1
    -        """
    -        stat_result = self._stat()
    -        modified = datetime.datetime.utcfromtimestamp(stat_result[stat.ST_MTIME])
    -        return modified
    -
    -    def get_content_type(self):
    -        """Returns the ``Content-Type`` header to be used for this request.
    -
    -        .. versionadded:: 3.1
    -        """
    -        mime_type, encoding = mimetypes.guess_type(self.absolute_path)
    -        return mime_type
    -
    -    def set_extra_headers(self, path):
    -        """For subclass to add extra headers to the response"""
    -        pass
    -
    -    def get_cache_time(self, path, modified, mime_type):
    -        """Override to customize cache control behavior.
    -
    -        Return a positive number of seconds to make the result
    -        cacheable for that amount of time or 0 to mark resource as
    -        cacheable for an unspecified amount of time (subject to
    -        browser heuristics).
    -
-        By default, returns a cache expiry of 10 years for resources
-        requested with the ``v`` argument.
    -        """
    -        return self.CACHE_MAX_AGE if "v" in self.request.arguments else 0
    -
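A subclass that wants unversioned assets cached for a short, explicit
period rather than left to browser heuristics could override this; a
sketch (the handler name and the one-hour figure are arbitrary)::

    class ShortCacheStaticHandler(StaticFileHandler):
        def get_cache_time(self, path, modified, mime_type):
            if "v" in self.request.arguments:
                return self.CACHE_MAX_AGE  # versioned: cache for ~10 years
            return 3600  # unversioned: cache for one hour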
    -    @classmethod
    -    def make_static_url(cls, settings, path, include_version=True):
    -        """Constructs a versioned url for the given path.
    -
    -        This method may be overridden in subclasses (but note that it
    -        is a class method rather than an instance method).  Subclasses
    -        are only required to implement the signature
    -        ``make_static_url(cls, settings, path)``; other keyword
    -        arguments may be passed through `~RequestHandler.static_url`
    -        but are not standard.
    -
    -        ``settings`` is the `Application.settings` dictionary.  ``path``
    -        is the static path being requested.  The url returned should be
    -        relative to the current host.
    -
    -        ``include_version`` determines whether the generated URL should
    -        include the query string containing the version hash of the
    -        file corresponding to the given ``path``.
    -
    -        """
    -        url = settings.get('static_url_prefix', '/static/') + path
    -        if not include_version:
    -            return url
    -
    -        version_hash = cls.get_version(settings, path)
    -        if not version_hash:
    -            return url
    -
    -        return '%s?v=%s' % (url, version_hash)
    -
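In application code this is normally reached through
`RequestHandler.static_url`, but it can also be exercised directly; a
sketch with illustrative paths::

    settings = {"static_path": "/var/www/static",
                "static_url_prefix": "/static/"}
    StaticFileHandler.make_static_url(settings, "css/site.css")
    # -> "/static/css/site.css?v=<hash>" when the file can be hashed,
    #    or "/static/css/site.css" when no version could be determined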
    -    def parse_url_path(self, url_path):
    -        """Converts a static URL path into a filesystem path.
    -
    -        ``url_path`` is the path component of the URL with
    -        ``static_url_prefix`` removed.  The return value should be
    -        filesystem path relative to ``static_path``.
    -
    -        This is the inverse of `make_static_url`.
    -        """
    -        if os.path.sep != "/":
    -            url_path = url_path.replace("/", os.path.sep)
    -        return url_path
    -
    -    @classmethod
    -    def get_version(cls, settings, path):
    -        """Generate the version string to be used in static URLs.
    -
    -        ``settings`` is the `Application.settings` dictionary and ``path``
    -        is the relative location of the requested asset on the filesystem.
    -        The returned value should be a string, or ``None`` if no version
    -        could be determined.
    -
    -        .. versionchanged:: 3.1
    -           This method was previously recommended for subclasses to override;
    -           `get_content_version` is now preferred as it allows the base
    -           class to handle caching of the result.
    -        """
    -        abs_path = cls.get_absolute_path(settings['static_path'], path)
    -        return cls._get_cached_version(abs_path)
    -
    -    @classmethod
    -    def _get_cached_version(cls, abs_path):
    -        with cls._lock:
    -            hashes = cls._static_hashes
    -            if abs_path not in hashes:
    -                try:
    -                    hashes[abs_path] = cls.get_content_version(abs_path)
    -                except Exception:
    -                    gen_log.error("Could not open static file %r", abs_path)
    -                    hashes[abs_path] = None
    -            hsh = hashes.get(abs_path)
    -            if hsh:
    -                return hsh
    -        return None
    -
    -
    -class FallbackHandler(RequestHandler):
    -    """A `RequestHandler` that wraps another HTTP server callback.
    -
    -    The fallback is a callable object that accepts an
    -    `~.httputil.HTTPServerRequest`, such as an `Application` or
-    `tornado.wsgi.WSGIContainer`.  This is most useful for running both
-    Tornado ``RequestHandlers`` and WSGI apps in the same server.  Typical
    -    usage::
    -
    -        wsgi_app = tornado.wsgi.WSGIContainer(
    -            django.core.handlers.wsgi.WSGIHandler())
    -        application = tornado.web.Application([
    -            (r"/foo", FooHandler),
    -            (r".*", FallbackHandler, dict(fallback=wsgi_app),
    -        ])
    -    """
    -    def initialize(self, fallback):
    -        self.fallback = fallback
    -
    -    def prepare(self):
    -        self.fallback(self.request)
    -        self._finished = True
    -
    -
    -class OutputTransform(object):
    -    """A transform modifies the result of an HTTP request (e.g., GZip encoding)
    -
    -    Applications are not expected to create their own OutputTransforms
    -    or interact with them directly; the framework chooses which transforms
    -    (if any) to apply.
    -    """
    -    def __init__(self, request):
    -        pass
    -
    -    def transform_first_chunk(self, status_code, headers, chunk, finishing):
    -        return status_code, headers, chunk
    -
    -    def transform_chunk(self, chunk, finishing):
    -        return chunk
    -
    -
    -class GZipContentEncoding(OutputTransform):
    -    """Applies the gzip content encoding to the response.
    -
    -    See http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.11
    -
    -    .. versionchanged:: 4.0
    -        Now compresses all mime types beginning with ``text/``, instead
    -        of just a whitelist. (the whitelist is still used for certain
    -        non-text mime types).
    -    """
    -    # Whitelist of compressible mime types (in addition to any types
    -    # beginning with "text/").
    -    CONTENT_TYPES = set(["application/javascript", "application/x-javascript",
    -                         "application/xml", "application/atom+xml",
    -                         "application/json", "application/xhtml+xml"])
    -    MIN_LENGTH = 5
    -
    -    def __init__(self, request):
    -        self._gzipping = "gzip" in request.headers.get("Accept-Encoding", "")
    -
    -    def _compressible_type(self, ctype):
    -        return ctype.startswith('text/') or ctype in self.CONTENT_TYPES
    -
    -    def transform_first_chunk(self, status_code, headers, chunk, finishing):
-        if 'Vary' in headers:
-            headers['Vary'] += ', Accept-Encoding'
-        else:
-            headers['Vary'] = 'Accept-Encoding'
    -        if self._gzipping:
    -            ctype = _unicode(headers.get("Content-Type", "")).split(";")[0]
    -            self._gzipping = self._compressible_type(ctype) and \
    -                (not finishing or len(chunk) >= self.MIN_LENGTH) and \
    -                ("Content-Encoding" not in headers)
    -        if self._gzipping:
    -            headers["Content-Encoding"] = "gzip"
    -            self._gzip_value = BytesIO()
    -            self._gzip_file = gzip.GzipFile(mode="w", fileobj=self._gzip_value)
    -            chunk = self.transform_chunk(chunk, finishing)
    -            if "Content-Length" in headers:
    -                # The original content length is no longer correct.
    -                # If this is the last (and only) chunk, we can set the new
    -                # content-length; otherwise we remove it and fall back to
    -                # chunked encoding.
    -                if finishing:
    -                    headers["Content-Length"] = str(len(chunk))
    -                else:
    -                    del headers["Content-Length"]
    -        return status_code, headers, chunk
    -
    -    def transform_chunk(self, chunk, finishing):
    -        if self._gzipping:
    -            self._gzip_file.write(chunk)
    -            if finishing:
    -                self._gzip_file.close()
    -            else:
    -                self._gzip_file.flush()
    -            chunk = self._gzip_value.getvalue()
    -            self._gzip_value.truncate(0)
    -            self._gzip_value.seek(0)
    -        return chunk
    -
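The ``transform_chunk`` method above reuses a single ``GzipFile`` and
drains the underlying ``BytesIO`` after every write, so each HTTP chunk
carries only the newly compressed bytes. The same pattern in isolation,
using only the standard library::

    import gzip
    from io import BytesIO

    buf = BytesIO()
    gz = gzip.GzipFile(mode="w", fileobj=buf)
    for piece in (b"hello ", b"world"):
        gz.write(piece)
        gz.flush()
        chunk = buf.getvalue()  # compressed bytes ready to send
        buf.truncate(0)
        buf.seek(0)
    gz.close()
    trailer = buf.getvalue()  # final gzip trailer, sent when finishing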
    -
    -def authenticated(method):
    -    """Decorate methods with this to require that the user be logged in.
    -
    -    If the user is not logged in, they will be redirected to the configured
-    `login url <RequestHandler.get_login_url>`.
    -
    -    If you configure a login url with a query parameter, Tornado will
    -    assume you know what you're doing and use it as-is.  If not, it
    -    will add a `next` parameter so the login page knows where to send
    -    you once you're logged in.
    -    """
    -    @functools.wraps(method)
    -    def wrapper(self, *args, **kwargs):
    -        if not self.current_user:
    -            if self.request.method in ("GET", "HEAD"):
    -                url = self.get_login_url()
    -                if "?" not in url:
    -                    if urlparse.urlsplit(url).scheme:
    -                        # if login url is absolute, make next absolute too
    -                        next_url = self.request.full_url()
    -                    else:
    -                        next_url = self.request.uri
    -                    url += "?" + urlencode(dict(next=next_url))
    -                self.redirect(url)
    -                return
    -            raise HTTPError(403)
    -        return method(self, *args, **kwargs)
    -    return wrapper
    -
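Typical use, assuming the application configures a ``login_url`` setting
and the (hypothetical) handler supplies ``get_current_user``::

    class ProfileHandler(RequestHandler):
        def get_current_user(self):
            return self.get_secure_cookie("user")

        @authenticated
        def get(self):
            # Only reached when get_current_user() returned a truthy value;
            # anonymous GETs are redirected to the login url instead.
            self.write(self.current_user)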
    -
    -class UIModule(object):
    -    """A re-usable, modular UI unit on a page.
    -
-    UI modules often execute additional queries, and they can include
-    additional CSS and JavaScript that is automatically inserted into
-    the output page when it is rendered.
    -    """
    -    def __init__(self, handler):
    -        self.handler = handler
    -        self.request = handler.request
    -        self.ui = handler.ui
    -        self.locale = handler.locale
    -
    -    @property
    -    def current_user(self):
    -        return self.handler.current_user
    -
    -    def render(self, *args, **kwargs):
    -        """Overridden in subclasses to return this module's output."""
    -        raise NotImplementedError()
    -
    -    def embedded_javascript(self):
    -        """Returns a JavaScript string that will be embedded in the page."""
    -        return None
    -
    -    def javascript_files(self):
    -        """Returns a list of JavaScript files required by this module."""
    -        return None
    -
    -    def embedded_css(self):
    -        """Returns a CSS string that will be embedded in the page."""
    -        return None
    -
    -    def css_files(self):
    -        """Returns a list of CSS files required by this module."""
    -        return None
    -
    -    def html_head(self):
    -        """Returns a CSS string that will be put in the  element"""
    -        return None
    -
    -    def html_body(self):
    -        """Returns an HTML string that will be put in the  element"""
    -        return None
    -
    -    def render_string(self, path, **kwargs):
    -        """Renders a template and returns it as a string."""
    -        return self.handler.render_string(path, **kwargs)
    -
    -
    -class _linkify(UIModule):
    -    def render(self, text, **kwargs):
    -        return escape.linkify(text, **kwargs)
    -
    -
    -class _xsrf_form_html(UIModule):
    -    def render(self):
    -        return self.handler.xsrf_form_html()
    -
    -
    -class TemplateModule(UIModule):
    -    """UIModule that simply renders the given template.
    -
    -    {% module Template("foo.html") %} is similar to {% include "foo.html" %},
    -    but the module version gets its own namespace (with kwargs passed to
    -    Template()) instead of inheriting the outer template's namespace.
    -
    -    Templates rendered through this module also get access to UIModule's
    -    automatic javascript/css features.  Simply call set_resources
    -    inside the template and give it keyword arguments corresponding to
    -    the methods on UIModule: {{ set_resources(js_files=static_url("my.js")) }}
    -    Note that these resources are output once per template file, not once
    -    per instantiation of the template, so they must not depend on
    -    any arguments to the template.
    -    """
    -    def __init__(self, handler):
    -        super(TemplateModule, self).__init__(handler)
    -        # keep resources in both a list and a dict to preserve order
    -        self._resource_list = []
    -        self._resource_dict = {}
    -
    -    def render(self, path, **kwargs):
    -        def set_resources(**kwargs):
    -            if path not in self._resource_dict:
    -                self._resource_list.append(kwargs)
    -                self._resource_dict[path] = kwargs
    -            else:
    -                if self._resource_dict[path] != kwargs:
    -                    raise ValueError("set_resources called with different "
    -                                     "resources for the same template")
    -            return ""
    -        return self.render_string(path, set_resources=set_resources,
    -                                  **kwargs)
    -
    -    def _get_resources(self, key):
    -        return (r[key] for r in self._resource_list if key in r)
    -
    -    def embedded_javascript(self):
    -        return "\n".join(self._get_resources("embedded_javascript"))
    -
    -    def javascript_files(self):
    -        result = []
    -        for f in self._get_resources("javascript_files"):
    -            if isinstance(f, (unicode_type, bytes_type)):
    -                result.append(f)
    -            else:
    -                result.extend(f)
    -        return result
    -
    -    def embedded_css(self):
    -        return "\n".join(self._get_resources("embedded_css"))
    -
    -    def css_files(self):
    -        result = []
    -        for f in self._get_resources("css_files"):
    -            if isinstance(f, (unicode_type, bytes_type)):
    -                result.append(f)
    -            else:
    -                result.extend(f)
    -        return result
    -
    -    def html_head(self):
    -        return "".join(self._get_resources("html_head"))
    -
    -    def html_body(self):
    -        return "".join(self._get_resources("html_body"))
    -
    -
    -class _UIModuleNamespace(object):
    -    """Lazy namespace which creates UIModule proxies bound to a handler."""
    -    def __init__(self, handler, ui_modules):
    -        self.handler = handler
    -        self.ui_modules = ui_modules
    -
    -    def __getitem__(self, key):
    -        return self.handler._ui_module(key, self.ui_modules[key])
    -
    -    def __getattr__(self, key):
    -        try:
    -            return self[key]
    -        except KeyError as e:
    -            raise AttributeError(str(e))
    -
    -
    -class URLSpec(object):
    -    """Specifies mappings between URLs and handlers."""
    -    def __init__(self, pattern, handler, kwargs=None, name=None):
    -        """Parameters:
    -
    -        * ``pattern``: Regular expression to be matched.  Any groups
    -          in the regex will be passed in to the handler's get/post/etc
    -          methods as arguments.
    -
    -        * ``handler``: `RequestHandler` subclass to be invoked.
    -
    -        * ``kwargs`` (optional): A dictionary of additional arguments
    -          to be passed to the handler's constructor.
    -
    -        * ``name`` (optional): A name for this handler.  Used by
    -          `Application.reverse_url`.
    -        """
    -        if not pattern.endswith('$'):
    -            pattern += '$'
    -        self.regex = re.compile(pattern)
    -        assert len(self.regex.groupindex) in (0, self.regex.groups), \
    -            ("groups in url regexes must either be all named or all "
    -             "positional: %r" % self.regex.pattern)
    -
    -        if isinstance(handler, str):
    -            # import the Module and instantiate the class
    -            # Must be a fully qualified name (module.ClassName)
    -            handler = import_object(handler)
    -
    -        self.handler_class = handler
    -        self.kwargs = kwargs or {}
    -        self.name = name
    -        self._path, self._group_count = self._find_groups()
    -
    -    def __repr__(self):
    -        return '%s(%r, %s, kwargs=%r, name=%r)' % \
    -            (self.__class__.__name__, self.regex.pattern,
    -             self.handler_class, self.kwargs, self.name)
    -
    -    def _find_groups(self):
    -        """Returns a tuple (reverse string, group count) for a url.
    -
    -        For example: Given the url pattern /([0-9]{4})/([a-z-]+)/, this method
    -        would return ('/%s/%s/', 2).
    -        """
    -        pattern = self.regex.pattern
    -        if pattern.startswith('^'):
    -            pattern = pattern[1:]
    -        if pattern.endswith('$'):
    -            pattern = pattern[:-1]
    -
    -        if self.regex.groups != pattern.count('('):
    -            # The pattern is too complicated for our simplistic matching,
    -            # so we can't support reversing it.
    -            return (None, None)
    -
    -        pieces = []
    -        for fragment in pattern.split('('):
    -            if ')' in fragment:
    -                paren_loc = fragment.index(')')
    -                if paren_loc >= 0:
    -                    pieces.append('%s' + fragment[paren_loc + 1:])
    -            else:
    -                pieces.append(fragment)
    -
    -        return (''.join(pieces), self.regex.groups)
    -
    -    def reverse(self, *args):
    -        assert self._path is not None, \
    -            "Cannot reverse url regex " + self.regex.pattern
    -        assert len(args) == self._group_count, "required number of arguments "\
    -            "not found"
    -        if not len(args):
    -            return self._path
    -        converted_args = []
    -        for a in args:
    -            if not isinstance(a, (unicode_type, bytes_type)):
    -                a = str(a)
    -            converted_args.append(escape.url_escape(utf8(a), plus=False))
    -        return self._path % tuple(converted_args)
    -
    -url = URLSpec
    -
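Putting the pieces together, a spec whose pattern contains one group can
be reversed back into a concrete path; a sketch with a hypothetical
``ArticleHandler``::

    spec = url(r"/article/([0-9]+)", ArticleHandler, name="article")
    spec.reverse(42)  # -> "/article/42"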
    -
    -if hasattr(hmac, 'compare_digest'):  # python 3.3
    -    _time_independent_equals = hmac.compare_digest
    -else:
    -    def _time_independent_equals(a, b):
    -        if len(a) != len(b):
    -            return False
    -        result = 0
    -        if isinstance(a[0], int):  # python3 byte strings
    -            for x, y in zip(a, b):
    -                result |= x ^ y
    -        else:  # python2
    -            for x, y in zip(a, b):
    -                result |= ord(x) ^ ord(y)
    -        return result == 0
    -
    -
    -def create_signed_value(secret, name, value, version=None, clock=None):
    -    if version is None:
    -        version = DEFAULT_SIGNED_VALUE_VERSION
    -    if clock is None:
    -        clock = time.time
    -    timestamp = utf8(str(int(clock())))
    -    value = base64.b64encode(utf8(value))
    -    if version == 1:
    -        signature = _create_signature_v1(secret, name, value, timestamp)
    -        value = b"|".join([value, timestamp, signature])
    -        return value
    -    elif version == 2:
    -        # The v2 format consists of a version number and a series of
    -        # length-prefixed fields "%d:%s", the last of which is a
    -        # signature, all separated by pipes.  All numbers are in
    -        # decimal format with no leading zeros.  The signature is an
    -        # HMAC-SHA256 of the whole string up to that point, including
    -        # the final pipe.
    -        #
    -        # The fields are:
    -        # - format version (i.e. 2; no length prefix)
    -        # - key version (currently 0; reserved for future key rotation features)
    -        # - timestamp (integer seconds since epoch)
    -        # - name (not encoded; assumed to be ~alphanumeric)
    -        # - value (base64-encoded)
    -        # - signature (hex-encoded; no length prefix)
    -        def format_field(s):
    -            return utf8("%d:" % len(s)) + utf8(s)
    -        to_sign = b"|".join([
    -            b"2|1:0",
    -            format_field(timestamp),
    -            format_field(name),
    -            format_field(value),
    -            b''])
    -        signature = _create_signature_v2(secret, to_sign)
    -        return to_sign + signature
    -    else:
    -        raise ValueError("Unsupported version %d" % version)
    -
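Concretely, for ``name="foo"``, ``value="bar"`` (base64 ``YmFy``) and a
timestamp of ``1300000000``, the v2 layout produced above is::

    2|1:0|10:1300000000|3:foo|4:YmFy|<64 hex characters of HMAC-SHA256>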
    -# A leading version number in decimal with no leading zeros, followed by a pipe.
    -_signed_value_version_re = re.compile(br"^([1-9][0-9]*)\|(.*)$")
    -
    -
    -def decode_signed_value(secret, name, value, max_age_days=31, clock=None, min_version=None):
    -    if clock is None:
    -        clock = time.time
    -    if min_version is None:
    -        min_version = DEFAULT_SIGNED_VALUE_MIN_VERSION
    -    if min_version > 2:
    -        raise ValueError("Unsupported min_version %d" % min_version)
    -    if not value:
    -        return None
    -
    -    # Figure out what version this is.  Version 1 did not include an
    -    # explicit version field and started with arbitrary base64 data,
    -    # which makes this tricky.
    -    value = utf8(value)
    -    m = _signed_value_version_re.match(value)
    -    if m is None:
    -        version = 1
    -    else:
    -        try:
    -            version = int(m.group(1))
    -            if version > 999:
    -                # Certain payloads from the version-less v1 format may
    -                # be parsed as valid integers.  Due to base64 padding
    -                # restrictions, this can only happen for numbers whose
    -                # length is a multiple of 4, so we can treat all
    -                # numbers up to 999 as versions, and for the rest we
    -                # fall back to v1 format.
    -                version = 1
    -        except ValueError:
    -            version = 1
    -
    -    if version < min_version:
    -        return None
    -    if version == 1:
    -        return _decode_signed_value_v1(secret, name, value, max_age_days, clock)
    -    elif version == 2:
    -        return _decode_signed_value_v2(secret, name, value, max_age_days, clock)
    -    else:
    -        return None
    -
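The two functions are inverses of each other; a round trip with an
illustrative secret::

    signed = create_signed_value("s3cr3t", "foo", "bar")
    decode_signed_value("s3cr3t", "foo", signed)  # -> b'bar'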
    -
    -def _decode_signed_value_v1(secret, name, value, max_age_days, clock):
    -    parts = utf8(value).split(b"|")
    -    if len(parts) != 3:
    -        return None
    -    signature = _create_signature_v1(secret, name, parts[0], parts[1])
    -    if not _time_independent_equals(parts[2], signature):
    -        gen_log.warning("Invalid cookie signature %r", value)
    -        return None
    -    timestamp = int(parts[1])
    -    if timestamp < clock() - max_age_days * 86400:
    -        gen_log.warning("Expired cookie %r", value)
    -        return None
    -    if timestamp > clock() + 31 * 86400:
    -        # _cookie_signature does not hash a delimiter between the
    -        # parts of the cookie, so an attacker could transfer trailing
    -        # digits from the payload to the timestamp without altering the
    -        # signature.  For backwards compatibility, sanity-check timestamp
    -        # here instead of modifying _cookie_signature.
    -        gen_log.warning("Cookie timestamp in future; possible tampering %r", value)
    -        return None
    -    if parts[1].startswith(b"0"):
    -        gen_log.warning("Tampered cookie %r", value)
    -        return None
    -    try:
    -        return base64.b64decode(parts[0])
    -    except Exception:
    -        return None
    -
    -
    -def _decode_signed_value_v2(secret, name, value, max_age_days, clock):
    -    def _consume_field(s):
    -        length, _, rest = s.partition(b':')
    -        n = int(length)
    -        field_value = rest[:n]
    -        # In python 3, indexing bytes returns small integers; we must
    -        # use a slice to get a byte string as in python 2.
    -        if rest[n:n + 1] != b'|':
    -            raise ValueError("malformed v2 signed value field")
    -        rest = rest[n + 1:]
    -        return field_value, rest
    -    rest = value[2:]  # remove version number
    -    try:
    -        key_version, rest = _consume_field(rest)
    -        timestamp, rest = _consume_field(rest)
    -        name_field, rest = _consume_field(rest)
    -        value_field, rest = _consume_field(rest)
    -    except ValueError:
    -        return None
    -    passed_sig = rest
    -    signed_string = value[:-len(passed_sig)]
    -    expected_sig = _create_signature_v2(secret, signed_string)
    -    if not _time_independent_equals(passed_sig, expected_sig):
    -        return None
    -    if name_field != utf8(name):
    -        return None
    -    timestamp = int(timestamp)
    -    if timestamp < clock() - max_age_days * 86400:
    -        # The signature has expired.
    -        return None
    -    try:
    -        return base64.b64decode(value_field)
    -    except Exception:
    -        return None
    -
    -
    -def _create_signature_v1(secret, *parts):
    -    hash = hmac.new(utf8(secret), digestmod=hashlib.sha1)
    -    for part in parts:
    -        hash.update(utf8(part))
    -    return utf8(hash.hexdigest())
    -
    -
    -def _create_signature_v2(secret, s):
    -    hash = hmac.new(utf8(secret), digestmod=hashlib.sha256)
    -    hash.update(utf8(s))
    -    return utf8(hash.hexdigest())
    -
    -
    -def _unquote_or_none(s):
    -    """None-safe wrapper around url_unescape to handle unamteched optional
    -    groups correctly.
    -
    -    Note that args are passed as bytes so the handler can decide what
    -    encoding to use.
    -    """
    -    if s is None:
    -        return s
    -    return escape.url_unescape(s, encoding=None, plus=False)
    diff --git a/rosbridge_server/src/tornado/websocket.py b/rosbridge_server/src/tornado/websocket.py
    deleted file mode 100644
    index ed520d586..000000000
    --- a/rosbridge_server/src/tornado/websocket.py
    +++ /dev/null
    @@ -1,804 +0,0 @@
    -"""Implementation of the WebSocket protocol.
    -
-`WebSockets <http://dev.w3.org/html5/websockets/>`_ allow for bidirectional
    -communication between the browser and server.
    -
    -WebSockets are supported in the current versions of all major browsers,
    -although older versions that do not support WebSockets are still in use
    -(refer to http://caniuse.com/websockets for details).
    -
    -This module implements the final version of the WebSocket protocol as
-defined in `RFC 6455 <http://tools.ietf.org/html/rfc6455>`_.  Certain
    -browser versions (notably Safari 5.x) implemented an earlier draft of
    -the protocol (known as "draft 76") and are not compatible with this module.
    -
    -.. versionchanged:: 4.0
    -   Removed support for the draft 76 protocol version.
    -"""
    -
    -from __future__ import absolute_import, division, print_function, with_statement
    -# Author: Jacob Kristhammar, 2010
    -
    -import base64
    -import collections
    -import hashlib
    -import os
    -import struct
    -import tornado.escape
    -import tornado.web
    -
    -from tornado.concurrent import TracebackFuture
    -from tornado.escape import utf8, native_str, to_unicode
    -from tornado import httpclient, httputil
    -from tornado.ioloop import IOLoop
    -from tornado.iostream import StreamClosedError
    -from tornado.log import gen_log, app_log
    -from tornado import simple_httpclient
    -from tornado.tcpclient import TCPClient
    -from tornado.util import bytes_type, _websocket_mask
    -
    -try:
-    from urllib.parse import urlparse # py3
-except ImportError:
-    from urlparse import urlparse # py2
    -
    -try:
    -    xrange  # py2
    -except NameError:
    -    xrange = range  # py3
    -
    -
    -class WebSocketError(Exception):
    -    pass
    -
    -
    -class WebSocketClosedError(WebSocketError):
    -    """Raised by operations on a closed connection.
    -
    -    .. versionadded:: 3.2
    -    """
    -    pass
    -
    -
    -class WebSocketHandler(tornado.web.RequestHandler):
    -    """Subclass this class to create a basic WebSocket handler.
    -
    -    Override `on_message` to handle incoming messages, and use
    -    `write_message` to send messages to the client. You can also
    -    override `open` and `on_close` to handle opened and closed
    -    connections.
    -
    -    See http://dev.w3.org/html5/websockets/ for details on the
    -    JavaScript interface.  The protocol is specified at
    -    http://tools.ietf.org/html/rfc6455.
    -
-    Here is an example WebSocket handler that echoes all received
-    messages back to the client::
    -
    -      class EchoWebSocket(websocket.WebSocketHandler):
    -          def open(self):
    -              print "WebSocket opened"
    -
    -          def on_message(self, message):
    -              self.write_message(u"You said: " + message)
    -
    -          def on_close(self):
    -              print "WebSocket closed"
    -
    -    WebSockets are not standard HTTP connections. The "handshake" is
    -    HTTP, but after the handshake, the protocol is
    -    message-based. Consequently, most of the Tornado HTTP facilities
    -    are not available in handlers of this type. The only communication
    -    methods available to you are `write_message()`, `ping()`, and
-    `close()`. Likewise, your request handler class should implement the
-    `open()` method rather than ``get()`` or ``post()``.
    -
    -    If you map the handler above to ``/websocket`` in your application, you can
    -    invoke it in JavaScript with::
    -
    -      var ws = new WebSocket("ws://localhost:8888/websocket");
    -      ws.onopen = function() {
    -         ws.send("Hello, world");
    -      };
    -      ws.onmessage = function (evt) {
    -         alert(evt.data);
    -      };
    -
    -    This script pops up an alert box that says "You said: Hello, world".
    -
    -    Web browsers allow any site to open a websocket connection to any other,
    -    instead of using the same-origin policy that governs other network
    -    access from javascript.  This can be surprising and is a potential
    -    security hole, so since Tornado 4.0 `WebSocketHandler` requires
    -    applications that wish to receive cross-origin websockets to opt in
    -    by overriding the `~WebSocketHandler.check_origin` method (see that
    -    method's docs for details).  Failure to do so is the most likely
    -    cause of 403 errors when making a websocket connection.
    -
    -    When using a secure websocket connection (``wss://``) with a self-signed
    -    certificate, the connection from a browser may fail because it wants
    -    to show the "accept this certificate" dialog but has nowhere to show it.
    -    You must first visit a regular HTML page using the same certificate
    -    to accept it before the websocket connection will succeed.
    -    """
    -    def __init__(self, application, request, **kwargs):
    -        tornado.web.RequestHandler.__init__(self, application, request,
    -                                            **kwargs)
    -        self.ws_connection = None
    -        self.close_code = None
    -        self.close_reason = None
    -        self.stream = None
    -
    -    @tornado.web.asynchronous
    -    def get(self, *args, **kwargs):
    -        self.open_args = args
    -        self.open_kwargs = kwargs
    -
    -        # Upgrade header should be present and should be equal to WebSocket
    -        if self.request.headers.get("Upgrade", "").lower() != 'websocket':
    -            self.set_status(400)
    -            self.finish("Can \"Upgrade\" only to \"WebSocket\".")
    -            return
    -
    -        # Connection header should be upgrade. Some proxy servers/load balancers
    -        # might mess with it.
    -        headers = self.request.headers
    -        connection = map(lambda s: s.strip().lower(), headers.get("Connection", "").split(","))
    -        if 'upgrade' not in connection:
    -            self.set_status(400)
    -            self.finish("\"Connection\" must be \"Upgrade\".")
    -            return
    -
    -        # Handle WebSocket Origin naming convention differences
    -        # The difference between version 8 and 13 is that in 8 the
    -        # client sends a "Sec-Websocket-Origin" header and in 13 it's
    -        # simply "Origin".
    -        if "Origin" in self.request.headers:
    -            origin = self.request.headers.get("Origin")
    -        else:
    -            origin = self.request.headers.get("Sec-Websocket-Origin", None)
    -
    -
    -        # If there was an origin header, check to make sure it matches
    -        # according to check_origin. When the origin is None, we assume it
    -        # did not come from a browser and that it can be passed on.
    -        if origin is not None and not self.check_origin(origin):
    -            self.set_status(403)
    -            self.finish("Cross origin websockets not allowed")
    -            return
    -
    -        self.stream = self.request.connection.detach()
    -        self.stream.set_close_callback(self.on_connection_close)
    -
    -        if self.request.headers.get("Sec-WebSocket-Version") in ("7", "8", "13"):
    -            self.ws_connection = WebSocketProtocol13(self)
    -            self.ws_connection.accept_connection()
    -        else:
    -            self.stream.write(tornado.escape.utf8(
    -                "HTTP/1.1 426 Upgrade Required\r\n"
    -                "Sec-WebSocket-Version: 8\r\n\r\n"))
    -            self.stream.close()
    -
    -
    -    def write_message(self, message, binary=False):
    -        """Sends the given message to the client of this Web Socket.
    -
    -        The message may be either a string or a dict (which will be
    -        encoded as json).  If the ``binary`` argument is false, the
    -        message will be sent as utf8; in binary mode any byte string
    -        is allowed.
    -
    -        If the connection is already closed, raises `WebSocketClosedError`.
    -
    -        .. versionchanged:: 3.2
    -           `WebSocketClosedError` was added (previously a closed connection
    -           would raise an `AttributeError`)
    -        """
    -        if self.ws_connection is None:
    -            raise WebSocketClosedError()
    -        if isinstance(message, dict):
    -            message = tornado.escape.json_encode(message)
    -        self.ws_connection.write_message(message, binary=binary)
    -
    -    def select_subprotocol(self, subprotocols):
    -        """Invoked when a new WebSocket requests specific subprotocols.
    -
    -        ``subprotocols`` is a list of strings identifying the
    -        subprotocols proposed by the client.  This method may be
    -        overridden to return one of those strings to select it, or
    -        ``None`` to not select a subprotocol.  Failure to select a
    -        subprotocol does not automatically abort the connection,
    -        although clients may close the connection if none of their
    -        proposed subprotocols was selected.
    -        """
    -        return None
    -
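A handler that speaks, say, a hypothetical ``json`` subprotocol might
override it like this::

    def select_subprotocol(self, subprotocols):
        if "json" in subprotocols:
            return "json"
        return None  # no common subprotocol; keep the connection anyway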
    -    def open(self):
    -        """Invoked when a new WebSocket is opened.
    -
    -        The arguments to `open` are extracted from the `tornado.web.URLSpec`
    -        regular expression, just like the arguments to
    -        `tornado.web.RequestHandler.get`.
    -        """
    -        pass
    -
    -    def on_message(self, message):
    -        """Handle incoming messages on the WebSocket
    -
    -        This method must be overridden.
    -        """
    -        raise NotImplementedError
    -
    -    def ping(self, data):
    -        """Send ping frame to the remote end."""
    -        if self.ws_connection is None:
    -            raise WebSocketClosedError()
    -        self.ws_connection.write_ping(data)
    -
    -    def on_pong(self, data):
    -        """Invoked when the response to a ping frame is received."""
    -        pass
    -
    -    def on_close(self):
    -        """Invoked when the WebSocket is closed.
    -
    -        If the connection was closed cleanly and a status code or reason
    -        phrase was supplied, these values will be available as the attributes
    -        ``self.close_code`` and ``self.close_reason``.
    -
    -        .. versionchanged:: 4.0
    -
    -           Added ``close_code`` and ``close_reason`` attributes.
    -        """
    -        pass
    -
    -    def close(self, code=None, reason=None):
    -        """Closes this Web Socket.
    -
    -        Once the close handshake is successful the socket will be closed.
    -
    -        ``code`` may be a numeric status code, taken from the values
-        defined in `RFC 6455 section 7.4.1
-        <https://tools.ietf.org/html/rfc6455#section-7.4.1>`_.
    -        ``reason`` may be a textual message about why the connection is
    -        closing.  These values are made available to the client, but are
    -        not otherwise interpreted by the websocket protocol.
    -
    -        .. versionchanged:: 4.0
    -
    -           Added the ``code`` and ``reason`` arguments.
    -        """
    -        if self.ws_connection:
    -            self.ws_connection.close(code, reason)
    -            self.ws_connection = None
    -
    -    def check_origin(self, origin):
    -        """Override to enable support for allowing alternate origins.
    -
    -        The ``origin`` argument is the value of the ``Origin`` HTTP
    -        header, the url responsible for initiating this request.  This
    -        method is not called for clients that do not send this header;
    -        such requests are always allowed (because all browsers that
    -        implement WebSockets support this header, and non-browser
    -        clients do not have the same cross-site security concerns).
    -
    -        Should return True to accept the request or False to reject it.
    -        By default, rejects all requests with an origin on a host other
    -        than this one.
    -
    -        This is a security protection against cross site scripting attacks on
    -        browsers, since WebSockets are allowed to bypass the usual same-origin
    -        policies and don't use CORS headers.
    -
    -        To accept all cross-origin traffic (which was the default prior to
    -        Tornado 4.0), simply override this method to always return true::
    -
    -            def check_origin(self, origin):
    -                return True
    -
    -        To allow connections from any subdomain of your site, you might
    -        do something like::
    -
    -            def check_origin(self, origin):
    -                parsed_origin = urllib.parse.urlparse(origin)
    -                return parsed_origin.netloc.endswith(".mydomain.com")
    -
    -        .. versionadded:: 4.0
    -        """
    -        parsed_origin = urlparse(origin)
    -        origin = parsed_origin.netloc
    -        origin = origin.lower()
    -
    -        host = self.request.headers.get("Host")
    -
    -        # Check to see that origin matches host directly, including ports
    -        return origin == host
    -
    -    def set_nodelay(self, value):
    -        """Set the no-delay flag for this stream.
    -
    -        By default, small messages may be delayed and/or combined to minimize
    -        the number of packets sent.  This can sometimes cause 200-500ms delays
    -        due to the interaction between Nagle's algorithm and TCP delayed
    -        ACKs.  To reduce this delay (at the expense of possibly increasing
    -        bandwidth usage), call ``self.set_nodelay(True)`` once the websocket
    -        connection is established.
    -
    -        See `.BaseIOStream.set_nodelay` for additional details.
    -
    -        .. versionadded:: 3.1
    -        """
    -        self.stream.set_nodelay(value)
    -
    -    def on_connection_close(self):
    -        if self.ws_connection:
    -            self.ws_connection.on_connection_close()
    -            self.ws_connection = None
    -            self.on_close()
    -
    -
    -def _wrap_method(method):
    -    def _disallow_for_websocket(self, *args, **kwargs):
    -        if self.stream is None:
    -            method(self, *args, **kwargs)
    -        else:
    -            raise RuntimeError("Method not supported for Web Sockets")
    -    return _disallow_for_websocket
    -for method in ["write", "redirect", "set_header", "send_error", "set_cookie",
    -               "set_status", "flush", "finish"]:
    -    setattr(WebSocketHandler, method,
    -            _wrap_method(getattr(WebSocketHandler, method)))
    -
    -
    -class WebSocketProtocol(object):
    -    """Base class for WebSocket protocol versions.
    -    """
    -    def __init__(self, handler):
    -        self.handler = handler
    -        self.request = handler.request
    -        self.stream = handler.stream
    -        self.client_terminated = False
    -        self.server_terminated = False
    -
    -    def _run_callback(self, callback, *args, **kwargs):
    -        """Runs the given callback with exception handling.
    -
    -        On error, aborts the websocket connection and returns False.
    -        """
    -        try:
    -            callback(*args, **kwargs)
    -        except Exception:
    -            app_log.error("Uncaught exception in %s",
    -                          self.request.path, exc_info=True)
    -            self._abort()
    -
    -    def on_connection_close(self):
    -        self._abort()
    -
    -    def _abort(self):
    -        """Instantly aborts the WebSocket connection by closing the socket"""
    -        self.client_terminated = True
    -        self.server_terminated = True
    -        self.stream.close()  # forcibly tear down the connection
    -        self.close()  # let the subclass cleanup
    -
    -
    -class WebSocketProtocol13(WebSocketProtocol):
    -    """Implementation of the WebSocket protocol from RFC 6455.
    -
    -    This class supports versions 7 and 8 of the protocol in addition to the
    -    final version 13.
    -    """
    -    def __init__(self, handler, mask_outgoing=False):
    -        WebSocketProtocol.__init__(self, handler)
    -        self.mask_outgoing = mask_outgoing
    -        self._final_frame = False
    -        self._frame_opcode = None
    -        self._masked_frame = None
    -        self._frame_mask = None
    -        self._frame_length = None
    -        self._fragmented_message_buffer = None
    -        self._fragmented_message_opcode = None
    -        self._waiting = None
    -
    -    def accept_connection(self):
    -        try:
    -            self._handle_websocket_headers()
    -            self._accept_connection()
    -        except ValueError:
    -            gen_log.debug("Malformed WebSocket request received", exc_info=True)
    -            self._abort()
    -            return
    -
    -    def _handle_websocket_headers(self):
    -        """Verifies all invariant- and required headers
    -
    -        If a header is missing or have an incorrect value ValueError will be
    -        raised
    -        """
    -        fields = ("Host", "Sec-Websocket-Key", "Sec-Websocket-Version")
    -        if not all(map(lambda f: self.request.headers.get(f), fields)):
    -            raise ValueError("Missing/Invalid WebSocket headers")
    -
    -    @staticmethod
    -    def compute_accept_value(key):
    -        """Computes the value for the Sec-WebSocket-Accept header,
    -        given the value for Sec-WebSocket-Key.
    -        """
    -        sha1 = hashlib.sha1()
    -        sha1.update(utf8(key))
    -        sha1.update(b"258EAFA5-E914-47DA-95CA-C5AB0DC85B11")  # Magic value
    -        return native_str(base64.b64encode(sha1.digest()))
    -
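This matches the worked example in RFC 6455 section 1.3, where the sample
client key hashes to the accept value shown::

    WebSocketProtocol13.compute_accept_value("dGhlIHNhbXBsZSBub25jZQ==")
    # -> 's3pPLMBiTxaQ9kYGzzhZRbK+xOo='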
    -    def _challenge_response(self):
    -        return WebSocketProtocol13.compute_accept_value(
    -            self.request.headers.get("Sec-Websocket-Key"))
    -
    -    def _accept_connection(self):
    -        subprotocol_header = ''
    -        subprotocols = self.request.headers.get("Sec-WebSocket-Protocol", '')
    -        subprotocols = [s.strip() for s in subprotocols.split(',')]
    -        if subprotocols:
    -            selected = self.handler.select_subprotocol(subprotocols)
    -            if selected:
    -                assert selected in subprotocols
    -                subprotocol_header = "Sec-WebSocket-Protocol: %s\r\n" % selected
    -
    -        self.stream.write(tornado.escape.utf8(
    -            "HTTP/1.1 101 Switching Protocols\r\n"
    -            "Upgrade: websocket\r\n"
    -            "Connection: Upgrade\r\n"
    -            "Sec-WebSocket-Accept: %s\r\n"
    -            "%s"
    -            "\r\n" % (self._challenge_response(), subprotocol_header)))
    -
    -        self._run_callback(self.handler.open, *self.handler.open_args,
    -                           **self.handler.open_kwargs)
    -        self._receive_frame()
    -
    -    def _write_frame(self, fin, opcode, data):
    -        if fin:
    -            finbit = 0x80
    -        else:
    -            finbit = 0
    -        frame = struct.pack("B", finbit | opcode)
    -        l = len(data)
    -        if self.mask_outgoing:
    -            mask_bit = 0x80
    -        else:
    -            mask_bit = 0
    -        if l < 126:
    -            frame += struct.pack("B", l | mask_bit)
    -        elif l <= 0xFFFF:
    -            frame += struct.pack("!BH", 126 | mask_bit, l)
    -        else:
    -            frame += struct.pack("!BQ", 127 | mask_bit, l)
    -        if self.mask_outgoing:
    -            mask = os.urandom(4)
    -            data = mask + _websocket_mask(mask, data)
    -        frame += data
    -        self.stream.write(frame)
    -
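For reference, the bytes produced for a short unmasked text frame follow
directly from the packing above (FIN bit ``0x80`` ORed with text opcode
``0x1``, then a 7-bit payload length)::

    self._write_frame(True, 0x1, b"Hello")
    # -> b"\x81\x05Hello" on the wire (no mask bit for server frames)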
    -    def write_message(self, message, binary=False):
    -        """Sends the given message to the client of this Web Socket."""
    -        if binary:
    -            opcode = 0x2
    -        else:
    -            opcode = 0x1
    -        message = tornado.escape.utf8(message)
    -        assert isinstance(message, bytes_type)
    -        try:
    -            self._write_frame(True, opcode, message)
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def write_ping(self, data):
    -        """Send ping frame."""
    -        assert isinstance(data, bytes_type)
    -        self._write_frame(True, 0x9, data)
    -
    -    def _receive_frame(self):
    -        try:
    -            self.stream.read_bytes(2, self._on_frame_start)
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def _on_frame_start(self, data):
    -        header, payloadlen = struct.unpack("BB", data)
    -        self._final_frame = header & 0x80
    -        reserved_bits = header & 0x70
    -        self._frame_opcode = header & 0xf
    -        self._frame_opcode_is_control = self._frame_opcode & 0x8
    -        if reserved_bits:
    -            # client is using as-yet-undefined extensions; abort
    -            self._abort()
    -            return
    -        self._masked_frame = bool(payloadlen & 0x80)
    -        payloadlen = payloadlen & 0x7f
    -        if self._frame_opcode_is_control and payloadlen >= 126:
    -            # control frames must have payload < 126
    -            self._abort()
    -            return
    -        try:
    -            if payloadlen < 126:
    -                self._frame_length = payloadlen
    -                if self._masked_frame:
    -                    self.stream.read_bytes(4, self._on_masking_key)
    -                else:
    -                    self.stream.read_bytes(self._frame_length, self._on_frame_data)
    -            elif payloadlen == 126:
    -                self.stream.read_bytes(2, self._on_frame_length_16)
    -            elif payloadlen == 127:
    -                self.stream.read_bytes(8, self._on_frame_length_64)
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def _on_frame_length_16(self, data):
    -        self._frame_length = struct.unpack("!H", data)[0]
    -        try:
    -            if self._masked_frame:
    -                self.stream.read_bytes(4, self._on_masking_key)
    -            else:
    -                self.stream.read_bytes(self._frame_length, self._on_frame_data)
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def _on_frame_length_64(self, data):
    -        self._frame_length = struct.unpack("!Q", data)[0]
    -        try:
    -            if self._masked_frame:
    -                self.stream.read_bytes(4, self._on_masking_key)
    -            else:
    -                self.stream.read_bytes(self._frame_length, self._on_frame_data)
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def _on_masking_key(self, data):
    -        self._frame_mask = data
    -        try:
    -            self.stream.read_bytes(self._frame_length, self._on_masked_frame_data)
    -        except StreamClosedError:
    -            self._abort()
    -
    -    def _on_masked_frame_data(self, data):
    -        self._on_frame_data(_websocket_mask(self._frame_mask, data))
    -
    -    def _on_frame_data(self, data):
    -        if self._frame_opcode_is_control:
    -            # control frames may be interleaved with a series of fragmented
    -            # data frames, so control frames must not interact with
    -            # self._fragmented_*
    -            if not self._final_frame:
    -                # control frames must not be fragmented
    -                self._abort()
    -                return
    -            opcode = self._frame_opcode
    -        elif self._frame_opcode == 0:  # continuation frame
    -            if self._fragmented_message_buffer is None:
    -                # nothing to continue
    -                self._abort()
    -                return
    -            self._fragmented_message_buffer += data
    -            if self._final_frame:
    -                opcode = self._fragmented_message_opcode
    -                data = self._fragmented_message_buffer
    -                self._fragmented_message_buffer = None
    -        else:  # start of new data message
    -            if self._fragmented_message_buffer is not None:
    -                # can't start new message until the old one is finished
    -                self._abort()
    -                return
    -            if self._final_frame:
    -                opcode = self._frame_opcode
    -            else:
    -                self._fragmented_message_opcode = self._frame_opcode
    -                self._fragmented_message_buffer = data
    -
    -        if self._final_frame:
    -            self._handle_message(opcode, data)
    -
    -        if not self.client_terminated:
    -            self._receive_frame()
    -
    -    def _handle_message(self, opcode, data):
    -        if self.client_terminated:
    -            return
    -
    -        if opcode == 0x1:
    -            # UTF-8 data
    -            try:
    -                decoded = data.decode("utf-8")
    -            except UnicodeDecodeError:
    -                self._abort()
    -                return
    -            self._run_callback(self.handler.on_message, decoded)
    -        elif opcode == 0x2:
    -            # Binary data
    -            self._run_callback(self.handler.on_message, data)
    -        elif opcode == 0x8:
    -            # Close
    -            self.client_terminated = True
    -            if len(data) >= 2:
    -                self.handler.close_code = struct.unpack('>H', data[:2])[0]
    -            if len(data) > 2:
    -                self.handler.close_reason = to_unicode(data[2:])
    -            self.close()
    -        elif opcode == 0x9:
    -            # Ping
    -            self._write_frame(True, 0xA, data)
    -        elif opcode == 0xA:
    -            # Pong
    -            self._run_callback(self.handler.on_pong, data)
    -        else:
    -            self._abort()
    -
    -    def close(self, code=None, reason=None):
    -        """Closes the WebSocket connection."""
    -        if not self.server_terminated:
    -            if not self.stream.closed():
    -                if code is None and reason is not None:
    -                    code = 1000  # "normal closure" status code
    -                if code is None:
    -                    close_data = b''
    -                else:
    -                    close_data = struct.pack('>H', code)
    -                if reason is not None:
    -                    close_data += utf8(reason)
    -                self._write_frame(True, 0x8, close_data)
    -            self.server_terminated = True
    -        if self.client_terminated:
    -            if self._waiting is not None:
    -                self.stream.io_loop.remove_timeout(self._waiting)
    -                self._waiting = None
    -            self.stream.close()
    -        elif self._waiting is None:
    -            # Give the client a few seconds to complete a clean shutdown,
    -            # otherwise just close the connection.
    -            self._waiting = self.stream.io_loop.add_timeout(
    -                self.stream.io_loop.time() + 5, self._abort)
    -
    -
    -class WebSocketClientConnection(simple_httpclient._HTTPConnection):
    -    """WebSocket client connection.
    -
    -    This class should not be instantiated directly; use the
    -    `websocket_connect` function instead.
    -    """
    -    def __init__(self, io_loop, request):
    -        self.connect_future = TracebackFuture()
    -        self.read_future = None
    -        self.read_queue = collections.deque()
    -        self.key = base64.b64encode(os.urandom(16))
    -
    -        scheme, sep, rest = request.url.partition(':')
    -        scheme = {'ws': 'http', 'wss': 'https'}[scheme]
    -        request.url = scheme + sep + rest
    -        request.headers.update({
    -            'Upgrade': 'websocket',
    -            'Connection': 'Upgrade',
    -            'Sec-WebSocket-Key': self.key,
    -            'Sec-WebSocket-Version': '13',
    -        })
    -
    -        self.tcp_client = TCPClient(io_loop=io_loop)
    -        super(WebSocketClientConnection, self).__init__(
    -            io_loop, None, request, lambda: None, self._on_http_response,
    -            104857600, self.tcp_client, 65536)
    -
    -    def close(self, code=None, reason=None):
    -        """Closes the websocket connection.
    -
    -        ``code`` and ``reason`` are documented under
    -        `WebSocketHandler.close`.
    -
    -        .. versionadded:: 3.2
    -
    -        .. versionchanged:: 4.0
    -
    -           Added the ``code`` and ``reason`` arguments.
    -        """
    -        if self.protocol is not None:
    -            self.protocol.close(code, reason)
    -            self.protocol = None
    -
    -    def on_connection_close(self):
    -        if not self.connect_future.done():
    -            self.connect_future.set_exception(StreamClosedError())
    -        self.on_message(None)
    -        self.tcp_client.close()
    -        super(WebSocketClientConnection, self).on_connection_close()
    -
    -    def _on_http_response(self, response):
    -        if not self.connect_future.done():
    -            if response.error:
    -                self.connect_future.set_exception(response.error)
    -            else:
    -                self.connect_future.set_exception(WebSocketError(
    -                    "Non-websocket response"))
    -
    -    def headers_received(self, start_line, headers):
    -        if start_line.code != 101:
    -            return super(WebSocketClientConnection, self).headers_received(
    -                start_line, headers)
    -
    -        self.headers = headers
    -        assert self.headers['Upgrade'].lower() == 'websocket'
    -        assert self.headers['Connection'].lower() == 'upgrade'
    -        accept = WebSocketProtocol13.compute_accept_value(self.key)
    -        assert self.headers['Sec-Websocket-Accept'] == accept
    -
    -        self.protocol = WebSocketProtocol13(self, mask_outgoing=True)
    -        self.protocol._receive_frame()
    -
    -        if self._timeout is not None:
    -            self.io_loop.remove_timeout(self._timeout)
    -            self._timeout = None
    -
    -        self.stream = self.connection.detach()
    -        self.stream.set_close_callback(self.on_connection_close)
    -        # Once we've taken over the connection, clear the final callback
    -        # we set on the http request.  This deactivates the error handling
    -        # in simple_httpclient that would otherwise interfere with our
    -        # ability to see exceptions.
    -        self.final_callback = None
    -
    -        self.connect_future.set_result(self)
    -
    -    def write_message(self, message, binary=False):
    -        """Sends a message to the WebSocket server."""
    -        self.protocol.write_message(message, binary)
    -
    -    def read_message(self, callback=None):
    -        """Reads a message from the WebSocket server.
    -
    -        Returns a future whose result is the message, or None
    -        if the connection is closed.  If a callback argument
    -        is given it will be called with the future when it is
    -        ready.
    -        """
    -        assert self.read_future is None
    -        future = TracebackFuture()
    -        if self.read_queue:
    -            future.set_result(self.read_queue.popleft())
    -        else:
    -            self.read_future = future
    -        if callback is not None:
    -            self.io_loop.add_future(future, callback)
    -        return future
    -
    -    def on_message(self, message):
    -        if self.read_future is not None:
    -            self.read_future.set_result(message)
    -            self.read_future = None
    -        else:
    -            self.read_queue.append(message)
    -
    -    def on_pong(self, data):
    -        pass
    -
    -
    -def websocket_connect(url, io_loop=None, callback=None, connect_timeout=None):
    -    """Client-side websocket support.
    -
    -    Takes a url and returns a Future whose result is a
    -    `WebSocketClientConnection`.
    -
    -    .. versionchanged:: 3.2
    -       Also accepts ``HTTPRequest`` objects in place of urls.
    -    """
    -    if io_loop is None:
    -        io_loop = IOLoop.current()
    -    if isinstance(url, httpclient.HTTPRequest):
    -        assert connect_timeout is None
    -        request = url
    -        # Copy and convert the headers dict/object (see comments in
    -        # AsyncHTTPClient.fetch)
    -        request.headers = httputil.HTTPHeaders(request.headers)
    -    else:
    -        request = httpclient.HTTPRequest(url, connect_timeout=connect_timeout)
    -    request = httpclient._RequestProxy(
    -        request, httpclient.HTTPRequest._DEFAULTS)
    -    conn = WebSocketClientConnection(io_loop, request)
    -    if callback is not None:
    -        io_loop.add_future(conn.connect_future, callback)
    -    return conn.connect_future
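
With the vendored websocket module gone, rosbridge clients and tests are expected to use the system-installed Tornado, whose 4.x client API matches the code deleted above. A minimal sketch of exercising that API against a running rosbridge server; the ws://localhost:9090 URL and the subscribe payload are illustrative assumptions, not part of this change:

    from tornado import gen
    from tornado.ioloop import IOLoop
    from tornado.websocket import websocket_connect  # system Tornado, not src/tornado

    @gen.coroutine
    def check_connection():
        # websocket_connect returns a Future that resolves to a
        # WebSocketClientConnection, as documented in the deleted module.
        conn = yield websocket_connect("ws://localhost:9090")  # assumed rosbridge port
        conn.write_message('{"op": "subscribe", "topic": "/clock"}')
        msg = yield conn.read_message()  # None once the server closes the socket
        print(msg)
        conn.close()

    IOLoop.current().run_sync(check_connection)
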
    diff --git a/rosbridge_server/src/tornado/wsgi.py b/rosbridge_server/src/tornado/wsgi.py
    deleted file mode 100644
    index 6e115e125..000000000
    --- a/rosbridge_server/src/tornado/wsgi.py
    +++ /dev/null
    @@ -1,361 +0,0 @@
    -#!/usr/bin/env python
    -#
    -# Copyright 2009 Facebook
    -#
    -# Licensed under the Apache License, Version 2.0 (the "License"); you may
    -# not use this file except in compliance with the License. You may obtain
    -# a copy of the License at
    -#
    -#     http://www.apache.org/licenses/LICENSE-2.0
    -#
    -# Unless required by applicable law or agreed to in writing, software
    -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
    -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
    -# License for the specific language governing permissions and limitations
    -# under the License.
    -
    -"""WSGI support for the Tornado web framework.
    -
    -WSGI is the Python standard for web servers, and allows for interoperability
    -between Tornado and other Python web frameworks and servers.  This module
    -provides WSGI support in two ways:
    -
    -* `WSGIAdapter` converts a `tornado.web.Application` to the WSGI application
    -  interface.  This is useful for running a Tornado app on another
    -  HTTP server, such as Google App Engine.  See the `WSGIAdapter` class
    -  documentation for limitations that apply.
    -* `WSGIContainer` lets you run other WSGI applications and frameworks on the
    -  Tornado HTTP server.  For example, with this class you can mix Django
    -  and Tornado handlers in a single server.
    -"""
    -
    -from __future__ import absolute_import, division, print_function, with_statement
    -
    -import sys
    -import tornado
    -
    -from tornado.concurrent import Future
    -from tornado import escape
    -from tornado import httputil
    -from tornado.log import access_log
    -from tornado import web
    -from tornado.escape import native_str
    -from tornado.util import bytes_type, unicode_type
    -
    -try:
    -    from io import BytesIO  # python 3
    -except ImportError:
    -    from cStringIO import StringIO as BytesIO  # python 2
    -
    -try:
    -    import urllib.parse as urllib_parse  # py3
    -except ImportError:
    -    import urllib as urllib_parse
    -
    -# PEP 3333 specifies that WSGI on python 3 generally deals with byte strings
    -# that are smuggled inside objects of type unicode (via the latin1 encoding).
    -# These functions are like those in the tornado.escape module, but defined
    -# here to minimize the temptation to use them in non-wsgi contexts.
    -if str is unicode_type:
    -    def to_wsgi_str(s):
    -        assert isinstance(s, bytes_type)
    -        return s.decode('latin1')
    -
    -    def from_wsgi_str(s):
    -        assert isinstance(s, str)
    -        return s.encode('latin1')
    -else:
    -    def to_wsgi_str(s):
    -        assert isinstance(s, bytes_type)
    -        return s
    -
    -    def from_wsgi_str(s):
    -        assert isinstance(s, str)
    -        return s
    -
    -
    -class WSGIApplication(web.Application):
    -    """A WSGI equivalent of `tornado.web.Application`.
    -
    -    .. deprecated:: 4.0
    -
    -       Use a regular `.Application` and wrap it in `WSGIAdapter` instead.
    -    """
    -    def __call__(self, environ, start_response):
    -        return WSGIAdapter(self)(environ, start_response)
    -
    -
    -# WSGI has no facilities for flow control, so just return an already-done
    -# Future when the interface requires it.
    -_dummy_future = Future()
    -_dummy_future.set_result(None)
    -
    -
    -class _WSGIConnection(httputil.HTTPConnection):
    -    def __init__(self, method, start_response, context):
    -        self.method = method
    -        self.start_response = start_response
    -        self.context = context
    -        self._write_buffer = []
    -        self._finished = False
    -        self._expected_content_remaining = None
    -        self._error = None
    -
    -    def set_close_callback(self, callback):
    -        # WSGI has no facility for detecting a closed connection mid-request,
    -        # so we can simply ignore the callback.
    -        pass
    -
    -    def write_headers(self, start_line, headers, chunk=None, callback=None):
    -        if self.method == 'HEAD':
    -            self._expected_content_remaining = 0
    -        elif 'Content-Length' in headers:
    -            self._expected_content_remaining = int(headers['Content-Length'])
    -        else:
    -            self._expected_content_remaining = None
    -        self.start_response(
    -            '%s %s' % (start_line.code, start_line.reason),
    -            [(native_str(k), native_str(v)) for (k, v) in headers.get_all()])
    -        if chunk is not None:
    -            self.write(chunk, callback)
    -        elif callback is not None:
    -            callback()
    -        return _dummy_future
    -
    -    def write(self, chunk, callback=None):
    -        if self._expected_content_remaining is not None:
    -            self._expected_content_remaining -= len(chunk)
    -            if self._expected_content_remaining < 0:
    -                self._error = httputil.HTTPOutputError(
    -                    "Tried to write more data than Content-Length")
    -                raise self._error
    -        self._write_buffer.append(chunk)
    -        if callback is not None:
    -            callback()
    -        return _dummy_future
    -
    -    def finish(self):
    -        if (self._expected_content_remaining is not None and
    -                self._expected_content_remaining != 0):
    -            self._error = httputil.HTTPOutputError(
    -                "Tried to write %d bytes less than Content-Length" %
    -                self._expected_content_remaining)
    -            raise self._error
    -        self._finished = True
    -
    -
    -class _WSGIRequestContext(object):
    -    def __init__(self, remote_ip, protocol):
    -        self.remote_ip = remote_ip
    -        self.protocol = protocol
    -
    -    def __str__(self):
    -        return self.remote_ip
    -
    -
    -class WSGIAdapter(object):
    -    """Converts a `tornado.web.Application` instance into a WSGI application.
    -
    -    Example usage::
    -
    -        import tornado.web
    -        import tornado.wsgi
    -        import wsgiref.simple_server
    -
    -        class MainHandler(tornado.web.RequestHandler):
    -            def get(self):
    -                self.write("Hello, world")
    -
    -        if __name__ == "__main__":
    -            application = tornado.web.Application([
    -                (r"/", MainHandler),
    -            ])
    -            wsgi_app = tornado.wsgi.WSGIAdapter(application)
    -            server = wsgiref.simple_server.make_server('', 8888, wsgi_app)
    -            server.serve_forever()
    -
-    See the `appengine demo
-    </~https://github.com/tornado/tornado/tree/master/demos/appengine>`_
    -    for an example of using this module to run a Tornado app on Google
    -    App Engine.
    -
    -    In WSGI mode asynchronous methods are not supported.  This means
    -    that it is not possible to use `.AsyncHTTPClient`, or the
    -    `tornado.auth` or `tornado.websocket` modules.
    -
    -    .. versionadded:: 4.0
    -    """
    -    def __init__(self, application):
    -        if isinstance(application, WSGIApplication):
    -            self.application = lambda request: web.Application.__call__(
    -                application, request)
    -        else:
    -            self.application = application
    -
    -    def __call__(self, environ, start_response):
    -        method = environ["REQUEST_METHOD"]
    -        uri = urllib_parse.quote(from_wsgi_str(environ.get("SCRIPT_NAME", "")))
    -        uri += urllib_parse.quote(from_wsgi_str(environ.get("PATH_INFO", "")))
    -        if environ.get("QUERY_STRING"):
    -            uri += "?" + environ["QUERY_STRING"]
    -        headers = httputil.HTTPHeaders()
    -        if environ.get("CONTENT_TYPE"):
    -            headers["Content-Type"] = environ["CONTENT_TYPE"]
    -        if environ.get("CONTENT_LENGTH"):
    -            headers["Content-Length"] = environ["CONTENT_LENGTH"]
    -        for key in environ:
    -            if key.startswith("HTTP_"):
    -                headers[key[5:].replace("_", "-")] = environ[key]
    -        if headers.get("Content-Length"):
    -            body = environ["wsgi.input"].read(
    -                int(headers["Content-Length"]))
    -        else:
    -            body = ""
    -        protocol = environ["wsgi.url_scheme"]
    -        remote_ip = environ.get("REMOTE_ADDR", "")
    -        if environ.get("HTTP_HOST"):
    -            host = environ["HTTP_HOST"]
    -        else:
    -            host = environ["SERVER_NAME"]
    -        connection = _WSGIConnection(method, start_response,
    -                                     _WSGIRequestContext(remote_ip, protocol))
    -        request = httputil.HTTPServerRequest(
    -            method, uri, "HTTP/1.1", headers=headers, body=body,
    -            host=host, connection=connection)
    -        request._parse_body()
    -        self.application(request)
    -        if connection._error:
    -            raise connection._error
    -        if not connection._finished:
    -            raise Exception("request did not finish synchronously")
    -        return connection._write_buffer
    -
    -
    -class WSGIContainer(object):
    -    r"""Makes a WSGI-compatible function runnable on Tornado's HTTP server.
    -
    -    .. warning::
    -
    -       WSGI is a *synchronous* interface, while Tornado's concurrency model
    -       is based on single-threaded asynchronous execution.  This means that
    -       running a WSGI app with Tornado's `WSGIContainer` is *less scalable*
    -       than running the same app in a multi-threaded WSGI server like
    -       ``gunicorn`` or ``uwsgi``.  Use `WSGIContainer` only when there are
    -       benefits to combining Tornado and WSGI in the same process that
    -       outweigh the reduced scalability.
    -
    -    Wrap a WSGI function in a `WSGIContainer` and pass it to `.HTTPServer` to
    -    run it. For example::
    -
    -        def simple_app(environ, start_response):
    -            status = "200 OK"
    -            response_headers = [("Content-type", "text/plain")]
    -            start_response(status, response_headers)
    -            return ["Hello world!\n"]
    -
    -        container = tornado.wsgi.WSGIContainer(simple_app)
    -        http_server = tornado.httpserver.HTTPServer(container)
    -        http_server.listen(8888)
    -        tornado.ioloop.IOLoop.instance().start()
    -
    -    This class is intended to let other frameworks (Django, web.py, etc)
    -    run on the Tornado HTTP server and I/O loop.
    -
    -    The `tornado.web.FallbackHandler` class is often useful for mixing
    -    Tornado and WSGI apps in the same server.  See
    -    /~https://github.com/bdarnell/django-tornado-demo for a complete example.
    -    """
    -    def __init__(self, wsgi_application):
    -        self.wsgi_application = wsgi_application
    -
    -    def __call__(self, request):
    -        data = {}
    -        response = []
    -
    -        def start_response(status, response_headers, exc_info=None):
    -            data["status"] = status
    -            data["headers"] = response_headers
    -            return response.append
    -        app_response = self.wsgi_application(
    -            WSGIContainer.environ(request), start_response)
    -        try:
    -            response.extend(app_response)
    -            body = b"".join(response)
    -        finally:
    -            if hasattr(app_response, "close"):
    -                app_response.close()
    -        if not data:
    -            raise Exception("WSGI app did not call start_response")
    -
    -        status_code = int(data["status"].split()[0])
    -        headers = data["headers"]
    -        header_set = set(k.lower() for (k, v) in headers)
    -        body = escape.utf8(body)
    -        if status_code != 304:
    -            if "content-length" not in header_set:
    -                headers.append(("Content-Length", str(len(body))))
    -            if "content-type" not in header_set:
    -                headers.append(("Content-Type", "text/html; charset=UTF-8"))
    -        if "server" not in header_set:
    -            headers.append(("Server", "TornadoServer/%s" % tornado.version))
    -
    -        parts = [escape.utf8("HTTP/1.1 " + data["status"] + "\r\n")]
    -        for key, value in headers:
    -            parts.append(escape.utf8(key) + b": " + escape.utf8(value) + b"\r\n")
    -        parts.append(b"\r\n")
    -        parts.append(body)
    -        request.write(b"".join(parts))
    -        request.finish()
    -        self._log(status_code, request)
    -
    -    @staticmethod
    -    def environ(request):
    -        """Converts a `tornado.httputil.HTTPServerRequest` to a WSGI environment.
    -        """
    -        hostport = request.host.split(":")
    -        if len(hostport) == 2:
    -            host = hostport[0]
    -            port = int(hostport[1])
    -        else:
    -            host = request.host
    -            port = 443 if request.protocol == "https" else 80
    -        environ = {
    -            "REQUEST_METHOD": request.method,
    -            "SCRIPT_NAME": "",
    -            "PATH_INFO": to_wsgi_str(escape.url_unescape(
    -                request.path, encoding=None, plus=False)),
    -            "QUERY_STRING": request.query,
    -            "REMOTE_ADDR": request.remote_ip,
    -            "SERVER_NAME": host,
    -            "SERVER_PORT": str(port),
    -            "SERVER_PROTOCOL": request.version,
    -            "wsgi.version": (1, 0),
    -            "wsgi.url_scheme": request.protocol,
    -            "wsgi.input": BytesIO(escape.utf8(request.body)),
    -            "wsgi.errors": sys.stderr,
    -            "wsgi.multithread": False,
    -            "wsgi.multiprocess": True,
    -            "wsgi.run_once": False,
    -        }
    -        if "Content-Type" in request.headers:
    -            environ["CONTENT_TYPE"] = request.headers.pop("Content-Type")
    -        if "Content-Length" in request.headers:
    -            environ["CONTENT_LENGTH"] = request.headers.pop("Content-Length")
    -        for key, value in request.headers.items():
    -            environ["HTTP_" + key.replace("-", "_").upper()] = value
    -        return environ
    -
    -    def _log(self, status_code, request):
    -        if status_code < 400:
    -            log_method = access_log.info
    -        elif status_code < 500:
    -            log_method = access_log.warning
    -        else:
    -            log_method = access_log.error
    -        request_time = 1000.0 * request.request_time()
    -        summary = request.method + " " + request.uri + " (" + \
    -            request.remote_ip + ")"
    -        log_method("%d %s %.2fms", status_code, summary, request_time)
    -
    -
    -HTTPRequest = httputil.HTTPServerRequest
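
Since this diff removes the last files under src/tornado and src/backports, every import must now resolve from the system packages declared in package.xml (python-tornado and python-backports.ssl-match-hostname). A quick post-build sanity check, assuming a standard system install layout:

    import os

    import tornado
    import backports.ssl_match_hostname  # provided by python-backports.ssl-match-hostname

    # The modules should load from system site-packages, not from the
    # rosbridge_server source tree that this changeset empties out.
    tornado_dir = os.path.dirname(os.path.abspath(tornado.__file__))
    assert "rosbridge_server" not in tornado_dir, tornado_dir
    print("tornado %s loaded from %s" % (tornado.version, tornado_dir))
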