From defcf235c158e93938c93f556383939515852c88 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 3 Feb 2014 08:45:14 +1300 Subject: [PATCH] Raise the default max header to accommodate large tokens PKI tokens hit the default limit if there are enough services defined in the keystone catalog. Also the v3 catalog is larger than the v2 catalog which would explain why this bug is being hit just now. This change adds the configuration option max_header_line to each of the API configurations which has a default of 16384. Closes-Bug: #1190149 Change-Id: I5da09aa08a1242c5e356bd8bf532baa9347ce075 (cherry picked from commit 0b02feb20d4485d0c6d486c5a72b814ce3bdf9e5) --- etc/heat/heat.conf.sample | 18 ++++++++++++++++++ heat/common/wsgi.py | 16 ++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/etc/heat/heat.conf.sample b/etc/heat/heat.conf.sample index ad6fb7cab4..d464840893 100644 --- a/etc/heat/heat.conf.sample +++ b/etc/heat/heat.conf.sample @@ -633,6 +633,12 @@ # Number of workers for Heat service (integer value) #workers=0 +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs (integer value) +#max_header_line=16384 + [heat_api] @@ -662,6 +668,12 @@ # Number of workers for Heat service (integer value) #workers=0 +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs (integer value) +#max_header_line=16384 + [heat_api_cfn] @@ -691,6 +703,12 @@ # Number of workers for Heat service (integer value) #workers=0 +# Maximum line size of message headers to be accepted. 
+# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs (integer value) +#max_header_line=16384 + [auth_password] diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py index 7e6948932b..7d189e7697 100644 --- a/heat/common/wsgi.py +++ b/heat/common/wsgi.py @@ -73,6 +73,11 @@ api_opts = [ cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service"), deprecated_group='DEFAULT'), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. ' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs')), ] api_group = cfg.OptGroup('heat_api') cfg.CONF.register_group(api_group) @@ -102,6 +107,11 @@ api_cfn_opts = [ cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service"), deprecated_group='DEFAULT'), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. ' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs')), ] api_cfn_group = cfg.OptGroup('heat_api_cfn') cfg.CONF.register_group(api_cfn_group) @@ -131,6 +141,11 @@ api_cw_opts = [ cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service"), deprecated_group='DEFAULT'), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. 
' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs')), ] api_cw_group = cfg.OptGroup('heat_api_cloudwatch') cfg.CONF.register_group(api_cw_group) @@ -250,6 +265,7 @@ class Server(object): signal.signal(signal.SIGHUP, signal.SIG_IGN) self.running = False + eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line self.application = application self.sock = get_socket(conf, default_port)