From 0b02feb20d4485d0c6d486c5a72b814ce3bdf9e5 Mon Sep 17 00:00:00 2001 From: Steve Baker Date: Mon, 3 Feb 2014 08:45:14 +1300 Subject: [PATCH] Raise the default max header to accommodate large tokens PKI tokens hit the default limit if there are enough services defined in the keystone catalog. Also the v3 catalog is larger than the v2 catalog which would explain why this bug is being hit just now. This change adds the configuration option max_header_line to each of the API configurations which has a default of 16384. Closes-Bug: #1190149 Change-Id: I5da09aa08a1242c5e356bd8bf532baa9347ce075 --- etc/heat/heat.conf.sample | 18 ++++++++++++++++++ heat/common/wsgi.py | 16 ++++++++++++++++ 2 files changed, 34 insertions(+) diff --git a/etc/heat/heat.conf.sample b/etc/heat/heat.conf.sample index 1f6a4fed36..7b52f9996e 100644 --- a/etc/heat/heat.conf.sample +++ b/etc/heat/heat.conf.sample @@ -870,6 +870,12 @@ # Number of workers for Heat service (integer value) #workers=0 +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs) (integer value) +#max_header_line=16384 + [heat_api_cfn] @@ -899,6 +905,12 @@ # Number of workers for Heat service (integer value) #workers=0 +# Maximum line size of message headers to be accepted. +# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs) (integer value) +#max_header_line=16384 + [heat_api_cloudwatch] @@ -928,6 +940,12 @@ # Number of workers for Heat service (integer value) #workers=0 +# Maximum line size of message headers to be accepted. 
+# max_header_line may need to be increased when using large +# tokens (typically those generated by the Keystone v3 API +# with big service catalogs) (integer value) +#max_header_line=16384 + [keystone_authtoken] diff --git a/heat/common/wsgi.py b/heat/common/wsgi.py index deb018d2a6..ea04f175a6 100644 --- a/heat/common/wsgi.py +++ b/heat/common/wsgi.py @@ -73,6 +73,11 @@ api_opts = [ cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service"), deprecated_group='DEFAULT'), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. ' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs)')), ] api_group = cfg.OptGroup('heat_api') cfg.CONF.register_group(api_group) @@ -102,6 +107,11 @@ api_cfn_opts = [ cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service"), deprecated_group='DEFAULT'), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. ' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs)')), ] api_cfn_group = cfg.OptGroup('heat_api_cfn') cfg.CONF.register_group(api_cfn_group) @@ -131,6 +141,11 @@ api_cw_opts = [ cfg.IntOpt('workers', default=0, help=_("Number of workers for Heat service"), deprecated_group='DEFAULT'), + cfg.IntOpt('max_header_line', default=16384, + help=_('Maximum line size of message headers to be accepted. 
' + 'max_header_line may need to be increased when using ' + 'large tokens (typically those generated by the ' + 'Keystone v3 API with big service catalogs)')), ] api_cw_group = cfg.OptGroup('heat_api_cloudwatch') cfg.CONF.register_group(api_cw_group) @@ -250,6 +265,7 @@ class Server(object): signal.signal(signal.SIGHUP, signal.SIG_IGN) self.running = False + eventlet.wsgi.MAX_HEADER_LINE = conf.max_header_line self.application = application self.sock = get_socket(conf, default_port)