From 6eef99c3af1a8cbb75a89d6deb6282cb2c7e8449 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 21 Jul 2015 10:42:36 +0800 Subject: [PATCH 01/36] Support noVNC session for SSL. --- config.yaml | 8 +++++++ hooks/nova_cc_context.py | 34 +++++++++++++++++++++++++++ hooks/nova_cc_utils.py | 3 ++- templates/parts/novnc | 3 +++ unit_tests/test_nova_cc_contexts.py | 36 +++++++++++++++++++++++++++++ 5 files changed, 83 insertions(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index b09172fd..192ab04d 100644 --- a/config.yaml +++ b/config.yaml @@ -381,3 +381,11 @@ options: If memcached is being used to store the tokens, then it's recommended to change this configuration to False. + encrypted-noVNC: + type: boolean + default: False + description: | + The default value is False, when it is set to True, the noVNC session will + be encrypted, this is useful for the cloud disabled SSL for OpenStack services + but want to ensure the security for noVNC session, ssl_cert and ssl_key are + needed to encrypt noVNC session. diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 2c178447..0a2754c2 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -1,5 +1,6 @@ import os +from base64 import b64decode from charmhelpers.core.hookenv import ( config, relation_ids, @@ -9,6 +10,8 @@ from charmhelpers.core.hookenv import ( related_units, relations_for_id, relation_get, + DEBUG, + unit_get, ) from charmhelpers.fetch import ( apt_install, @@ -350,3 +353,34 @@ class InstanceConsoleContext(context.OSContextGenerator): ctxt['ssl_key'] = key return ctxt + + +class NoVNCSslOnlyContext(context.OSContextGenerator): + interfaces = [] + + def __call__(self): + ctxt = {} + + if config('encrypted-noVNC') \ + and config('ssl_cert') and config('ssl_key'): + ssl_dir = '/etc/nova/ssl/' + if not os.path.exists(ssl_dir): + log('Creating %s.' 
% ssl_dir, level=DEBUG) + os.mkdir(ssl_dir) + + cert_path = os.path.join(ssl_dir, 'nova_cert.pem') + with open(cert_path, 'w') as fh: + fh.write(b64decode(config('ssl_cert'))) + + key_path = os.path.join(ssl_dir, 'nova_key.pem') + with open(key_path, 'w') as fh: + fh.write(b64decode(config('ssl_key'))) + + ctxt['ssl_only'] = True + ctxt['ssl_cert'] = cert_path + ctxt['ssl_key'] = key_path + private_addr = unit_get('private-address') + url = 'https://%s:6080/vnc_auto.html' % private_addr + ctxt['novncproxy_base_url'] = url + + return ctxt diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 65f331d2..2aebaf06 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -194,7 +194,8 @@ BASE_RESOURCE_MAP = OrderedDict([ nova_cc_context.NovaIPv6Context(), nova_cc_context.NeutronCCContext(), nova_cc_context.NovaConfigContext(), - nova_cc_context.InstanceConsoleContext()], + nova_cc_context.InstanceConsoleContext(), + nova_cc_context.NoVNCSslOnlyContext()], }), (NOVA_API_PASTE, { 'services': [s for s in BASE_SERVICES if 'api' in s], diff --git a/templates/parts/novnc b/templates/parts/novnc index fc3d6336..578ab578 100644 --- a/templates/parts/novnc +++ b/templates/parts/novnc @@ -7,3 +7,6 @@ cert={{ ssl_cert }} {% if ssl_key -%} key={{ ssl_key }} {% endif %} +{% if novncproxy_base_url -%} +novncproxy_base_url={{ novncproxy_base_url }} +{% endif %} diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index e6386b42..fb7639aa 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -1,6 +1,7 @@ from __future__ import print_function import mock +import os ##### # NOTE(freyes): this is a workaround to patch config() function imported by @@ -148,3 +149,38 @@ class NovaComputeContextTests(CharmTestCase): self.assertTrue(context.use_local_neutron_api()) self.related_units.return_value = ['unit/0'] self.assertFalse(context.use_local_neutron_api()) + + @mock.patch('__builtin__.open') + @mock.patch('os.path.exists') + @mock.patch.object(context, 'config') + @mock.patch.object(context, 'unit_get') + def test_noVNC_ssl_only_disabled(self, mock_unit_get, mock_config, + mock_exists, mock_open): + config = {'encrypted-noVNC': False, + 'ssl_cert': 'LS0tLS1CRUdJTiBDRV', + 'ssl_key': 'LS0tLS1CRUdJTiBQUk'} + mock_config.side_effect = lambda key: config.get(key) + + ctxt = context.NoVNCSslOnlyContext()() + self.assertEqual(ctxt, None) + + @mock.patch('__builtin__.open') + @mock.patch('os.path.exists') + @mock.patch.object(context, 'config') + @mock.patch.object(context, 'unit_get') + def test_noVNC_ssl_only_enabled(self, mock_unit_get, mock_config, + mock_exists, mock_open): + config = {'encrypted-noVNC': True, + 'ssl_cert': 'LS0tLS1CRUdJTiBDRV', + 'ssl_key': 'LS0tLS1CRUdJTiBQUk'} + mock_config.side_effect = lambda key: config.get(key) + mock_exists.return_value = True + mock_unit_get.return_value = '127.0.0.1' + + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + + ctxt = context.NoVNCSslOnlyContext()() + self.assertTrue(ctxt['ssl_only']) + self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') + self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') From f2553fe1595b5488e21f8b32e8966c77fd150855 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 21 Jul 2015 11:20:51 +0800 Subject: [PATCH 02/36] Add vip support for base url --- hooks/nova_cc_context.py | 11 +++++++++-- unit_tests/test_nova_cc_contexts.py | 10 +++++++--- 2 files changed, 16 insertions(+), 5 
deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 0a2754c2..a53fdc8a 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -26,6 +26,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( determine_apache_port, determine_api_port, https, + is_clustered, ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, @@ -33,6 +34,7 @@ from charmhelpers.contrib.network.ip import ( from charmhelpers.contrib.openstack.ip import ( resolve_address, INTERNAL, + PUBLIC, ) @@ -379,8 +381,13 @@ class NoVNCSslOnlyContext(context.OSContextGenerator): ctxt['ssl_only'] = True ctxt['ssl_cert'] = cert_path ctxt['ssl_key'] = key_path - private_addr = unit_get('private-address') - url = 'https://%s:6080/vnc_auto.html' % private_addr + + if is_clustered(): + ip_addr = resolve_address(endpoint_type=PUBLIC) + else: + ip_addr = unit_get('private-address') + + url = 'https://%s:6080/vnc_auto.html' % ip_addr ctxt['novncproxy_base_url'] = url return ctxt diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index fb7639aa..c4b5ad0d 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -1,7 +1,6 @@ from __future__ import print_function import mock -import os ##### # NOTE(freyes): this is a workaround to patch config() function imported by @@ -168,14 +167,19 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch('os.path.exists') @mock.patch.object(context, 'config') @mock.patch.object(context, 'unit_get') - def test_noVNC_ssl_only_enabled(self, mock_unit_get, mock_config, - mock_exists, mock_open): + @mock.patch.object(context, 'is_clustered') + @mock.patch.object(context, 'resolve_address') + def test_noVNC_ssl_only_enabled(self, mock_resolve_address, + mock_is_clustered, mock_unit_get, + mock_config, mock_exists, mock_open): config = {'encrypted-noVNC': True, 'ssl_cert': 'LS0tLS1CRUdJTiBDRV', 'ssl_key': 'LS0tLS1CRUdJTiBQUk'} mock_config.side_effect = lambda key: config.get(key) mock_exists.return_value = True mock_unit_get.return_value = '127.0.0.1' + mock_is_clustered.return_value = True + mock_resolve_address.return_value = '10.5.100.1' mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() From 168e39816a1267d0683ca923281ec9be4483c226 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 21 Jul 2015 21:19:27 +0800 Subject: [PATCH 03/36] Add noVNC-ssl-cert/noVNC-ssl-key options --- config.yaml | 16 +++++++++------- hooks/nova_cc_context.py | 7 +++---- unit_tests/test_nova_cc_contexts.py | 5 ++--- 3 files changed, 14 insertions(+), 14 deletions(-) diff --git a/config.yaml b/config.yaml index 192ab04d..d60f17a9 100644 --- a/config.yaml +++ b/config.yaml @@ -381,11 +381,13 @@ options: If memcached is being used to store the tokens, then it's recommended to change this configuration to False. - encrypted-noVNC: - type: boolean - default: False + noVNC-ssl-cert: + type: string + default: description: | - The default value is False, when it is set to True, the noVNC session will - be encrypted, this is useful for the cloud disabled SSL for OpenStack services - but want to ensure the security for noVNC session, ssl_cert and ssl_key are - needed to encrypt noVNC session. + This differs from the SSL certificate to install and use for API ports, + it used for noVNC session only. Setting this value with noVNC-ssl-key + will enable encrypted noVNC session. 
This has nothing relation with Nova + API SSL, it could be used in the case without Nova API SSL enabled. + noVNC-ssl-key: + description: SSL key to use with certificate specified as noVNC-ssl-cert. diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index a53fdc8a..ab3c194a 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -363,8 +363,7 @@ class NoVNCSslOnlyContext(context.OSContextGenerator): def __call__(self): ctxt = {} - if config('encrypted-noVNC') \ - and config('ssl_cert') and config('ssl_key'): + if config('noVNC-ssl-cert') and config('noVNC-ssl-key'): ssl_dir = '/etc/nova/ssl/' if not os.path.exists(ssl_dir): log('Creating %s.' % ssl_dir, level=DEBUG) @@ -372,11 +371,11 @@ class NoVNCSslOnlyContext(context.OSContextGenerator): cert_path = os.path.join(ssl_dir, 'nova_cert.pem') with open(cert_path, 'w') as fh: - fh.write(b64decode(config('ssl_cert'))) + fh.write(b64decode(config('noVNC-ssl-cert'))) key_path = os.path.join(ssl_dir, 'nova_key.pem') with open(key_path, 'w') as fh: - fh.write(b64decode(config('ssl_key'))) + fh.write(b64decode(config('noVNC-ssl-key'))) ctxt['ssl_only'] = True ctxt['ssl_cert'] = cert_path diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index c4b5ad0d..6986994b 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -172,9 +172,8 @@ class NovaComputeContextTests(CharmTestCase): def test_noVNC_ssl_only_enabled(self, mock_resolve_address, mock_is_clustered, mock_unit_get, mock_config, mock_exists, mock_open): - config = {'encrypted-noVNC': True, - 'ssl_cert': 'LS0tLS1CRUdJTiBDRV', - 'ssl_key': 'LS0tLS1CRUdJTiBQUk'} + config = {'noVNC-ssl-cert': 'LS0tLS1CRUdJTiBDRV', + 'noVNC-ssl-key': 'LS0tLS1CRUdJTiBQUk'} mock_config.side_effect = lambda key: config.get(key) mock_exists.return_value = True mock_unit_get.return_value = '127.0.0.1' From 7267a2c30ea7e0021b9a7fff260b8d47943fe8cd Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 13:33:03 +0800 Subject: [PATCH 04/36] Fix more --- config.yaml | 15 +++++++++------ hooks/nova_cc_context.py | 16 ++++++++++++--- hooks/nova_cc_hooks.py | 16 ++++++++++++--- hooks/nova_cc_utils.py | 2 +- unit_tests/test_nova_cc_contexts.py | 24 +++++++++++------------ unit_tests/test_nova_cc_hooks.py | 30 ++++++++++++++--------------- 6 files changed, 63 insertions(+), 40 deletions(-) diff --git a/config.yaml b/config.yaml index d60f17a9..f7e92e04 100644 --- a/config.yaml +++ b/config.yaml @@ -381,13 +381,16 @@ options: If memcached is being used to store the tokens, then it's recommended to change this configuration to False. - noVNC-ssl-cert: + console-access-ssl-cert: type: string default: description: | This differs from the SSL certificate to install and use for API ports, - it used for noVNC session only. Setting this value with noVNC-ssl-key - will enable encrypted noVNC session. This has nothing relation with Nova - API SSL, it could be used in the case without Nova API SSL enabled. - noVNC-ssl-key: - description: SSL key to use with certificate specified as noVNC-ssl-cert. + it used for console access session only. Setting this value with console- + access-ssl-key will enable encrypted console session. This has nothing + relation with Nova API SSL, it could be used in the case without Nova API + SSL enabled. + console-access-ssl-cert: + type: string + default: + description: SSL key to use with certificate specified as console-access-ssl-cert. 
diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index ab3c194a..96d42a51 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -30,6 +30,7 @@ from charmhelpers.contrib.hahelpers.cluster import ( ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, + is_ipv6, ) from charmhelpers.contrib.openstack.ip import ( resolve_address, @@ -357,13 +358,16 @@ class InstanceConsoleContext(context.OSContextGenerator): return ctxt -class NoVNCSslOnlyContext(context.OSContextGenerator): +class ConsoleSslContext(context.OSContextGenerator): interfaces = [] def __call__(self): ctxt = {} + from nova_cc_utils import console_attributes - if config('noVNC-ssl-cert') and config('noVNC-ssl-key'): + if config('console-access-ssl-cert') \ + and config('console-access-ssl-key') \ + and config('console-access-protocol'): ssl_dir = '/etc/nova/ssl/' if not os.path.exists(ssl_dir): log('Creating %s.' % ssl_dir, level=DEBUG) @@ -386,7 +390,13 @@ class NoVNCSslOnlyContext(context.OSContextGenerator): else: ip_addr = unit_get('private-address') - url = 'https://%s:6080/vnc_auto.html' % ip_addr + if is_ipv6(ip_addr): + ip_addr = "[{}]".format(ip_addr) + + _proto = config('console-access-protocol') + url = "%s:%s%s" % (ip_addr, + console_attributes('proxy-port', proto=_proto), + console_attributes('proxy-page', proto=_proto)) ctxt['novncproxy_base_url'] = url return ctxt diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 2818f7b1..929cb733 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -112,7 +112,8 @@ from charmhelpers.payload.execd import execd_preinstall from charmhelpers.contrib.openstack.ip import ( canonical_url, - PUBLIC, INTERNAL, ADMIN + PUBLIC, INTERNAL, ADMIN, + resolve_address, ) from charmhelpers.contrib.network.ip import ( @@ -511,10 +512,19 @@ def console_settings(): return {} rel_settings['console_keymap'] = config('console-keymap') rel_settings['console_access_protocol'] = proto + + scheme = 'http' + if config('console-access-ssl-cert') and config('console-access-ssl-key'): + scheme = 'https' + + address = resolve_address(endpoint_type=PUBLIC) + if is_ipv6(address): + address = "[{}]".format(address) + if config('console-proxy-ip') == 'local': - proxy_base_addr = canonical_url(CONFIGS, PUBLIC) + proxy_base_addr = '%s://%s' % (scheme, address) else: - proxy_base_addr = "http://" + config('console-proxy-ip') + proxy_base_addr = "%s://%s" % (scheme, config('console-proxy-ip')) if proto == 'vnc': protocols = ['novnc', 'xvpvnc'] else: diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 2aebaf06..29bd190f 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -195,7 +195,7 @@ BASE_RESOURCE_MAP = OrderedDict([ nova_cc_context.NeutronCCContext(), nova_cc_context.NovaConfigContext(), nova_cc_context.InstanceConsoleContext(), - nova_cc_context.NoVNCSslOnlyContext()], + nova_cc_context.ConsoleSslContext()], }), (NOVA_API_PASTE, { 'services': [s for s in BASE_SERVICES if 'api' in s], diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index 6986994b..be45209f 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -153,14 +153,13 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch('os.path.exists') @mock.patch.object(context, 'config') @mock.patch.object(context, 'unit_get') - def test_noVNC_ssl_only_disabled(self, mock_unit_get, mock_config, - mock_exists, mock_open): - config = {'encrypted-noVNC': False, - 'ssl_cert': 
'LS0tLS1CRUdJTiBDRV', - 'ssl_key': 'LS0tLS1CRUdJTiBQUk'} + def test_noVNC_ssl_disabled(self, mock_unit_get, mock_config, + mock_exists, mock_open): + config = {'console-access-ssl_cert': 'LS0tLS1CRUdJTiBDRV', + 'console-access-ssl_key': 'LS0tLS1CRUdJTiBQUk'} mock_config.side_effect = lambda key: config.get(key) - ctxt = context.NoVNCSslOnlyContext()() + ctxt = context.ConsoleSslContext()() self.assertEqual(ctxt, None) @mock.patch('__builtin__.open') @@ -169,11 +168,12 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch.object(context, 'unit_get') @mock.patch.object(context, 'is_clustered') @mock.patch.object(context, 'resolve_address') - def test_noVNC_ssl_only_enabled(self, mock_resolve_address, - mock_is_clustered, mock_unit_get, - mock_config, mock_exists, mock_open): - config = {'noVNC-ssl-cert': 'LS0tLS1CRUdJTiBDRV', - 'noVNC-ssl-key': 'LS0tLS1CRUdJTiBQUk'} + def test_noVNC_ssl_enabled(self, mock_resolve_address, + mock_is_clustered, mock_unit_get, + mock_config, mock_exists, mock_open): + config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', + 'console-access-ssl-key': 'LS0tLS1CRUdJTiBQUk', + 'console-access-protocol': 'novnc'} mock_config.side_effect = lambda key: config.get(key) mock_exists.return_value = True mock_unit_get.return_value = '127.0.0.1' @@ -183,7 +183,7 @@ class NovaComputeContextTests(CharmTestCase): mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() - ctxt = context.NoVNCSslOnlyContext()() + ctxt = context.ConsoleSslContext()() self.assertTrue(ctxt['ssl_only']) self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') diff --git a/unit_tests/test_nova_cc_hooks.py b/unit_tests/test_nova_cc_hooks.py index 91dc18fd..9602e157 100644 --- a/unit_tests/test_nova_cc_hooks.py +++ b/unit_tests/test_nova_cc_hooks.py @@ -609,12 +609,12 @@ class NovaCCHooksTests(CharmTestCase): self.assertTrue(_compute_joined.called) self.assertTrue(_quantum_joined.called) - @patch.object(hooks, 'canonical_url') + @patch.object(hooks, 'resolve_address') @patch.object(utils, 'config') - def test_console_settings_vnc(self, _utils_config, _canonical_url): + def test_console_settings_vnc(self, _utils_config, _resolve_address): _utils_config.return_value = 'vnc' _cc_host = "nova-cc-host1" - _canonical_url.return_value = 'http://' + _cc_host + _resolve_address.return_value = _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_proxy_novnc_address': 'http://%s:6080/vnc_auto.html' % @@ -630,12 +630,12 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'canonical_url') + @patch.object(hooks, 'resolve_address') @patch.object(utils, 'config') - def test_console_settings_xvpvnc(self, _utils_config, _canonical_url): + def test_console_settings_xvpvnc(self, _utils_config, _resolve_address): _utils_config.return_value = 'xvpvnc' _cc_host = "nova-cc-host1" - _canonical_url.return_value = 'http://' + _cc_host + _resolve_address.return_value = _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_access_protocol': 'xvpvnc', @@ -647,12 +647,12 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'canonical_url') + @patch.object(hooks, 'resolve_address') @patch.object(utils, 'config') - def test_console_settings_novnc(self, _utils_config, _canonical_url): + def test_console_settings_novnc(self, _utils_config, 
_resolve_address): _utils_config.return_value = 'novnc' _cc_host = "nova-cc-host1" - _canonical_url.return_value = 'http://' + _cc_host + _resolve_address.return_value = _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_proxy_novnc_address': 'http://%s:6080/vnc_auto.html' % @@ -664,12 +664,12 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'canonical_url') + @patch.object(hooks, 'resolve_address') @patch.object(utils, 'config') - def test_console_settings_spice(self, _utils_config, _canonical_url): + def test_console_settings_spice(self, _utils_config, _resolve_address): _utils_config.return_value = 'spice' _cc_host = "nova-cc-host1" - _canonical_url.return_value = 'http://' + _cc_host + _resolve_address.return_value = _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_proxy_spice_address': 'http://%s:6082/spice_auto.html' % @@ -681,16 +681,16 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'canonical_url') + @patch.object(hooks, 'resolve_address') @patch.object(utils, 'config') def test_console_settings_explicit_ip(self, _utils_config, - _canonical_url): + _resolve_address): _utils_config.return_value = 'spice' _cc_public_host = "public-host" _cc_private_host = "private-host" self.test_config.set('console-proxy-ip', _cc_public_host) _con_sets = hooks.console_settings() - _canonical_url.return_value = 'http://' + _cc_private_host + _resolve_address.return_value = _cc_private_host console_settings = { 'console_proxy_spice_address': 'http://%s:6082/spice_auto.html' % (_cc_public_host), From 3dc847fea6f9dd107c8f981f6fb41dff20199a9d Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 14:10:28 +0800 Subject: [PATCH 05/36] s/cert/key --- config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/config.yaml b/config.yaml index f7e92e04..f391101d 100644 --- a/config.yaml +++ b/config.yaml @@ -390,7 +390,7 @@ options: access-ssl-key will enable encrypted console session. This has nothing relation with Nova API SSL, it could be used in the case without Nova API SSL enabled. - console-access-ssl-cert: + console-access-ssl-key: type: string default: description: SSL key to use with certificate specified as console-access-ssl-cert. From 609f0ee821cccdb3f407e82eec0e703ca247d919 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 14:40:49 +0800 Subject: [PATCH 06/36] s/noVNC/console-access --- hooks/nova_cc_context.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 96d42a51..a5aa49b6 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -375,11 +375,11 @@ class ConsoleSslContext(context.OSContextGenerator): cert_path = os.path.join(ssl_dir, 'nova_cert.pem') with open(cert_path, 'w') as fh: - fh.write(b64decode(config('noVNC-ssl-cert'))) + fh.write(b64decode(config('console-access-ssl-cert'))) key_path = os.path.join(ssl_dir, 'nova_key.pem') with open(key_path, 'w') as fh: - fh.write(b64decode(config('noVNC-ssl-key'))) + fh.write(b64decode(config('console-access-ssl-key'))) ctxt['ssl_only'] = True ctxt['ssl_cert'] = cert_path From 55d5c621580a02b13286117a523374397a49db5d Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 16:03:52 +0800 Subject: [PATCH 07/36] Trivial fix. 
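For reference, a minimal sketch of the URL this change assembles, assuming the proxy ports and landing pages asserted by the unit tests (6080 + /vnc_auto.html for novnc, 6082 + /spice_auto.html for spice). The CONSOLE_PROXY table and build_console_url() below are illustrative stand-ins for nova_cc_utils.console_attributes(), not the charm's actual lookup:

    # Illustrative sketch only: ports/pages taken from the values the unit
    # tests assert; the real charm resolves these via console_attributes().
    CONSOLE_PROXY = {
        'novnc': {'proxy-port': 6080, 'proxy-page': '/vnc_auto.html'},
        'spice': {'proxy-port': 6082, 'proxy-page': '/spice_auto.html'},
    }

    def build_console_url(ip_addr, proto):
        # Mirrors the "https://%s:%s%s" assembly in ConsoleSslContext.
        attrs = CONSOLE_PROXY[proto]
        return 'https://%s:%s%s' % (ip_addr, attrs['proxy-port'],
                                    attrs['proxy-page'])

    # build_console_url('10.5.100.1', 'novnc')
    # => 'https://10.5.100.1:6080/vnc_auto.html'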
--- hooks/nova_cc_context.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index a5aa49b6..5dda3d5d 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -394,9 +394,11 @@ class ConsoleSslContext(context.OSContextGenerator): ip_addr = "[{}]".format(ip_addr) _proto = config('console-access-protocol') - url = "%s:%s%s" % (ip_addr, - console_attributes('proxy-port', proto=_proto), - console_attributes('proxy-page', proto=_proto)) + url = "https://%s:%s%s" % ( + ip_addr, + console_attributes('proxy-port', proto=_proto), + console_attributes('proxy-page', proto=_proto)) + ctxt['novncproxy_base_url'] = url return ctxt From 0d27c39d93b9b0a28bc63f1217edf403a97f0313 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 17:12:38 +0800 Subject: [PATCH 08/36] Add more unit_tests --- config.yaml | 3 +- unit_tests/test_nova_cc_contexts.py | 107 ++++++++++++++++++++++++++-- 2 files changed, 104 insertions(+), 6 deletions(-) diff --git a/config.yaml b/config.yaml index f391101d..263bca3c 100644 --- a/config.yaml +++ b/config.yaml @@ -389,7 +389,8 @@ options: it used for console access session only. Setting this value with console- access-ssl-key will enable encrypted console session. This has nothing relation with Nova API SSL, it could be used in the case without Nova API - SSL enabled. + SSL enabled. Option console-access-protocol need to be chosen from set{novnc, + spice} for console session encypted. console-access-ssl-key: type: string default: diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index be45209f..fd18b324 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -149,12 +149,8 @@ class NovaComputeContextTests(CharmTestCase): self.related_units.return_value = ['unit/0'] self.assertFalse(context.use_local_neutron_api()) - @mock.patch('__builtin__.open') - @mock.patch('os.path.exists') @mock.patch.object(context, 'config') - @mock.patch.object(context, 'unit_get') - def test_noVNC_ssl_disabled(self, mock_unit_get, mock_config, - mock_exists, mock_open): + def test_console_access_ssl_disabled(self, mock_config): config = {'console-access-ssl_cert': 'LS0tLS1CRUdJTiBDRV', 'console-access-ssl_key': 'LS0tLS1CRUdJTiBQUk'} mock_config.side_effect = lambda key: config.get(key) @@ -162,6 +158,21 @@ class NovaComputeContextTests(CharmTestCase): ctxt = context.ConsoleSslContext()() self.assertEqual(ctxt, None) + config = {'console-access-ssl_cert': None, + 'console-access-ssl_key': None} + mock_config.side_effect = lambda key: config.get(key) + + ctxt = context.ConsoleSslContext()() + self.assertEqual(ctxt, None) + + config = {'console-access-protocol': 'novnc', + 'console-access-ssl_cert': None, + 'console-access-ssl_key': None} + mock_config.side_effect = lambda key: config.get(key) + + ctxt = context.ConsoleSslContext()() + self.assertEqual(ctxt, None) + @mock.patch('__builtin__.open') @mock.patch('os.path.exists') @mock.patch.object(context, 'config') @@ -187,3 +198,89 @@ class NovaComputeContextTests(CharmTestCase): self.assertTrue(ctxt['ssl_only']) self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') + self.assertEqual(ctxt['novncproxy_base_url'], + 'https://10.5.100.1:6080/vnc_auto.html') + + @mock.patch('__builtin__.open') + @mock.patch('os.path.exists') + @mock.patch.object(context, 'config') + @mock.patch.object(context, 'unit_get') + 
@mock.patch.object(context, 'is_clustered') + @mock.patch.object(context, 'resolve_address') + def test_noVNC_ssl_enabled_no_cluster(self, mock_resolve_address, + mock_is_clustered, mock_unit_get, + mock_config, mock_exists, mock_open): + config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', + 'console-access-ssl-key': 'LS0tLS1CRUdJTiBQUk', + 'console-access-protocol': 'novnc'} + mock_config.side_effect = lambda key: config.get(key) + mock_exists.return_value = True + mock_unit_get.return_value = '10.5.0.1' + mock_is_clustered.return_value = False + + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + + ctxt = context.ConsoleSslContext()() + self.assertTrue(ctxt['ssl_only']) + self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') + self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') + self.assertEqual(ctxt['novncproxy_base_url'], + 'https://10.5.0.1:6080/vnc_auto.html') + + @mock.patch('__builtin__.open') + @mock.patch('os.path.exists') + @mock.patch.object(context, 'config') + @mock.patch.object(context, 'unit_get') + @mock.patch.object(context, 'is_clustered') + @mock.patch.object(context, 'resolve_address') + def test_spice_html5_ssl_enabled(self, mock_resolve_address, + mock_is_clustered, mock_unit_get, + mock_config, mock_exists, mock_open): + config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', + 'console-access-ssl-key': 'LS0tLS1CRUdJTiBQUk', + 'console-access-protocol': 'spice'} + mock_config.side_effect = lambda key: config.get(key) + mock_exists.return_value = True + mock_unit_get.return_value = '127.0.0.1' + mock_is_clustered.return_value = True + mock_resolve_address.return_value = '10.5.100.1' + + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + + ctxt = context.ConsoleSslContext()() + self.assertTrue(ctxt['ssl_only']) + self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') + self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') + self.assertEqual(ctxt['novncproxy_base_url'], + 'https://10.5.100.1:6082/spice_auto.html') + + @mock.patch('__builtin__.open') + @mock.patch('os.path.exists') + @mock.patch.object(context, 'config') + @mock.patch.object(context, 'unit_get') + @mock.patch.object(context, 'is_clustered') + @mock.patch.object(context, 'resolve_address') + def test_spice_html5_ssl_enabled_no_cluster(self, mock_resolve_address, + mock_is_clustered, + mock_unit_get, + mock_config, mock_exists, + mock_open): + config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', + 'console-access-ssl-key': 'LS0tLS1CRUdJTiBQUk', + 'console-access-protocol': 'spice'} + mock_config.side_effect = lambda key: config.get(key) + mock_exists.return_value = True + mock_unit_get.return_value = '10.5.0.1' + mock_is_clustered.return_value = False + + mock_open.return_value.__enter__ = lambda s: s + mock_open.return_value.__exit__ = mock.Mock() + + ctxt = context.ConsoleSslContext()() + self.assertTrue(ctxt['ssl_only']) + self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') + self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') + self.assertEqual(ctxt['novncproxy_base_url'], + 'https://10.5.0.1:6082/spice_auto.html') From d61aa28dafdf3fc67cbdd72eff6c2d5a75867197 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 20:01:24 +0800 Subject: [PATCH 09/36] Fix comments. 
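Besides the ConsoleSSLContext rename and the novnc/spice split (novncproxy_base_url vs html5proxy_base_url), this revision reworks how console_settings() picks the proxy base address: HTTPS is used whenever console SSL is configured or API SSL is enabled, and resolve_address()/canonical_url() handle the 'local' case. A rough, self-contained sketch of that selection logic, with the charm-helpers results passed in as plain values rather than called directly:

    # Simplified sketch of the proxy base address selection added to
    # console_settings(); inputs stand in for charm-helpers calls.
    def proxy_base_addr(console_ssl, proxy_ip, public_addr, api_https,
                        api_canonical_url):
        if proxy_ip == 'local':
            if console_ssl:
                return 'https://%s' % public_addr
            # canonical_url() only yields https when API SSL is enabled.
            return api_canonical_url
        scheme = 'https' if (console_ssl or api_https) else 'http'
        return '%s://%s' % (scheme, proxy_ip)

    # proxy_base_addr(False, 'public-host', None, True, None)
    # => 'https://public-host'  (matches the explicit-ip-with-https test)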
--- config.yaml | 12 +++---- hooks/nova_cc_context.py | 17 ++++++---- hooks/nova_cc_hooks.py | 27 ++++++++++----- hooks/nova_cc_utils.py | 2 +- templates/icehouse/nova.conf | 2 ++ templates/juno/nova.conf | 2 ++ templates/kilo/nova.conf | 2 ++ templates/parts/spice | 4 +++ unit_tests/test_nova_cc_contexts.py | 38 +++++++++++++-------- unit_tests/test_nova_cc_hooks.py | 51 +++++++++++++++++++---------- 10 files changed, 104 insertions(+), 53 deletions(-) create mode 100644 templates/parts/spice diff --git a/config.yaml b/config.yaml index 263bca3c..a7a03143 100644 --- a/config.yaml +++ b/config.yaml @@ -385,12 +385,12 @@ options: type: string default: description: | - This differs from the SSL certificate to install and use for API ports, - it used for console access session only. Setting this value with console- - access-ssl-key will enable encrypted console session. This has nothing - relation with Nova API SSL, it could be used in the case without Nova API - SSL enabled. Option console-access-protocol need to be chosen from set{novnc, - spice} for console session encypted. + Used for encrypting console connections. This differs from the SSL certificate + used for API endpoints and is used for console access session only. Setting + this value along with console-access-ssl-key will enable encrypted console + sessions. This has nothing to do with Nova API SSL and can be used + independently. This can be used in conjunction with console-access-protocol + set to 'novnc' or 'spice'. console-access-ssl-key: type: string default: diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 5dda3d5d..5ab07b42 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -30,7 +30,6 @@ from charmhelpers.contrib.hahelpers.cluster import ( ) from charmhelpers.contrib.network.ip import ( format_ipv6_addr, - is_ipv6, ) from charmhelpers.contrib.openstack.ip import ( resolve_address, @@ -358,7 +357,7 @@ class InstanceConsoleContext(context.OSContextGenerator): return ctxt -class ConsoleSslContext(context.OSContextGenerator): +class ConsoleSSLContext(context.OSContextGenerator): interfaces = [] def __call__(self): @@ -374,12 +373,14 @@ class ConsoleSslContext(context.OSContextGenerator): os.mkdir(ssl_dir) cert_path = os.path.join(ssl_dir, 'nova_cert.pem') + decode_ssl_cert = b64decode(config('console-access-ssl-cert')) with open(cert_path, 'w') as fh: - fh.write(b64decode(config('console-access-ssl-cert'))) + fh.write(decode_ssl_cert) key_path = os.path.join(ssl_dir, 'nova_key.pem') + decode_ssl_key = b64decode(config('console-access-ssl-key')) with open(key_path, 'w') as fh: - fh.write(b64decode(config('console-access-ssl-key'))) + fh.write(decode_ssl_key) ctxt['ssl_only'] = True ctxt['ssl_cert'] = cert_path @@ -390,8 +391,7 @@ class ConsoleSslContext(context.OSContextGenerator): else: ip_addr = unit_get('private-address') - if is_ipv6(ip_addr): - ip_addr = "[{}]".format(ip_addr) + ip_addr = format_ipv6_addr(ip_addr) or ip_addr _proto = config('console-access-protocol') url = "https://%s:%s%s" % ( @@ -399,6 +399,9 @@ class ConsoleSslContext(context.OSContextGenerator): console_attributes('proxy-port', proto=_proto), console_attributes('proxy-page', proto=_proto)) - ctxt['novncproxy_base_url'] = url + if _proto == 'novnc': + ctxt['novncproxy_base_url'] = url + elif _proto == 'spice': + ctxt['html5proxy_base_url'] = url return ctxt diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 929cb733..50cbaf0a 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ 
-106,6 +106,7 @@ from nova_cc_utils import ( from charmhelpers.contrib.hahelpers.cluster import ( is_elected_leader, get_hacluster_config, + https, ) from charmhelpers.payload.execd import execd_preinstall @@ -121,7 +122,8 @@ from charmhelpers.contrib.network.ip import ( get_netmask_for_address, get_address_in_network, get_ipv6_addr, - is_ipv6 + is_ipv6, + format_ipv6_addr, ) from charmhelpers.contrib.openstack.context import ADDRESS_TYPES @@ -513,18 +515,25 @@ def console_settings(): rel_settings['console_keymap'] = config('console-keymap') rel_settings['console_access_protocol'] = proto - scheme = 'http' + console_ssl = False if config('console-access-ssl-cert') and config('console-access-ssl-key'): - scheme = 'https' - - address = resolve_address(endpoint_type=PUBLIC) - if is_ipv6(address): - address = "[{}]".format(address) + console_ssl = True if config('console-proxy-ip') == 'local': - proxy_base_addr = '%s://%s' % (scheme, address) + if console_ssl: + address = resolve_address(endpoint_type=PUBLIC) + address = format_ipv6_addr(address) or address + proxy_base_addr = 'https://%s' % address + else: + # canonical_url will only return 'https:' if API SSL are enabled. + proxy_base_addr = canonical_url(CONFIGS, PUBLIC) else: - proxy_base_addr = "%s://%s" % (scheme, config('console-proxy-ip')) + if console_ssl or https(): + schema = "https" + else: + schema = "http" + proxy_base_addr = "%s://%s" % (schema, config('console-proxy-ip')) + if proto == 'vnc': protocols = ['novnc', 'xvpvnc'] else: diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 29bd190f..e7a63b61 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -195,7 +195,7 @@ BASE_RESOURCE_MAP = OrderedDict([ nova_cc_context.NeutronCCContext(), nova_cc_context.NovaConfigContext(), nova_cc_context.InstanceConsoleContext(), - nova_cc_context.ConsoleSslContext()], + nova_cc_context.ConsoleSSLContext()], }), (NOVA_API_PASTE, { 'services': [s for s in BASE_SERVICES if 'api' in s], diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf index fdabd4bf..f480fc16 100644 --- a/templates/icehouse/nova.conf +++ b/templates/icehouse/nova.conf @@ -165,3 +165,5 @@ enabled=True [conductor] workers = {{ workers }} + +{% include "parts/spice "%} diff --git a/templates/juno/nova.conf b/templates/juno/nova.conf index 70f87fbd..f862f23a 100644 --- a/templates/juno/nova.conf +++ b/templates/juno/nova.conf @@ -160,3 +160,5 @@ enabled=True [conductor] workers = {{ workers }} + +{% include "parts/spice" %} diff --git a/templates/kilo/nova.conf b/templates/kilo/nova.conf index 61485263..965c894a 100644 --- a/templates/kilo/nova.conf +++ b/templates/kilo/nova.conf @@ -155,3 +155,5 @@ workers = {{ workers }} [oslo_concurrency] lock_path=/var/lock/nova + +{% include "parts/spice" %} diff --git a/templates/parts/spice b/templates/parts/spice new file mode 100644 index 00000000..facba78c --- /dev/null +++ b/templates/parts/spice @@ -0,0 +1,4 @@ +[spice] +{% if html5proxy_base_url -%} +html5proxy_base_url = {{ html5proxy_base_url }} +{% endif -%} diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index fd18b324..9765d73c 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -155,14 +155,14 @@ class NovaComputeContextTests(CharmTestCase): 'console-access-ssl_key': 'LS0tLS1CRUdJTiBQUk'} mock_config.side_effect = lambda key: config.get(key) - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertEqual(ctxt, None) 
config = {'console-access-ssl_cert': None, 'console-access-ssl_key': None} mock_config.side_effect = lambda key: config.get(key) - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertEqual(ctxt, None) config = {'console-access-protocol': 'novnc', @@ -170,7 +170,7 @@ class NovaComputeContextTests(CharmTestCase): 'console-access-ssl_key': None} mock_config.side_effect = lambda key: config.get(key) - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertEqual(ctxt, None) @mock.patch('__builtin__.open') @@ -179,7 +179,9 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch.object(context, 'unit_get') @mock.patch.object(context, 'is_clustered') @mock.patch.object(context, 'resolve_address') - def test_noVNC_ssl_enabled(self, mock_resolve_address, + @mock.patch.object(context, 'b64decode') + def test_noVNC_ssl_enabled(self, mock_b64decode, + mock_resolve_address, mock_is_clustered, mock_unit_get, mock_config, mock_exists, mock_open): config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', @@ -190,11 +192,12 @@ class NovaComputeContextTests(CharmTestCase): mock_unit_get.return_value = '127.0.0.1' mock_is_clustered.return_value = True mock_resolve_address.return_value = '10.5.100.1' + mock_b64decode.return_value = 'decode_success' mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertTrue(ctxt['ssl_only']) self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') @@ -207,7 +210,9 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch.object(context, 'unit_get') @mock.patch.object(context, 'is_clustered') @mock.patch.object(context, 'resolve_address') - def test_noVNC_ssl_enabled_no_cluster(self, mock_resolve_address, + @mock.patch.object(context, 'b64decode') + def test_noVNC_ssl_enabled_no_cluster(self, mock_b64decode, + mock_resolve_address, mock_is_clustered, mock_unit_get, mock_config, mock_exists, mock_open): config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', @@ -217,11 +222,12 @@ class NovaComputeContextTests(CharmTestCase): mock_exists.return_value = True mock_unit_get.return_value = '10.5.0.1' mock_is_clustered.return_value = False + mock_b64decode.return_value = 'decode_success' mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertTrue(ctxt['ssl_only']) self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') @@ -234,7 +240,9 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch.object(context, 'unit_get') @mock.patch.object(context, 'is_clustered') @mock.patch.object(context, 'resolve_address') - def test_spice_html5_ssl_enabled(self, mock_resolve_address, + @mock.patch.object(context, 'b64decode') + def test_spice_html5_ssl_enabled(self, mock_b64decode, + mock_resolve_address, mock_is_clustered, mock_unit_get, mock_config, mock_exists, mock_open): config = {'console-access-ssl-cert': 'LS0tLS1CRUdJTiBDRV', @@ -245,15 +253,16 @@ class NovaComputeContextTests(CharmTestCase): mock_unit_get.return_value = '127.0.0.1' mock_is_clustered.return_value = True mock_resolve_address.return_value = '10.5.100.1' + mock_b64decode.return_value = 'decode_success' mock_open.return_value.__enter__ = lambda s: s 
mock_open.return_value.__exit__ = mock.Mock() - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertTrue(ctxt['ssl_only']) self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') - self.assertEqual(ctxt['novncproxy_base_url'], + self.assertEqual(ctxt['html5proxy_base_url'], 'https://10.5.100.1:6082/spice_auto.html') @mock.patch('__builtin__.open') @@ -262,7 +271,9 @@ class NovaComputeContextTests(CharmTestCase): @mock.patch.object(context, 'unit_get') @mock.patch.object(context, 'is_clustered') @mock.patch.object(context, 'resolve_address') - def test_spice_html5_ssl_enabled_no_cluster(self, mock_resolve_address, + @mock.patch.object(context, 'b64decode') + def test_spice_html5_ssl_enabled_no_cluster(self, mock_b64decode, + mock_resolve_address, mock_is_clustered, mock_unit_get, mock_config, mock_exists, @@ -274,13 +285,14 @@ class NovaComputeContextTests(CharmTestCase): mock_exists.return_value = True mock_unit_get.return_value = '10.5.0.1' mock_is_clustered.return_value = False + mock_b64decode.return_value = 'decode_success' mock_open.return_value.__enter__ = lambda s: s mock_open.return_value.__exit__ = mock.Mock() - ctxt = context.ConsoleSslContext()() + ctxt = context.ConsoleSSLContext()() self.assertTrue(ctxt['ssl_only']) self.assertEqual(ctxt['ssl_cert'], '/etc/nova/ssl/nova_cert.pem') self.assertEqual(ctxt['ssl_key'], '/etc/nova/ssl/nova_key.pem') - self.assertEqual(ctxt['novncproxy_base_url'], + self.assertEqual(ctxt['html5proxy_base_url'], 'https://10.5.0.1:6082/spice_auto.html') diff --git a/unit_tests/test_nova_cc_hooks.py b/unit_tests/test_nova_cc_hooks.py index 9602e157..e616efb9 100644 --- a/unit_tests/test_nova_cc_hooks.py +++ b/unit_tests/test_nova_cc_hooks.py @@ -609,12 +609,12 @@ class NovaCCHooksTests(CharmTestCase): self.assertTrue(_compute_joined.called) self.assertTrue(_quantum_joined.called) - @patch.object(hooks, 'resolve_address') + @patch.object(hooks, 'canonical_url') @patch.object(utils, 'config') - def test_console_settings_vnc(self, _utils_config, _resolve_address): + def test_console_settings_vnc(self, _utils_config, _canonical_url): _utils_config.return_value = 'vnc' _cc_host = "nova-cc-host1" - _resolve_address.return_value = _cc_host + _canonical_url.return_value = 'http://' + _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_proxy_novnc_address': 'http://%s:6080/vnc_auto.html' % @@ -630,12 +630,12 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'resolve_address') + @patch.object(hooks, 'canonical_url') @patch.object(utils, 'config') - def test_console_settings_xvpvnc(self, _utils_config, _resolve_address): + def test_console_settings_xvpvnc(self, _utils_config, _canonical_url): _utils_config.return_value = 'xvpvnc' _cc_host = "nova-cc-host1" - _resolve_address.return_value = _cc_host + _canonical_url.return_value = 'http://' + _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_access_protocol': 'xvpvnc', @@ -647,12 +647,12 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'resolve_address') + @patch.object(hooks, 'canonical_url') @patch.object(utils, 'config') - def test_console_settings_novnc(self, _utils_config, _resolve_address): + def test_console_settings_novnc(self, _utils_config, _canonical_url): _utils_config.return_value = 'novnc' _cc_host = "nova-cc-host1" - 
_resolve_address.return_value = _cc_host + _canonical_url.return_value = 'http://' + _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_proxy_novnc_address': 'http://%s:6080/vnc_auto.html' % @@ -664,12 +664,12 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'resolve_address') + @patch.object(hooks, 'canonical_url') @patch.object(utils, 'config') - def test_console_settings_spice(self, _utils_config, _resolve_address): + def test_console_settings_spice(self, _utils_config, _canonical_url): _utils_config.return_value = 'spice' _cc_host = "nova-cc-host1" - _resolve_address.return_value = _cc_host + _canonical_url.return_value = 'http://' + _cc_host _con_sets = hooks.console_settings() console_settings = { 'console_proxy_spice_address': 'http://%s:6082/spice_auto.html' % @@ -681,16 +681,14 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) - @patch.object(hooks, 'resolve_address') + @patch.object(hooks, 'https') @patch.object(utils, 'config') - def test_console_settings_explicit_ip(self, _utils_config, - _resolve_address): + def test_console_settings_explicit_ip(self, _utils_config, _https): _utils_config.return_value = 'spice' + _https.return_value = False _cc_public_host = "public-host" - _cc_private_host = "private-host" self.test_config.set('console-proxy-ip', _cc_public_host) _con_sets = hooks.console_settings() - _resolve_address.return_value = _cc_private_host console_settings = { 'console_proxy_spice_address': 'http://%s:6082/spice_auto.html' % (_cc_public_host), @@ -701,6 +699,25 @@ class NovaCCHooksTests(CharmTestCase): } self.assertEqual(_con_sets, console_settings) + @patch.object(hooks, 'https') + @patch.object(utils, 'config') + def test_console_settings_explicit_ip_with_https(self, _utils_config, + _https): + _utils_config.return_value = 'spice' + _https.return_value = True + _cc_public_host = "public-host" + self.test_config.set('console-proxy-ip', _cc_public_host) + _con_sets = hooks.console_settings() + console_settings = { + 'console_proxy_spice_address': 'https://%s:6082/spice_auto.html' % + (_cc_public_host), + 'console_proxy_spice_host': _cc_public_host, + 'console_proxy_spice_port': 6082, + 'console_access_protocol': 'spice', + 'console_keymap': 'en-us' + } + self.assertEqual(_con_sets, console_settings) + def test_conditional_neutron_migration(self): self.os_release.return_value = 'juno' self.services.return_value = ['neutron-server'] From 0440a4dc78e6d0eceaf9851149b902a5705d8c8c Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 22 Jul 2015 20:33:59 +0800 Subject: [PATCH 10/36] Fix template format err. --- templates/icehouse/nova.conf | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf index f480fc16..50746dff 100644 --- a/templates/icehouse/nova.conf +++ b/templates/icehouse/nova.conf @@ -166,4 +166,4 @@ enabled=True [conductor] workers = {{ workers }} -{% include "parts/spice "%} +{% include "parts/spice" %} From 18ae4b52af1bb64997c56cf2e4212c75b2239943 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Wed, 22 Jul 2015 23:02:42 -0400 Subject: [PATCH 11/36] [corey.bryant,trivial] Deploy nova-cc from source for amulet git tests. 
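For context, a rough sketch of the openstack-origin-git value the amulet test builds and passes to both nova-cloud-controller and nova-compute below; 'stable/juno' is shown only as an example branch, and on trusty-icehouse the test switches to the coreycb forks:

    # Illustrative only; the actual repos/branch are computed in
    # _configure_services() from the deployed OpenStack release.
    import yaml

    branch = 'stable/juno'  # example of 'stable/' + release string
    openstack_origin_git = {
        'repositories': [
            {'name': 'requirements',
             'repository': 'git://github.com/openstack/requirements',
             'branch': branch},
            {'name': 'neutron',
             'repository': 'git://github.com/openstack/neutron',
             'branch': branch},
            {'name': 'nova',
             'repository': 'git://github.com/openstack/nova',
             'branch': branch},
        ],
        'directory': '/mnt/openstack-git',
    }
    nova_cc_config = {'openstack-origin-git': yaml.dump(openstack_origin_git)}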
--- tests/basic_deployment.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py index dc850430..47edc50b 100644 --- a/tests/basic_deployment.py +++ b/tests/basic_deployment.py @@ -66,14 +66,16 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment): def _configure_services(self): """Configure all of the services.""" - nova_cc_config = {} + nova_cc_config = nova_config = {} if self.git: amulet_http_proxy = os.environ.get('AMULET_HTTP_PROXY') reqs_repo = 'git://github.com/openstack/requirements' + neutron_repo = 'git://github.com/openstack/neutron' nova_repo = 'git://github.com/openstack/nova' if self._get_openstack_release() == self.trusty_icehouse: reqs_repo = 'git://github.com/coreycb/requirements' + neutron_repo = 'git://github.com/coreycb/neutron' nova_repo = 'git://github.com/coreycb/nova' branch = 'stable/' + self._get_openstack_release_string() @@ -83,6 +85,9 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment): {'name': 'requirements', 'repository': reqs_repo, 'branch': branch}, + {'name': 'neutron', + 'repository': neutron_repo, + 'branch': branch}, {'name': 'nova', 'repository': nova_repo, 'branch': branch}, @@ -92,10 +97,11 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment): 'https_proxy': amulet_http_proxy, } nova_cc_config['openstack-origin-git'] = yaml.dump(openstack_origin_git) + nova_config['openstack-origin-git'] = yaml.dump(openstack_origin_git) keystone_config = {'admin-password': 'openstack', 'admin-token': 'ubuntutesting'} configs = {'nova-cloud-controller': nova_cc_config, - 'keystone': keystone_config} + 'keystone': keystone_config, 'nova-compute': nova_config} super(NovaCCBasicDeployment, self)._configure_services(configs) def _initialize_tests(self): From 4c3ad8d833072172aff6217539098b3f509000ce Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Fri, 24 Jul 2015 18:05:37 +0800 Subject: [PATCH 12/36] decode before write file --- hooks/nova_cc_context.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 5ab07b42..f420b23c 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -374,11 +374,12 @@ class ConsoleSSLContext(context.OSContextGenerator): cert_path = os.path.join(ssl_dir, 'nova_cert.pem') decode_ssl_cert = b64decode(config('console-access-ssl-cert')) - with open(cert_path, 'w') as fh: - fh.write(decode_ssl_cert) key_path = os.path.join(ssl_dir, 'nova_key.pem') decode_ssl_key = b64decode(config('console-access-ssl-key')) + + with open(cert_path, 'w') as fh: + fh.write(decode_ssl_cert) with open(key_path, 'w') as fh: fh.write(decode_ssl_key) From 128a62e023d6f14ad96938d2dd4983273c9bfcf8 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 29 Jul 2015 17:31:20 +0800 Subject: [PATCH 13/36] Fix live-migration fail if re-add unit --- hooks/nova_cc_utils.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index e7a63b61..d0486ffa 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -787,8 +787,7 @@ def ssh_compute_add(public_key, rid=None, unit=None, user=None): hosts.append(hn.split('.')[0]) for host in list(set(hosts)): - if not ssh_known_host_key(host, unit, user): - add_known_host(host, unit, user) + add_known_host(host, unit, user) if not ssh_authorized_key_exists(public_key, unit, user): log('Saving SSH authorized key for compute host at %s.' 
% From 3d8ed0298cff6eb5bb7e018f1299bc17d318b351 Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 29 Jul 2015 18:41:19 +0800 Subject: [PATCH 14/36] fetch the right part of key --- hooks/nova_cc_utils.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index d0486ffa..badbaf2e 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -724,7 +724,8 @@ def authorized_keys(unit=None, user=None): def ssh_known_host_key(host, unit=None, user=None): cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host] try: - return subprocess.check_output(cmd).strip() + output = subprocess.check_output(cmd).strip() + return output.split('\n')[1] except subprocess.CalledProcessError: return None From 8c5f43b1ce4c020b91281a45d41b80c691a570c8 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 29 Jul 2015 11:46:43 +0100 Subject: [PATCH 15/36] [gnuoy,trivial] Pre-release charmhelper sync --- .../charmhelpers/contrib/openstack/context.py | 18 +++++--- .../contrib/openstack/templating.py | 4 +- .../contrib/storage/linux/utils.py | 2 +- hooks/charmhelpers/core/files.py | 45 +++++++++++++++++++ hooks/charmhelpers/core/hookenv.py | 3 +- 5 files changed, 63 insertions(+), 9 deletions(-) create mode 100644 hooks/charmhelpers/core/files.py diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 8f3f1b15..bbf4722b 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1053,11 +1053,17 @@ class SubordinateConfigContext(OSContextGenerator): """ self.service = service self.config_file = config_file - self.interface = interface + if isinstance(interface, list): + self.interfaces = interface + else: + self.interfaces = [interface] def __call__(self): ctxt = {'sections': {}} - for rid in relation_ids(self.interface): + rids = [] + for interface in self.interfaces: + rids.extend(relation_ids(interface)) + for rid in rids: for unit in related_units(rid): sub_config = relation_get('subordinate_configuration', rid=rid, unit=unit) @@ -1085,13 +1091,15 @@ class SubordinateConfigContext(OSContextGenerator): sub_config = sub_config[self.config_file] for k, v in six.iteritems(sub_config): if k == 'sections': - for section, config_dict in six.iteritems(v): + for section, config_list in six.iteritems(v): log("adding section '%s'" % (section), level=DEBUG) - ctxt[k][section] = config_dict + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list else: ctxt[k] = v - log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 24cb272b..021d8cf9 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -29,8 +29,8 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: - # python-jinja2 may not be installed yet, or we're running unittests. 
- FileSystemLoader = ChoiceLoader = Environment = exceptions = None + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions class OSConfigException(Exception): diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b72..e2769e49 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -67,4 +67,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py new file mode 100644 index 00000000..0f12d321 --- /dev/null +++ b/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. + :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. 
+ """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index dd8def9a..15b09d11 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -263,7 +264,7 @@ class Config(dict): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v From 2b3bd28527b701be7d1be02ec4e0f2212dd5caaf Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Wed, 29 Jul 2015 19:23:06 +0800 Subject: [PATCH 16/36] Fix key formats --- hooks/nova_cc_utils.py | 10 ++++++++-- unit_tests/test_nova_cc_utils.py | 12 ++++++------ 2 files changed, 14 insertions(+), 8 deletions(-) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index badbaf2e..597c94dd 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -736,6 +736,12 @@ def remove_known_host(host, unit=None, user=None): subprocess.check_call(cmd) +def is_same_key(key_1, key_2): + k_1 = key_1.split('= ')[1] + k_2 = key_2.split('= ')[1] + return k_1 == k_2 + + def add_known_host(host, unit=None, user=None): '''Add variations of host to a known hosts file.''' cmd = ['ssh-keyscan', '-H', '-t', 'rsa', host] @@ -746,8 +752,8 @@ def add_known_host(host, unit=None, user=None): raise e current_key = ssh_known_host_key(host, unit, user) - if current_key: - if remote_key == current_key: + if current_key and remote_key: + if is_same_key(remote_key, current_key): log('Known host key for compute host %s up to date.' 
% host) return else: diff --git a/unit_tests/test_nova_cc_utils.py b/unit_tests/test_nova_cc_utils.py index 7974173d..a0d43b00 100644 --- a/unit_tests/test_nova_cc_utils.py +++ b/unit_tests/test_nova_cc_utils.py @@ -416,8 +416,8 @@ class NovaCCUtilsTests(CharmTestCase): @patch.object(utils, 'ssh_known_host_key') @patch('subprocess.check_output') def test_add_known_host_exists(self, check_output, host_key, rm): - check_output.return_value = 'fookey' - host_key.return_value = 'fookey' + check_output.return_value = '|1|= fookey' + host_key.return_value = '|1|= fookey' with patch_open() as (_open, _file): utils.add_known_host('foohost') self.assertFalse(rm.called) @@ -429,8 +429,8 @@ class NovaCCUtilsTests(CharmTestCase): @patch('subprocess.check_output') def test_add_known_host_exists_outdated( self, check_output, host_key, rm, known_hosts): - check_output.return_value = 'fookey' - host_key.return_value = 'fookey_old' + check_output.return_value = '|1|= fookey' + host_key.return_value = '|1|= fookey_old' with patch_open() as (_open, _file): utils.add_known_host('foohost', None, None) rm.assert_called_with('foohost', None, None) @@ -441,13 +441,13 @@ class NovaCCUtilsTests(CharmTestCase): @patch('subprocess.check_output') def test_add_known_host_exists_added( self, check_output, host_key, rm, known_hosts): - check_output.return_value = 'fookey' + check_output.return_value = '|1|= fookey' host_key.return_value = None with patch_open() as (_open, _file): _file.write = MagicMock() utils.add_known_host('foohost') self.assertFalse(rm.called) - _file.write.assert_called_with('fookey\n') + _file.write.assert_called_with('|1|= fookey\n') @patch('__builtin__.open') @patch('os.mkdir') From a96e69210d620c3d1b1942592ceef291d85e597b Mon Sep 17 00:00:00 2001 From: Ryan Beisner Date: Wed, 29 Jul 2015 17:23:30 +0000 Subject: [PATCH 17/36] remove amulet tests for unsupported releases --- tests/018-basic-utopic-juno | 10 ---------- 1 file changed, 10 deletions(-) delete mode 100755 tests/018-basic-utopic-juno diff --git a/tests/018-basic-utopic-juno b/tests/018-basic-utopic-juno deleted file mode 100755 index 5f808ca3..00000000 --- a/tests/018-basic-utopic-juno +++ /dev/null @@ -1,10 +0,0 @@ -#!/usr/bin/python - -"""Amulet tests on a basic nova cloud controller deployment on - utopic-juno.""" - -from basic_deployment import NovaCCBasicDeployment - -if __name__ == '__main__': - deployment = NovaCCBasicDeployment(series='utopic') - deployment.run_tests() From fb46491d7470158f794223e9dd8f4387701cc8d2 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Fri, 31 Jul 2015 14:10:41 +0100 Subject: [PATCH 18/36] [gnuoy,trivial] Pre-release charmhelper sync to pickup cli module --- charm-helpers-hooks.yaml | 1 + hooks/charmhelpers/cli/__init__.py | 195 ++++++++++++++++++ hooks/charmhelpers/cli/benchmark.py | 36 ++++ hooks/charmhelpers/cli/commands.py | 32 +++ hooks/charmhelpers/cli/host.py | 31 +++ hooks/charmhelpers/cli/unitdata.py | 39 ++++ .../charmhelpers/contrib/openstack/context.py | 52 ++--- hooks/charmhelpers/core/hookenv.py | 106 +++++++++- hooks/charmhelpers/core/unitdata.py | 78 +++++-- 9 files changed, 525 insertions(+), 45 deletions(-) create mode 100644 hooks/charmhelpers/cli/__init__.py create mode 100644 hooks/charmhelpers/cli/benchmark.py create mode 100644 hooks/charmhelpers/cli/commands.py create mode 100644 hooks/charmhelpers/cli/host.py create mode 100644 hooks/charmhelpers/cli/unitdata.py diff --git a/charm-helpers-hooks.yaml b/charm-helpers-hooks.yaml index e80ef2eb..3291ea33 100644 --- 
a/charm-helpers-hooks.yaml +++ b/charm-helpers-hooks.yaml @@ -2,6 +2,7 @@ branch: lp:charm-helpers destination: hooks/charmhelpers include: - core + - cli - fetch - contrib.openstack|inc=* - contrib.storage diff --git a/hooks/charmhelpers/cli/__init__.py b/hooks/charmhelpers/cli/__init__.py new file mode 100644 index 00000000..7118daf5 --- /dev/null +++ b/hooks/charmhelpers/cli/__init__.py @@ -0,0 +1,195 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import inspect +import argparse +import sys + +from six.moves import zip + +from charmhelpers.core import unitdata + + +class OutputFormatter(object): + def __init__(self, outfile=sys.stdout): + self.formats = ( + "raw", + "json", + "py", + "yaml", + "csv", + "tab", + ) + self.outfile = outfile + + def add_arguments(self, argument_parser): + formatgroup = argument_parser.add_mutually_exclusive_group() + choices = self.supported_formats + formatgroup.add_argument("--format", metavar='FMT', + help="Select output format for returned data, " + "where FMT is one of: {}".format(choices), + choices=choices, default='raw') + for fmt in self.formats: + fmtfunc = getattr(self, fmt) + formatgroup.add_argument("-{}".format(fmt[0]), + "--{}".format(fmt), action='store_const', + const=fmt, dest='format', + help=fmtfunc.__doc__) + + @property + def supported_formats(self): + return self.formats + + def raw(self, output): + """Output data as raw string (default)""" + if isinstance(output, (list, tuple)): + output = '\n'.join(map(str, output)) + self.outfile.write(str(output)) + + def py(self, output): + """Output data as a nicely-formatted python data structure""" + import pprint + pprint.pprint(output, stream=self.outfile) + + def json(self, output): + """Output data in JSON format""" + import json + json.dump(output, self.outfile) + + def yaml(self, output): + """Output data in YAML format""" + import yaml + yaml.safe_dump(output, self.outfile) + + def csv(self, output): + """Output data as excel-compatible CSV""" + import csv + csvwriter = csv.writer(self.outfile) + csvwriter.writerows(output) + + def tab(self, output): + """Output data in excel-compatible tab-delimited format""" + import csv + csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab) + csvwriter.writerows(output) + + def format_output(self, output, fmt='raw'): + fmtfunc = getattr(self, fmt) + fmtfunc(output) + + +class CommandLine(object): + argument_parser = None + subparsers = None + formatter = None + exit_code = 0 + + def __init__(self): + if not self.argument_parser: + self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks') + if not self.formatter: + self.formatter = OutputFormatter() + self.formatter.add_arguments(self.argument_parser) + if not self.subparsers: + self.subparsers = self.argument_parser.add_subparsers(help='Commands') + + def subcommand(self, command_name=None): + """ + Decorate a function as a subcommand. 
Use its arguments as the + command-line arguments""" + def wrapper(decorated): + cmd_name = command_name or decorated.__name__ + subparser = self.subparsers.add_parser(cmd_name, + description=decorated.__doc__) + for args, kwargs in describe_arguments(decorated): + subparser.add_argument(*args, **kwargs) + subparser.set_defaults(func=decorated) + return decorated + return wrapper + + def test_command(self, decorated): + """ + Subcommand is a boolean test function, so bool return values should be + converted to a 0/1 exit code. + """ + decorated._cli_test_command = True + return decorated + + def no_output(self, decorated): + """ + Subcommand is not expected to return a value, so don't print a spurious None. + """ + decorated._cli_no_output = True + return decorated + + def subcommand_builder(self, command_name, description=None): + """ + Decorate a function that builds a subcommand. Builders should accept a + single argument (the subparser instance) and return the function to be + run as the command.""" + def wrapper(decorated): + subparser = self.subparsers.add_parser(command_name) + func = decorated(subparser) + subparser.set_defaults(func=func) + subparser.description = description or func.__doc__ + return wrapper + + def run(self): + "Run cli, processing arguments and executing subcommands." + arguments = self.argument_parser.parse_args() + argspec = inspect.getargspec(arguments.func) + vargs = [] + kwargs = {} + for arg in argspec.args: + vargs.append(getattr(arguments, arg)) + if argspec.varargs: + vargs.extend(getattr(arguments, argspec.varargs)) + if argspec.keywords: + for kwarg in argspec.keywords.items(): + kwargs[kwarg] = getattr(arguments, kwarg) + output = arguments.func(*vargs, **kwargs) + if getattr(arguments.func, '_cli_test_command', False): + self.exit_code = 0 if output else 1 + output = '' + if getattr(arguments.func, '_cli_no_output', False): + output = '' + self.formatter.format_output(output, arguments.format) + if unitdata._KV: + unitdata._KV.flush() + + +cmdline = CommandLine() + + +def describe_arguments(func): + """ + Analyze a function's signature and return a data structure suitable for + passing in as arguments to an argparse parser's add_argument() method.""" + + argspec = inspect.getargspec(func) + # we should probably raise an exception somewhere if func includes **kwargs + if argspec.defaults: + positional_args = argspec.args[:-len(argspec.defaults)] + keyword_names = argspec.args[-len(argspec.defaults):] + for arg, default in zip(keyword_names, argspec.defaults): + yield ('--{}'.format(arg),), {'default': default} + else: + positional_args = argspec.args + + for arg in positional_args: + yield (arg,), {} + if argspec.varargs: + yield (argspec.varargs,), {'nargs': '*'} diff --git a/hooks/charmhelpers/cli/benchmark.py b/hooks/charmhelpers/cli/benchmark.py new file mode 100644 index 00000000..b23c16ce --- /dev/null +++ b/hooks/charmhelpers/cli/benchmark.py @@ -0,0 +1,36 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. 
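To make the new charmhelpers.cli plumbing above easier to follow, here is a minimal sketch of registering a charm-side subcommand on the shared cmdline instance; the command name and return value are illustrative, not something this sync adds:

    from charmhelpers.cli import cmdline
    from charmhelpers.core import hookenv


    @cmdline.subcommand(command_name='local-unit')
    def local_unit():
        """Print the name of the local unit."""
        return hookenv.local_unit()

describe_arguments() maps the decorated function's signature onto argparse options, and CommandLine.run() dispatches to it, formatting the return value with OutputFormatter (raw by default, or --json, --yaml and so on); per the cli/commands.py docstring further down, registered commands are exposed through the chlp tool.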
+# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.contrib.benchmark import Benchmark + + +@cmdline.subcommand(command_name='benchmark-start') +def start(): + Benchmark.start() + + +@cmdline.subcommand(command_name='benchmark-finish') +def finish(): + Benchmark.finish() + + +@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score") +def service(subparser): + subparser.add_argument("value", help="The composite score.") + subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.") + subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.") + return Benchmark.set_composite_score diff --git a/hooks/charmhelpers/cli/commands.py b/hooks/charmhelpers/cli/commands.py new file mode 100644 index 00000000..443ff05d --- /dev/null +++ b/hooks/charmhelpers/cli/commands.py @@ -0,0 +1,32 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +""" +This module loads sub-modules into the python runtime so they can be +discovered via the inspect module. In order to prevent flake8 from (rightfully) +telling us these are unused modules, throw a ' # noqa' at the end of each import +so that the warning is suppressed. +""" + +from . import CommandLine # noqa + +""" +Import the sub-modules which have decorated subcommands to register with chlp. +""" +import host # noqa +import benchmark # noqa +import unitdata # noqa +from charmhelpers.core import hookenv # noqa diff --git a/hooks/charmhelpers/cli/host.py b/hooks/charmhelpers/cli/host.py new file mode 100644 index 00000000..58e78d6b --- /dev/null +++ b/hooks/charmhelpers/cli/host.py @@ -0,0 +1,31 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . 
import cmdline +from charmhelpers.core import host + + +@cmdline.subcommand() +def mounts(): + "List mounts" + return host.mounts() + + +@cmdline.subcommand_builder('service', description="Control system services") +def service(subparser): + subparser.add_argument("action", help="The action to perform (start, stop, etc...)") + subparser.add_argument("service_name", help="Name of the service to control") + return host.service diff --git a/hooks/charmhelpers/cli/unitdata.py b/hooks/charmhelpers/cli/unitdata.py new file mode 100644 index 00000000..d1cd95bf --- /dev/null +++ b/hooks/charmhelpers/cli/unitdata.py @@ -0,0 +1,39 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import unitdata + + +@cmdline.subcommand_builder('unitdata', description="Store and retrieve data") +def unitdata_cmd(subparser): + nested = subparser.add_subparsers() + get_cmd = nested.add_parser('get', help='Retrieve data') + get_cmd.add_argument('key', help='Key to retrieve the value of') + get_cmd.set_defaults(action='get', value=None) + set_cmd = nested.add_parser('set', help='Store data') + set_cmd.add_argument('key', help='Key to set') + set_cmd.add_argument('value', help='Value to store') + set_cmd.set_defaults(action='set') + + def _unitdata_cmd(action, key, value): + if action == 'get': + return unitdata.kv().get(key) + elif action == 'set': + unitdata.kv().set(key, value) + unitdata.kv().flush() + return '' + return _unitdata_cmd diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index bbf4722b..ab2ebac1 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -1051,8 +1051,11 @@ class SubordinateConfigContext(OSContextGenerator): :param config_file : Service's config file to query sections :param interface : Subordinate interface to inspect """ - self.service = service self.config_file = config_file + if isinstance(service, list): + self.services = service + else: + self.services = [service] if isinstance(interface, list): self.interfaces = interface else: @@ -1075,31 +1078,32 @@ class SubordinateConfigContext(OSContextGenerator): 'setting from %s' % rid, level=ERROR) continue - if self.service not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service), - level=INFO) - continue + for service in self.services: + if service not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s service' % (rid, service), + level=INFO) + continue - sub_config = sub_config[self.service] - if self.config_file not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file), - level=INFO) - continue + sub_config = sub_config[service] + if self.config_file not in sub_config: + log('Found subordinate_config on %s 
but it contained' + 'nothing for %s' % (rid, self.config_file), + level=INFO) + continue - sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): - if k == 'sections': - for section, config_list in six.iteritems(v): - log("adding section '%s'" % (section), - level=DEBUG) - if ctxt[k].get(section): - ctxt[k][section].extend(config_list) - else: - ctxt[k][section] = config_list - else: - ctxt[k] = v + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_list in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list + else: + ctxt[k] = v log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 15b09d11..6e4fb686 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import errno import tempfile from subprocess import CalledProcessError +from charmhelpers.cli import cmdline + import six if not six.PY3: from UserDict import UserDict @@ -173,9 +175,20 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cmdline.subcommand() +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -188,14 +201,27 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) +@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] +@cmdline.subcommand() +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) class Config(dict): @@ -468,6 +494,63 @@ def relation_types(): return rel_types +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. 
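The metadata-driven helpers added in this and the following hunk are easiest to read with a concrete (hypothetical) metadata.yaml in mind, say a charm that requires a 'shared-db' relation using the 'mysql-shared' interface:

    from charmhelpers.core import hookenv

    hookenv.relation_to_interface('shared-db')
    # -> 'mysql-shared'
    hookenv.relation_to_role_and_interface('shared-db')
    # -> ('requires', 'mysql-shared')
    hookenv.role_and_interface_to_relations('requires', 'mysql-shared')
    # -> ['shared-db']
    hookenv.interface_to_relations('mysql-shared')
    # -> ['shared-db']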
+ """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -644,6 +727,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 406a35c5..338104e0 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ associated to the hookname. import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu ' class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. + Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. 
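The docstring change above is worth spelling out: with this sync, writes through unitdata are only durable once flush() is called, so standalone callers must remember to flush. The key and value below are illustrative:

    from charmhelpers.core import unitdata

    db = unitdata.kv()              # module-level Storage singleton
    db.set('db-initialised', True)  # example key; values are JSON encoded
    db.get('db-initialised')        # -> True
    db.flush()                      # nothing is committed until this runs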
@@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ class Storage(object): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ class Storage(object): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. + """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. 
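The reworked range helpers above can be exercised with a small, purely hypothetical key namespace; note that getrange() now returns an empty dict rather than None when nothing matches:

    from charmhelpers.core import unitdata

    db = unitdata.kv()
    db.update({'eth0': 'br-data', 'eth1': 'br-ex'}, prefix='port.')
    db.getrange('port.', strip=True)
    # -> {'eth0': 'br-data', 'eth1': 'br-ex'}
    db.getrange('missing.')
    # -> {}  (previously None)
    db.unsetrange(['eth0'], prefix='port.')   # removes only 'port.eth0'
    db.flush()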
+ + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value From 97171a3486f121f37fb08a76e18c9f05cd46640b Mon Sep 17 00:00:00 2001 From: Liam Young Date: Mon, 3 Aug 2015 14:59:29 +0100 Subject: [PATCH 19/36] [gnuoy,trivial] Pre-release charmhelper sync to pickup leadership election peer migration fix --- .../contrib/peerstorage/__init__.py | 9 +++++---- hooks/charmhelpers/core/hookenv.py | 17 ++++++++++++++++- 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/hooks/charmhelpers/contrib/peerstorage/__init__.py b/hooks/charmhelpers/contrib/peerstorage/__init__.py index 09f2b12b..eafca44f 100644 --- a/hooks/charmhelpers/contrib/peerstorage/__init__.py +++ b/hooks/charmhelpers/contrib/peerstorage/__init__.py @@ -59,7 +59,7 @@ def some_hook(): """ -def leader_get(attribute=None): +def leader_get(attribute=None, rid=None): """Wrapper to ensure that settings are migrated from the peer relation. This is to support upgrading an environment that does not support @@ -94,7 +94,8 @@ def leader_get(attribute=None): # If attribute not present in leader db, check if this unit has set # the attribute in the peer relation if not leader_settings: - peer_setting = relation_get(attribute=attribute, unit=local_unit()) + peer_setting = _relation_get(attribute=attribute, unit=local_unit(), + rid=rid) if peer_setting: leader_set(settings={attribute: peer_setting}) leader_settings = peer_setting @@ -103,7 +104,7 @@ def leader_get(attribute=None): settings_migrated = True migrated.add(attribute) else: - r_settings = relation_get(unit=local_unit()) + r_settings = _relation_get(unit=local_unit(), rid=rid) if r_settings: for key in set(r_settings.keys()).difference(migrated): # Leader setting wins @@ -151,7 +152,7 @@ def relation_get(attribute=None, unit=None, rid=None): """ try: if rid in relation_ids('cluster'): - return leader_get(attribute) + return leader_get(attribute, rid) else: raise NotImplementedError except NotImplementedError: diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 6e4fb686..18860f59 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -34,7 +34,22 @@ import errno import tempfile from subprocess import CalledProcessError -from charmhelpers.cli import cmdline +try: + from charmhelpers.cli import cmdline +except ImportError as e: + # due to the anti-pattern of partially synching charmhelpers directly + # into charms, it's possible that charmhelpers.cli is not available; + # if that's the case, they don't really care about using the cli anyway, + # so mock it out + if str(e) == 'No module named cli': + class cmdline(object): + @classmethod + def subcommand(cls, *args, **kwargs): + def _wrap(func): + return func + return _wrap + else: + raise import six if not six.PY3: From 00c3a1a05d3e2393a6ea7b9ad5c290f6c4edc1be Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Thu, 6 Aug 2015 14:04:08 +0800 Subject: [PATCH 20/36] Add note --- hooks/nova_cc_utils.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 597c94dd..121e2dc1 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -724,6 +724,8 @@ def authorized_keys(unit=None, user=None): def ssh_known_host_key(host, unit=None, 
user=None): cmd = ['ssh-keygen', '-f', known_hosts(unit, user), '-H', '-F', host] try: + # The first line of output is like '# Host xx found: line 1 type RSA', + # which should be excluded. output = subprocess.check_output(cmd).strip() return output.split('\n')[1] except subprocess.CalledProcessError: @@ -737,6 +739,10 @@ def remove_known_host(host, unit=None, user=None): def is_same_key(key_1, key_2): + # The key format get will be like '|1|2rUumCavEXWVaVyB5uMl6m85pZo=|Cp' + # 'EL6l7VTY37T/fg/ihhNb/GPgs= ssh-rsa AAAAB', we only need to compare + # the part start with 'ssh-rsa' followed with '= ', because the hash + # value in the beginning will change each time. k_1 = key_1.split('= ')[1] k_2 = key_2.split('= ')[1] return k_1 == k_2 From 334c66c2b773eb2ee7bef4282c5a350d84aa86a0 Mon Sep 17 00:00:00 2001 From: Corey Bryant Date: Tue, 18 Aug 2015 13:34:35 -0400 Subject: [PATCH 21/36] [corey.bryant,r=trivial] Sync charm-helpers to pick up Liberty support. --- hooks/charmhelpers/cli/__init__.py | 6 +- hooks/charmhelpers/cli/commands.py | 8 +- hooks/charmhelpers/cli/hookenv.py | 23 ++++ .../contrib/openstack/amulet/deployment.py | 4 +- hooks/charmhelpers/contrib/openstack/utils.py | 65 ++++++++--- .../contrib/storage/linux/utils.py | 5 +- hooks/charmhelpers/core/hookenv.py | 21 +--- hooks/charmhelpers/core/host.py | 25 ++++- hooks/charmhelpers/core/services/helpers.py | 20 +++- hooks/charmhelpers/fetch/__init__.py | 8 ++ tests/charmhelpers/contrib/amulet/utils.py | 105 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 4 +- 12 files changed, 220 insertions(+), 74 deletions(-) create mode 100644 hooks/charmhelpers/cli/hookenv.py diff --git a/hooks/charmhelpers/cli/__init__.py b/hooks/charmhelpers/cli/__init__.py index 7118daf5..16d52cc4 100644 --- a/hooks/charmhelpers/cli/__init__.py +++ b/hooks/charmhelpers/cli/__init__.py @@ -152,15 +152,11 @@ class CommandLine(object): arguments = self.argument_parser.parse_args() argspec = inspect.getargspec(arguments.func) vargs = [] - kwargs = {} for arg in argspec.args: vargs.append(getattr(arguments, arg)) if argspec.varargs: vargs.extend(getattr(arguments, argspec.varargs)) - if argspec.keywords: - for kwarg in argspec.keywords.items(): - kwargs[kwarg] = getattr(arguments, kwarg) - output = arguments.func(*vargs, **kwargs) + output = arguments.func(*vargs) if getattr(arguments.func, '_cli_test_command', False): self.exit_code = 0 if output else 1 output = '' diff --git a/hooks/charmhelpers/cli/commands.py b/hooks/charmhelpers/cli/commands.py index 443ff05d..7e91db00 100644 --- a/hooks/charmhelpers/cli/commands.py +++ b/hooks/charmhelpers/cli/commands.py @@ -26,7 +26,7 @@ from . import CommandLine # noqa """ Import the sub-modules which have decorated subcommands to register with chlp. """ -import host # noqa -import benchmark # noqa -import unitdata # noqa -from charmhelpers.core import hookenv # noqa +from . import host # noqa +from . import benchmark # noqa +from . import unitdata # noqa +from . import hookenv # noqa diff --git a/hooks/charmhelpers/cli/hookenv.py b/hooks/charmhelpers/cli/hookenv.py new file mode 100644 index 00000000..265c816e --- /dev/null +++ b/hooks/charmhelpers/cli/hookenv.py @@ -0,0 +1,23 @@ +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. 
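To illustrate the key-format note added to nova_cc_utils.py above: hashed known_hosts entries salt the hostname hash differently on every ssh-keyscan run, so is_same_key() compares only the key material after the '= ' separator. The base64 fragments below are invented:

    # is_same_key() is the helper added to hooks/nova_cc_utils.py above.
    key_a = '|1|c2FsdA==|aGFzaA= ssh-rsa AAAAB3NzaC1yc2EAAAADAQAB'
    key_b = '|1|b3RoZXI=|ZGlnZXN0= ssh-rsa AAAAB3NzaC1yc2EAAAADAQAB'

    is_same_key(key_a, key_b)
    # -> True: the hashes differ but the 'ssh-rsa AAAA...' part matches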
+# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +from . import cmdline +from charmhelpers.core import hookenv + + +cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped) +cmdline.subcommand('service-name')(hookenv.service_name) +cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..07ee2ef1 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 4dd000c3..c9fd68f7 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -24,6 +24,7 @@ import subprocess import json import os import sys +import re import six import yaml @@ -69,7 +70,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') - UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -118,6 +118,34 @@ SWIFT_CODENAMES = OrderedDict([ ('2.3.0', 'liberty'), ]) +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0.0', 'liberty'), + ]), + 'neutron-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'cinder-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'keystone': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'horizon-common': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'heat-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'glance-common': OrderedDict([ + ('11.0.0', 'liberty'), + ]), +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -201,20 +229,29 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) + match = re.match('^(\d)\.(\d)\.(\d)', vers) + if match: + vers = match.group(0) - try: - if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] - else: - vers = vers[:6] - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % 
vers - error_out(e) + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) def get_os_version_package(pkg, fatal=True): diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index e2769e49..1e57941a 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index 18860f59..a35d006b 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -34,23 +34,6 @@ import errno import tempfile from subprocess import CalledProcessError -try: - from charmhelpers.cli import cmdline -except ImportError as e: - # due to the anti-pattern of partially synching charmhelpers directly - # into charms, it's possible that charmhelpers.cli is not available; - # if that's the case, they don't really care about using the cli anyway, - # so mock it out - if str(e) == 'No module named cli': - class cmdline(object): - @classmethod - def subcommand(cls, *args, **kwargs): - def _wrap(func): - return func - return _wrap - else: - raise - import six if not six.PY3: from UserDict import UserDict @@ -91,6 +74,7 @@ def cached(func): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -190,7 +174,6 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -@cmdline.subcommand() @cached def relation_id(relation_name=None, service_or_unit=None): """The relation ID for the current or a specified relation""" @@ -216,13 +199,11 @@ def remote_unit(): return os.environ.get('JUJU_REMOTE_UNIT', None) -@cmdline.subcommand() def service_name(): """The name service group this unit belongs to""" return local_unit().split('/')[0] -@cmdline.subcommand() @cached def remote_service_name(relid=None): """The remote service name for a given relation-id (or the current relation)""" diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 8ae8ef86..ec659eef 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -72,7 +72,7 @@ def service_pause(service_name, init_dir=None): stopped = service_stop(service_name) # XXX: Support systemd too override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) with open(override_path, 'w') as fh: fh.write("manual\n") return stopped @@ 
-86,7 +86,7 @@ def service_resume(service_name, init_dir=None): if init_dir is None: init_dir = "/etc/init" override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) + init_dir, '{}.override'.format(service_name)) if os.path.exists(override_path): os.unlink(override_path) started = service_start(service_name) @@ -148,6 +148,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +290,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 8005c415..3f677833 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, self.owner, self.group, self.perms) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 0a3bb969..cd0b783c 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = { 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 
'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. Handlers should be listed in from diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index 3de26afd..7816c934 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -14,17 +14,23 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import subprocess import sys import time -import urlparse + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -142,19 +148,23 @@ class AmuletUtils(object): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +174,7 @@ class AmuletUtils(object): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. # https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -450,15 +460,20 @@ class AmuletUtils(object): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. 
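The expect_success flag above, together with get_unit_process_ids() and the bool-aware PID count checks in the hunks that follow (validate_unit_process_ids() in this module), is normally driven from a charm's amulet tests along these lines; the sentry handle, service names and counts are placeholders rather than part of this sync:

    import amulet

    # `u` is an AmuletUtils instance and `unit` a deployed unit's sentry.
    expected = {unit: {'nova-scheduler': 1,   # exactly one PID
                       'nova-conductor': 2,   # exactly two PIDs
                       'apache2': True}}      # True: one or more PIDs

    actual = u.get_unit_process_ids({unit: list(expected[unit].keys())})
    ret = u.validate_unit_process_ids(expected, actual)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)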
:returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +482,23 @@ class AmuletUtils(object): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +512,7 @@ class AmuletUtils(object): return ('Unit count mismatch. expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +531,23 @@ class AmuletUtils(object): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +567,30 @@ class AmuletUtils(object): return 'Dicts within list are not identical' return None + + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. 
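The two action helpers being added here wrap `juju action do` and `juju action fetch --wait=0`; a typical, purely illustrative use from a test, assuming `u` is an AmuletUtils instance and the unit exposes a 'pause' action, would be:

    action_id = u.run_action(sentry_unit, 'pause')   # queues the action
    completed = u.wait_on_action(action_id)          # True once 'completed'
    assert completed, 'pause action did not complete'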
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..07ee2ef1 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,7 +44,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + base_charms = ['mysql', 'mongodb', 'nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series @@ -81,7 +81,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): 'ceph-osd', 'ceph-radosgw'] # Most OpenStack subordinate charms do not expose an origin option # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: From 746f9afae9fe429f148f814456a33ece279acccf Mon Sep 17 00:00:00 2001 From: Liam Young Date: Wed, 19 Aug 2015 14:48:32 +0100 Subject: [PATCH 22/36] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- .../charmhelpers/contrib/openstack/context.py | 27 +++++-- .../charmhelpers/contrib/openstack/neutron.py | 43 +++++++---- hooks/charmhelpers/core/host.py | 77 ++++++++++++++++--- hooks/charmhelpers/core/hugepage.py | 62 +++++++++++++++ 4 files changed, 178 insertions(+), 31 deletions(-) create mode 100644 hooks/charmhelpers/core/hugepage.py diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index ab2ebac1..9a33a035 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -50,6 +50,8 @@ from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.strutils import bool_from_string from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, list_nics, get_nic_hwaddr, mkdir, @@ -923,7 +925,6 @@ class NeutronContext(OSContextGenerator): class NeutronPortContext(OSContextGenerator): - NIC_PREFIXES = ['eth', 'bond'] def resolve_ports(self, ports): """Resolve NICs not yet bound to bridge(s) @@ -935,7 +936,18 @@ class NeutronPortContext(OSContextGenerator): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(self.NIC_PREFIXES): + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + hwaddr = get_nic_hwaddr(nic) hwaddr_to_nic[hwaddr] = nic addresses = get_ipv4_addr(nic, fatal=False) @@ -961,7 +973,8 @@ class NeutronPortContext(OSContextGenerator): # trust it to be the real external network). 
resolved.append(entry) - return resolved + # Ensure no duplicates + return list(set(resolved)) class OSConfigFlagContext(OSContextGenerator): @@ -1280,15 +1293,19 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: + # Map of {port/mac:bridge} portmap = parse_data_port_mappings(ports) - ports = portmap.values() + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. resolved = self.resolve_ports(ports) + # FIXME: is this necessary? normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {bridge: normalized[port] for bridge, port in + return {bridge: normalized[port] for port, bridge in six.iteritems(portmap) if port in normalized.keys()} return None diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index f7b72352..c3d5c28e 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -255,17 +255,30 @@ def network_manager(): return 'neutron' -def parse_mappings(mappings): +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ parsed = {} if mappings: mappings = mappings.split() for m in mappings: p = m.partition(':') - key = p[0].strip() - if p[1]: - parsed[key] = p[2].strip() + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue else: - parsed[key] = '' + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() return parsed @@ -283,25 +296,25 @@ def parse_bridge_mappings(mappings): def parse_data_port_mappings(mappings, default_bridge='br-data'): """Parse data port mappings. - Mappings must be a space-delimited list of bridge:port mappings. + Mappings must be a space-delimited list of port:bridge mappings. - Returns dict of the form {bridge:port}. + Returns dict of the form {port:bridge} where port may be an mac address or + interface name. """ - _mappings = parse_mappings(mappings) + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. 
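The key_rvalue switch above decides which side of each `lvalue:rvalue` token becomes the dict key; keying on the rvalue is what lets different units propose different ports or MAC addresses for the same bridge. The bridge and interface names below are examples only:

    from charmhelpers.contrib.openstack.neutron import parse_mappings

    parse_mappings('physnet1:br-ex')
    # -> {'physnet1': 'br-ex'}            (default, keyed on the lvalue)

    parse_mappings('br-ex:eth1', key_rvalue=True)
    # -> {'eth1': 'br-ex'}                (keyed on the rvalue)

    parse_mappings('br-data:aa:bb:cc:dd:ee:ff', key_rvalue=True)
    # -> {'aa:bb:cc:dd:ee:ff': 'br-data'} (partition() splits on the first ':')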
- _mappings = {default_bridge: mappings.split()[0]} - - bridges = _mappings.keys() - ports = _mappings.values() - if len(set(bridges)) != len(bridges): - raise Exception("It is not allowed to have more than one port " - "configured on the same bridge") + _mappings = {mappings.split()[0]: default_bridge} + ports = _mappings.keys() if len(set(ports)) != len(ports): raise Exception("It is not allowed to have the same port configured " "on more than one bridge") diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index ec659eef..29e8fee0 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -417,25 +417,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. + + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..ba4340ff --- /dev/null +++ b/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,62 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. 
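A sketch of how the new NIC helpers behave on a host where eth0 and eth1 are enslaved to bond0; the interface names are examples, and the functions answer by inspecting /sys/class/net:

    from charmhelpers.core.host import get_bond_master, is_phy_iface, list_nics

    is_phy_iface('eth0')      # -> True, backed by a physical device
    is_phy_iface('bond0')     # -> False, it lives under /sys/devices/virtual
    get_bond_master('eth0')   # -> 'bond0'
    get_bond_master('bond0')  # -> None, a bond is itself a virtual iface
    list_nics()               # no type filter now lists every interface,
                              # e.g. ['lo', 'eth0', 'eth1', 'bond0']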
+# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True): + """Enable hugepages on system. + + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) From 880d21af41982e73e01ff9dee30eff51c484342c Mon Sep 17 00:00:00 2001 From: Nobuto Murata Date: Tue, 25 Aug 2015 22:42:14 +0900 Subject: [PATCH 23/36] fix catch error of "No such file or directory" with python2, LP: #1484605 --- hooks/nova_cc_hooks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 72657cf4..24e1bc9c 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -134,7 +134,7 @@ try: FileNotFoundError except NameError: # python3 compatibility - FileNotFoundError = IOError + FileNotFoundError = OSError hooks = Hooks() CONFIGS = register_configs() From 7abb0798828c900ecd747da70a13ce8b8758563b Mon Sep 17 00:00:00 2001 From: "bbaqar@plumgrid.com" <> Date: Tue, 25 Aug 2015 20:34:46 +0500 Subject: [PATCH 24/36] Enabling neutron security groups for PLUMgrid plugin --- templates/icehouse/nova.conf | 5 +++++ templates/juno/nova.conf | 5 +++++ templates/kilo/nova.conf | 5 +++++ 3 files changed, 15 insertions(+) diff --git a/templates/icehouse/nova.conf b/templates/icehouse/nova.conf index ca93c2e5..6dcea89d 100644 --- a/templates/icehouse/nova.conf +++ b/templates/icehouse/nova.conf @@ -89,6 +89,11 @@ security_group_api = neutron nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} +{% if neutron_plugin and neutron_plugin == 'plumgrid' -%} +security_group_api=neutron +firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} + {% if network_manager_config -%} {% for key, value in 
network_manager_config.iteritems() -%} {{ key }} = {{ value }} diff --git a/templates/juno/nova.conf b/templates/juno/nova.conf index 41013404..46d8ee5e 100644 --- a/templates/juno/nova.conf +++ b/templates/juno/nova.conf @@ -84,6 +84,11 @@ default_floating_pool = {{ external_network }} {% endif -%} {% endif -%} +{% if neutron_plugin and neutron_plugin == 'plumgrid' -%} +security_group_api=neutron +firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} + {% if network_manager_config -%} {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} diff --git a/templates/kilo/nova.conf b/templates/kilo/nova.conf index d013a501..8149c82e 100644 --- a/templates/kilo/nova.conf +++ b/templates/kilo/nova.conf @@ -82,6 +82,11 @@ security_group_api = neutron nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver {% endif -%} +{% if neutron_plugin and neutron_plugin == 'plumgrid' -%} +security_group_api=neutron +firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} + {% if network_manager_config -%} {% for key, value in network_manager_config.iteritems() -%} {{ key }} = {{ value }} From 08b92a7317fb555c65fcc9cd6827166ba05b3761 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 3 Sep 2015 10:39:34 +0100 Subject: [PATCH 25/36] [gnuoy,trivial] Charmhelper sync (+1'd by mojo) --- hooks/charmhelpers/contrib/network/ip.py | 6 +++++- .../charmhelpers/contrib/openstack/context.py | 14 ++++++++++++++ .../charmhelpers/contrib/openstack/neutron.py | 14 ++++++++++++++ hooks/charmhelpers/contrib/openstack/utils.py | 16 ++++++++++------ hooks/charmhelpers/core/hookenv.py | 18 ++++++++++-------- 5 files changed, 53 insertions(+), 15 deletions(-) diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index fff6d5ca..67b4dccc 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -435,8 +435,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 9a33a035..9ae4582c 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -895,6 +895,18 @@ class NeutronContext(OSContextGenerator): 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -914,6 +926,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.calico_ctxt()) elif self.plugin == 'vsp': ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index c3d5c28e..55b2037f 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ 
b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -195,6 +195,20 @@ def neutron_plugins(): 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index c9fd68f7..c98c5c9e 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. @@ -195,9 +193,9 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(OPENSTACK_CODENAMES): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -229,7 +227,7 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) - match = re.match('^(\d)\.(\d)\.(\d)', vers) + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) if match: vers = match.group(0) @@ -250,6 +248,8 @@ def get_os_codename_package(package, fatal=True): vers = vers[:6] return OPENSTACK_CODENAMES[vers] except KeyError: + if not fatal: + return None e = 'Could not determine OpenStack codename for version %s' % vers error_out(e) @@ -429,7 +429,11 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index a35d006b..ab53a780 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -767,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def 
translate_exc(from_exc, to_exc): From 2f906cb4370c72fdc7535dfbd18c909a94b8075b Mon Sep 17 00:00:00 2001 From: Chris Johnston Date: Mon, 14 Sep 2015 19:52:34 +0000 Subject: [PATCH 26/36] Readme fixes --- README.txt | 22 ++++++++-------------- 1 file changed, 8 insertions(+), 14 deletions(-) diff --git a/README.txt b/README.txt index fd957ebb..ba7b749c 100644 --- a/README.txt +++ b/README.txt @@ -1,8 +1,6 @@ -===================== -nova-cloud-controller -===================== +# nova-cloud-controller -Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. +Cloud controller node for OpenStack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. The neutron-api interface can be used join this charm with an external neutron-api server. If this is done then this charm will shutdown its neutron-api service and the external charm will be registered as the @@ -14,18 +12,14 @@ to the nova-cloud-controller. If running in HA mode then the public vip is used to local. Note: The console access protocol is baked into a guest when it is created, if you change it then console access for existing guests will stop working -****************************************************** -Special considerations to be deployed using Postgresql -****************************************************** +**Special considerations to be deployed using Postgresql** -juju deploy nova-cloud-controller -juju deploy postgresql + juju deploy nova-cloud-controller + juju deploy postgresql + juju add-relation "nova-cloud-controller:pgsql-nova-db" "postgresql:db" + juju add-relation "nova-cloud-controller:pgsql-neutron-db" "postgresql:db" -juju add-relation "nova-cloud-controller:pgsql-nova-db" "postgresql:db" -juju add-relation "nova-cloud-controller:pgsql-neutron-db" "postgresql:db" - -Deploying from source -===================== +## Deploying from source The minimum openstack-origin-git config required to deploy from source is: From 8264456231d0df08478cbd718e3f5b6e54f1b1f7 Mon Sep 17 00:00:00 2001 From: Chris Johnston Date: Mon, 14 Sep 2015 20:41:18 +0000 Subject: [PATCH 27/36] Rename categories to tags in metadata --- metadata.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/metadata.yaml b/metadata.yaml index f973bc3d..79ef4c2a 100644 --- a/metadata.yaml +++ b/metadata.yaml @@ -4,7 +4,7 @@ summary: "Openstack nova controller node." description: | Cloud controller node for Openstack nova. Contains nova-schedule, nova-api, nova-network and nova-objectstore. 
-categories:
+tags:
   - openstack
 provides:
   nrpe-external-master:

From 43d31e368d30116a28e22ed75d2fc8929afcc8b1 Mon Sep 17 00:00:00 2001
From: Chris Johnston
Date: Mon, 14 Sep 2015 22:00:29 +0000
Subject: [PATCH 28/36] Move README to .md per request

---
 README.txt => README.md | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename README.txt => README.md (100%)

diff --git a/README.txt b/README.md
similarity index 100%
rename from README.txt
rename to README.md

From 3c112706f105311c6a292e71b3a49cd67d3eb19e Mon Sep 17 00:00:00 2001
From: Chris Johnston
Date: Tue, 15 Sep 2015 19:51:39 +0000
Subject: [PATCH 29/36] Typo fixes

---
 README.md   | 2 +-
 config.yaml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ba7b749c..89ce27a3 100644
--- a/README.md
+++ b/README.md
@@ -37,7 +37,7 @@ The minimum openstack-origin-git config required to deploy from source is:
 Note that there are only two 'name' values the charm knows about: 'requirements'
 and 'nova'. These repositories must correspond to these 'name' values.
 Additionally, the requirements repository must be specified first and the
-nova repository must be specified last. All other repostories are installed
+nova repository must be specified last. All other repositories are installed
 in the order in which they are specified.
 
 The following is a full list of current tip repos (may not be up-to-date):
diff --git a/config.yaml b/config.yaml
index 96a8ccd5..d617e34c 100644
--- a/config.yaml
+++ b/config.yaml
@@ -200,7 +200,7 @@ options:
     description: |
       SSL certificate to install and use for API ports. Setting this value
       and ssl_key will enable reverse proxying, point Nova's entry in the
-      Keystone catalog to use https, and override any certficiate and key
+      Keystone catalog to use https, and override any certificate and key
       issued by Keystone (if it is configured to do so).
   ssl_key:
     type: string

From 9ace8eaa417639e22a2a8fa97d2ffae61ea3badf Mon Sep 17 00:00:00 2001
From: David Ames
Date: Mon, 21 Sep 2015 12:01:14 -0700
Subject: [PATCH 30/36] categories -> tags

---
 metadata.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/metadata.yaml b/metadata.yaml
index f973bc3d..79ef4c2a 100644
--- a/metadata.yaml
+++ b/metadata.yaml
@@ -4,7 +4,7 @@ summary: "Openstack nova controller node."
 description: |
   Cloud controller node for Openstack nova. Contains nova-schedule,
   nova-api, nova-network and nova-objectstore.
-categories:
+tags:
   - openstack
 provides:
   nrpe-external-master:

From 468f3ae945f3d5d2d79c08f69007b1c265ef102d Mon Sep 17 00:00:00 2001
From: David Ames
Date: Mon, 21 Sep 2015 16:06:54 -0700
Subject: [PATCH 31/36] Action managed upgrades

---
 actions.yaml                                  |  2 +
 actions/openstack-upgrade                     |  1 +
 actions/openstack_upgrade.py                  | 42 +++++++++++++++
 config.yaml                                   | 10 ++++
 hooks/charmhelpers/contrib/openstack/utils.py | 51 +++++++++++++++++++
 hooks/nova_cc_hooks.py                        |  4 +-
 hooks/nova_cc_utils.py                        |  2 +-
 unit_tests/test_nova_cc_utils.py              | 14 ++---
 8 files changed, 116 insertions(+), 10 deletions(-)
 create mode 120000 actions/openstack-upgrade
 create mode 100755 actions/openstack_upgrade.py

diff --git a/actions.yaml b/actions.yaml
index e00e5f38..ff365819 100644
--- a/actions.yaml
+++ b/actions.yaml
@@ -1,2 +1,4 @@
 git-reinstall:
   description: Reinstall nova-cloud-controller from the openstack-origin-git repositories.
+openstack-upgrade:
+  description: Perform OpenStack upgrades. Config option action-managed-upgrade must be set to True.
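The actions.yaml entry above only states the precondition, so a compact sketch of the intended gate may help. This is illustrative only and not the charm's code (the real implementation is the actions/openstack_upgrade.py script and the do_action_openstack_upgrade helper added in the hunks below); the function name should_run_upgrade and the injected callables are hypothetical:

    def should_run_upgrade(config, upgrade_available, package='nova-common'):
        """Illustrative gate for the openstack-upgrade action.

        config: callable returning charm config, e.g. hookenv.config
        upgrade_available: callable such as openstack_upgrade_available
        """
        if not config('action-managed-upgrade'):
            # With the flag off, upgrades stay tied to config-changed and
            # the action should simply report that it was skipped.
            return False
        # Proceed only if openstack-origin now offers a newer release.
        return upgrade_available(package)

In other words, once action-managed-upgrade is True, changing openstack-origin alone no longer upgrades units; the operator must run the openstack-upgrade action on each unit.
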
diff --git a/actions/openstack-upgrade b/actions/openstack-upgrade new file mode 120000 index 00000000..61793013 --- /dev/null +++ b/actions/openstack-upgrade @@ -0,0 +1 @@ +openstack_upgrade.py \ No newline at end of file diff --git a/actions/openstack_upgrade.py b/actions/openstack_upgrade.py new file mode 100755 index 00000000..ab76509f --- /dev/null +++ b/actions/openstack_upgrade.py @@ -0,0 +1,42 @@ +#!/usr/bin/python +import sys + +sys.path.append('hooks/') + +from charmhelpers.contrib.openstack.utils import ( + do_action_openstack_upgrade, +) + +from charmhelpers.core.hookenv import ( + relation_ids, +) + +from nova_cc_hooks import neutron_api_relation_joined + +from nova_cc_utils import ( + do_openstack_upgrade, + register_configs +) + +from nova_cc_hooks import config_changed + +CONFIGS = register_configs() + + +def openstack_upgrade(): + """Upgrade packages to config-set Openstack version. + + If the charm was installed from source we cannot upgrade it. + For backwards compatibility a config flag must be set for this + code to run, otherwise a full service level upgrade will fire + on config-changed.""" + + if (do_action_openstack_upgrade('nova-common', + do_openstack_upgrade, + CONFIGS)): + [neutron_api_relation_joined(rid=rid, remote_restart=True) + for rid in relation_ids('neutron-api')] + config_changed() + +if __name__ == '__main__': + openstack_upgrade() diff --git a/config.yaml b/config.yaml index 96a8ccd5..7688b702 100644 --- a/config.yaml +++ b/config.yaml @@ -395,3 +395,13 @@ options: If memcached is being used to store the tokens, then it's recommended to change this configuration to False. + action-managed-upgrade: + type: boolean + default: False + description: | + If True enables openstack upgrades for this charm via juju actions. + You will still need to set openstack-origin to the new repository but + instead of an upgrade running automatically across all units, it will + wait for you to execute the openstack-upgrade action for this charm on + each unit. If False it will revert to existing behavior of upgrading + all units on config change. diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index c98c5c9e..2f5280e6 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -25,6 +25,7 @@ import sys import re import six +import traceback import yaml from charmhelpers.contrib.network import ip @@ -34,6 +35,8 @@ from charmhelpers.core import ( ) from charmhelpers.core.hookenv import ( + action_fail, + action_set, config, log as juju_log, charm_dir, @@ -114,6 +117,7 @@ SWIFT_CODENAMES = OrderedDict([ ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), ]) # >= Liberty version->codename mapping @@ -142,6 +146,9 @@ PACKAGE_CODENAMES = { 'glance-common': OrderedDict([ ('11.0.0', 'liberty'), ]), + 'openstack-dashboard': OrderedDict([ + ('8.0.0', 'liberty'), + ]), } DEFAULT_LOOPBACK_SIZE = '5G' @@ -745,3 +752,47 @@ def git_yaml_value(projects_yaml, key): return projects[key] return None + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. 
+ For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. + + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 72657cf4..077c2a23 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -182,9 +182,9 @@ def config_changed(): if git_install_requested(): if config_value_changed('openstack-origin-git'): git_install(config('openstack-origin-git')) - else: + elif not config('action-managed-upgrade'): if openstack_upgrade_available('nova-common'): - CONFIGS = do_openstack_upgrade() + CONFIGS = do_openstack_upgrade(CONFIGS) [neutron_api_relation_joined(rid=rid, remote_restart=True) for rid in relation_ids('neutron-api')] save_script_rc() diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index e7a63b61..99771331 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -624,7 +624,7 @@ def _do_openstack_upgrade(new_src): return configs -def do_openstack_upgrade(): +def do_openstack_upgrade(configs): new_src = config('openstack-origin') if new_src[:6] != 'cloud:': raise ValueError("Unable to perform upgrade to %s" % new_src) diff --git a/unit_tests/test_nova_cc_utils.py b/unit_tests/test_nova_cc_utils.py index 7974173d..e884e196 100644 --- a/unit_tests/test_nova_cc_utils.py +++ b/unit_tests/test_nova_cc_utils.py @@ -647,7 +647,7 @@ class NovaCCUtilsTests(CharmTestCase): 'icehouse'] self.is_elected_leader.return_value = True self.relation_ids.return_value = [] - utils.do_openstack_upgrade() + utils.do_openstack_upgrade(self.register_configs()) expected = [call(['stamp', 'grizzly']), call(['upgrade', 'head']), call(['stamp', 'havana']), call(['upgrade', 'head'])] self.assertEquals(self.neutron_db_manage.call_args_list, expected) @@ -655,7 +655,7 @@ class NovaCCUtilsTests(CharmTestCase): self.apt_upgrade.assert_called_with(options=DPKG_OPTS, fatal=True, dist=True) self.apt_install.assert_called_with(determine_packages(), fatal=True) - expected = [call(release='havana'), call(release='icehouse')] + expected = [call(), call(release='havana'), call(release='icehouse')] self.assertEquals(self.register_configs.call_args_list, expected) self.assertEquals(self.ml2_migration.call_count, 1) self.assertTrue(migrate_nova_database.call_count, 2) @@ -673,7 +673,7 @@ class NovaCCUtilsTests(CharmTestCase): self.get_os_codename_install_source.return_value = 'icehouse' self.is_elected_leader.return_value = True self.relation_ids.return_value = [] - 
utils.do_openstack_upgrade() + utils.do_openstack_upgrade(self.register_configs()) self.neutron_db_manage.assert_called_with(['upgrade', 'head']) self.apt_update.assert_called_with(fatal=True) self.apt_upgrade.assert_called_with(options=DPKG_OPTS, fatal=True, @@ -696,7 +696,7 @@ class NovaCCUtilsTests(CharmTestCase): self.get_os_codename_install_source.return_value = 'juno' self.is_elected_leader.return_value = True self.relation_ids.return_value = [] - utils.do_openstack_upgrade() + utils.do_openstack_upgrade(self.register_configs()) neutron_db_calls = [call(['stamp', 'icehouse']), call(['upgrade', 'head'])] self.neutron_db_manage.assert_has_calls(neutron_db_calls, @@ -722,7 +722,7 @@ class NovaCCUtilsTests(CharmTestCase): self.get_os_codename_install_source.return_value = 'kilo' self.is_elected_leader.return_value = True self.relation_ids.return_value = [] - utils.do_openstack_upgrade() + utils.do_openstack_upgrade(self.register_configs()) self.assertEquals(self.neutron_db_manage.call_count, 0) self.apt_update.assert_called_with(fatal=True) self.apt_upgrade.assert_called_with(options=DPKG_OPTS, fatal=True, @@ -741,7 +741,7 @@ class NovaCCUtilsTests(CharmTestCase): _file.read = MagicMock() _file.readline.return_value = ("deb url" " precise-updates/grizzly main") - utils.do_openstack_upgrade() + utils.do_openstack_upgrade(self.register_configs()) expected = [call('cloud:precise-havana'), call('cloud:precise-icehouse')] self.assertEquals(_do_openstack_upgrade.call_args_list, expected) @@ -754,7 +754,7 @@ class NovaCCUtilsTests(CharmTestCase): with patch_open() as (_open, _file): _file.read = MagicMock() _file.readline.return_value = "deb url precise-updates/havana main" - utils.do_openstack_upgrade() + utils.do_openstack_upgrade(self.register_configs()) expected = [call('cloud:precise-icehouse')] self.assertEquals(_do_openstack_upgrade.call_args_list, expected) From 9ac81c9fde9971553c59ed7e961cb4ffee0ee68e Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 22 Sep 2015 10:07:16 +0800 Subject: [PATCH 32/36] Merge lp:charm-helpers. 
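This sync also reworks how contexts report readiness: OSContextGenerator gains related, complete and missing_data attributes, and context_complete() records every empty value instead of only returning False, so charms can surface exactly which relation data is still missing. A minimal sketch of the pattern, assuming only the six library (ExampleDBContext and its hard-coded values are hypothetical and only illustrate the API that the diff below introduces):

    import six


    class OSContextGenerator(object):
        """Stripped-down stand-in for the synced base class."""
        interfaces = []
        related = False
        complete = False
        missing_data = []

        def context_complete(self, ctxt):
            # Record each empty value so callers can report what is missing.
            self.complete = False
            self.missing_data = []
            for k, v in six.iteritems(ctxt):
                if v is None or v == '':
                    if k not in self.missing_data:
                        self.missing_data.append(k)
            self.complete = not self.missing_data
            return self.complete


    class ExampleDBContext(OSContextGenerator):
        """Hypothetical context used only to illustrate the pattern."""
        interfaces = ['shared-db']

        def __call__(self):
            # Values would normally come from relation_get(); hard-coded here.
            ctxt = {'database_host': '10.0.0.5', 'database_password': ''}
            if self.context_complete(ctxt):
                return ctxt
            # self.missing_data is now ['database_password'], which the charm
            # can expose via workload status instead of failing silently.
            return {}
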
--- hooks/charmhelpers/contrib/network/ip.py | 14 +- .../contrib/openstack/amulet/deployment.py | 34 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++++ .../charmhelpers/contrib/openstack/context.py | 177 ++++++-- .../charmhelpers/contrib/openstack/neutron.py | 57 ++- .../contrib/openstack/templating.py | 36 +- hooks/charmhelpers/contrib/openstack/utils.py | 304 ++++++++++++- .../contrib/peerstorage/__init__.py | 9 +- .../contrib/storage/linux/ceph.py | 239 ++++++++++- .../contrib/storage/linux/utils.py | 7 +- hooks/charmhelpers/core/files.py | 45 ++ hooks/charmhelpers/core/hookenv.py | 123 +++++- hooks/charmhelpers/core/host.py | 146 +++++-- hooks/charmhelpers/core/hugepage.py | 69 +++ hooks/charmhelpers/core/kernel.py | 68 +++ hooks/charmhelpers/core/services/helpers.py | 20 +- hooks/charmhelpers/core/strutils.py | 30 ++ hooks/charmhelpers/core/unitdata.py | 78 +++- hooks/charmhelpers/fetch/__init__.py | 8 + .../charmhelpers/contrib/amulet/deployment.py | 6 +- tests/charmhelpers/contrib/amulet/utils.py | 398 ++++++++++++++---- .../contrib/openstack/amulet/deployment.py | 34 +- .../contrib/openstack/amulet/utils.py | 359 ++++++++++++++++ 23 files changed, 2359 insertions(+), 261 deletions(-) create mode 100644 hooks/charmhelpers/core/files.py create mode 100644 hooks/charmhelpers/core/hugepage.py create mode 100644 hooks/charmhelpers/core/kernel.py diff --git a/hooks/charmhelpers/contrib/network/ip.py b/hooks/charmhelpers/contrib/network/ip.py index fff6d5ca..7f3b66b1 100644 --- a/hooks/charmhelpers/contrib/network/ip.py +++ b/hooks/charmhelpers/contrib/network/ip.py @@ -23,7 +23,7 @@ import socket from functools import partial from charmhelpers.core.hookenv import unit_get -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, WARNING, @@ -32,13 +32,15 @@ from charmhelpers.core.hookenv import ( try: import netifaces except ImportError: - apt_install('python-netifaces') + apt_update(fatal=True) + apt_install('python-netifaces', fatal=True) import netifaces try: import netaddr except ImportError: - apt_install('python-netaddr') + apt_update(fatal=True) + apt_install('python-netaddr', fatal=True) import netaddr @@ -435,8 +437,12 @@ def get_hostname(address, fqdn=True): rev = dns.reversename.from_address(address) result = ns_query(rev) + if not result: - return None + try: + result = socket.gethostbyaddr(address)[0] + except: + return None else: result = address diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..722bc645 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,20 +44,31 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + + # Charms outside the lp:~openstack-charmers namespace + base_charms = ['mysql', 'mongodb', 'nrpe'] + + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. 
+ force_series_current = ['nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,6 +77,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): @@ -77,21 +89,23 @@ class OpenStackAmuletDeployment(AmuletDeployment): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + + # Charms which can not use openstack-origin, ie. many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..b1397419 100644 --- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py +++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. 
+ """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. + + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. 
+ + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not port and not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. 
+ + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. 
+ + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. + """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py index 8f3f1b15..1248d49f 100644 --- a/hooks/charmhelpers/contrib/openstack/context.py +++ b/hooks/charmhelpers/contrib/openstack/context.py @@ -50,6 +50,8 @@ from charmhelpers.core.sysctl import create as sysctl_create from charmhelpers.core.strutils import bool_from_string from charmhelpers.core.host import ( + get_bond_master, + is_phy_iface, list_nics, get_nic_hwaddr, mkdir, @@ -192,10 +194,50 @@ def config_flags_parser(config_flags): class OSContextGenerator(object): """Base class for all context generators.""" interfaces = [] + related = False + complete = False + missing_data = [] def __call__(self): raise NotImplementedError + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. 
+ """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in six.iteritems(ctxt): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log('Missing required data: %s' % ' '.join(self.missing_data), level=INFO) + else: + self.complete = True + return self.complete + + def get_related(self): + """Check if any of the context interfaces have relation ids. + Set self.related and return True if one of the interfaces + has relation ids. + """ + # Fresh start + self.related = False + try: + for interface in self.interfaces: + if relation_ids(interface): + self.related = True + return self.related + except AttributeError as e: + log("{} {}" + "".format(self, e), 'INFO') + return self.related + class SharedDBContext(OSContextGenerator): interfaces = ['shared-db'] @@ -211,6 +253,7 @@ class SharedDBContext(OSContextGenerator): self.database = database self.user = user self.ssl_dir = ssl_dir + self.rel_name = self.interfaces[0] def __call__(self): self.database = self.database or config('database') @@ -244,6 +287,7 @@ class SharedDBContext(OSContextGenerator): password_setting = self.relation_prefix + '_password' for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) host = rdata.get('db_host') @@ -255,7 +299,7 @@ class SharedDBContext(OSContextGenerator): 'database_password': rdata.get(password_setting), 'database_type': 'mysql' } - if context_complete(ctxt): + if self.context_complete(ctxt): db_ssl(rdata, ctxt, self.ssl_dir) return ctxt return {} @@ -276,6 +320,7 @@ class PostgresqlDBContext(OSContextGenerator): ctxt = {} for rid in relation_ids(self.interfaces[0]): + self.related = True for unit in related_units(rid): rel_host = relation_get('host', rid=rid, unit=unit) rel_user = relation_get('user', rid=rid, unit=unit) @@ -285,7 +330,7 @@ class PostgresqlDBContext(OSContextGenerator): 'database_user': rel_user, 'database_password': rel_passwd, 'database_type': 'postgresql'} - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} @@ -346,6 +391,7 @@ class IdentityServiceContext(OSContextGenerator): ctxt['signing_dir'] = cachedir for rid in relation_ids(self.rel_name): + self.related = True for unit in related_units(rid): rdata = relation_get(rid=rid, unit=unit) serv_host = rdata.get('service_host') @@ -364,7 +410,7 @@ class IdentityServiceContext(OSContextGenerator): 'service_protocol': svc_protocol, 'auth_protocol': auth_protocol}) - if context_complete(ctxt): + if self.context_complete(ctxt): # NOTE(jamespage) this is required for >= icehouse # so a missing value just indicates keystone needs # upgrading @@ -403,6 +449,7 @@ class AMQPContext(OSContextGenerator): ctxt = {} for rid in relation_ids(self.rel_name): ha_vip_only = False + self.related = True for unit in related_units(rid): if relation_get('clustered', rid=rid, unit=unit): ctxt['clustered'] = True @@ -435,7 +482,7 @@ class AMQPContext(OSContextGenerator): ha_vip_only = relation_get('ha-vip-only', rid=rid, unit=unit) is not None - if context_complete(ctxt): + if self.context_complete(ctxt): if 'rabbit_ssl_ca' in ctxt: if not self.ssl_dir: log("Charm not setup for ssl support but ssl ca " @@ -467,7 +514,7 @@ class AMQPContext(OSContextGenerator): ctxt['oslo_messaging_flags'] = config_flags_parser( oslo_messaging_flags) - if not context_complete(ctxt): + if not self.complete: return {} return ctxt @@ -483,13 +530,15 
@@ class CephContext(OSContextGenerator): log('Generating template context for ceph', level=DEBUG) mon_hosts = [] - auth = None - key = None - use_syslog = str(config('use-syslog')).lower() + ctxt = { + 'use_syslog': str(config('use-syslog')).lower() + } for rid in relation_ids('ceph'): for unit in related_units(rid): - auth = relation_get('auth', rid=rid, unit=unit) - key = relation_get('key', rid=rid, unit=unit) + if not ctxt.get('auth'): + ctxt['auth'] = relation_get('auth', rid=rid, unit=unit) + if not ctxt.get('key'): + ctxt['key'] = relation_get('key', rid=rid, unit=unit) ceph_pub_addr = relation_get('ceph-public-address', rid=rid, unit=unit) unit_priv_addr = relation_get('private-address', rid=rid, @@ -498,15 +547,12 @@ class CephContext(OSContextGenerator): ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr mon_hosts.append(ceph_addr) - ctxt = {'mon_hosts': ' '.join(sorted(mon_hosts)), - 'auth': auth, - 'key': key, - 'use_syslog': use_syslog} + ctxt['mon_hosts'] = ' '.join(sorted(mon_hosts)) if not os.path.isdir('/etc/ceph'): os.mkdir('/etc/ceph') - if not context_complete(ctxt): + if not self.context_complete(ctxt): return {} ensure_packages(['ceph-common']) @@ -893,6 +939,18 @@ class NeutronContext(OSContextGenerator): 'neutron_url': '%s://%s:%s' % (proto, host, '9696')} return ctxt + def pg_ctxt(self): + driver = neutron_plugin_attribute(self.plugin, 'driver', + self.network_manager) + config = neutron_plugin_attribute(self.plugin, 'config', + self.network_manager) + ovs_ctxt = {'core_plugin': driver, + 'neutron_plugin': 'plumgrid', + 'neutron_security_groups': self.neutron_security_groups, + 'local_ip': unit_private_ip(), + 'config': config} + return ovs_ctxt + def __call__(self): if self.network_manager not in ['quantum', 'neutron']: return {} @@ -912,6 +970,8 @@ class NeutronContext(OSContextGenerator): ctxt.update(self.calico_ctxt()) elif self.plugin == 'vsp': ctxt.update(self.nuage_ctxt()) + elif self.plugin == 'plumgrid': + ctxt.update(self.pg_ctxt()) alchemy_flags = config('neutron-alchemy-flags') if alchemy_flags: @@ -923,7 +983,6 @@ class NeutronContext(OSContextGenerator): class NeutronPortContext(OSContextGenerator): - NIC_PREFIXES = ['eth', 'bond'] def resolve_ports(self, ports): """Resolve NICs not yet bound to bridge(s) @@ -935,7 +994,18 @@ class NeutronPortContext(OSContextGenerator): hwaddr_to_nic = {} hwaddr_to_ip = {} - for nic in list_nics(self.NIC_PREFIXES): + for nic in list_nics(): + # Ignore virtual interfaces (bond masters will be identified from + # their slaves) + if not is_phy_iface(nic): + continue + + _nic = get_bond_master(nic) + if _nic: + log("Replacing iface '%s' with bond master '%s'" % (nic, _nic), + level=DEBUG) + nic = _nic + hwaddr = get_nic_hwaddr(nic) hwaddr_to_nic[hwaddr] = nic addresses = get_ipv4_addr(nic, fatal=False) @@ -961,7 +1031,8 @@ class NeutronPortContext(OSContextGenerator): # trust it to be the real external network). 
resolved.append(entry) - return resolved + # Ensure no duplicates + return list(set(resolved)) class OSConfigFlagContext(OSContextGenerator): @@ -1051,13 +1122,22 @@ class SubordinateConfigContext(OSContextGenerator): :param config_file : Service's config file to query sections :param interface : Subordinate interface to inspect """ - self.service = service self.config_file = config_file - self.interface = interface + if isinstance(service, list): + self.services = service + else: + self.services = [service] + if isinstance(interface, list): + self.interfaces = interface + else: + self.interfaces = [interface] def __call__(self): ctxt = {'sections': {}} - for rid in relation_ids(self.interface): + rids = [] + for interface in self.interfaces: + rids.extend(relation_ids(interface)) + for rid in rids: for unit in related_units(rid): sub_config = relation_get('subordinate_configuration', rid=rid, unit=unit) @@ -1069,29 +1149,32 @@ class SubordinateConfigContext(OSContextGenerator): 'setting from %s' % rid, level=ERROR) continue - if self.service not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s service' % (rid, self.service), - level=INFO) - continue + for service in self.services: + if service not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s service' % (rid, service), + level=INFO) + continue - sub_config = sub_config[self.service] - if self.config_file not in sub_config: - log('Found subordinate_config on %s but it contained' - 'nothing for %s' % (rid, self.config_file), - level=INFO) - continue - - sub_config = sub_config[self.config_file] - for k, v in six.iteritems(sub_config): - if k == 'sections': - for section, config_dict in six.iteritems(v): - log("adding section '%s'" % (section), - level=DEBUG) - ctxt[k][section] = config_dict - else: - ctxt[k] = v + sub_config = sub_config[service] + if self.config_file not in sub_config: + log('Found subordinate_config on %s but it contained' + 'nothing for %s' % (rid, self.config_file), + level=INFO) + continue + sub_config = sub_config[self.config_file] + for k, v in six.iteritems(sub_config): + if k == 'sections': + for section, config_list in six.iteritems(v): + log("adding section '%s'" % (section), + level=DEBUG) + if ctxt[k].get(section): + ctxt[k][section].extend(config_list) + else: + ctxt[k][section] = config_list + else: + ctxt[k] = v log("%d section(s) found" % (len(ctxt['sections'])), level=DEBUG) return ctxt @@ -1268,15 +1351,19 @@ class DataPortContext(NeutronPortContext): def __call__(self): ports = config('data-port') if ports: + # Map of {port/mac:bridge} portmap = parse_data_port_mappings(ports) - ports = portmap.values() + ports = portmap.keys() + # Resolve provided ports or mac addresses and filter out those + # already attached to a bridge. resolved = self.resolve_ports(ports) + # FIXME: is this necessary? 
normalized = {get_nic_hwaddr(port): port for port in resolved if port not in ports} normalized.update({port: port for port in resolved if port in ports}) if resolved: - return {bridge: normalized[port] for bridge, port in + return {bridge: normalized[port] for port, bridge in six.iteritems(portmap) if port in normalized.keys()} return None @@ -1324,6 +1411,6 @@ class NetworkServiceContext(OSContextGenerator): 'auth_protocol': rdata.get('auth_protocol') or 'http', } - if context_complete(ctxt): + if self.context_complete(ctxt): return ctxt return {} diff --git a/hooks/charmhelpers/contrib/openstack/neutron.py b/hooks/charmhelpers/contrib/openstack/neutron.py index f7b72352..55b2037f 100644 --- a/hooks/charmhelpers/contrib/openstack/neutron.py +++ b/hooks/charmhelpers/contrib/openstack/neutron.py @@ -195,6 +195,20 @@ def neutron_plugins(): 'packages': [], 'server_packages': ['neutron-server', 'neutron-plugin-nuage'], 'server_services': ['neutron-server'] + }, + 'plumgrid': { + 'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini', + 'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2', + 'contexts': [ + context.SharedDBContext(user=config('database-user'), + database=config('database'), + ssl_dir=NEUTRON_CONF_DIR)], + 'services': [], + 'packages': [['plumgrid-lxc'], + ['iovisor-dkms']], + 'server_packages': ['neutron-server', + 'neutron-plugin-plumgrid'], + 'server_services': ['neutron-server'] } } if release >= 'icehouse': @@ -255,17 +269,30 @@ def network_manager(): return 'neutron' -def parse_mappings(mappings): +def parse_mappings(mappings, key_rvalue=False): + """By default mappings are lvalue keyed. + + If key_rvalue is True, the mapping will be reversed to allow multiple + configs for the same lvalue. + """ parsed = {} if mappings: mappings = mappings.split() for m in mappings: p = m.partition(':') - key = p[0].strip() - if p[1]: - parsed[key] = p[2].strip() + + if key_rvalue: + key_index = 2 + val_index = 0 + # if there is no rvalue skip to next + if not p[1]: + continue else: - parsed[key] = '' + key_index = 0 + val_index = 2 + + key = p[key_index].strip() + parsed[key] = p[val_index].strip() return parsed @@ -283,25 +310,25 @@ def parse_bridge_mappings(mappings): def parse_data_port_mappings(mappings, default_bridge='br-data'): """Parse data port mappings. - Mappings must be a space-delimited list of bridge:port mappings. + Mappings must be a space-delimited list of port:bridge mappings. - Returns dict of the form {bridge:port}. + Returns dict of the form {port:bridge} where port may be an mac address or + interface name. """ - _mappings = parse_mappings(mappings) + + # NOTE(dosaboy): we use rvalue for key to allow multiple values to be + # proposed for since it may be a mac address which will differ + # across units this allowing first-known-good to be chosen. + _mappings = parse_mappings(mappings, key_rvalue=True) if not _mappings or list(_mappings.values()) == ['']: if not mappings: return {} # For backwards-compatibility we need to support port-only provided in # config. 
- _mappings = {default_bridge: mappings.split()[0]} - - bridges = _mappings.keys() - ports = _mappings.values() - if len(set(bridges)) != len(bridges): - raise Exception("It is not allowed to have more than one port " - "configured on the same bridge") + _mappings = {mappings.split()[0]: default_bridge} + ports = _mappings.keys() if len(set(ports)) != len(ports): raise Exception("It is not allowed to have the same port configured " "on more than one bridge") diff --git a/hooks/charmhelpers/contrib/openstack/templating.py b/hooks/charmhelpers/contrib/openstack/templating.py index 24cb272b..e5e3cb1b 100644 --- a/hooks/charmhelpers/contrib/openstack/templating.py +++ b/hooks/charmhelpers/contrib/openstack/templating.py @@ -18,7 +18,7 @@ import os import six -from charmhelpers.fetch import apt_install +from charmhelpers.fetch import apt_install, apt_update from charmhelpers.core.hookenv import ( log, ERROR, @@ -29,8 +29,9 @@ from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES try: from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions except ImportError: - # python-jinja2 may not be installed yet, or we're running unittests. - FileSystemLoader = ChoiceLoader = Environment = exceptions = None + apt_update(fatal=True) + apt_install('python-jinja2', fatal=True) + from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions class OSConfigException(Exception): @@ -112,7 +113,7 @@ class OSConfigTemplate(object): def complete_contexts(self): ''' - Return a list of interfaces that have atisfied contexts. + Return a list of interfaces that have satisfied contexts. ''' if self._complete_contexts: return self._complete_contexts @@ -293,3 +294,30 @@ class OSConfigRenderer(object): [interfaces.extend(i.complete_contexts()) for i in six.itervalues(self.templates)] return interfaces + + def get_incomplete_context_data(self, interfaces): + ''' + Return dictionary of relation status of interfaces and any missing + required context data. Example: + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}} + ''' + incomplete_context_data = {} + + for i in six.itervalues(self.templates): + for context in i.contexts: + for interface in interfaces: + related = False + if interface in context.interfaces: + related = context.get_related() + missing_data = context.missing_data + if missing_data: + incomplete_context_data[interface] = {'missing_data': missing_data} + if related: + if incomplete_context_data.get(interface): + incomplete_context_data[interface].update({'related': True}) + else: + incomplete_context_data[interface] = {'related': True} + else: + incomplete_context_data[interface] = {'related': False} + return incomplete_context_data diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py index 4dd000c3..4d395a73 100644 --- a/hooks/charmhelpers/contrib/openstack/utils.py +++ b/hooks/charmhelpers/contrib/openstack/utils.py @@ -1,5 +1,3 @@ -#!/usr/bin/python - # Copyright 2014-2015 Canonical Limited. # # This file is part of charm-helpers. 
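# Illustrative sketch (not taken from this patch): expected behaviour of the
# reworked parse_mappings() helper in
# hooks/charmhelpers/contrib/openstack/neutron.py above.  The mapping strings
# below are invented example values; the import path assumes the synced
# charm-helpers tree is on the Python path.
from charmhelpers.contrib.openstack.neutron import parse_mappings

# Default behaviour still keys the result on the lvalue, as before:
assert parse_mappings('physnet1:br-data') == {'physnet1': 'br-data'}

# With key_rvalue=True the result is keyed on the rvalue instead, which is
# what parse_data_port_mappings() now relies on so that per-unit values
# (e.g. MAC addresses) can map back to a bridge; entries with no rvalue are
# skipped.
assert parse_mappings('physnet1:br-data', key_rvalue=True) == \
    {'br-data': 'physnet1'}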
@@ -24,8 +22,10 @@ import subprocess import json import os import sys +import re import six +import traceback import yaml from charmhelpers.contrib.network import ip @@ -35,12 +35,16 @@ from charmhelpers.core import ( ) from charmhelpers.core.hookenv import ( + action_fail, + action_set, config, log as juju_log, charm_dir, INFO, relation_ids, - relation_set + relation_set, + status_set, + hook_name ) from charmhelpers.contrib.storage.linux.lvm import ( @@ -69,7 +73,6 @@ CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed ' 'restricted main multiverse universe') - UBUNTU_OPENSTACK_RELEASE = OrderedDict([ ('oneiric', 'diablo'), ('precise', 'essex'), @@ -116,8 +119,40 @@ SWIFT_CODENAMES = OrderedDict([ ('2.2.1', 'kilo'), ('2.2.2', 'kilo'), ('2.3.0', 'liberty'), + ('2.4.0', 'liberty'), ]) +# >= Liberty version->codename mapping +PACKAGE_CODENAMES = { + 'nova-common': OrderedDict([ + ('12.0.0', 'liberty'), + ]), + 'neutron-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'cinder-common': OrderedDict([ + ('7.0.0', 'liberty'), + ]), + 'keystone': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'horizon-common': OrderedDict([ + ('8.0.0', 'liberty'), + ]), + 'ceilometer-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'heat-common': OrderedDict([ + ('5.0.0', 'liberty'), + ]), + 'glance-common': OrderedDict([ + ('11.0.0', 'liberty'), + ]), + 'openstack-dashboard': OrderedDict([ + ('8.0.0', 'liberty'), + ]), +} + DEFAULT_LOOPBACK_SIZE = '5G' @@ -167,9 +202,9 @@ def get_os_codename_version(vers): error_out(e) -def get_os_version_codename(codename): +def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES): '''Determine OpenStack version number from codename.''' - for k, v in six.iteritems(OPENSTACK_CODENAMES): + for k, v in six.iteritems(version_map): if v == codename: return k e = 'Could not derive OpenStack version for '\ @@ -201,20 +236,31 @@ def get_os_codename_package(package, fatal=True): error_out(e) vers = apt.upstream_version(pkg.current_ver.ver_str) + match = re.match('^(\d+)\.(\d+)\.(\d+)', vers) + if match: + vers = match.group(0) - try: - if 'swift' in pkg.name: - swift_vers = vers[:5] - if swift_vers not in SWIFT_CODENAMES: - # Deal with 1.10.0 upward - swift_vers = vers[:6] - return SWIFT_CODENAMES[swift_vers] - else: - vers = vers[:6] - return OPENSTACK_CODENAMES[vers] - except KeyError: - e = 'Could not determine OpenStack codename for version %s' % vers - error_out(e) + # >= Liberty independent project versions + if (package in PACKAGE_CODENAMES and + vers in PACKAGE_CODENAMES[package]): + return PACKAGE_CODENAMES[package][vers] + else: + # < Liberty co-ordinated project versions + try: + if 'swift' in pkg.name: + swift_vers = vers[:5] + if swift_vers not in SWIFT_CODENAMES: + # Deal with 1.10.0 upward + swift_vers = vers[:6] + return SWIFT_CODENAMES[swift_vers] + else: + vers = vers[:6] + return OPENSTACK_CODENAMES[vers] + except KeyError: + if not fatal: + return None + e = 'Could not determine OpenStack codename for version %s' % vers + error_out(e) def get_os_version_package(pkg, fatal=True): @@ -392,7 +438,11 @@ def openstack_upgrade_available(package): import apt_pkg as apt src = config('openstack-origin') cur_vers = get_os_version_package(package) - available_vers = get_os_version_install_source(src) + if "swift" in package: + codename = get_os_codename_install_source(src) + available_vers = get_os_version_codename(codename, SWIFT_CODENAMES) + else: + available_vers = 
get_os_version_install_source(src) apt.init() return apt.version_compare(available_vers, cur_vers) == 1 @@ -704,3 +754,217 @@ def git_yaml_value(projects_yaml, key): return projects[key] return None + + +def os_workload_status(configs, required_interfaces, charm_func=None): + """ + Decorator to set workload status based on complete contexts + """ + def wrap(f): + @wraps(f) + def wrapped_f(*args, **kwargs): + # Run the original function first + f(*args, **kwargs) + # Set workload status now that contexts have been + # acted on + set_os_workload_status(configs, required_interfaces, charm_func) + return wrapped_f + return wrap + + +def set_os_workload_status(configs, required_interfaces, charm_func=None): + """ + Set workload status based on complete contexts. + status-set missing or incomplete contexts + and juju-log details of missing required data. + charm_func is a charm specific function to run checking + for charm specific requirements such as a VIP setting. + """ + incomplete_rel_data = incomplete_relation_data(configs, required_interfaces) + state = 'active' + missing_relations = [] + incomplete_relations = [] + message = None + charm_state = None + charm_message = None + + for generic_interface in incomplete_rel_data.keys(): + related_interface = None + missing_data = {} + # Related or not? + for interface in incomplete_rel_data[generic_interface]: + if incomplete_rel_data[generic_interface][interface].get('related'): + related_interface = interface + missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data') + # No relation ID for the generic_interface + if not related_interface: + juju_log("{} relation is missing and must be related for " + "functionality. ".format(generic_interface), 'WARN') + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + else: + # Relation ID exists but no related unit + if not missing_data: + # Edge case relation ID exists but departing + if ('departed' in hook_name() or 'broken' in hook_name()) \ + and related_interface in hook_name(): + state = 'blocked' + if generic_interface not in missing_relations: + missing_relations.append(generic_interface) + juju_log("{} relation's interface, {}, " + "relationship is departed or broken " + "and is required for functionality." + "".format(generic_interface, related_interface), "WARN") + # Normal case relation ID exists but no related unit + # (joining) + else: + juju_log("{} relations's interface, {}, is related but has " + "no units in the relation." + "".format(generic_interface, related_interface), "INFO") + # Related unit exists and data missing on the relation + else: + juju_log("{} relation's interface, {}, is related awaiting " + "the following data from the relationship: {}. 
" + "".format(generic_interface, related_interface, + ", ".join(missing_data)), "INFO") + if state != 'blocked': + state = 'waiting' + if generic_interface not in incomplete_relations \ + and generic_interface not in missing_relations: + incomplete_relations.append(generic_interface) + + if missing_relations: + message = "Missing relations: {}".format(", ".join(missing_relations)) + if incomplete_relations: + message += "; incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'blocked' + elif incomplete_relations: + message = "Incomplete relations: {}" \ + "".format(", ".join(incomplete_relations)) + state = 'waiting' + + # Run charm specific checks + if charm_func: + charm_state, charm_message = charm_func(configs) + if charm_state != 'active' and charm_state != 'unknown': + state = workload_state_compare(state, charm_state) + if message: + message = "{} {}".format(message, charm_message) + else: + message = charm_message + + # Set to active if all requirements have been met + if state == 'active': + message = "Unit is ready" + juju_log(message, "INFO") + + status_set(state, message) + + +def workload_state_compare(current_workload_state, workload_state): + """ Return highest priority of two states""" + hierarchy = {'unknown': -1, + 'active': 0, + 'maintenance': 1, + 'waiting': 2, + 'blocked': 3, + } + + if hierarchy.get(workload_state) is None: + workload_state = 'unknown' + if hierarchy.get(current_workload_state) is None: + current_workload_state = 'unknown' + + # Set workload_state based on hierarchy of statuses + if hierarchy.get(current_workload_state) > hierarchy.get(workload_state): + return current_workload_state + else: + return workload_state + + +def incomplete_relation_data(configs, required_interfaces): + """ + Check complete contexts against required_interfaces + Return dictionary of incomplete relation data. + + configs is an OSConfigRenderer object with configs registered + + required_interfaces is a dictionary of required general interfaces + with dictionary values of possible specific interfaces. + Example: + required_interfaces = {'database': ['shared-db', 'pgsql-db']} + + The interface is said to be satisfied if anyone of the interfaces in the + list has a complete context. + + Return dictionary of incomplete or missing required contexts with relation + status of interfaces and any missing data points. Example: + {'message': + {'amqp': {'missing_data': ['rabbitmq_password'], 'related': True}, + 'zeromq-configuration': {'related': False}}, + 'identity': + {'identity-service': {'related': False}}, + 'database': + {'pgsql-db': {'related': False}, + 'shared-db': {'related': True}}} + """ + complete_ctxts = configs.complete_contexts() + incomplete_relations = [] + for svc_type in required_interfaces.keys(): + # Avoid duplicates + found_ctxt = False + for interface in required_interfaces[svc_type]: + if interface in complete_ctxts: + found_ctxt = True + if not found_ctxt: + incomplete_relations.append(svc_type) + incomplete_context_data = {} + for i in incomplete_relations: + incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i]) + return incomplete_context_data + + +def do_action_openstack_upgrade(package, upgrade_callback, configs): + """Perform action-managed OpenStack upgrade. + + Upgrades packages to the configured openstack-origin version and sets + the corresponding action status as a result. + + If the charm was installed from source we cannot upgrade it. 
+ For backwards compatibility a config flag (action-managed-upgrade) must + be set for this code to run, otherwise a full service level upgrade will + fire on config-changed. + + @param package: package name for determining if upgrade available + @param upgrade_callback: function callback to charm's upgrade function + @param configs: templating object derived from OSConfigRenderer class + + @return: True if upgrade successful; False if upgrade failed or skipped + """ + ret = False + + if git_install_requested(): + action_set({'outcome': 'installed from source, skipped upgrade.'}) + else: + if openstack_upgrade_available(package): + if config('action-managed-upgrade'): + juju_log('Upgrading OpenStack release') + + try: + upgrade_callback(configs=configs) + action_set({'outcome': 'success, upgrade completed.'}) + ret = True + except: + action_set({'outcome': 'upgrade failed, see traceback.'}) + action_set({'traceback': traceback.format_exc()}) + action_fail('do_openstack_upgrade resulted in an ' + 'unexpected error') + else: + action_set({'outcome': 'action-managed-upgrade config is ' + 'False, skipped upgrade.'}) + else: + action_set({'outcome': 'no upgrade available.'}) + + return ret diff --git a/hooks/charmhelpers/contrib/peerstorage/__init__.py b/hooks/charmhelpers/contrib/peerstorage/__init__.py index 09f2b12b..eafca44f 100644 --- a/hooks/charmhelpers/contrib/peerstorage/__init__.py +++ b/hooks/charmhelpers/contrib/peerstorage/__init__.py @@ -59,7 +59,7 @@ def some_hook(): """ -def leader_get(attribute=None): +def leader_get(attribute=None, rid=None): """Wrapper to ensure that settings are migrated from the peer relation. This is to support upgrading an environment that does not support @@ -94,7 +94,8 @@ def leader_get(attribute=None): # If attribute not present in leader db, check if this unit has set # the attribute in the peer relation if not leader_settings: - peer_setting = relation_get(attribute=attribute, unit=local_unit()) + peer_setting = _relation_get(attribute=attribute, unit=local_unit(), + rid=rid) if peer_setting: leader_set(settings={attribute: peer_setting}) leader_settings = peer_setting @@ -103,7 +104,7 @@ def leader_get(attribute=None): settings_migrated = True migrated.add(attribute) else: - r_settings = relation_get(unit=local_unit()) + r_settings = _relation_get(unit=local_unit(), rid=rid) if r_settings: for key in set(r_settings.keys()).difference(migrated): # Leader setting wins @@ -151,7 +152,7 @@ def relation_get(attribute=None, unit=None, rid=None): """ try: if rid in relation_ids('cluster'): - return leader_get(attribute) + return leader_get(attribute, rid) else: raise NotImplementedError except NotImplementedError: diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py index 00dbffb4..83f264db 100644 --- a/hooks/charmhelpers/contrib/storage/linux/ceph.py +++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py @@ -28,6 +28,7 @@ import os import shutil import json import time +import uuid from subprocess import ( check_call, @@ -35,8 +36,10 @@ from subprocess import ( CalledProcessError, ) from charmhelpers.core.hookenv import ( + local_unit, relation_get, relation_ids, + relation_set, related_units, log, DEBUG, @@ -56,6 +59,8 @@ from charmhelpers.fetch import ( apt_install, ) +from charmhelpers.core.kernel import modprobe + KEYRING = '/etc/ceph/ceph.client.{}.keyring' KEYFILE = '/etc/ceph/ceph.client.{}.key' @@ -288,17 +293,6 @@ def place_data_on_block_device(blk_device, data_src_dst): 
os.chown(data_src_dst, uid, gid) -# TODO: re-use -def modprobe(module): - """Load a kernel module and configure for auto-load on reboot.""" - log('Loading kernel module', level=INFO) - cmd = ['modprobe', module] - check_call(cmd) - with open('/etc/modules', 'r+') as modules: - if module not in modules.read(): - modules.write(module) - - def copy_files(src, dst, symlinks=False, ignore=None): """Copy files from src to dst.""" for item in os.listdir(src): @@ -411,17 +405,52 @@ class CephBrokerRq(object): The API is versioned and defaults to version 1. """ - def __init__(self, api_version=1): + def __init__(self, api_version=1, request_id=None): self.api_version = api_version + if request_id: + self.request_id = request_id + else: + self.request_id = str(uuid.uuid1()) self.ops = [] def add_op_create_pool(self, name, replica_count=3): self.ops.append({'op': 'create-pool', 'name': name, 'replicas': replica_count}) + def set_ops(self, ops): + """Set request ops to provided value. + + Useful for injecting ops that come from a previous request + to allow comparisons to ensure validity. + """ + self.ops = ops + @property def request(self): - return json.dumps({'api-version': self.api_version, 'ops': self.ops}) + return json.dumps({'api-version': self.api_version, 'ops': self.ops, + 'request-id': self.request_id}) + + def _ops_equal(self, other): + if len(self.ops) == len(other.ops): + for req_no in range(0, len(self.ops)): + for key in ['replicas', 'name', 'op']: + if self.ops[req_no][key] != other.ops[req_no][key]: + return False + else: + return False + return True + + def __eq__(self, other): + if not isinstance(other, self.__class__): + return False + if self.api_version == other.api_version and \ + self._ops_equal(other): + return True + else: + return False + + def __ne__(self, other): + return not self.__eq__(other) class CephBrokerRsp(object): @@ -431,10 +460,15 @@ class CephBrokerRsp(object): The API is versioned and defaults to version 1. """ + def __init__(self, encoded_rsp): self.api_version = None self.rsp = json.loads(encoded_rsp) + @property + def request_id(self): + return self.rsp.get('request-id') + @property def exit_code(self): return self.rsp.get('exit-code') @@ -442,3 +476,182 @@ class CephBrokerRsp(object): @property def exit_msg(self): return self.rsp.get('stderr') + + +# Ceph Broker Conversation: +# If a charm needs an action to be taken by ceph it can create a CephBrokerRq +# and send that request to ceph via the ceph relation. The CephBrokerRq has a +# unique id so that the client can identity which CephBrokerRsp is associated +# with the request. Ceph will also respond to each client unit individually +# creating a response key per client unit eg glance/0 will get a CephBrokerRsp +# via key broker-rsp-glance-0 +# +# To use this the charm can just do something like: +# +# from charmhelpers.contrib.storage.linux.ceph import ( +# send_request_if_needed, +# is_request_complete, +# CephBrokerRq, +# ) +# +# @hooks.hook('ceph-relation-changed') +# def ceph_changed(): +# rq = CephBrokerRq() +# rq.add_op_create_pool(name='poolname', replica_count=3) +# +# if is_request_complete(rq): +# +# else: +# send_request_if_needed(get_ceph_request()) +# +# CephBrokerRq and CephBrokerRsp are serialized into JSON. 
Below is an example +# of glance having sent a request to ceph which ceph has successfully processed +# 'ceph:8': { +# 'ceph/0': { +# 'auth': 'cephx', +# 'broker-rsp-glance-0': '{"request-id": "0bc7dc54", "exit-code": 0}', +# 'broker_rsp': '{"request-id": "0da543b8", "exit-code": 0}', +# 'ceph-public-address': '10.5.44.103', +# 'key': 'AQCLDttVuHXINhAAvI144CB09dYchhHyTUY9BQ==', +# 'private-address': '10.5.44.103', +# }, +# 'glance/0': { +# 'broker_req': ('{"api-version": 1, "request-id": "0bc7dc54", ' +# '"ops": [{"replicas": 3, "name": "glance", ' +# '"op": "create-pool"}]}'), +# 'private-address': '10.5.44.109', +# }, +# } + +def get_previous_request(rid): + """Return the last ceph broker request sent on a given relation + + @param rid: Relation id to query for request + """ + request = None + broker_req = relation_get(attribute='broker_req', rid=rid, + unit=local_unit()) + if broker_req: + request_data = json.loads(broker_req) + request = CephBrokerRq(api_version=request_data['api-version'], + request_id=request_data['request-id']) + request.set_ops(request_data['ops']) + + return request + + +def get_request_states(request): + """Return a dict of requests per relation id with their corresponding + completion state. + + This allows a charm, which has a request for ceph, to see whether there is + an equivalent request already being processed and if so what state that + request is in. + + @param request: A CephBrokerRq object + """ + complete = [] + requests = {} + for rid in relation_ids('ceph'): + complete = False + previous_request = get_previous_request(rid) + if request == previous_request: + sent = True + complete = is_request_complete_for_rid(previous_request, rid) + else: + sent = False + complete = False + + requests[rid] = { + 'sent': sent, + 'complete': complete, + } + + return requests + + +def is_request_sent(request): + """Check to see if a functionally equivalent request has already been sent + + Returns True if a similair request has been sent + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) + for rid in states.keys(): + if not states[rid]['sent']: + return False + + return True + + +def is_request_complete(request): + """Check to see if a functionally equivalent request has already been + completed + + Returns True if a similair request has been completed + + @param request: A CephBrokerRq object + """ + states = get_request_states(request) + for rid in states.keys(): + if not states[rid]['complete']: + return False + + return True + + +def is_request_complete_for_rid(request, rid): + """Check if a given request has been completed on the given relation + + @param request: A CephBrokerRq object + @param rid: Relation ID + """ + broker_key = get_broker_rsp_key() + for unit in related_units(rid): + rdata = relation_get(rid=rid, unit=unit) + if rdata.get(broker_key): + rsp = CephBrokerRsp(rdata.get(broker_key)) + if rsp.request_id == request.request_id: + if not rsp.exit_code: + return True + else: + # The remote unit sent no reply targeted at this unit so either the + # remote ceph cluster does not support unit targeted replies or it + # has not processed our request yet. 
+ if rdata.get('broker_rsp'): + request_data = json.loads(rdata['broker_rsp']) + if request_data.get('request-id'): + log('Ignoring legacy broker_rsp without unit key as remote ' + 'service supports unit specific replies', level=DEBUG) + else: + log('Using legacy broker_rsp as remote service does not ' + 'supports unit specific replies', level=DEBUG) + rsp = CephBrokerRsp(rdata['broker_rsp']) + if not rsp.exit_code: + return True + + return False + + +def get_broker_rsp_key(): + """Return broker response key for this unit + + This is the key that ceph is going to use to pass request status + information back to this unit + """ + return 'broker-rsp-' + local_unit().replace('/', '-') + + +def send_request_if_needed(request): + """Send broker request if an equivalent request has not already been sent + + @param request: A CephBrokerRq object + """ + if is_request_sent(request): + log('Request already sent but not complete, not sending new request', + level=DEBUG) + else: + for rid in relation_ids('ceph'): + log('Sending request {}'.format(request.request_id), level=DEBUG) + relation_set(relation_id=rid, broker_req=request.request) diff --git a/hooks/charmhelpers/contrib/storage/linux/utils.py b/hooks/charmhelpers/contrib/storage/linux/utils.py index c8373b72..1e57941a 100644 --- a/hooks/charmhelpers/contrib/storage/linux/utils.py +++ b/hooks/charmhelpers/contrib/storage/linux/utils.py @@ -43,9 +43,10 @@ def zap_disk(block_device): :param block_device: str: Full path of block device to clean. ''' + # https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b # sometimes sgdisk exits non-zero; this is OK, dd will clean up - call(['sgdisk', '--zap-all', '--mbrtogpt', - '--clear', block_device]) + call(['sgdisk', '--zap-all', '--', block_device]) + call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device]) dev_end = check_output(['blockdev', '--getsz', block_device]).decode('UTF-8') gpt_end = int(dev_end.split()[0]) - 100 @@ -67,4 +68,4 @@ def is_device_mounted(device): out = check_output(['mount']).decode('UTF-8') if is_partition: return bool(re.search(device + r"\b", out)) - return bool(re.search(device + r"[0-9]+\b", out)) + return bool(re.search(device + r"[0-9]*\b", out)) diff --git a/hooks/charmhelpers/core/files.py b/hooks/charmhelpers/core/files.py new file mode 100644 index 00000000..0f12d321 --- /dev/null +++ b/hooks/charmhelpers/core/files.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +__author__ = 'Jorge Niedbalski ' + +import os +import subprocess + + +def sed(filename, before, after, flags='g'): + """ + Search and replaces the given pattern on filename. + + :param filename: relative or absolute file path. 
+ :param before: expression to be replaced (see 'man sed') + :param after: expression to replace with (see 'man sed') + :param flags: sed-compatible regex flags in example, to make + the search and replace case insensitive, specify ``flags="i"``. + The ``g`` flag is always specified regardless, so you do not + need to remember to include it when overriding this parameter. + :returns: If the sed command exit code was zero then return, + otherwise raise CalledProcessError. + """ + expression = r's/{0}/{1}/{2}'.format(before, + after, flags) + + return subprocess.check_call(["sed", "-i", "-r", "-e", + expression, + os.path.expanduser(filename)]) diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py index dd8def9a..ab53a780 100644 --- a/hooks/charmhelpers/core/hookenv.py +++ b/hooks/charmhelpers/core/hookenv.py @@ -21,6 +21,7 @@ # Charm Helpers Developers from __future__ import print_function +import copy from distutils.version import LooseVersion from functools import wraps import glob @@ -73,6 +74,7 @@ def cached(func): res = func(*args, **kwargs) cache[key] = res return res + wrapper._wrapped = func return wrapper @@ -172,9 +174,19 @@ def relation_type(): return os.environ.get('JUJU_RELATION', None) -def relation_id(): - """The relation ID for the current relation hook""" - return os.environ.get('JUJU_RELATION_ID', None) +@cached +def relation_id(relation_name=None, service_or_unit=None): + """The relation ID for the current or a specified relation""" + if not relation_name and not service_or_unit: + return os.environ.get('JUJU_RELATION_ID', None) + elif relation_name and service_or_unit: + service_name = service_or_unit.split('/')[0] + for relid in relation_ids(relation_name): + remote_service = remote_service_name(relid) + if remote_service == service_name: + return relid + else: + raise ValueError('Must specify neither or both of relation_name and service_or_unit') def local_unit(): @@ -192,9 +204,20 @@ def service_name(): return local_unit().split('/')[0] +@cached +def remote_service_name(relid=None): + """The remote service name for a given relation-id (or the current relation)""" + if relid is None: + unit = remote_unit() + else: + units = related_units(relid) + unit = units[0] if units else None + return unit.split('/')[0] if unit else None + + def hook_name(): """The name of the currently executing hook""" - return os.path.basename(sys.argv[0]) + return os.environ.get('JUJU_HOOK_NAME', os.path.basename(sys.argv[0])) class Config(dict): @@ -263,7 +286,7 @@ class Config(dict): self.path = path or self.path with open(self.path) as f: self._prev_dict = json.load(f) - for k, v in self._prev_dict.items(): + for k, v in copy.deepcopy(self._prev_dict).items(): if k not in self: self[k] = v @@ -467,6 +490,63 @@ def relation_types(): return rel_types +@cached +def relation_to_interface(relation_name): + """ + Given the name of a relation, return the interface that relation uses. + + :returns: The interface name, or ``None``. + """ + return relation_to_role_and_interface(relation_name)[1] + + +@cached +def relation_to_role_and_interface(relation_name): + """ + Given the name of a relation, return the role and the name of the interface + that relation uses (where role is one of ``provides``, ``requires``, or ``peer``). + + :returns: A tuple containing ``(role, interface)``, or ``(None, None)``. 
+ """ + _metadata = metadata() + for role in ('provides', 'requires', 'peer'): + interface = _metadata.get(role, {}).get(relation_name, {}).get('interface') + if interface: + return role, interface + return None, None + + +@cached +def role_and_interface_to_relations(role, interface_name): + """ + Given a role and interface name, return a list of relation names for the + current charm that use that interface under that role (where role is one + of ``provides``, ``requires``, or ``peer``). + + :returns: A list of relation names. + """ + _metadata = metadata() + results = [] + for relation_name, relation in _metadata.get(role, {}).items(): + if relation['interface'] == interface_name: + results.append(relation_name) + return results + + +@cached +def interface_to_relations(interface_name): + """ + Given an interface, return a list of relation names for the current + charm that use that interface. + + :returns: A list of relation names. + """ + results = [] + for role in ('provides', 'requires', 'peer'): + results.extend(role_and_interface_to_relations(role, interface_name)) + return results + + @cached def charm_name(): """Get the name of the current charm as is specified on metadata.yaml""" @@ -643,6 +723,21 @@ def action_fail(message): subprocess.check_call(['action-fail', message]) +def action_name(): + """Get the name of the currently executing action.""" + return os.environ.get('JUJU_ACTION_NAME') + + +def action_uuid(): + """Get the UUID of the currently executing action.""" + return os.environ.get('JUJU_ACTION_UUID') + + +def action_tag(): + """Get the tag for the currently executing action.""" + return os.environ.get('JUJU_ACTION_TAG') + + def status_set(workload_state, message): """Set the workload state with a message @@ -672,21 +767,23 @@ def status_set(workload_state, message): def status_get(): - """Retrieve the previously set juju workload state + """Retrieve the previously set juju workload state and message + + If the status-get command is not found then assume this is juju < 1.23 and + return 'unknown', "" - If the status-set command is not found then assume this is juju < 1.23 and - return 'unknown' """ - cmd = ['status-get'] + cmd = ['status-get', "--format=json", "--include-data"] try: - raw_status = subprocess.check_output(cmd, universal_newlines=True) - status = raw_status.rstrip() - return status + raw_status = subprocess.check_output(cmd) except OSError as e: if e.errno == errno.ENOENT: - return 'unknown' + return ('unknown', "") else: raise + else: + status = json.loads(raw_status.decode("UTF-8")) + return (status["status"], status["message"]) def translate_exc(from_exc, to_exc): diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py index 8ae8ef86..cb3c527e 100644 --- a/hooks/charmhelpers/core/host.py +++ b/hooks/charmhelpers/core/host.py @@ -63,32 +63,48 @@ def service_reload(service_name, restart_on_failure=False): return service_result -def service_pause(service_name, init_dir=None): +def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"): """Pause a system service. 
Stop it, and prevent it from starting again at boot.""" - if init_dir is None: - init_dir = "/etc/init" stopped = service_stop(service_name) - # XXX: Support systemd too - override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) - with open(override_path, 'w') as fh: - fh.write("manual\n") + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + with open(override_path, 'w') as fh: + fh.write("manual\n") + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "disable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) return stopped -def service_resume(service_name, init_dir=None): +def service_resume(service_name, init_dir="/etc/init", + initd_dir="/etc/init.d"): """Resume a system service. Reenable starting again at boot. Start the service""" - # XXX: Support systemd too - if init_dir is None: - init_dir = "/etc/init" - override_path = os.path.join( - init_dir, '{}.conf.override'.format(service_name)) - if os.path.exists(override_path): - os.unlink(override_path) + upstart_file = os.path.join(init_dir, "{}.conf".format(service_name)) + sysv_file = os.path.join(initd_dir, service_name) + if os.path.exists(upstart_file): + override_path = os.path.join( + init_dir, '{}.override'.format(service_name)) + if os.path.exists(override_path): + os.unlink(override_path) + elif os.path.exists(sysv_file): + subprocess.check_call(["update-rc.d", service_name, "enable"]) + else: + # XXX: Support SystemD too + raise ValueError( + "Unable to detect {0} as either Upstart {1} or SysV {2}".format( + service_name, upstart_file, sysv_file)) + started = service_start(service_name) return started @@ -148,6 +164,16 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False): return user_info +def user_exists(username): + """Check if a user exists""" + try: + pwd.getpwnam(username) + user_exists = True + except KeyError: + user_exists = False + return user_exists + + def add_group(group_name, system_group=False): """Add a group to the system""" try: @@ -280,6 +306,17 @@ def mounts(): return system_mounts +def fstab_mount(mountpoint): + """Mount filesystem using fstab""" + cmd_args = ['mount', mountpoint] + try: + subprocess.check_output(cmd_args) + except subprocess.CalledProcessError as e: + log('Error unmounting {}\n{}'.format(mountpoint, e.output)) + return False + return True + + def file_hash(path, hash_type='md5'): """ Generate a hash checksum of the contents of 'path' or None if not found. @@ -396,25 +433,80 @@ def pwgen(length=None): return(''.join(random_chars)) -def list_nics(nic_type): +def is_phy_iface(interface): + """Returns True if interface is not virtual, otherwise False.""" + if interface: + sys_net = '/sys/class/net' + if os.path.isdir(sys_net): + for iface in glob.glob(os.path.join(sys_net, '*')): + if '/virtual/' in os.path.realpath(iface): + continue + + if interface == os.path.basename(iface): + return True + + return False + + +def get_bond_master(interface): + """Returns bond master if interface is bond slave otherwise None. 
+ + NOTE: the provided interface is expected to be physical + """ + if interface: + iface_path = '/sys/class/net/%s' % (interface) + if os.path.exists(iface_path): + if '/virtual/' in os.path.realpath(iface_path): + return None + + master = os.path.join(iface_path, 'master') + if os.path.exists(master): + master = os.path.realpath(master) + # make sure it is a bond master + if os.path.exists(os.path.join(master, 'bonding')): + return os.path.basename(master) + + return None + + +def list_nics(nic_type=None): '''Return a list of nics of given type(s)''' if isinstance(nic_type, six.string_types): int_types = [nic_type] else: int_types = nic_type + interfaces = [] - for int_type in int_types: - cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + if nic_type: + for int_type in int_types: + cmd = ['ip', 'addr', 'show', 'label', int_type + '*'] + ip_output = subprocess.check_output(cmd).decode('UTF-8') + ip_output = ip_output.split('\n') + ip_output = (line for line in ip_output if line) + for line in ip_output: + if line.split()[1].startswith(int_type): + matched = re.search('.*: (' + int_type + + r'[0-9]+\.[0-9]+)@.*', line) + if matched: + iface = matched.groups()[0] + else: + iface = line.split()[1].replace(":", "") + + if iface not in interfaces: + interfaces.append(iface) + else: + cmd = ['ip', 'a'] ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n') - ip_output = (line for line in ip_output if line) + ip_output = (line.strip() for line in ip_output if line) + + key = re.compile('^[0-9]+:\s+(.+):') for line in ip_output: - if line.split()[1].startswith(int_type): - matched = re.search('.*: (' + int_type + r'[0-9]+\.[0-9]+)@.*', line) - if matched: - interface = matched.groups()[0] - else: - interface = line.split()[1].replace(":", "") - interfaces.append(interface) + matched = re.search(key, line) + if matched: + iface = matched.group(1) + iface = iface.partition("@")[0] + if iface not in interfaces: + interfaces.append(iface) return interfaces diff --git a/hooks/charmhelpers/core/hugepage.py b/hooks/charmhelpers/core/hugepage.py new file mode 100644 index 00000000..4aaca3f5 --- /dev/null +++ b/hooks/charmhelpers/core/hugepage.py @@ -0,0 +1,69 @@ +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . + +import yaml +from charmhelpers.core import fstab +from charmhelpers.core import sysctl +from charmhelpers.core.host import ( + add_group, + add_user_to_group, + fstab_mount, + mkdir, +) +from charmhelpers.core.strutils import bytes_from_string +from subprocess import check_output + + +def hugepage_support(user, group='hugetlb', nr_hugepages=256, + max_map_count=65536, mnt_point='/run/hugepages/kvm', + pagesize='2MB', mount=True, set_shmmax=False): + """Enable hugepages on system. 
+ + Args: + user (str) -- Username to allow access to hugepages to + group (str) -- Group name to own hugepages + nr_hugepages (int) -- Number of pages to reserve + max_map_count (int) -- Number of Virtual Memory Areas a process can own + mnt_point (str) -- Directory to mount hugepages on + pagesize (str) -- Size of hugepages + mount (bool) -- Whether to Mount hugepages + """ + group_info = add_group(group) + gid = group_info.gr_gid + add_user_to_group(user, group) + sysctl_settings = { + 'vm.nr_hugepages': nr_hugepages, + 'vm.max_map_count': max_map_count, + 'vm.hugetlb_shm_group': gid, + } + if set_shmmax: + shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax'])) + shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages + if shmmax_minsize > shmmax_current: + sysctl_settings['kernel.shmmax'] = shmmax_minsize + sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf') + mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False) + lfstab = fstab.Fstab() + fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point) + if fstab_entry: + lfstab.remove_entry(fstab_entry) + entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs', + 'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0) + lfstab.add_entry(entry) + if mount: + fstab_mount(mnt_point) diff --git a/hooks/charmhelpers/core/kernel.py b/hooks/charmhelpers/core/kernel.py new file mode 100644 index 00000000..5dc64952 --- /dev/null +++ b/hooks/charmhelpers/core/kernel.py @@ -0,0 +1,68 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +# Copyright 2014-2015 Canonical Limited. +# +# This file is part of charm-helpers. +# +# charm-helpers is free software: you can redistribute it and/or modify +# it under the terms of the GNU Lesser General Public License version 3 as +# published by the Free Software Foundation. +# +# charm-helpers is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU Lesser General Public License for more details. +# +# You should have received a copy of the GNU Lesser General Public License +# along with charm-helpers. If not, see . 
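# Illustrative sketch (assumed usage, not part of the patch): how a charm
# hook might call the new hugepage_support() helper added above in
# hooks/charmhelpers/core/hugepage.py.  The 'nova' user and the 1024-page
# reservation are assumptions for the example only.
from charmhelpers.core.hugepage import hugepage_support

def configure_hugepages():
    # Reserve 1024 x 2MB hugepages owned by the default 'hugetlb' group,
    # mount them at /run/hugepages/kvm, and raise kernel.shmmax if the
    # reservation would exceed the current value.
    hugepage_support('nova', nr_hugepages=1024, mount=True, set_shmmax=True)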
+ +__author__ = "Jorge Niedbalski " + +from charmhelpers.core.hookenv import ( + log, + INFO +) + +from subprocess import check_call, check_output +import re + + +def modprobe(module, persist=True): + """Load a kernel module and configure for auto-load on reboot.""" + cmd = ['modprobe', module] + + log('Loading kernel module %s' % module, level=INFO) + + check_call(cmd) + if persist: + with open('/etc/modules', 'r+') as modules: + if module not in modules.read(): + modules.write(module) + + +def rmmod(module, force=False): + """Remove a module from the linux kernel""" + cmd = ['rmmod'] + if force: + cmd.append('-f') + cmd.append(module) + log('Removing kernel module %s' % module, level=INFO) + return check_call(cmd) + + +def lsmod(): + """Shows what kernel modules are currently loaded""" + return check_output(['lsmod'], + universal_newlines=True) + + +def is_module_loaded(module): + """Checks if a kernel module is already loaded""" + matches = re.findall('^%s[ ]+' % module, lsmod(), re.M) + return len(matches) > 0 + + +def update_initramfs(version='all'): + """Updates an initramfs image""" + return check_call(["update-initramfs", "-k", version, "-u"]) diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py index 8005c415..3f677833 100644 --- a/hooks/charmhelpers/core/services/helpers.py +++ b/hooks/charmhelpers/core/services/helpers.py @@ -16,7 +16,9 @@ import os import yaml + from charmhelpers.core import hookenv +from charmhelpers.core import host from charmhelpers.core import templating from charmhelpers.core.services.base import ManagerCallback @@ -240,27 +242,41 @@ class TemplateCallback(ManagerCallback): :param str source: The template source file, relative to `$CHARM_DIR/templates` + :param str target: The target to write the rendered template to :param str owner: The owner of the rendered file :param str group: The group of the rendered file :param int perms: The permissions of the rendered file - + :param partial on_change_action: functools partial to be executed when + rendered file changes """ def __init__(self, source, target, - owner='root', group='root', perms=0o444): + owner='root', group='root', perms=0o444, + on_change_action=None): self.source = source self.target = target self.owner = owner self.group = group self.perms = perms + self.on_change_action = on_change_action def __call__(self, manager, service_name, event_name): + pre_checksum = '' + if self.on_change_action and os.path.isfile(self.target): + pre_checksum = host.file_hash(self.target) service = manager.get_service(service_name) context = {} for ctx in service.get('required_data', []): context.update(ctx) templating.render(self.source, self.target, context, self.owner, self.group, self.perms) + if self.on_change_action: + if pre_checksum == host.file_hash(self.target): + hookenv.log( + 'No change detected: {}'.format(self.target), + hookenv.DEBUG) + else: + self.on_change_action() # Convenience aliases for templates diff --git a/hooks/charmhelpers/core/strutils.py b/hooks/charmhelpers/core/strutils.py index a2a784aa..7e3f9693 100644 --- a/hooks/charmhelpers/core/strutils.py +++ b/hooks/charmhelpers/core/strutils.py @@ -18,6 +18,7 @@ # along with charm-helpers. If not, see . import six +import re def bool_from_string(value): @@ -40,3 +41,32 @@ def bool_from_string(value): msg = "Unable to interpret string value '%s' as boolean" % (value) raise ValueError(msg) + + +def bytes_from_string(value): + """Interpret human readable string value as bytes. 
+ + Returns int + """ + BYTE_POWER = { + 'K': 1, + 'KB': 1, + 'M': 2, + 'MB': 2, + 'G': 3, + 'GB': 3, + 'T': 4, + 'TB': 4, + 'P': 5, + 'PB': 5, + } + if isinstance(value, six.string_types): + value = six.text_type(value) + else: + msg = "Unable to interpret non-string value '%s' as boolean" % (value) + raise ValueError(msg) + matches = re.match("([0-9]+)([a-zA-Z]+)", value) + if not matches: + msg = "Unable to interpret string value '%s' as bytes" % (value) + raise ValueError(msg) + return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)]) diff --git a/hooks/charmhelpers/core/unitdata.py b/hooks/charmhelpers/core/unitdata.py index 406a35c5..338104e0 100644 --- a/hooks/charmhelpers/core/unitdata.py +++ b/hooks/charmhelpers/core/unitdata.py @@ -152,6 +152,7 @@ associated to the hookname. import collections import contextlib import datetime +import itertools import json import os import pprint @@ -164,8 +165,7 @@ __author__ = 'Kapil Thangavelu ' class Storage(object): """Simple key value database for local unit state within charms. - Modifications are automatically committed at hook exit. That's - currently regardless of exit code. + Modifications are not persisted unless :meth:`flush` is called. To support dicts, lists, integer, floats, and booleans values are automatically json encoded/decoded. @@ -173,8 +173,11 @@ class Storage(object): def __init__(self, path=None): self.db_path = path if path is None: - self.db_path = os.path.join( - os.environ.get('CHARM_DIR', ''), '.unit-state.db') + if 'UNIT_STATE_DB' in os.environ: + self.db_path = os.environ['UNIT_STATE_DB'] + else: + self.db_path = os.path.join( + os.environ.get('CHARM_DIR', ''), '.unit-state.db') self.conn = sqlite3.connect('%s' % self.db_path) self.cursor = self.conn.cursor() self.revision = None @@ -189,15 +192,8 @@ class Storage(object): self.conn.close() self._closed = True - def _scoped_query(self, stmt, params=None): - if params is None: - params = [] - return stmt, params - def get(self, key, default=None, record=False): - self.cursor.execute( - *self._scoped_query( - 'select data from kv where key=?', [key])) + self.cursor.execute('select data from kv where key=?', [key]) result = self.cursor.fetchone() if not result: return default @@ -206,33 +202,81 @@ class Storage(object): return json.loads(result[0]) def getrange(self, key_prefix, strip=False): - stmt = "select key, data from kv where key like '%s%%'" % key_prefix - self.cursor.execute(*self._scoped_query(stmt)) + """ + Get a range of keys starting with a common prefix as a mapping of + keys to values. + + :param str key_prefix: Common prefix among all keys + :param bool strip: Optionally strip the common prefix from the key + names in the returned dict + :return dict: A (possibly empty) dict of key-value mappings + """ + self.cursor.execute("select key, data from kv where key like ?", + ['%s%%' % key_prefix]) result = self.cursor.fetchall() if not result: - return None + return {} if not strip: key_prefix = '' return dict([ (k[len(key_prefix):], json.loads(v)) for k, v in result]) def update(self, mapping, prefix=""): + """ + Set the values of multiple keys at once. + + :param dict mapping: Mapping of keys to values + :param str prefix: Optional prefix to apply to all keys in `mapping` + before setting + """ for k, v in mapping.items(): self.set("%s%s" % (prefix, k), v) def unset(self, key): + """ + Remove a key from the database entirely. 
+ """ self.cursor.execute('delete from kv where key=?', [key]) if self.revision and self.cursor.rowcount: self.cursor.execute( 'insert into kv_revisions values (?, ?, ?)', [key, self.revision, json.dumps('DELETED')]) + def unsetrange(self, keys=None, prefix=""): + """ + Remove a range of keys starting with a common prefix, from the database + entirely. + + :param list keys: List of keys to remove. + :param str prefix: Optional prefix to apply to all keys in ``keys`` + before removing. + """ + if keys is not None: + keys = ['%s%s' % (prefix, key) for key in keys] + self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)), + list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys))) + else: + self.cursor.execute('delete from kv where key like ?', + ['%s%%' % prefix]) + if self.revision and self.cursor.rowcount: + self.cursor.execute( + 'insert into kv_revisions values (?, ?, ?)', + ['%s%%' % prefix, self.revision, json.dumps('DELETED')]) + def set(self, key, value): + """ + Set a value in the database. + + :param str key: Key to set the value for + :param value: Any JSON-serializable value to be set + """ serialized = json.dumps(value) - self.cursor.execute( - 'select data from kv where key=?', [key]) + self.cursor.execute('select data from kv where key=?', [key]) exists = self.cursor.fetchone() # Skip mutations to the same value diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py index 0a3bb969..cd0b783c 100644 --- a/hooks/charmhelpers/fetch/__init__.py +++ b/hooks/charmhelpers/fetch/__init__.py @@ -90,6 +90,14 @@ CLOUD_ARCHIVE_POCKETS = { 'kilo/proposed': 'trusty-proposed/kilo', 'trusty-kilo/proposed': 'trusty-proposed/kilo', 'trusty-proposed/kilo': 'trusty-proposed/kilo', + # Liberty + 'liberty': 'trusty-updates/liberty', + 'trusty-liberty': 'trusty-updates/liberty', + 'trusty-liberty/updates': 'trusty-updates/liberty', + 'trusty-updates/liberty': 'trusty-updates/liberty', + 'liberty/proposed': 'trusty-proposed/liberty', + 'trusty-liberty/proposed': 'trusty-proposed/liberty', + 'trusty-proposed/liberty': 'trusty-proposed/liberty', } # The order of this list is very important. 
Handlers should be listed in from diff --git a/tests/charmhelpers/contrib/amulet/deployment.py b/tests/charmhelpers/contrib/amulet/deployment.py index 367d6b47..d451698d 100644 --- a/tests/charmhelpers/contrib/amulet/deployment.py +++ b/tests/charmhelpers/contrib/amulet/deployment.py @@ -51,7 +51,8 @@ class AmuletDeployment(object): if 'units' not in this_service: this_service['units'] = 1 - self.d.add(this_service['name'], units=this_service['units']) + self.d.add(this_service['name'], units=this_service['units'], + constraints=this_service.get('constraints')) for svc in other_services: if 'location' in svc: @@ -64,7 +65,8 @@ class AmuletDeployment(object): if 'units' not in svc: svc['units'] = 1 - self.d.add(svc['name'], charm=branch_location, units=svc['units']) + self.d.add(svc['name'], charm=branch_location, units=svc['units'], + constraints=svc.get('constraints')) def _add_relations(self, relations): """Add all of the relations for the services.""" diff --git a/tests/charmhelpers/contrib/amulet/utils.py b/tests/charmhelpers/contrib/amulet/utils.py index 3de26afd..55c86347 100644 --- a/tests/charmhelpers/contrib/amulet/utils.py +++ b/tests/charmhelpers/contrib/amulet/utils.py @@ -14,17 +14,25 @@ # You should have received a copy of the GNU Lesser General Public License # along with charm-helpers. If not, see . -import amulet -import ConfigParser -import distro_info import io +import json import logging import os import re -import six +import socket +import subprocess import sys import time -import urlparse +import uuid + +import amulet +import distro_info +import six +from six.moves import configparser +if six.PY3: + from urllib import parse as urlparse +else: + import urlparse class AmuletUtils(object): @@ -108,7 +116,7 @@ class AmuletUtils(object): # /!\ DEPRECATION WARNING (beisner): # New and existing tests should be rewritten to use # validate_services_by_name() as it is aware of init systems. - self.log.warn('/!\\ DEPRECATION WARNING: use ' + self.log.warn('DEPRECATION WARNING: use ' 'validate_services_by_name instead of validate_services ' 'due to init system differences.') @@ -142,19 +150,23 @@ class AmuletUtils(object): for service_name in services_list: if (self.ubuntu_releases.index(release) >= systemd_switch or - service_name == "rabbitmq-server"): - # init is systemd + service_name in ['rabbitmq-server', 'apache2']): + # init is systemd (or regular sysv) cmd = 'sudo service {} status'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 elif self.ubuntu_releases.index(release) < systemd_switch: # init is upstart cmd = 'sudo status {}'.format(service_name) + output, code = sentry_unit.run(cmd) + service_running = code == 0 and "start/running" in output - output, code = sentry_unit.run(cmd) self.log.debug('{} `{}` returned ' '{}'.format(sentry_unit.info['unit_name'], cmd, code)) - if code != 0: - return "command `{}` returned {}".format(cmd, str(code)) + if not service_running: + return u"command `{}` returned {} {}".format( + cmd, output, str(code)) return None def _get_config(self, unit, filename): @@ -164,7 +176,7 @@ class AmuletUtils(object): # NOTE(beisner): by default, ConfigParser does not handle options # with no value, such as the flags used in the mysql my.cnf file. 
# https://bugs.python.org/issue7005 - config = ConfigParser.ConfigParser(allow_no_value=True) + config = configparser.ConfigParser(allow_no_value=True) config.readfp(io.StringIO(file_contents)) return config @@ -259,33 +271,52 @@ class AmuletUtils(object): """Get last modification time of directory.""" return sentry_unit.directory_stat(directory)['mtime'] - def _get_proc_start_time(self, sentry_unit, service, pgrep_full=False): - """Get process' start time. + def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None): + """Get start time of a process based on the last modification time + of the /proc/pid directory. - Determine start time of the process based on the last modification - time of the /proc/pid directory. If pgrep_full is True, the process - name is matched against the full command line. - """ - if pgrep_full: - cmd = 'pgrep -o -f {}'.format(service) - else: - cmd = 'pgrep -o {}'.format(service) - cmd = cmd + ' | grep -v pgrep || exit 0' - cmd_out = sentry_unit.run(cmd) - self.log.debug('CMDout: ' + str(cmd_out)) - if cmd_out[0]: - self.log.debug('Pid for %s %s' % (service, str(cmd_out[0]))) - proc_dir = '/proc/{}'.format(cmd_out[0].strip()) - return self._get_dir_mtime(sentry_unit, proc_dir) + :sentry_unit: The sentry unit to check for the service on + :service: service name to look for in process table + :pgrep_full: [Deprecated] Use full command line search mode with pgrep + :returns: epoch time of service process start + :param commands: list of bash commands + :param sentry_units: list of sentry unit pointers + :returns: None if successful; Failure message otherwise + """ + if pgrep_full is not None: + # /!\ DEPRECATION WARNING (beisner): + # No longer implemented, as pidof is now used instead of pgrep. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.warn('DEPRECATION WARNING: pgrep_full bool is no ' + 'longer implemented re: lp 1474030.') + + pid_list = self.get_process_id_list(sentry_unit, service) + pid = pid_list[0] + proc_dir = '/proc/{}'.format(pid) + self.log.debug('Pid for {} on {}: {}'.format( + service, sentry_unit.info['unit_name'], pid)) + + return self._get_dir_mtime(sentry_unit, proc_dir) def service_restarted(self, sentry_unit, service, filename, - pgrep_full=False, sleep_time=20): + pgrep_full=None, sleep_time=20): """Check if service was restarted. Compare a service's start time vs a file's last modification time (such as a config file for that service) to determine if the service has been restarted. """ + # /!\ DEPRECATION WARNING (beisner): + # This method is prone to races in that no before-time is known. + # Use validate_service_config_changed instead. + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + self.log.warn('DEPRECATION WARNING: use ' + 'validate_service_config_changed instead of ' + 'service_restarted due to known races.') + time.sleep(sleep_time) if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >= self._get_file_mtime(sentry_unit, filename)): @@ -294,15 +325,15 @@ class AmuletUtils(object): return False def service_restarted_since(self, sentry_unit, mtime, service, - pgrep_full=False, sleep_time=20, - retry_count=2): + pgrep_full=None, sleep_time=20, + retry_count=2, retry_sleep_time=30): """Check if service was been started after a given time. 
Args: sentry_unit (sentry): The sentry unit to check for the service on mtime (float): The epoch time to check against service (string): service name to look for in process table - pgrep_full (boolean): Use full command line search mode with pgrep + pgrep_full: [Deprecated] Use full command line search mode with pgrep sleep_time (int): Seconds to sleep before looking for process retry_count (int): If service is not found, how many times to retry @@ -311,30 +342,44 @@ class AmuletUtils(object): False if service is older than mtime or if service was not found. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + unit_name = sentry_unit.info['unit_name'] + self.log.debug('Checking that %s service restarted since %s on ' + '%s' % (service, mtime, unit_name)) time.sleep(sleep_time) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - while retry_count > 0 and not proc_start_time: - self.log.debug('No pid file found for service %s, will retry %i ' - 'more times' % (service, retry_count)) - time.sleep(30) - proc_start_time = self._get_proc_start_time(sentry_unit, service, - pgrep_full) - retry_count = retry_count - 1 + proc_start_time = None + tries = 0 + while tries <= retry_count and not proc_start_time: + try: + proc_start_time = self._get_proc_start_time(sentry_unit, + service, + pgrep_full) + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'OK'.format(tries, service, unit_name)) + except IOError: + # NOTE(beisner) - race avoidance, proc may not exist yet. + # https://bugs.launchpad.net/charm-helpers/+bug/1474030 + self.log.debug('Attempt {} to get {} proc start time on {} ' + 'failed'.format(tries, service, unit_name)) + time.sleep(retry_sleep_time) + tries += 1 if not proc_start_time: self.log.warn('No proc start time found, assuming service did ' 'not start') return False if proc_start_time >= mtime: - self.log.debug('proc start time is newer than provided mtime' - '(%s >= %s)' % (proc_start_time, mtime)) + self.log.debug('Proc start time is newer than provided mtime' + '(%s >= %s) on %s (OK)' % (proc_start_time, + mtime, unit_name)) return True else: - self.log.warn('proc start time (%s) is older than provided mtime ' - '(%s), service did not restart' % (proc_start_time, - mtime)) + self.log.warn('Proc start time (%s) is older than provided mtime ' + '(%s) on %s, service did not ' + 'restart' % (proc_start_time, mtime, unit_name)) return False def config_updated_since(self, sentry_unit, filename, mtime, @@ -364,8 +409,9 @@ class AmuletUtils(object): return False def validate_service_config_changed(self, sentry_unit, mtime, service, - filename, pgrep_full=False, - sleep_time=20, retry_count=2): + filename, pgrep_full=None, + sleep_time=20, retry_count=2, + retry_sleep_time=30): """Check service and file were updated after mtime Args: @@ -373,9 +419,10 @@ class AmuletUtils(object): mtime (float): The epoch time to check against service (string): service name to look for in process table filename (string): The file to check mtime of - pgrep_full (boolean): Use full command line search mode with pgrep - sleep_time (int): Seconds to sleep before looking for process + pgrep_full: [Deprecated] Use full command line search mode with pgrep + sleep_time (int): Initial sleep in seconds to pass to test helpers retry_count (int): If service is not found, how 
many times to retry + retry_sleep_time (int): Time in seconds to wait between retries Typical Usage: u = OpenStackAmuletUtils(ERROR) @@ -392,15 +439,25 @@ class AmuletUtils(object): mtime, False if service is older than mtime or if service was not found or if filename was modified before mtime. """ - self.log.debug('Checking %s restarted since %s' % (service, mtime)) - time.sleep(sleep_time) - service_restart = self.service_restarted_since(sentry_unit, mtime, - service, - pgrep_full=pgrep_full, - sleep_time=0, - retry_count=retry_count) - config_update = self.config_updated_since(sentry_unit, filename, mtime, - sleep_time=0) + + # NOTE(beisner) pgrep_full is no longer implemented, as pidof is now + # used instead of pgrep. pgrep_full is still passed through to ensure + # deprecation WARNS. lp1474030 + + service_restart = self.service_restarted_since( + sentry_unit, mtime, + service, + pgrep_full=pgrep_full, + sleep_time=sleep_time, + retry_count=retry_count, + retry_sleep_time=retry_sleep_time) + + config_update = self.config_updated_since( + sentry_unit, + filename, + mtime, + sleep_time=0) + return service_restart and config_update def get_sentry_time(self, sentry_unit): @@ -418,7 +475,6 @@ class AmuletUtils(object): """Return a list of all Ubuntu releases in order of release.""" _d = distro_info.UbuntuDistroInfo() _release_list = _d.all - self.log.debug('Ubuntu release list: {}'.format(_release_list)) return _release_list def file_to_url(self, file_rel_path): @@ -450,15 +506,20 @@ class AmuletUtils(object): cmd, code, output)) return None - def get_process_id_list(self, sentry_unit, process_name): + def get_process_id_list(self, sentry_unit, process_name, + expect_success=True): """Get a list of process ID(s) from a single sentry juju unit for a single process name. - :param sentry_unit: Pointer to amulet sentry instance (juju unit) + :param sentry_unit: Amulet sentry instance (juju unit) :param process_name: Process name + :param expect_success: If False, expect the PID to be missing, + raise if it is present. :returns: List of process IDs """ - cmd = 'pidof {}'.format(process_name) + cmd = 'pidof -x {}'.format(process_name) + if not expect_success: + cmd += " || exit 0 && exit 1" output, code = sentry_unit.run(cmd) if code != 0: msg = ('{} `{}` returned {} ' @@ -467,14 +528,23 @@ class AmuletUtils(object): amulet.raise_status(amulet.FAIL, msg=msg) return str(output).split() - def get_unit_process_ids(self, unit_processes): + def get_unit_process_ids(self, unit_processes, expect_success=True): """Construct a dict containing unit sentries, process names, and - process IDs.""" + process IDs. + + :param unit_processes: A dictionary of Amulet sentry instance + to list of process names. + :param expect_success: if False expect the processes to not be + running, raise if they are. + :returns: Dictionary of Amulet sentry instance to dictionary + of process names to PIDs. + """ pid_dict = {} - for sentry_unit, process_list in unit_processes.iteritems(): + for sentry_unit, process_list in six.iteritems(unit_processes): pid_dict[sentry_unit] = {} for process in process_list: - pids = self.get_process_id_list(sentry_unit, process) + pids = self.get_process_id_list( + sentry_unit, process, expect_success=expect_success) pid_dict[sentry_unit].update({process: pids}) return pid_dict @@ -488,7 +558,7 @@ class AmuletUtils(object): return ('Unit count mismatch. 
expected, actual: {}, ' '{} '.format(len(expected), len(actual))) - for (e_sentry, e_proc_names) in expected.iteritems(): + for (e_sentry, e_proc_names) in six.iteritems(expected): e_sentry_name = e_sentry.info['unit_name'] if e_sentry in actual.keys(): a_proc_names = actual[e_sentry] @@ -507,11 +577,23 @@ class AmuletUtils(object): '{}'.format(e_proc_name, a_proc_name)) a_pids_length = len(a_pids) - if e_pids_length != a_pids_length: - return ('PID count mismatch. {} ({}) expected, actual: ' + fail_msg = ('PID count mismatch. {} ({}) expected, actual: ' '{}, {} ({})'.format(e_sentry_name, e_proc_name, e_pids_length, a_pids_length, a_pids)) + + # If expected is not bool, ensure PID quantities match + if not isinstance(e_pids_length, bool) and \ + a_pids_length != e_pids_length: + return fail_msg + # If expected is bool True, ensure 1 or more PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is True and a_pids_length < 1: + return fail_msg + # If expected is bool False, ensure 0 PIDs exist + elif isinstance(e_pids_length, bool) and \ + e_pids_length is False and a_pids_length != 0: + return fail_msg else: self.log.debug('PID check OK: {} {} {}: ' '{}'.format(e_sentry_name, e_proc_name, @@ -531,3 +613,175 @@ class AmuletUtils(object): return 'Dicts within list are not identical' return None + + def validate_sectionless_conf(self, file_contents, expected): + """A crude conf parser. Useful to inspect configuration files which + do not have section headers (as would be necessary in order to use + the configparser). Such as openstack-dashboard or rabbitmq confs.""" + for line in file_contents.split('\n'): + if '=' in line: + args = line.split('=') + if len(args) <= 1: + continue + key = args[0].strip() + value = args[1].strip() + if key in expected.keys(): + if expected[key] != value: + msg = ('Config mismatch. Expected, actual: {}, ' + '{}'.format(expected[key], value)) + amulet.raise_status(amulet.FAIL, msg=msg) + + def get_unit_hostnames(self, units): + """Return a dict of juju unit names to hostnames.""" + host_names = {} + for unit in units: + host_names[unit.info['unit_name']] = \ + str(unit.file_contents('/etc/hostname').strip()) + self.log.debug('Unit host names: {}'.format(host_names)) + return host_names + + def run_cmd_unit(self, sentry_unit, cmd): + """Run a command on a unit, return the output and exit code.""" + output, code = sentry_unit.run(cmd) + if code == 0: + self.log.debug('{} `{}` command returned {} ' + '(OK)'.format(sentry_unit.info['unit_name'], + cmd, code)) + else: + msg = ('{} `{}` command returned {} ' + '{}'.format(sentry_unit.info['unit_name'], + cmd, code, output)) + amulet.raise_status(amulet.FAIL, msg=msg) + return str(output), code + + def file_exists_on_unit(self, sentry_unit, file_name): + """Check if a file exists on a unit.""" + try: + sentry_unit.file_stat(file_name) + return True + except IOError: + return False + except Exception as e: + msg = 'Error checking file {}: {}'.format(file_name, e) + amulet.raise_status(amulet.FAIL, msg=msg) + + def file_contents_safe(self, sentry_unit, file_name, + max_wait=60, fatal=False): + """Get file contents from a sentry unit. Wrap amulet file_contents + with retry logic to address races where a file checks as existing, + but no longer exists by the time file_contents is called. + Return None if file not found. 
Optionally raise if fatal is True.""" + unit_name = sentry_unit.info['unit_name'] + file_contents = False + tries = 0 + while not file_contents and tries < (max_wait / 4): + try: + file_contents = sentry_unit.file_contents(file_name) + except IOError: + self.log.debug('Attempt {} to open file {} from {} ' + 'failed'.format(tries, file_name, + unit_name)) + time.sleep(4) + tries += 1 + + if file_contents: + return file_contents + elif not fatal: + return None + elif fatal: + msg = 'Failed to get file contents from unit.' + amulet.raise_status(amulet.FAIL, msg) + + def port_knock_tcp(self, host="localhost", port=22, timeout=15): + """Open a TCP socket to check for a listening sevice on a host. + + :param host: host name or IP address, default to localhost + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :returns: True if successful, False if connect failed + """ + + # Resolve host name if possible + try: + connect_host = socket.gethostbyname(host) + host_human = "{} ({})".format(connect_host, host) + except socket.error as e: + self.log.warn('Unable to resolve address: ' + '{} ({}) Trying anyway!'.format(host, e)) + connect_host = host + host_human = connect_host + + # Attempt socket connection + try: + knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + knock.settimeout(timeout) + knock.connect((connect_host, port)) + knock.close() + self.log.debug('Socket connect OK for host ' + '{} on port {}.'.format(host_human, port)) + return True + except socket.error as e: + self.log.debug('Socket connect FAIL for' + ' {} port {} ({})'.format(host_human, port, e)) + return False + + def port_knock_units(self, sentry_units, port=22, + timeout=15, expect_success=True): + """Open a TCP socket to check for a listening sevice on each + listed juju unit. + + :param sentry_units: list of sentry unit pointers + :param port: TCP port number, default to 22 + :param timeout: Connect timeout, default to 15 seconds + :expect_success: True by default, set False to invert logic + :returns: None if successful, Failure message otherwise + """ + for unit in sentry_units: + host = unit.info['public-address'] + connected = self.port_knock_tcp(host, port, timeout) + if not connected and expect_success: + return 'Socket connect failed.' + elif connected and not expect_success: + return 'Socket connected unexpectedly.' + + def get_uuid_epoch_stamp(self): + """Returns a stamp string based on uuid4 and epoch time. Useful in + generating test messages which need to be unique-ish.""" + return '[{}-{}]'.format(uuid.uuid4(), time.time()) + +# amulet juju action helpers: + def run_action(self, unit_sentry, action, + _check_output=subprocess.check_output): + """Run the named action on a given unit sentry. + + _check_output parameter is used for dependency injection. + + @return action_id. + """ + unit_id = unit_sentry.info["unit_name"] + command = ["juju", "action", "do", "--format=json", unit_id, action] + self.log.info("Running command: %s\n" % " ".join(command)) + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + action_id = data[u'Action queued with id'] + return action_id + + def wait_on_action(self, action_id, _check_output=subprocess.check_output): + """Wait for a given action, returning if it completed or not. + + _check_output parameter is used for dependency injection. 
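+
+        Illustrative usage (the 'pause' action name is only an example;
+        substitute an action the charm actually defines):
+
+            action_id = u.run_action(unit_sentry, 'pause')
+            completed = u.wait_on_action(action_id)
+            assert completed, 'pause action did not complete'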
+ """ + command = ["juju", "action", "fetch", "--format=json", "--wait=0", + action_id] + output = _check_output(command, universal_newlines=True) + data = json.loads(output) + return data.get(u"status") == "completed" + + def status_get(self, unit): + """Return the current service status of this unit.""" + raw_status, return_code = unit.run( + "status-get --format=json --include-data") + if return_code != 0: + return ("unknown", "") + status = json.loads(raw_status) + return (status["status"], status["message"]) diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index b01e6cb8..722bc645 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -44,20 +44,31 @@ class OpenStackAmuletDeployment(AmuletDeployment): Determine if the local branch being tested is derived from its stable or next (dev) branch, and based on this, use the corresonding stable or next branches for the other_services.""" - base_charms = ['mysql', 'mongodb'] + + # Charms outside the lp:~openstack-charmers namespace + base_charms = ['mysql', 'mongodb', 'nrpe'] + + # Force these charms to current series even when using an older series. + # ie. Use trusty/nrpe even when series is precise, as the P charm + # does not possess the necessary external master config and hooks. + force_series_current = ['nrpe'] if self.series in ['precise', 'trusty']: base_series = self.series else: base_series = self.current_next - if self.stable: - for svc in other_services: + for svc in other_services: + if svc['name'] in force_series_current: + base_series = self.current_next + # If a location has been explicitly set, use it + if svc.get('location'): + continue + if self.stable: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: - for svc in other_services: + else: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -66,6 +77,7 @@ class OpenStackAmuletDeployment(AmuletDeployment): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) + return other_services def _add_services(self, this_service, other_services): @@ -77,21 +89,23 @@ class OpenStackAmuletDeployment(AmuletDeployment): services = other_services services.append(this_service) + + # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] - # Most OpenStack subordinate charms do not expose an origin option - # as that is controlled by the principle. - ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch'] + + # Charms which can not use openstack-origin, ie. 
many subordinates + no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + ignore: + if svc['name'] not in use_source + no_origin: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in ignore: + if svc['name'] in use_source and svc['name'] not in no_origin: config = {'source': self.source} self.d.configure(svc['name'], config) diff --git a/tests/charmhelpers/contrib/openstack/amulet/utils.py b/tests/charmhelpers/contrib/openstack/amulet/utils.py index 03f79277..b1397419 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/utils.py +++ b/tests/charmhelpers/contrib/openstack/amulet/utils.py @@ -27,6 +27,7 @@ import glanceclient.v1.client as glance_client import heatclient.v1.client as heat_client import keystoneclient.v2_0 as keystone_client import novaclient.v1_1.client as nova_client +import pika import swiftclient from charmhelpers.contrib.amulet.utils import ( @@ -602,3 +603,361 @@ class OpenStackAmuletUtils(AmuletUtils): self.log.debug('Ceph {} samples (OK): ' '{}'.format(sample_type, samples)) return None + +# rabbitmq/amqp specific helpers: + def add_rmq_test_user(self, sentry_units, + username="testuser1", password="changeme"): + """Add a test user via the first rmq juju unit, check connection as + the new user against all sentry units. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful. Raise on error. + """ + self.log.debug('Adding rmq user ({})...'.format(username)) + + # Check that user does not already exist + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + if username in output: + self.log.warning('User ({}) already exists, returning ' + 'gracefully.'.format(username)) + return + + perms = '".*" ".*" ".*"' + cmds = ['rabbitmqctl add_user {} {}'.format(username, password), + 'rabbitmqctl set_permissions {} {}'.format(username, perms)] + + # Add user via first unit + for cmd in cmds: + output, _ = self.run_cmd_unit(sentry_units[0], cmd) + + # Check connection against the other sentry_units + self.log.debug('Checking user connect against units...') + for sentry_unit in sentry_units: + connection = self.connect_amqp_by_unit(sentry_unit, ssl=False, + username=username, + password=password) + connection.close() + + def delete_rmq_test_user(self, sentry_units, username="testuser1"): + """Delete a rabbitmq user via the first rmq juju unit. + + :param sentry_units: list of sentry unit pointers + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: None if successful or no such user. + """ + self.log.debug('Deleting rmq user ({})...'.format(username)) + + # Check that the user exists + cmd_user_list = 'rabbitmqctl list_users' + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list) + + if username not in output: + self.log.warning('User ({}) does not exist, returning ' + 'gracefully.'.format(username)) + return + + # Delete the user + cmd_user_del = 'rabbitmqctl delete_user {}'.format(username) + output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del) + + def get_rmq_cluster_status(self, sentry_unit): + """Execute rabbitmq cluster status command on a unit and return + the full output. 
+ + :param unit: sentry unit + :returns: String containing console output of cluster status command + """ + cmd = 'rabbitmqctl cluster_status' + output, _ = self.run_cmd_unit(sentry_unit, cmd) + self.log.debug('{} cluster_status:\n{}'.format( + sentry_unit.info['unit_name'], output)) + return str(output) + + def get_rmq_cluster_running_nodes(self, sentry_unit): + """Parse rabbitmqctl cluster_status output string, return list of + running rabbitmq cluster nodes. + + :param unit: sentry unit + :returns: List containing node names of running nodes + """ + # NOTE(beisner): rabbitmqctl cluster_status output is not + # json-parsable, do string chop foo, then json.loads that. + str_stat = self.get_rmq_cluster_status(sentry_unit) + if 'running_nodes' in str_stat: + pos_start = str_stat.find("{running_nodes,") + 15 + pos_end = str_stat.find("]},", pos_start) + 1 + str_run_nodes = str_stat[pos_start:pos_end].replace("'", '"') + run_nodes = json.loads(str_run_nodes) + return run_nodes + else: + return [] + + def validate_rmq_cluster_running_nodes(self, sentry_units): + """Check that all rmq unit hostnames are represented in the + cluster_status output of all units. + + :param host_names: dict of juju unit names to host names + :param units: list of sentry unit pointers (all rmq units) + :returns: None if successful, otherwise return error message + """ + host_names = self.get_unit_hostnames(sentry_units) + errors = [] + + # Query every unit for cluster_status running nodes + for query_unit in sentry_units: + query_unit_name = query_unit.info['unit_name'] + running_nodes = self.get_rmq_cluster_running_nodes(query_unit) + + # Confirm that every unit is represented in the queried unit's + # cluster_status running nodes output. + for validate_unit in sentry_units: + val_host_name = host_names[validate_unit.info['unit_name']] + val_node_name = 'rabbit@{}'.format(val_host_name) + + if val_node_name not in running_nodes: + errors.append('Cluster member check failed on {}: {} not ' + 'in {}\n'.format(query_unit_name, + val_node_name, + running_nodes)) + if errors: + return ''.join(errors) + + def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None): + """Check a single juju rmq unit for ssl and port in the config file.""" + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + conf_file = '/etc/rabbitmq/rabbitmq.config' + conf_contents = str(self.file_contents_safe(sentry_unit, + conf_file, max_wait=16)) + # Checks + conf_ssl = 'ssl' in conf_contents + conf_port = str(port) in conf_contents + + # Port explicitly checked in config + if port and conf_port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif port and not conf_port and conf_ssl: + self.log.debug('SSL is enabled @{} but not on port {} ' + '({})'.format(host, port, unit_name)) + return False + # Port not checked (useful when checking that ssl is disabled) + elif not port and conf_ssl: + self.log.debug('SSL is enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return True + elif not port and not conf_ssl: + self.log.debug('SSL not enabled @{}:{} ' + '({})'.format(host, port, unit_name)) + return False + else: + msg = ('Unknown condition when checking SSL status @{}:{} ' + '({})'.format(host, port, unit_name)) + amulet.raise_status(amulet.FAIL, msg) + + def validate_rmq_ssl_enabled_units(self, sentry_units, port=None): + """Check that ssl is enabled on rmq juju sentry units. 
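+
+        Illustrative usage sketch (assumes, as elsewhere in these helpers,
+        that `u` is an OpenStackAmuletUtils instance and `rmq_sentries` are
+        the rabbitmq-server sentry units; not part of the original helper):
+
+            msg = u.validate_rmq_ssl_enabled_units(rmq_sentries, port=5671)
+            if msg:
+                amulet.raise_status(amulet.FAIL, msg)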
+ + :param sentry_units: list of all rmq sentry units + :param port: optional ssl port override to validate + :returns: None if successful, otherwise return error message + """ + for sentry_unit in sentry_units: + if not self.rmq_ssl_is_enabled_on_unit(sentry_unit, port=port): + return ('Unexpected condition: ssl is disabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def validate_rmq_ssl_disabled_units(self, sentry_units): + """Check that ssl is enabled on listed rmq juju sentry units. + + :param sentry_units: list of all rmq sentry units + :returns: True if successful. Raise on error. + """ + for sentry_unit in sentry_units: + if self.rmq_ssl_is_enabled_on_unit(sentry_unit): + return ('Unexpected condition: ssl is enabled on unit ' + '({})'.format(sentry_unit.info['unit_name'])) + return None + + def configure_rmq_ssl_on(self, sentry_units, deployment, + port=None, max_wait=60): + """Turn ssl charm config option on, with optional non-default + ssl port specification. Confirm that it is enabled on every + unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param port: amqp port, use defaults if None + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: on') + + # Enable RMQ SSL + config = {'ssl': 'on'} + if port: + config['ssl_port'] = port + + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60): + """Turn ssl charm config option off, confirm that it is disabled + on every unit. + + :param sentry_units: list of sentry units + :param deployment: amulet deployment object pointer + :param max_wait: maximum time to wait in seconds to confirm + :returns: None if successful. Raise on error. + """ + self.log.debug('Setting ssl charm config option: off') + + # Disable RMQ SSL + config = {'ssl': 'off'} + deployment.configure('rabbitmq-server', config) + + # Confirm + tries = 0 + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + while ret and tries < (max_wait / 4): + time.sleep(4) + self.log.debug('Attempt {}: {}'.format(tries, ret)) + ret = self.validate_rmq_ssl_disabled_units(sentry_units) + tries += 1 + + if ret: + amulet.raise_status(amulet.FAIL, ret) + + def connect_amqp_by_unit(self, sentry_unit, ssl=False, + port=None, fatal=True, + username="testuser1", password="changeme"): + """Establish and return a pika amqp connection to the rabbitmq service + running on a rmq juju unit. 
+ + :param sentry_unit: sentry unit pointer + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :param fatal: boolean, default to True (raises on connect error) + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :returns: pika amqp connection pointer or None if failed and non-fatal + """ + host = sentry_unit.info['public-address'] + unit_name = sentry_unit.info['unit_name'] + + # Default port logic if port is not specified + if ssl and not port: + port = 5671 + elif not ssl and not port: + port = 5672 + + self.log.debug('Connecting to amqp on {}:{} ({}) as ' + '{}...'.format(host, port, unit_name, username)) + + try: + credentials = pika.PlainCredentials(username, password) + parameters = pika.ConnectionParameters(host=host, port=port, + credentials=credentials, + ssl=ssl, + connection_attempts=3, + retry_delay=5, + socket_timeout=1) + connection = pika.BlockingConnection(parameters) + assert connection.server_properties['product'] == 'RabbitMQ' + self.log.debug('Connect OK') + return connection + except Exception as e: + msg = ('amqp connection failed to {}:{} as ' + '{} ({})'.format(host, port, username, str(e))) + if fatal: + amulet.raise_status(amulet.FAIL, msg) + else: + self.log.warn(msg) + return None + + def publish_amqp_message_by_unit(self, sentry_unit, message, + queue="test", ssl=False, + username="testuser1", + password="changeme", + port=None): + """Publish an amqp message to a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param message: amqp message string + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: None. Raises exception if publish failed. + """ + self.log.debug('Publishing message to {} queue:\n{}'.format(queue, + message)) + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + + # NOTE(beisner): extra debug here re: pika hang potential: + # https://github.com/pika/pika/issues/297 + # https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw + self.log.debug('Defining channel...') + channel = connection.channel() + self.log.debug('Declaring queue...') + channel.queue_declare(queue=queue, auto_delete=False, durable=True) + self.log.debug('Publishing message...') + channel.basic_publish(exchange='', routing_key=queue, body=message) + self.log.debug('Closing channel...') + channel.close() + self.log.debug('Closing connection...') + connection.close() + + def get_amqp_message_by_unit(self, sentry_unit, queue="test", + username="testuser1", + password="changeme", + ssl=False, port=None): + """Get an amqp message from a rmq juju unit. + + :param sentry_unit: sentry unit pointer + :param queue: message queue, default to test + :param username: amqp user name, default to testuser1 + :param password: amqp user password + :param ssl: boolean, default to False + :param port: amqp port, use defaults if None + :returns: amqp message body as string. Raise if get fails. 
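+
+        Round-trip sketch (illustrative only; assumes `u` is an
+        OpenStackAmuletUtils instance and `unit` an rmq sentry with the
+        default test user from add_rmq_test_user in place):
+
+            msg = u.get_uuid_epoch_stamp()
+            u.publish_amqp_message_by_unit(unit, msg)
+            assert u.get_amqp_message_by_unit(unit) == msg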
+ """ + connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl, + port=port, + username=username, + password=password) + channel = connection.channel() + method_frame, _, body = channel.basic_get(queue) + + if method_frame: + self.log.debug('Retreived message from {} queue:\n{}'.format(queue, + body)) + channel.basic_ack(method_frame.delivery_tag) + channel.close() + connection.close() + return body + else: + msg = 'No message retrieved.' + amulet.raise_status(amulet.FAIL, msg) From f66ce8fa16011817d6460a041a51250d0356fa4a Mon Sep 17 00:00:00 2001 From: Hui Xiang Date: Tue, 22 Sep 2015 14:05:15 +0800 Subject: [PATCH 33/36] Fix lint error --- .../contrib/openstack/amulet/deployment.py | 22 ++++++++----------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/tests/charmhelpers/contrib/openstack/amulet/deployment.py b/tests/charmhelpers/contrib/openstack/amulet/deployment.py index 1d504e42..07ee2ef1 100644 --- a/tests/charmhelpers/contrib/openstack/amulet/deployment.py +++ b/tests/charmhelpers/contrib/openstack/amulet/deployment.py @@ -51,17 +51,13 @@ class OpenStackAmuletDeployment(AmuletDeployment): else: base_series = self.current_next - for svc in other_services: - if svc['name'] in force_series_current: - base_series = self.current_next - # If a location has been explicitly set, use it - if svc.get('location'): - continue - if self.stable: + if self.stable: + for svc in other_services: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, svc['name']) - else: + else: + for svc in other_services: if svc['name'] in base_charms: temp = 'lp:charms/{}/{}' svc['location'] = temp.format(base_series, @@ -70,7 +66,6 @@ class OpenStackAmuletDeployment(AmuletDeployment): temp = 'lp:~openstack-charmers/charms/{}/{}/next' svc['location'] = temp.format(self.current_next, svc['name']) - return other_services def _add_services(self, this_service, other_services): @@ -82,20 +77,21 @@ class OpenStackAmuletDeployment(AmuletDeployment): services = other_services services.append(this_service) - - # Charms which should use the source config option use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph', 'ceph-osd', 'ceph-radosgw'] + # Most OpenStack subordinate charms do not expose an origin option + # as that is controlled by the principle. 
+ ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe'] if self.openstack: for svc in services: - if svc['name'] not in use_source + no_origin: + if svc['name'] not in use_source + ignore: config = {'openstack-origin': self.openstack} self.d.configure(svc['name'], config) if self.source: for svc in services: - if svc['name'] in use_source and svc['name'] not in no_origin: + if svc['name'] in use_source and svc['name'] not in ignore: config = {'source': self.source} self.d.configure(svc['name'], config) From f26a5ab508e8285461283d63d28c36bc44eca0a6 Mon Sep 17 00:00:00 2001 From: James Page Date: Tue, 22 Sep 2015 14:52:12 +0100 Subject: [PATCH 34/36] Ensure python2 is installed before hook execution --- hooks/install | 21 ++++++++++++++++++++- hooks/install.real | 1 + hooks/nova_cc_hooks.py | 2 +- 3 files changed, 22 insertions(+), 2 deletions(-) mode change 120000 => 100755 hooks/install create mode 120000 hooks/install.real diff --git a/hooks/install b/hooks/install deleted file mode 120000 index f6702415..00000000 --- a/hooks/install +++ /dev/null @@ -1 +0,0 @@ -nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/install b/hooks/install new file mode 100755 index 00000000..83a9d3ce --- /dev/null +++ b/hooks/install @@ -0,0 +1,20 @@ +#!/bin/bash +# Wrapper to deal with newer Ubuntu versions that don't have py2 installed +# by default. + +declare -a DEPS=('apt' 'netaddr' 'netifaces' 'pip' 'yaml') + +check_and_install() { + pkg="${1}-${2}" + if ! dpkg -s ${pkg} 2>&1 > /dev/null; then + apt-get -y install ${pkg} + fi +} + +PYTHON="python" + +for dep in ${DEPS[@]}; do + check_and_install ${PYTHON} ${dep} +done + +exec ./hooks/install.real diff --git a/hooks/install.real b/hooks/install.real new file mode 120000 index 00000000..f6702415 --- /dev/null +++ b/hooks/install.real @@ -0,0 +1 @@ +nova_cc_hooks.py \ No newline at end of file diff --git a/hooks/nova_cc_hooks.py b/hooks/nova_cc_hooks.py index 24e1bc9c..73d77c3e 100755 --- a/hooks/nova_cc_hooks.py +++ b/hooks/nova_cc_hooks.py @@ -144,7 +144,7 @@ AGENT_CA_PARAMS = 'op monitor interval="5s"' NOVA_CONSOLEAUTH_OVERRIDE = '/etc/init/nova-consoleauth.override' -@hooks.hook() +@hooks.hook('install.real') def install(): execd_preinstall() configure_installation_source(config('openstack-origin')) From 0d75d89e56823e33aed67100d66e37335b5a0367 Mon Sep 17 00:00:00 2001 From: David Ames Date: Wed, 23 Sep 2015 09:08:31 -0700 Subject: [PATCH 35/36] Do not use register_configs import CONFIGS directly --- actions/openstack_upgrade.py | 11 ++- unit_tests/test_actions_openstack_upgrade.py | 76 ++++++++++++++++++++ 2 files changed, 81 insertions(+), 6 deletions(-) create mode 100644 unit_tests/test_actions_openstack_upgrade.py diff --git a/actions/openstack_upgrade.py b/actions/openstack_upgrade.py index ab76509f..d4af66fe 100755 --- a/actions/openstack_upgrade.py +++ b/actions/openstack_upgrade.py @@ -11,16 +11,15 @@ from charmhelpers.core.hookenv import ( relation_ids, ) -from nova_cc_hooks import neutron_api_relation_joined - from nova_cc_utils import ( do_openstack_upgrade, - register_configs ) -from nova_cc_hooks import config_changed - -CONFIGS = register_configs() +from nova_cc_hooks import ( + config_changed, + CONFIGS, + neutron_api_relation_joined, +) def openstack_upgrade(): diff --git a/unit_tests/test_actions_openstack_upgrade.py b/unit_tests/test_actions_openstack_upgrade.py new file mode 100644 index 00000000..ed630082 --- /dev/null +++ b/unit_tests/test_actions_openstack_upgrade.py @@ -0,0 +1,76 @@ +from mock import 
patch, MagicMock +import os + +os.environ['JUJU_UNIT_NAME'] = 'nova-cloud-controller' + + +with patch('charmhelpers.core.hookenv.config') as config: + config.return_value = 'nova' + import nova_cc_utils as utils # noqa + +_reg = utils.register_configs +_map = utils.restart_map + +utils.register_configs = MagicMock() +utils.restart_map = MagicMock() + +with patch('nova_cc_utils.guard_map') as gmap: + with patch('charmhelpers.core.hookenv.config') as config: + config.return_value = False + gmap.return_value = {} + import openstack_upgrade + +utils.register_configs = _reg +utils.restart_map = _map + +from test_utils import ( + CharmTestCase +) + +TO_PATCH = [ + 'do_openstack_upgrade', + 'relation_ids', + 'neutron_api_relation_joined', + 'config_changed', +] + + +class TestNovaCCUpgradeActions(CharmTestCase): + + def setUp(self): + super(TestNovaCCUpgradeActions, self).setUp(openstack_upgrade, + TO_PATCH) + + @patch('charmhelpers.contrib.openstack.utils.config') + @patch('charmhelpers.contrib.openstack.utils.action_set') + @patch('charmhelpers.contrib.openstack.utils.git_install_requested') + @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') + def test_openstack_upgrade_true(self, upgrade_avail, git_requested, + action_set, config): + git_requested.return_value = False + upgrade_avail.return_value = True + config.return_value = True + self.relation_ids.return_value = ['relid1'] + + openstack_upgrade.openstack_upgrade() + + self.assertTrue(self.do_openstack_upgrade.called) + self.assertTrue( + self.neutron_api_relation_joined.called_with(rid='relid1', + remote_restart=True)) + self.assertTrue(self.config_changed.called) + + @patch('charmhelpers.contrib.openstack.utils.config') + @patch('charmhelpers.contrib.openstack.utils.action_set') + @patch('charmhelpers.contrib.openstack.utils.git_install_requested') + @patch('charmhelpers.contrib.openstack.utils.openstack_upgrade_available') + def test_openstack_upgrade_false(self, upgrade_avail, git_requested, + action_set, config): + git_requested.return_value = False + upgrade_avail.return_value = True + config.return_value = False + + openstack_upgrade.openstack_upgrade() + + self.assertFalse(self.do_openstack_upgrade.called) + self.assertFalse(self.config_changed.called) From 0bc90edd3ca9cc42173658ff54661be356ca17df Mon Sep 17 00:00:00 2001 From: Edward Hope-Morley Date: Thu, 24 Sep 2015 17:32:02 +0100 Subject: [PATCH 36/36] [hopem,r=] No more trusty-backports for haproxy >= Liberty Closes-Bug: 1499435 --- hooks/nova_cc_utils.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index 121e2dc1..f24fbcdf 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -1056,12 +1056,11 @@ def setup_ipv6(): raise Exception("IPv6 is not supported in the charms for Ubuntu " "versions less than Trusty 14.04") - # NOTE(xianghui): Need to install haproxy(1.5.3) from trusty-backports - # to support ipv6 address, so check is required to make sure not - # breaking other versions, IPv6 only support for >= Trusty - if ubuntu_rel == 'trusty': - add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports' - ' main') + # Need haproxy >= 1.5.3 for ipv6 so for Trusty if we are <= Kilo we need to + # use trusty-backports otherwise we can use the UCA. + if ubuntu_rel == 'trusty' and os_release('nova-api') < 'liberty': + add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports ' + 'main') apt_update() apt_install('haproxy/trusty-backports', fatal=True)
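
A side note on the release comparison in the hunk above (illustrative, not
part of the patch): os_release() yields a lower-case OpenStack code name,
and the code names happen to sort alphabetically in release order, so the
plain string comparison against 'liberty' correctly separates Icehouse,
Juno and Kilo from Liberty and later. A minimal sketch of the same check,
with the helper name needs_trusty_backports chosen purely for illustration:

    def needs_trusty_backports(ubuntu_rel, os_rel):
        # haproxy >= 1.5.3 (required for IPv6 support) is only available
        # from trusty-backports when running a release earlier than Liberty.
        return ubuntu_rel == 'trusty' and os_rel < 'liberty'

    assert needs_trusty_backports('trusty', 'kilo')
    assert not needs_trusty_backports('trusty', 'liberty')
    assert not needs_trusty_backports('xenial', 'mitaka')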