From b5b6966f6dfb7e27c9cfdd7ba13596928ebb8436 Mon Sep 17 00:00:00 2001 From: Hemanth Nakkina Date: Thu, 26 Mar 2020 17:40:40 +0530 Subject: [PATCH] Add new config option to set [quota].count_usage_from_placement New config option count_usage_from_placement is added in Nova from Train release to enable/disable counting of quota usage from placement service. Corresponding config parameter is required in nova-cloud-controller charm. This patch introduces quota-count-usage-from-placement config parameter in nova-cloud-controller charm. For openstack releases train or above, this option is rendered in nova.conf for nova-cloud-controller units. func-test-pr: https://github.com/openstack-charmers/zaza-openstack-tests/pull/250 Change-Id: I57b9335b7b6aecb8610a66a59cb2e4e506e76a5e Closes-Bug: #1864859 --- charmhelpers/contrib/openstack/utils.py | 20 +- config.yaml | 13 +- hooks/nova_cc_context.py | 2 + hooks/nova_cc_utils.py | 14 +- templates/train/nova.conf | 278 ++++++++++++++++++++++++ unit_tests/test_nova_cc_contexts.py | 2 + 6 files changed, 326 insertions(+), 3 deletions(-) create mode 100644 templates/train/nova.conf diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index 2b522eac..16fbd51f 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -963,6 +963,7 @@ def _determine_os_workload_status( @param ports: OPTIONAL list of port numbers. 
@returns state, message: the new workload status, user message """ + messages = [] state, message = _ows_check_if_paused(services, ports) if state is None: @@ -973,14 +974,24 @@ def _determine_os_workload_status( # _ows_check_charm_func() may modify the state, message state, message = _ows_check_charm_func( state, message, lambda: charm_func(configs)) + if message is not None: + messages.append(message) if state is None: state, message = _ows_check_services_running(services, ports) + if message is not None: + messages.append(message) if state is None: state = 'active' - message = "Unit is ready" + if not messages: + message = "Unit is ready" + else: + warning_message = '; '.join(messages) + message = 'Unit is ready; {}'.format(warning_message) juju_log(message, 'INFO') + else: + message = '; '.join(messages) try: if config(POLICYD_CONFIG_NAME): @@ -1130,6 +1141,13 @@ def _ows_check_charm_func(state, message, charm_func_with_configs): message = "{}, {}".format(message, charm_message) else: message = charm_message + + if charm_state == 'unknown' and charm_message != '': + if message: + message = "{}, {}".format(message, charm_message) + else: + message = charm_message + return state, message diff --git a/config.yaml b/config.yaml index 16fcd31a..0208321c 100644 --- a/config.yaml +++ b/config.yaml @@ -583,6 +583,17 @@ options: and before . Possible Values are positive integers or 0 and -1 to disable the quota. + quota-count-usage-from-placement: + type: boolean + default: False + description: | + Setting this to True, enables the counting of quota usage from the + placement service. + . + By default, the parameter is False and Nova will count quota usage for + instances, cores, and ram from its cell databases. + . + This is only supported on OpenStack Train or later releases. use-policyd-override: type: boolean default: False @@ -655,4 +666,4 @@ options: If that AZ is not in Cinder, the volume create request will fail and the instance will fail the build request. . 
- By default there is no availability zone restriction on volume attach. \ No newline at end of file + By default there is no availability zone restriction on volume attach. diff --git a/hooks/nova_cc_context.py b/hooks/nova_cc_context.py index 7879bf55..109e3248 100644 --- a/hooks/nova_cc_context.py +++ b/hooks/nova_cc_context.py @@ -408,6 +408,8 @@ class NovaConfigContext(ch_context.WorkerConfigContext): ctxt['quota_server_groups'] = hookenv.config('quota-server-groups') ctxt['quota_server_group_members'] = hookenv.config( 'quota-server-group-members') + ctxt['quota_count_usage_from_placement'] = hookenv.config( + 'quota-count-usage-from-placement') ctxt['console_access_protocol'] = hookenv.config( 'console-access-protocol') ctxt['console_access_port'] = hookenv.config('console-access-port') diff --git a/hooks/nova_cc_utils.py b/hooks/nova_cc_utils.py index fd1cc982..fcb68868 100644 --- a/hooks/nova_cc_utils.py +++ b/hooks/nova_cc_utils.py @@ -1707,9 +1707,21 @@ def check_optional_relations(configs): 'hacluster missing configuration: ' 'vip, vip_iface, vip_cidr') + if cmp_cur_os_rel < 'train' and hookenv.config( + 'quota-count-usage-from-placement'): + hookenv.log( + 'quota-count-usage-from-placement not supported in {} release'. + format(cmp_cur_os_rel), + level=hookenv.ERROR) + return ( + 'unknown', + 'WARN: Reset the configuration quota-count-usage-from-placement to ' + 'false, this configuration is only available for releases>=Train' + ) + # return 'unknown' as the lowest priority to not clobber an existing # status. - return "unknown", "" + return "unknown", None def assess_status(configs): diff --git a/templates/train/nova.conf b/templates/train/nova.conf new file mode 100644 index 00000000..c9017aa5 --- /dev/null +++ b/templates/train/nova.conf @@ -0,0 +1,278 @@ +# train +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. 
+############################################################################### +[DEFAULT] +verbose={{ verbose }} +debug={{ debug }} +dhcpbridge_flagfile=/etc/nova/nova.conf +dhcpbridge=/usr/bin/nova-dhcpbridge +logdir=/var/log/nova +state_path=/var/lib/nova +iscsi_helper=tgtadm +libvirt_use_virtio_for_bridges=True +connection_type=libvirt +root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf +volumes_path=/var/lib/nova/volumes +enabled_apis=osapi_compute,metadata +compute_driver=libvirt.LibvirtDriver +use_ipv6 = {{ use_ipv6 }} +osapi_compute_listen = {{ bind_host }} +{% if unique_server_names -%} +osapi_compute_unique_unique_server_names = {{ unique_server_names }} +{% endif -%} +metadata_host = {{ bind_host }} +s3_listen = {{ bind_host }} +enable_new_services = {{ enable_new_services }} + +{% if debug -%} +default_log_levels = "amqp=WARN, amqplib=WARN, boto=WARN, qpid=WARN, sqlalchemy=WARN, suds=INFO, oslo.messaging=INFO, oslo_messaging=DEBUG, iso8601=WARN, requests.packages.urllib3.connectionpool=WARN, urllib3.connectionpool=WARN, websocket=WARN, requests.packages.urllib3.util.retry=WARN, urllib3.util.retry=WARN, keystonemiddleware=WARN, routes.middleware=WARN, stevedore=WARN, taskflow=WARN, keystoneauth=WARN, oslo.cache=INFO, dogpile.core.dogpile=INFO, glanceclient=WARN, oslo.privsep.daemon=INFO" +glance.debug = True +{% endif -%} + +{% if transport_url %} +transport_url = {{ transport_url }} +{% endif %} + +{% if dns_domain -%} +# Per LP#1805645, dhcp_domain needs to be configured for nova-metadata-api +# It gets this information from neutron. 
+dhcp_domain = {{ dns_domain }} +{% endif -%} + +osapi_compute_workers = {{ workers }} + +cpu_allocation_ratio = {{ cpu_allocation_ratio }} +ram_allocation_ratio = {{ ram_allocation_ratio }} +disk_allocation_ratio = {{ disk_allocation_ratio }} + +use_syslog={{ use_syslog }} +my_ip = {{ host_ip }} + +{% include "parts/novnc" %} + +{% if rbd_pool -%} +rbd_pool = {{ rbd_pool }} +rbd_user = {{ rbd_user }} +rbd_secret_uuid = {{ rbd_secret_uuid }} +{% endif -%} + +{% if neutron_plugin and neutron_plugin in ('ovs', 'midonet') -%} +libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver +libvirt_user_virtio_for_bridges = True +{% if neutron_security_groups -%} +security_group_api = {{ network_manager }} +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} +{% if external_network -%} +default_floating_pool = {{ external_network }} +{% endif -%} +{% endif -%} + +{% if neutron_plugin and neutron_plugin == 'vsp' -%} +neutron_ovs_bridge = alubr0 +{% endif -%} + +{% if neutron_plugin and neutron_plugin == 'nvp' -%} +security_group_api = neutron +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% if external_network -%} +default_floating_pool = {{ external_network }} +{% endif -%} +{% endif -%} + +{% if neutron_plugin and neutron_plugin == 'Calico' -%} +security_group_api = neutron +nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} + +{% if neutron_plugin and neutron_plugin == 'plumgrid' -%} +security_group_api=neutron +firewall_driver = nova.virt.firewall.NoopFirewallDriver +{% endif -%} + +{% if network_manager_config -%} +{% for key, value in network_manager_config.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if network_manager and network_manager == 'neutron' -%} +network_api_class = nova.network.neutronv2.api.API +{% else -%} +network_manager = nova.network.manager.FlatDHCPManager +{% endif -%} + +{% if default_floating_pool -%} +default_floating_pool = {{ 
default_floating_pool }} +{% endif -%} + +{% if volume_service -%} +volume_api_class=nova.volume.cinder.API +{% endif -%} + +{% if user_config_flags -%} +{% for key, value in user_config_flags.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if listen_ports -%} +{% for key, value in listen_ports.items() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if sections and 'DEFAULT' in sections -%} +{% for key, value in sections['DEFAULT'] -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif %} + +[upgrade_levels] +compute = auto + +{% include "section-zeromq" %} + +{% include "parts/database-v2" %} + +{% include "parts/database-api" %} + +{% if glance_api_servers -%} +[glance] +api_servers = {{ glance_api_servers }} +{% endif -%} + +{% if network_manager and network_manager == 'neutron' %} +{% include "parts/section-neutron" %} +{% endif %} + +{% include "section-keystone-authtoken-mitaka" %} + +{% include "parts/section-cinder" %} + +[osapi_v3] +enabled=True + +{% include "parts/cell" %} + +[conductor] +workers = {{ workers }} + +{% include "section-oslo-messaging-rabbit" %} + +{% include "section-oslo-notifications" %} + +[oslo_concurrency] +lock_path=/var/lock/nova + +[vnc] +{% if console_access_port and console_access_protocol == 'novnc' -%} +novncproxy_port = {{ console_access_port }} +{% endif %} +{% if console_access_port and console_access_protocol == 'xvpvnc' -%} +xvpvncproxy_port = {{ console_access_port }} +{% endif %} + +[spice] +{% include "parts/spice" %} +{% if console_access_port and console_access_protocol == 'spice' -%} +html5proxy_port = {{ console_access_port }} +{% endif %} + +{% include "parts/section-serial-console" %} + +{% if memcached_servers %} +[cache] +enabled = true +backend = oslo_cache.memcache_pool +memcache_servers = {{ memcached_servers }} +{% endif %} + +{% include "section-placement" %} + +[scheduler] +# NOTE(jamespage): perform automatic host cell mapping +# until we can orchestrate this better 
+# using the nova-cc <--> nova-compute +# relation +discover_hosts_in_cells_interval = 30 + +[filter_scheduler] +{% if additional_neutron_filters is defined %} +enabled_filters = {{ scheduler_default_filters }},{{ additional_neutron_filters }} +{% else %} +enabled_filters = {{ scheduler_default_filters }} +{% endif %} + +# Disable BuildFailureWeigher as any failed build will result +# in a very low weighting for the hypervisor, resulting in +# instances all being scheduled to hypervisors with no build +# failures. +# https://bugs.launchpad.net/charm-nova-cloud-controller/+bug/1818239 +build_failure_weight_multiplier = 0.0 + +{%- if scheduler_host_subset_size %} +host_subset_size = {{ scheduler_host_subset_size }} +{%- endif %} + +[api] +auth_strategy=keystone +{% if vendor_data or vendor_data_url -%} +vendordata_providers = {{ vendordata_providers }} +{% if vendor_data -%} +vendordata_jsonfile_path = /etc/nova/vendor_data.json +{% endif -%} +{% if vendor_data_url -%} +vendordata_dynamic_targets = {{ vendor_data_url }} +{% endif -%} +{% endif -%} + +[wsgi] +api_paste_config=/etc/nova/api-paste.ini + +[pci] +{% if pci_alias %} +alias = {{ pci_alias }} +{% endif %} +{% for alias in pci_aliases -%} +alias = {{ alias }} +{% endfor -%} + +{% include "section-oslo-middleware" %} + +[quota] +{% if quota_instances is not none -%} +instances = {{ quota_instances }} +{% endif -%} +{% if quota_cores is not none -%} +cores = {{ quota_cores }} +{% endif -%} +{% if quota_ram is not none -%} +ram = {{ quota_ram }} +{% endif -%} +{% if quota_metadata_items is not none -%} +metadata_items = {{ quota_metadata_items }} +{% endif -%} +{% if quota_injected_files is not none -%} +injected_files = {{ quota_injected_files }} +{% endif -%} +{% if quota_injected_file_content_bytes is not none -%} +injected_file_content_bytes = {{ quota_injected_file_content_bytes }} +{% endif -%} +{% if quota_injected_file_path_length is not none -%} +injected_file_path_length = {{ 
quota_injected_file_path_length }} +{% endif -%} +{% if quota_key_pairs is not none -%} +key_pairs = {{ quota_key_pairs }} +{% endif -%} +{% if quota_server_groups is not none -%} +server_groups = {{ quota_server_groups }} +{% endif -%} +{% if quota_server_group_members is not none -%} +server_group_members = {{ quota_server_group_members }} +{% endif -%} +{% if quota_count_usage_from_placement is sameas true -%} +count_usage_from_placement = {{ quota_count_usage_from_placement }} +{% endif -%} diff --git a/unit_tests/test_nova_cc_contexts.py b/unit_tests/test_nova_cc_contexts.py index 34616cfc..520a12af 100644 --- a/unit_tests/test_nova_cc_contexts.py +++ b/unit_tests/test_nova_cc_contexts.py @@ -374,6 +374,8 @@ class NovaComputeContextTests(CharmTestCase): self.assertEqual(ctxt['quota_server_group_members'], self.config('quota-server-group-members')) self.assertEqual(ctxt['quota_server_group_members'], None) + self.assertEqual(ctxt['quota_count_usage_from_placement'], + self.config('quota-count-usage-from-placement')) self.assertEqual(ctxt['enable_new_services'], self.config('enable-new-services')) self.assertEqual(ctxt['console_access_protocol'],