diff --git a/nova/conf/service.py b/nova/conf/service.py
index aa8688e95548..8880993ea49b 100644
--- a/nova/conf/service.py
+++ b/nova/conf/service.py
@@ -1,7 +1,4 @@
-# needs:fix_opt_description
 # needs:check_deprecation_status
-# needs:fix_opt_description_indentation
-# needs:fix_opt_registration_consistency
 
 
 # Copyright 2015 OpenStack Foundation
@@ -22,79 +19,180 @@ from oslo_config import cfg
 
 
 service_opts = [
+    # TODO(johngarbutt) we need a better default and minimum, in a backwards
+    # compatible way, for report_interval
     cfg.IntOpt('report_interval',
                default=10,
-               help='Seconds between nodes reporting state to datastore'),
+               help="""
+Number of seconds indicating how frequently the state of services on a
+given hypervisor is reported. Nova needs to know this to determine the
+overall health of the deployment.
+
+Related Options:
+
+* service_down_time
+  report_interval should be less than service_down_time. If service_down_time
+  is less than report_interval, services will routinely be considered down,
+  because they report in too rarely.
+"""),
+    # TODO(johngarbutt) the code enforces a minimum at runtime, but we could
+    # do with declaring a min value here too, once we sort out report_interval
+    cfg.IntOpt('service_down_time',
+               default=60,
+               help="""
+Maximum time in seconds since last check-in for an up service.
+
+Each compute node periodically updates its database status based on the
+specified report interval. If the compute node hasn't updated the status
+for more than service_down_time, then the compute node is considered down.
+
+Related Options:
+
+* report_interval (service_down_time should not be less than report_interval)
+"""),
+    # TODO(macsz) deprecate periodic_enable; setting it to False causes nova
+    # to fail
     cfg.BoolOpt('periodic_enable',
                 default=True,
-                help='Enable periodic tasks'),
+                help="""
+Enable periodic tasks.
+
+If set to true, this option allows services to periodically run tasks
+on the manager.
+"""),
     cfg.IntOpt('periodic_fuzzy_delay',
                default=60,
-               help='Range of seconds to randomly delay when starting the'
-                    ' periodic task scheduler to reduce stampeding.'
-                    ' (Disable by setting to 0)'),
+               min=0,
+               help="""
+Number of seconds to randomly delay when starting the periodic task
+scheduler to reduce stampeding.
+
+When compute workers are restarted in unison across a cluster, they all
+end up running the periodic tasks at the same time, causing problems for
+the external services. To mitigate this behavior, the periodic_fuzzy_delay
+option allows you to introduce a random initial delay when starting the
+periodic task scheduler.
+
+Possible Values:
+
+* Any positive integer (in seconds)
+* 0: disable the random delay
+"""),
     cfg.ListOpt('enabled_apis',
+                item_type=cfg.types.String(choices=['osapi_compute',
+                                                    'metadata']),
                 default=['osapi_compute', 'metadata'],
-                help='A list of APIs to enable by default'),
+                help="List of APIs to be enabled by default."),
     cfg.ListOpt('enabled_ssl_apis',
                 default=[],
-                help='A list of APIs with enabled SSL'),
+                help="""
+List of APIs with enabled SSL.
+
+Nova provides SSL support for the API servers. The enabled_ssl_apis option
+allows configuring the SSL support.
+"""),
     cfg.StrOpt('osapi_compute_listen',
                default="0.0.0.0",
-               help='The IP address on which the OpenStack API will listen.'),
+               help="""
+IP address on which the OpenStack API will listen.
+
+The OpenStack API service listens on this IP address for incoming
+requests.
+"""),
     cfg.PortOpt('osapi_compute_listen_port',
                 default=8774,
-                help='The port on which the OpenStack API will listen.'),
+                help="""
+Port on which the OpenStack API will listen.
+
+The OpenStack API service listens on this port number for incoming
+requests.
+"""),
     cfg.IntOpt('osapi_compute_workers',
-               help='Number of workers for OpenStack API service. The default '
-                    'will be the number of CPUs available.'),
+               min=1,
+               help="""
+Number of workers for OpenStack API service. The default will be the number
+of CPUs available.
+
+The OpenStack API service can be configured to run as multi-process
+(workers). This overcomes the problem of reduced throughput when API
+request concurrency increases. The OpenStack API service will run in the
+specified number of processes.
+
+Possible Values:
+
+* Any positive integer
+* None (default value)
+"""),
     cfg.StrOpt('metadata_manager',
                default='nova.api.manager.MetadataManager',
-               help='DEPRECATED: OpenStack metadata service manager',
-               deprecated_for_removal=True),
+               deprecated_for_removal=True,
+               help="Full class name for the service metadata manager."),
     cfg.StrOpt('metadata_listen',
                default="0.0.0.0",
-               help='The IP address on which the metadata API will listen.'),
+               help="""
+IP address on which the metadata API will listen.
+
+The metadata API service listens on this IP address for incoming
+requests.
+"""),
     cfg.PortOpt('metadata_listen_port',
                 default=8775,
-                help='The port on which the metadata API will listen.'),
+                help="""
+Port on which the metadata API will listen.
+
+The metadata API service listens on this port number for incoming
+requests.
+"""),
     cfg.IntOpt('metadata_workers',
-               help='Number of workers for metadata service. The default will '
-                    'be the number of CPUs available.'),
+               min=1,
+               help="""
+Number of workers for metadata service. If not specified, the number of
+available CPUs will be used.
+
+The metadata service can be configured to run as multi-process (workers).
+This overcomes the problem of reduced throughput when API request
+concurrency increases. The metadata service will run in the specified
+number of processes.
+
+Possible Values:
+
+* Any positive integer
+* None (default value)
+"""),
     # NOTE(sdague): Ironic is still using this facility for their HA
     # manager. Ensure they are sorted before removing this.
     cfg.StrOpt('compute_manager',
                default='nova.compute.manager.ComputeManager',
-               help='DEPRECATED: Full class name for the Manager for compute',
-               deprecated_for_removal=True),
+               deprecated_for_removal=True,
+               help="Full class name for the Manager for compute."),
     cfg.StrOpt('console_manager',
                default='nova.console.manager.ConsoleProxyManager',
-               help='DEPRECATED: Full class name for the Manager for '
-                    'console proxy',
-               deprecated_for_removal=True),
+               deprecated_for_removal=True,
+               help="Full class name for the Manager for console proxy."),
     cfg.StrOpt('consoleauth_manager',
                default='nova.consoleauth.manager.ConsoleAuthManager',
-               help='DEPRECATED: Manager for console auth',
-               deprecated_for_removal=True),
+               deprecated_for_removal=True,
+               help="Full class name for the Manager for console auth."),
     cfg.StrOpt('cert_manager',
                default='nova.cert.manager.CertManager',
-               help='DEPRECATED: Full class name for the Manager for cert',
-               deprecated_for_removal=True),
+               deprecated_for_removal=True,
+               help="Full class name for the Manager for cert."),
     # NOTE(sdague): the network_manager has a bunch of different in
     # tree classes that are still legit options. In Newton we should
     # turn this into a selector.
     cfg.StrOpt('network_manager',
+               choices=[
+                   'nova.network.manager.FlatManager',
+                   'nova.network.manager.FlatDHCPManager',
+                   'nova.network.manager.VlanManager',
+               ],
                default='nova.network.manager.VlanManager',
-               help='Full class name for the Manager for network'),
+               help="Full class name for the Manager for network."),
     cfg.StrOpt('scheduler_manager',
                default='nova.scheduler.manager.SchedulerManager',
-               help='DEPRECATED: Full class name for the Manager for '
-                    'scheduler',
-               deprecated_for_removal=True),
-    cfg.IntOpt('service_down_time',
-               default=60,
-               help='Maximum time since last check-in for up service'),
-    ]
+               deprecated_for_removal=True,
+               help="Full class name for the Manager for scheduler."),
+]
 
 
 def register_opts(conf):
diff --git a/nova/tests/unit/network/test_api.py b/nova/tests/unit/network/test_api.py
index 328b94d50843..9a5e48664778 100644
--- a/nova/tests/unit/network/test_api.py
+++ b/nova/tests/unit/network/test_api.py
@@ -67,7 +67,7 @@ class ApiTestCase(test.TestCase):
 
     @mock.patch('nova.objects.NetworkList.get_all')
     def test_get_all_liberal(self, mock_get_all):
-        self.flags(network_manager='nova.network.manager.FlatDHCPManaager')
+        self.flags(network_manager='nova.network.manager.FlatDHCPManager')
         mock_get_all.return_value = mock.sentinel.get_all
         self.assertEqual(mock.sentinel.get_all,
                          self.network_api.get_all(self.context))
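
The report_interval / service_down_time relationship spelled out in the new help
text reduces to a simple elapsed-time check. Below is a minimal, hypothetical
sketch of that rule, not nova's actual servicegroup driver; is_service_up and
the module-level constants are invented here for illustration:

from datetime import datetime, timedelta

# Assumed values mirroring the option defaults above.
REPORT_INTERVAL = 10     # seconds between service heartbeats
SERVICE_DOWN_TIME = 60   # max seconds since last heartbeat before "down"

def is_service_up(last_heartbeat, now=None):
    """Return True if the service checked in recently enough.

    A service counts as down once more than SERVICE_DOWN_TIME seconds
    have elapsed since its last report. With the defaults (10 and 60),
    a service can miss a few heartbeats before being flagged; if
    REPORT_INTERVAL exceeded SERVICE_DOWN_TIME, every service would be
    reported down between check-ins.
    """
    now = now or datetime.utcnow()
    return now - last_heartbeat <= timedelta(seconds=SERVICE_DOWN_TIME)

print(is_service_up(datetime.utcnow() - timedelta(seconds=30)))  # True
print(is_service_up(datetime.utcnow() - timedelta(seconds=90)))  # False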
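
The min= and item_type= arguments introduced by this change are enforced by
oslo.config itself rather than by nova code. A quick standalone demonstration
of that validation behavior (assuming a reasonably recent oslo.config; the
exact error messages vary between releases):

from oslo_config import types

# An integer with a floor, as used for periodic_fuzzy_delay (min=0) and
# the two worker counts (min=1); the type object both converts and
# validates the raw string value.
nonneg = types.Integer(min=0)
print(nonneg('60'))          # -> 60
try:
    nonneg('-5')             # below the declared minimum
except ValueError as exc:
    print('rejected:', exc)

# A string constrained to the known API names, as used for enabled_apis.
api_name = types.String(choices=['osapi_compute', 'metadata'])
print(api_name('metadata'))  # -> 'metadata'
try:
    api_name('ec2')          # not one of the declared choices
except ValueError as exc:
    print('rejected:', exc)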
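
Since the module keeps the register_opts(conf) entry point shown at the end of
the first hunk, the cleaned-up options can also be exercised end to end. A
sketch under the assumption that nova is importable and the module path stays
nova/conf/service.py:

from oslo_config import cfg

from nova.conf import service as service_conf

conf = cfg.ConfigOpts()
service_conf.register_opts(conf)
conf([])  # parse an empty argv so option defaults become readable

print(conf.report_interval)    # 10
print(conf.service_down_time)  # 60
print(conf.enabled_apis)       # ['osapi_compute', 'metadata']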