diff --git a/ci/README.rst b/ci/README.rst index 44e8626d88..666e711026 100644 --- a/ci/README.rst +++ b/ci/README.rst @@ -7,5 +7,5 @@ purpose or for development usage. They should not be used in production and we don't guarantee they work outside TripleO CI. -For more informations about TripleO CI, please look: +For more information about TripleO CI, please look: https://github.com/openstack-infra/tripleo-ci diff --git a/container_config_scripts/nova_statedir_ownership.py b/container_config_scripts/nova_statedir_ownership.py index ce7f0d647d..2978a32f4d 100644 --- a/container_config_scripts/nova_statedir_ownership.py +++ b/container_config_scripts/nova_statedir_ownership.py @@ -94,7 +94,7 @@ class PathManager(object): gid) def chcon(self, context): - # If dir returns whether to recusively set context + # If dir returns whether to recursively set context try: try: selinux.lsetfilecon(self.path, context) diff --git a/container_config_scripts/pacemaker_mutex_restart_bundle.sh b/container_config_scripts/pacemaker_mutex_restart_bundle.sh index ab76ce11c2..063b7e3c8f 100755 --- a/container_config_scripts/pacemaker_mutex_restart_bundle.sh +++ b/container_config_scripts/pacemaker_mutex_restart_bundle.sh @@ -59,7 +59,7 @@ else promoted_role="Master" fi -# The lock TTL should accomodate for the resource start/promote timeout +# The lock TTL should accommodate for the resource start/promote timeout if [ "$RESOURCE_NAME" != "$BUNDLE_NAME" ]; then if [ "$WAIT_TARGET_LOCAL" = "$promoted_role" ] || [ "$WAIT_TARGET_ANYWHERE" = "$promoted_role" ]; then rsc_op="promote" diff --git a/container_config_scripts/pacemaker_mutex_shutdown.sh b/container_config_scripts/pacemaker_mutex_shutdown.sh index 9de8f3a90c..1145c89b80 100755 --- a/container_config_scripts/pacemaker_mutex_shutdown.sh +++ b/container_config_scripts/pacemaker_mutex_shutdown.sh @@ -105,7 +105,7 @@ if [ $rc -ne 0 ]; then fi fi -# We start with a very high TTL, that long enough to accomodate a cluster stop. +# We start with a very high TTL, that long enough to accommodate a cluster stop. # As soon as the node will get offline, the other competing node will be entitled # to steal the lock, so they should never wait that long in practice. LOCK_TTL=600 diff --git a/container_config_scripts/pacemaker_wait_bundle.sh b/container_config_scripts/pacemaker_wait_bundle.sh index 379df91d0e..93335aabdf 100755 --- a/container_config_scripts/pacemaker_wait_bundle.sh +++ b/container_config_scripts/pacemaker_wait_bundle.sh @@ -16,7 +16,7 @@ # - the purpose of this script is to ensure that restarting the # service replica locally won't disrupt the service availability # for the end user. To reach that goal, the script waits until the -# service is restarted locally or globallu and reaches a given +# service is restarted locally or globally and reaches a given # target state (i.e. Started, Slave or Master). # design note 2: # - we don't want to track restart error: our only job is to ensure diff --git a/deployment/README.rst b/deployment/README.rst index f9cee86e04..7ab6c21f9f 100644 --- a/deployment/README.rst +++ b/deployment/README.rst @@ -110,7 +110,7 @@ are available for containerized services. * container_puppet_tasks: This section provides data to drive the puppet containers tooling directly. The task is executed for the defined steps before the corresponding docker_config's step. Puppet - always sees the step number overrided as the step #6. It might be useful + always sees the step number overridden as the step #6. 
It might be useful for initialization of things. Note that the tasks are executed only once for the bootstrap node per a role in the cluster. Make sure the puppet manifest ensures the wanted @@ -183,7 +183,7 @@ Each service may define its own input parameters and defaults. Operators will use the parameter_defaults section of any Heat environment to set per service parameters. -Apart from sevice specific inputs, there are few default parameters for all +Apart from service specific inputs, there are few default parameters for all the services. Following are the list of default parameters: * ServiceData: Mapping of service specific data. It is used to encapsulate @@ -266,7 +266,7 @@ the services. When running an OS upgrade via the tags `system_upgrade_prepare` and `system_upgrade_run`, or the combined tag `system_upgrade`, the steps -corellate to the following: +correlate to the following: 1) Any pre-service-stop actions. (`system_upgrade_prepare`) diff --git a/deployment/cephadm/ceph-base.yaml b/deployment/cephadm/ceph-base.yaml index 960f334811..3b047c806d 100644 --- a/deployment/cephadm/ceph-base.yaml +++ b/deployment/cephadm/ceph-base.yaml @@ -136,7 +136,7 @@ parameters: cinder2_store: GlanceBackend: cinder GlanceCinderVolumeType: 'volume-type-2' - GlanceStoreDescription: 'Seconde cinder store' + GlanceStoreDescription: 'Second cinder store' GlanceRbdPoolName: default: images type: string @@ -288,9 +288,9 @@ parameters: CephOsdSpec: description: | If CephDynamicSpec is true, then any valid OSD service specification set in - CephOsdSpec will appear in the genereated Ceph spec for the 'osd' service_type. + CephOsdSpec will appear in the generated Ceph spec for the 'osd' service_type. Replaces CephAnsibleDisksConfig. This parameter has no effect if CephDynamicSpec - is false. Use this paramter to override the default of using all available block + is false. Use this parameter to override the default of using all available block devices as data_devices. See the Ceph documentation for cephadm drivegroups. Exclude service_type, service_id, and placement from this parameter. In the example below all rotating devices will be data devices and all non-rotating @@ -362,7 +362,7 @@ parameters: default: false type: boolean description: | - Use the default continer defined in cephadm instead of + Use the default container defined in cephadm instead of the one defined in container_image_prepare_defaults.yaml. parameter_groups: diff --git a/deployment/cephadm/ceph-osd.yaml b/deployment/cephadm/ceph-osd.yaml index f5565df882..81ef520158 100644 --- a/deployment/cephadm/ceph-osd.yaml +++ b/deployment/cephadm/ceph-osd.yaml @@ -68,7 +68,7 @@ parameters: has data_devices/rotational=1 and db_devices/rotational=0, then set CephHciOsdType to 'hdd', since only the DB/WAL are on SSD. If an OSD data device is an SSD or NVMe SSD, then set the type accordingly so - that the derive paramters module can allocate the optimal number of + that the derive parameters module can allocate the optimal number of CPUs per device. resources: diff --git a/deployment/cinder/cinder-backend-dellemc-powermax-puppet.yaml b/deployment/cinder/cinder-backend-dellemc-powermax-puppet.yaml index 9e267229d2..e0a9dd3b18 100644 --- a/deployment/cinder/cinder-backend-dellemc-powermax-puppet.yaml +++ b/deployment/cinder/cinder-backend-dellemc-powermax-puppet.yaml @@ -23,7 +23,7 @@ parameters: CinderPowermaxBackendName: type: comma_delimited_list default: 'tripleo_dellemc_powermax' - description: A list of Cinder PoweMax Storage backend names. 
+ description: A list of Cinder PowerMax Storage backend names. CinderPowermaxMultiConfig: type: json default: {} diff --git a/deployment/cinder/cinder-backend-pure-puppet.yaml b/deployment/cinder/cinder-backend-pure-puppet.yaml index 811b89a633..9978bd01f4 100644 --- a/deployment/cinder/cinder-backend-pure-puppet.yaml +++ b/deployment/cinder/cinder-backend-pure-puppet.yaml @@ -86,7 +86,7 @@ parameters: 'oracle-vm-server', 'solaris', 'vms', ''] description: Determines how the Purity system tunes the protocol used between the array and the initiator. - A blank string will default to no Host Pesonality. + A blank string will default to no Host Personality. CinderPureIscsiCidr: type: string default: '0.0.0.0/0' diff --git a/deployment/cinder/cinder-backup-pacemaker-puppet.yaml b/deployment/cinder/cinder-backup-pacemaker-puppet.yaml index 647ec1c1f6..3fb6421854 100644 --- a/deployment/cinder/cinder-backup-pacemaker-puppet.yaml +++ b/deployment/cinder/cinder-backup-pacemaker-puppet.yaml @@ -262,7 +262,7 @@ outputs: # the shell module instead. # TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check openstack-cinder-backup cluster resource status shell: pcs resource config openstack-cinder-backup failed_when: false diff --git a/deployment/cinder/cinder-volume-pacemaker-puppet.yaml b/deployment/cinder/cinder-volume-pacemaker-puppet.yaml index a903d6d592..e87775ad60 100644 --- a/deployment/cinder/cinder-volume-pacemaker-puppet.yaml +++ b/deployment/cinder/cinder-volume-pacemaker-puppet.yaml @@ -251,7 +251,7 @@ outputs: # the shell module instead. # TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check openstack-cinder-volume cluster resource status shell: pcs resource config openstack-cinder-volume changed_when: false diff --git a/deployment/database/mysql-pacemaker-puppet.yaml b/deployment/database/mysql-pacemaker-puppet.yaml index d56862ab97..a0bb667143 100644 --- a/deployment/database/mysql-pacemaker-puppet.yaml +++ b/deployment/database/mysql-pacemaker-puppet.yaml @@ -457,7 +457,7 @@ outputs: # the shell module instead. # TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check galera cluster resource status shell: pcs resource config galera-bundle failed_when: false @@ -578,7 +578,7 @@ outputs: - name: Check and upgrade Mysql database after major version upgrade # Note: during upgrade to Stein, a new pacemaker cluster is recreated, # controller nodes added sequentially to this new cluster, and the upgrade - # workflow (upgrade tasks, deploy/convertge) is ran once per controller. + # workflow (upgrade tasks, deploy/converge) is ran once per controller. # This mysql upgrade block must run only once per controller, before # the controller is added into the cluster (by mysql_init_bundle) and # before pacemaker has a chance to start galera on that controller. diff --git a/deployment/database/redis-pacemaker-puppet.yaml b/deployment/database/redis-pacemaker-puppet.yaml index d8ecd6144b..9aa533c081 100644 --- a/deployment/database/redis-pacemaker-puppet.yaml +++ b/deployment/database/redis-pacemaker-puppet.yaml @@ -378,7 +378,7 @@ outputs: # the shell module instead. 
# TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check redis cluster resource status shell: pcs resource config redis-bundle failed_when: false diff --git a/deployment/deprecated/nova/nova-libvirt-container-puppet.yaml b/deployment/deprecated/nova/nova-libvirt-container-puppet.yaml index 23ba5d7b2c..e5a2f55651 100644 --- a/deployment/deprecated/nova/nova-libvirt-container-puppet.yaml +++ b/deployment/deprecated/nova/nova-libvirt-container-puppet.yaml @@ -109,13 +109,13 @@ parameters: type: boolean default: true description: If set to true and if EnableInternalTLS is enabled, it will - enable TLS transaport for libvirt VNC and configure the + enable TLS transport for libvirt VNC and configure the relevant keys for libvirt. UseTLSTransportForNbd: type: boolean default: true description: If set to true and if EnableInternalTLS is enabled, it will - enable TLS transaport for libvirt NBD and configure the + enable TLS transport for libvirt NBD and configure the relevant keys for libvirt. InternalTLSCAFile: default: '/etc/ipa/ca.crt' @@ -891,7 +891,7 @@ outputs: - { 'path': /var/cache/libvirt } - { 'path': /var/lib/nova, 'setype': container_file_t } - { 'path': /run/libvirt, 'setype': virt_var_run_t } - # qemu user on host will be cretaed by libvirt package install, ensure + # qemu user on host will be created by libvirt package install, ensure # the qemu user created with same uid/gid as like libvirt package. # These specific values are required since ovs is running on host. # Once ovs with DPDK is containerized, we could modify this uid/gid diff --git a/deployment/designate/designate-bind-container.yaml b/deployment/designate/designate-bind-container.yaml index 57c4f8ee6e..f01c938414 100644 --- a/deployment/designate/designate-bind-container.yaml +++ b/deployment/designate/designate-bind-container.yaml @@ -54,7 +54,7 @@ parameters: (e.g. interface on the external network or the VIP interface in standalone). If provided, the number of IPs must match the number of deployed Bind instances. If left empty, - the deployment will create additonal IPs using the external + the deployment will create additional IPs using the external network. type: comma_delimited_list diff --git a/deployment/designate/designate-central-container-puppet.yaml b/deployment/designate/designate-central-container-puppet.yaml index ab2ac9c622..1a85c001c1 100644 --- a/deployment/designate/designate-central-container-puppet.yaml +++ b/deployment/designate/designate-central-container-puppet.yaml @@ -87,7 +87,7 @@ parameters: default: 16000 DesignateExternalBindServers: description: > - Used to configure desginate with bind servers managed + Used to configure designate with bind servers managed externally to the overcloud. Example format: [ { # entry with minimal required values diff --git a/deployment/designate/designate-worker-container-puppet.yaml b/deployment/designate/designate-worker-container-puppet.yaml index c916fb31ab..7548c24c5b 100644 --- a/deployment/designate/designate-worker-container-puppet.yaml +++ b/deployment/designate/designate-worker-container-puppet.yaml @@ -55,7 +55,7 @@ parameters: hidden: true DesignateExternalBindServers: description: > - Used to configure desginate with bind servers managed + Used to configure designate with bind servers managed externally to the overcloud. 
Example format: [ { # entry with minimal required values diff --git a/deployment/experimental/README.rst b/deployment/experimental/README.rst index def30b386a..6e54819bdf 100644 --- a/deployment/experimental/README.rst +++ b/deployment/experimental/README.rst @@ -3,4 +3,4 @@ Experimental services ===================== This directory contains services that are experimental. They can be deployed -but there is no garantee that they are tested and work correctly. +but there is no guarantee that they are tested and work correctly. diff --git a/deployment/frr/frr-container-ansible.yaml b/deployment/frr/frr-container-ansible.yaml index 4e7daa3c3d..89c693e45b 100644 --- a/deployment/frr/frr-container-ansible.yaml +++ b/deployment/frr/frr-container-ansible.yaml @@ -127,7 +127,7 @@ parameters: FrrPacemakerVipNic: default: 'lo' description: Name of the nic that the pacemaker VIPs will be added to when - runninng with FRR. + running with FRR. type: string FrrBgpNeighborTtlSecurityHops: default: 1 diff --git a/deployment/glance/glance-api-container-puppet.yaml b/deployment/glance/glance-api-container-puppet.yaml index ef539e1030..20c1cbfbcf 100644 --- a/deployment/glance/glance-api-container-puppet.yaml +++ b/deployment/glance/glance-api-container-puppet.yaml @@ -266,7 +266,7 @@ parameters: cinder2_store: GlanceBackend: cinder GlanceCinderVolumeType: 'volume-type-2' - GlanceStoreDescription: 'Seconde cinder store' + GlanceStoreDescription: 'Second cinder store' GlanceCinderMountPointBase: default: '/var/lib/glance/mnt' type: string diff --git a/deployment/glance/glance-api-edge-container-puppet.yaml b/deployment/glance/glance-api-edge-container-puppet.yaml index 9a4e65b957..e2f80db428 100644 --- a/deployment/glance/glance-api-edge-container-puppet.yaml +++ b/deployment/glance/glance-api-edge-container-puppet.yaml @@ -54,7 +54,7 @@ resources: outputs: glance_api_edge_uri: - description: URI of the glance-api service runing at the edge site. + description: URI of the glance-api service running at the edge site. value: &glance_api_edge_uri if: - {get_param: EnableInternalTLS} diff --git a/deployment/haproxy/haproxy-edge-container-puppet.yaml b/deployment/haproxy/haproxy-edge-container-puppet.yaml index 2f1102ad2c..1ef21705e8 100644 --- a/deployment/haproxy/haproxy-edge-container-puppet.yaml +++ b/deployment/haproxy/haproxy-edge-container-puppet.yaml @@ -48,7 +48,7 @@ resources: outputs: glance_api_edge_uri: - description: URI of the glance-api service runing at the edge site. + description: URI of the glance-api service running at the edge site. value: &glance_api_edge_uri if: - {get_param: EnableInternalTLS} diff --git a/deployment/haproxy/haproxy-internal-tls-certmonger.j2.yaml b/deployment/haproxy/haproxy-internal-tls-certmonger.j2.yaml index 5c61e5cc2a..88050a5a9a 100644 --- a/deployment/haproxy/haproxy-internal-tls-certmonger.j2.yaml +++ b/deployment/haproxy/haproxy-internal-tls-certmonger.j2.yaml @@ -58,7 +58,7 @@ resources: # * The 'ctlplane' network is always included. # * The tenant network is skipped in jinja2 filter since it # does not have a VIP. We don't need a certificate for the - # tenant nework. + # tenant network. # * The "external" (PublicNetwork) network will be handled in # another template, it is skipped by a yaql filter on the # PublicNetwork defined in ServiceNetMap. 
diff --git a/deployment/haproxy/haproxy-pacemaker-puppet.yaml b/deployment/haproxy/haproxy-pacemaker-puppet.yaml index ca831250b5..c8992e0b07 100644 --- a/deployment/haproxy/haproxy-pacemaker-puppet.yaml +++ b/deployment/haproxy/haproxy-pacemaker-puppet.yaml @@ -188,7 +188,7 @@ outputs: tripleo::haproxy::haproxy_log_facility: {get_param: HAProxySyslogFacility} - haproxy_docker: true tripleo::profile::pacemaker::haproxy_bundle::container_backend: {get_param: ContainerCli} - # the list of directories that contain the certs to bind mount in the countainer + # the list of directories that contain the certs to bind mount in the container # bind-mounting the directories rather than all the cert, key and pem files ensures # that docker won't create directories on the host when then pem files do not exist tripleo::profile::pacemaker::haproxy_bundle::tls_mapping: &tls_mapping @@ -479,7 +479,7 @@ outputs: # the shell module instead. # TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check haproxy cluster resource status shell: pcs resource config haproxy-bundle failed_when: false diff --git a/deployment/heat/heat-base-puppet.yaml b/deployment/heat/heat-base-puppet.yaml index 01cfd849c2..6eaff25d30 100644 --- a/deployment/heat/heat-base-puppet.yaml +++ b/deployment/heat/heat-base-puppet.yaml @@ -111,7 +111,7 @@ parameters: HeatYaqlMemoryQuota: type: number description: > - The maximum size of memory in bytes that yaql exrpessions can take for + The maximum size of memory in bytes that yaql expressions can take for its evaluation. default: 100000 HeatMaxJsonBodySize: diff --git a/deployment/ironic/ironic-api-container-puppet.yaml b/deployment/ironic/ironic-api-container-puppet.yaml index e32bb75131..9f0a5c96b9 100644 --- a/deployment/ironic/ironic-api-container-puppet.yaml +++ b/deployment/ironic/ironic-api-container-puppet.yaml @@ -356,7 +356,7 @@ outputs: with_items: - { 'path': /var/log/containers/ironic, 'setype': container_file_t, 'mode': '0750' } - { 'path': /var/log/containers/httpd/ironic-api, 'setype': container_file_t, 'mode': '0750' } - - name: create password file when auth_stragy is 'http_basic' + - name: create password file when auth_strategy is 'http_basic' vars: is_http_basic: if: diff --git a/deployment/ironic/ironic-conductor-container-puppet.yaml b/deployment/ironic/ironic-conductor-container-puppet.yaml index 6b90593700..e6f72c511a 100644 --- a/deployment/ironic/ironic-conductor-container-puppet.yaml +++ b/deployment/ironic/ironic-conductor-container-puppet.yaml @@ -237,7 +237,7 @@ parameters: - role_specific IronicRescuingNetwork: default: 'provisioning' - description: Name or UUID of the *overcloud* network used for resucing + description: Name or UUID of the *overcloud* network used for rescuing of bare metal nodes, if IronicDefaultRescueInterface is not set to "no-rescue". The default value of "provisioning" can be left during the initial deployment (when no networks are @@ -300,7 +300,7 @@ parameters: - allowed_values: ['oslo', 'json-rpc'] IronicIPXEUefiSnpOnly: type: boolean - description: Wheater to use SNP (Simple Network Protocol) iPXE EFI, or not. + description: Whether to use SNP (Simple Network Protocol) iPXE EFI, or not. When set to true `ipxe-snponly` EFI is used. 
default: true diff --git a/deployment/ironic/ironic-inspector-container-puppet.yaml b/deployment/ironic/ironic-inspector-container-puppet.yaml index 6aab0a2f11..9ca995d1b0 100644 --- a/deployment/ironic/ironic-inspector-container-puppet.yaml +++ b/deployment/ironic/ironic-inspector-container-puppet.yaml @@ -582,7 +582,7 @@ outputs: with_items: - { 'path': /var/log/containers/ironic-inspector, 'setype': container_file_t, 'mode': '0750' } - { 'path': /var/lib/ironic-inspector/dhcp-hostsdir, 'setype': container_file_t } - - name: create password file when auth_stragy is 'http_basic' + - name: create password file when auth_strategy is 'http_basic' vars: is_http_basic: if: diff --git a/deployment/kernel/kernel-boot-params-baremetal-ansible.yaml b/deployment/kernel/kernel-boot-params-baremetal-ansible.yaml index 61816ef457..0770019bb9 100644 --- a/deployment/kernel/kernel-boot-params-baremetal-ansible.yaml +++ b/deployment/kernel/kernel-boot-params-baremetal-ansible.yaml @@ -58,7 +58,7 @@ parameters: default: "" type: string description: > - List of logical CPU ids whic need to be isolated from the host processes. + List of logical CPU ids which need to be isolated from the host processes. This input is provided to the tuned profile cpu-partitioning to configure systemd and repin interrupts (IRQ repinning). tags: @@ -88,7 +88,7 @@ parameters: description: > By default, tripleo-kernel will configure the provided Hugepages as specified by operators but it won't try to remove Hugepages configured on the host that are not part of the TripleO - paramater. Enabling this setting will make sure everything is reconfigured exactly like the + parameter. Enabling this setting will make sure everything is reconfigured exactly like the TripleO parameter, otherwise we just add the content of the Hugepages parameter to what's already in place. tags: @@ -145,7 +145,7 @@ outputs: tripleo_kernel_defer_reboot: {get_attr: [RoleParametersValue, value, kernel_args_defer_reboot]} tripleo_kernel_reboot_timeout: {get_param: NodeRebootWaitTimeout} upgrade_tasks: - - name: upgrade prepare for leapp to align kernel arg shortcommings in leapp + - name: upgrade prepare for leapp to align kernel arg shortcomings in leapp tags: - never - system_upgrade diff --git a/deployment/logging/rsyslog-container-puppet.yaml b/deployment/logging/rsyslog-container-puppet.yaml index 860c11f0d7..f0668333f4 100644 --- a/deployment/logging/rsyslog-container-puppet.yaml +++ b/deployment/logging/rsyslog-container-puppet.yaml @@ -130,7 +130,7 @@ outputs: service_name: rsyslog config_settings: map_merge: - # puppet-rsyslog does not have params.pp with deault values for parameters + # puppet-rsyslog does not have params.pp with default values for parameters # so we need to add those here or include module's data/common.yaml in hiera - rsyslog::confdir: /etc/rsyslog.d rsyslog::package_name: rsyslog diff --git a/deployment/logrotate/logrotate-crond-container-puppet.yaml b/deployment/logrotate/logrotate-crond-container-puppet.yaml index 5e0c8ce175..5323f3a71e 100644 --- a/deployment/logrotate/logrotate-crond-container-puppet.yaml +++ b/deployment/logrotate/logrotate-crond-container-puppet.yaml @@ -68,7 +68,7 @@ parameters: constraints: - allowed_pattern: '-(%[YmdHMSVs])+$' LogrotateDateYesterday: - description: Configures dateyesterday paramter for containerized logrotate. + description: Configures dateyesterday parameter for containerized logrotate. This is valid when LogrotateDateExt is true. 
type: boolean default: false diff --git a/deployment/manila/manila-share-pacemaker-puppet.yaml b/deployment/manila/manila-share-pacemaker-puppet.yaml index a70f6bf94e..e0abd015df 100644 --- a/deployment/manila/manila-share-pacemaker-puppet.yaml +++ b/deployment/manila/manila-share-pacemaker-puppet.yaml @@ -275,7 +275,7 @@ outputs: # the shell module instead. # TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check openstack-manila-share cluster resource status shell: pcs resource config openstack-manila-share failed_when: false diff --git a/deployment/metrics/collectd-container-ansible.yaml b/deployment/metrics/collectd-container-ansible.yaml index c807342777..ef8dffbe85 100644 --- a/deployment/metrics/collectd-container-ansible.yaml +++ b/deployment/metrics/collectd-container-ansible.yaml @@ -185,7 +185,7 @@ parameters: type: json description: > Hash of hashes. Each inner hash represent Instance block in plugin - configuration file. Key of outter hash represents instance name. + configuration file. Key of outer hash represents instance name. The 'address' value concatenated with the 'name' given will be used as the send-to address for communications over the messaging link. default: {} @@ -243,7 +243,7 @@ parameters: default: 2 CollectdSensubilityChecks: type: json - description: JSON formated definition of standalone checks to be scheduled on client side. + description: JSON formatted definition of standalone checks to be scheduled on client side. default: {} CollectdEnableContainerHealthCheck: type: boolean @@ -351,7 +351,7 @@ resources: tripleo_collectd_enable_stf: {get_param: EnableSTF} tripleo_collectd_enable_mcelog: {get_param: CollectdEnableMcelog} tripleo_collectd_enable_libpodstats: {get_param: CollectdEnableLibpodstats } - # The last element should be the CollectdVars, which overides any previous deprecated metric. + # The last element should be the CollectdVars, which overrides any previous deprecated metric. - {get_param: CollectdVars} - if: - role_specific_required diff --git a/deployment/metrics/collectd-container-puppet.yaml b/deployment/metrics/collectd-container-puppet.yaml index 166976f18b..ca89101e41 100644 --- a/deployment/metrics/collectd-container-puppet.yaml +++ b/deployment/metrics/collectd-container-puppet.yaml @@ -190,7 +190,7 @@ parameters: type: json description: > Hash of hashes. Each inner hash represent Instance block in plugin - configuration file. Key of outter hash represents instance name. + configuration file. Key of outer hash represents instance name. The 'address' value concatenated with the 'name' given will be used as the send-to address for communications over the messaging link. default: {} @@ -248,7 +248,7 @@ parameters: default: 2 CollectdSensubilityChecks: type: json - description: JSON formated definition of standalone checks to be scheduled on client side. + description: JSON formatted definition of standalone checks to be scheduled on client side. 
default: {} CollectdSensubilityTransport: type: string diff --git a/deployment/neutron/derive_pci_passthrough_whitelist.py b/deployment/neutron/derive_pci_passthrough_whitelist.py index 17c6662061..c4730e1f79 100644 --- a/deployment/neutron/derive_pci_passthrough_whitelist.py +++ b/deployment/neutron/derive_pci_passthrough_whitelist.py @@ -259,7 +259,7 @@ def user_passthrough_config(): def match_pf_details(user_config, pf_name, is_non_nic_pf: bool): """Decide the action for whitelist_pci_addr, based on user config - :param user_configs: THT param NovaPCIPassthrough + :param user_config: THT param NovaPCIPassthrough :param pf_name: Interface/device name (str) :param is_non_nic_pf: Indicates whether the PF is noc-partitioned or not :return: Return the actions to be done, based on match criteria @@ -321,12 +321,12 @@ def match_pf_details(user_config, pf_name, is_non_nic_pf: bool): # +------------+-----------------------+---------------------------+-------------------+ def get_passthrough_config(user_config, pf_name, allocated_pci, is_non_nic_pf: bool): - """Handle all variations of user specifid pci addr format + """Handle all variations of user specified pci addr format Arrive at the address fields of the whitelist. Check the address fields of the pci.passthrough_whitelist configuration option, validating the address fields. - :param user_configs: THT param NovaPCIPassthrough + :param user_config: THT param NovaPCIPassthrough :param pf_name: Interface/device name (str) :param allocated_pci: List of VFs (for nic-partitioned PF), which are used by host :param is_non_nic_pf: Indicates whether the PF is non-partitioned or not @@ -486,8 +486,8 @@ def generate_combined_configuration(user_configs, system_configs): as it is. :param user_configs: THT param NovaPCIPassthrough :param system_configs: Derived from sriov-mapping.yaml - :return user_config_copy: Any non-nic partinioned cfg will be returned in this list - :return derived_config: All nic partinioned cfg will be returned after derivation in this list + :return user_config_copy: Any non-nic partitioned cfg will be returned in this list + :return derived_config: All nic partitioned cfg will be returned after derivation in this list """ user_config_copy = [] diff --git a/deployment/neutron/neutron-api-container-puppet.yaml b/deployment/neutron/neutron-api-container-puppet.yaml index c4cb38fb9a..b0ea24549e 100644 --- a/deployment/neutron/neutron-api-container-puppet.yaml +++ b/deployment/neutron/neutron-api-container-puppet.yaml @@ -145,11 +145,11 @@ parameters: default: [] type: comma_delimited_list NeutronNetworkSchedulerDriver: - description: The network schedule driver to use for avialability zones. + description: The network schedule driver to use for availability zones. default: neutron.scheduler.dhcp_agent_scheduler.AZAwareWeightScheduler type: string NeutronRouterSchedulerDriver: - description: The router schedule driver to use for avialability zones. + description: The router schedule driver to use for availability zones. 
default: neutron.scheduler.l3_agent_scheduler.AZLeastRoutersScheduler type: string NeutronDhcpLoadType: @@ -597,7 +597,7 @@ outputs: host_prep_tasks: list_concat: - {get_attr: [NeutronLogging, host_prep_tasks]} - - - name: create password file when auth_stragy is 'http_basic' + - - name: create password file when auth_strategy is 'http_basic' vars: is_http_basic: if: diff --git a/deployment/neutron/neutron-bgpvpn-bagpipe-baremetal-puppet.yaml b/deployment/neutron/neutron-bgpvpn-bagpipe-baremetal-puppet.yaml index 18b9e4b854..cf5ce5a21b 100644 --- a/deployment/neutron/neutron-bgpvpn-bagpipe-baremetal-puppet.yaml +++ b/deployment/neutron/neutron-bgpvpn-bagpipe-baremetal-puppet.yaml @@ -14,7 +14,7 @@ parameters: type: number BagpipeDataplaneDriverIpVpn: default: 'ovs' - description: IP VPN dataplane drvier class + description: IP VPN dataplane driver class type: string BagpipeEnableRtc: default: true diff --git a/deployment/neutron/neutron-dhcp-container-puppet.yaml b/deployment/neutron/neutron-dhcp-container-puppet.yaml index 4c4fc0d736..7f0a30ccb0 100644 --- a/deployment/neutron/neutron-dhcp-container-puppet.yaml +++ b/deployment/neutron/neutron-dhcp-container-puppet.yaml @@ -143,7 +143,7 @@ parameters: certificate for this service conditions: - dhcp_ovs_intergation_bridge_set: + dhcp_ovs_integration_bridge_set: not: {equals: [{get_param: NeutronDhcpOvsIntegrationBridge}, '']} az_set: not: {equals: [{get_param: NeutronDhcpAgentAvailabilityZone}, '']} @@ -247,7 +247,7 @@ outputs: neutron::agents::dhcp::ovsdb_agent_ssl_cert_file: '/etc/pki/tls/certs/neutron.crt' neutron::agents::dhcp::ovsdb_agent_ssl_ca_file: {get_param: InternalTLSCAFile} - if: - - dhcp_ovs_intergation_bridge_set + - dhcp_ovs_integration_bridge_set - neutron::agents::dhcp::ovs_integration_bridge: {get_param: NeutronDhcpOvsIntegrationBridge} - if: - az_set diff --git a/deployment/nova/nova-compute-common-container-puppet.yaml b/deployment/nova/nova-compute-common-container-puppet.yaml index f2299620d5..2d2ae95673 100644 --- a/deployment/nova/nova-compute-common-container-puppet.yaml +++ b/deployment/nova/nova-compute-common-container-puppet.yaml @@ -49,7 +49,7 @@ outputs: # Runs as external_post_deploy_tasks value: &nova_compute_common_deploy_steps_tasks - block: - - name: is additonal Cell? + - name: is additional Cell? set_fact: nova_additional_cell: {get_param: NovaAdditionalCell} - name: check if discover hosts is required diff --git a/deployment/nova/nova-compute-container-puppet.yaml b/deployment/nova/nova-compute-container-puppet.yaml index ecd89d40c2..0442939381 100644 --- a/deployment/nova/nova-compute-container-puppet.yaml +++ b/deployment/nova/nova-compute-container-puppet.yaml @@ -102,7 +102,7 @@ parameters: - range: { min: 0, max: 600 } EnableInstanceHA: default: false - description: Whether to enable an Instance Ha configurarion or not. + description: Whether to enable an Instance Ha configuration or not. This setup requires the Compute role to have the PacemakerRemote service added to it. 
type: boolean @@ -715,7 +715,7 @@ parameters: cinder2_store: GlanceBackend: cinder GlanceCinderVolumeType: 'volume-type-2' - GlanceStoreDescription: 'Seconde cinder store' + GlanceStoreDescription: 'Second cinder store' NovaGlanceEnableRbdDownload: type: boolean description: > @@ -1150,7 +1150,7 @@ outputs: nova::compute::rbd::libvirt_rbd_secret_uuid: {get_param: CephClusterFSID} nova::compute::consecutive_build_service_disable_threshold: {get_param: NovaAutoDisabling} nova::compute::live_migration_wait_for_vif_plug: {get_param: NovaLiveMigrationWaitForVIFPlug} - # Always disable post-copy in realy time kernel + # Always disable post-copy in real time kernel nova::migration::libvirt::live_migration_permit_post_copy: if: - is_realtime diff --git a/deployment/nova/nova-modular-libvirt-container-puppet.yaml b/deployment/nova/nova-modular-libvirt-container-puppet.yaml index fb4dfc174a..a2f6b9dcad 100644 --- a/deployment/nova/nova-modular-libvirt-container-puppet.yaml +++ b/deployment/nova/nova-modular-libvirt-container-puppet.yaml @@ -114,13 +114,13 @@ parameters: type: boolean default: true description: If set to true and if EnableInternalTLS is enabled, it will - enable TLS transaport for libvirt VNC and configure the + enable TLS transport for libvirt VNC and configure the relevant keys for libvirt. UseTLSTransportForNbd: type: boolean default: true description: If set to true and if EnableInternalTLS is enabled, it will - enable TLS transaport for libvirt NBD and configure the + enable TLS transport for libvirt NBD and configure the relevant keys for libvirt. InternalTLSCAFile: default: '/etc/ipa/ca.crt' diff --git a/deployment/nova/nova-vnc-proxy-container-puppet.yaml b/deployment/nova/nova-vnc-proxy-container-puppet.yaml index 27d88d0401..6b5a613237 100644 --- a/deployment/nova/nova-vnc-proxy-container-puppet.yaml +++ b/deployment/nova/nova-vnc-proxy-container-puppet.yaml @@ -49,7 +49,7 @@ parameters: type: boolean default: true description: If set to true and if EnableInternalTLS is enabled, it will - enable TLS transaport for libvirt VNC and configure the + enable TLS transport for libvirt VNC and configure the relevant keys for libvirt. InternalTLSVncProxyCAFile: default: '/etc/ipa/ca.crt' diff --git a/deployment/octavia/octavia-base.yaml b/deployment/octavia/octavia-base.yaml index dbc43aec22..2ace226482 100644 --- a/deployment/octavia/octavia-base.yaml +++ b/deployment/octavia/octavia-base.yaml @@ -181,7 +181,7 @@ parameters: OctaviaForwardAllLogs: default: false description: When true, all log messages from the amphora will be forwarded - to the administrative log endponts, including non-load + to the administrative log endpoints, including non-load balancing related logs. type: boolean OctaviaTenantLogTargets: diff --git a/deployment/octavia/octavia-deployment-config.j2.yaml b/deployment/octavia/octavia-deployment-config.j2.yaml index c00e3b4aa8..21c7c75df1 100644 --- a/deployment/octavia/octavia-deployment-config.j2.yaml +++ b/deployment/octavia/octavia-deployment-config.j2.yaml @@ -175,7 +175,7 @@ parameters: OctaviaLogOffload: default: true description: When true, log messages from the amphora will be forwarded - to the administrative log endponts and will be stored with + to the administrative log endpoints and will be stored with the controller logs. 
type: boolean OctaviaLogOffloadProtocol: diff --git a/deployment/octavia/octavia-health-manager-container-puppet.yaml b/deployment/octavia/octavia-health-manager-container-puppet.yaml index a8ca3aadb0..0a92994858 100644 --- a/deployment/octavia/octavia-health-manager-container-puppet.yaml +++ b/deployment/octavia/octavia-health-manager-container-puppet.yaml @@ -59,7 +59,7 @@ parameters: OctaviaLogOffload: default: true description: When true, log messages from the amphora will be forwarded - to the administrative log endponts and will be stored with + to the administrative log endpoints and will be stored with the controller logs. type: boolean OctaviaLogOffloadProtocol: diff --git a/deployment/octavia/providers/ovn-provider-config.yaml b/deployment/octavia/providers/ovn-provider-config.yaml index 867df00066..da3962bef4 100644 --- a/deployment/octavia/providers/ovn-provider-config.yaml +++ b/deployment/octavia/providers/ovn-provider-config.yaml @@ -63,7 +63,7 @@ conditions: outputs: role_data: - description: OVN provider driver configuraton + description: OVN provider driver configuration value: config_settings: map_merge: diff --git a/deployment/ovn/ovn-dbs-pacemaker-puppet.yaml b/deployment/ovn/ovn-dbs-pacemaker-puppet.yaml index 5c8ddc1db4..42ff6f1541 100644 --- a/deployment/ovn/ovn-dbs-pacemaker-puppet.yaml +++ b/deployment/ovn/ovn-dbs-pacemaker-puppet.yaml @@ -379,7 +379,7 @@ outputs: # the shell module instead. # TODO(odyssey4me): # Fix the pacemaker_resource module to handle the exception - # for a non-existant cluster more gracefully. + # for a non-existent cluster more gracefully. - name: Check ovn-dbs-bundle cluster resource status shell: pcs resource config ovn-dbs-bundle failed_when: false diff --git a/deployment/pacemaker/compute-instanceha-baremetal-puppet.yaml b/deployment/pacemaker/compute-instanceha-baremetal-puppet.yaml index 5d75c08516..b9edd5a20f 100644 --- a/deployment/pacemaker/compute-instanceha-baremetal-puppet.yaml +++ b/deployment/pacemaker/compute-instanceha-baremetal-puppet.yaml @@ -29,7 +29,7 @@ parameters: type: json EnableInstanceHA: default: false - description: Whether to enable an Instance Ha configurarion or not. + description: Whether to enable an Instance Ha configuration or not. This setup requires the Compute role to have the PacemakerRemote service added to it. type: boolean diff --git a/deployment/pacemaker/pacemaker-baremetal-puppet.yaml b/deployment/pacemaker/pacemaker-baremetal-puppet.yaml index a86114446d..ced9a8fe42 100644 --- a/deployment/pacemaker/pacemaker-baremetal-puppet.yaml +++ b/deployment/pacemaker/pacemaker-baremetal-puppet.yaml @@ -98,7 +98,7 @@ parameters: - allowed_values: ['podman'] EnableInstanceHA: default: false - description: Whether to enable an Instance Ha configurarion or not. + description: Whether to enable an Instance Ha configuration or not. This setup requires the Compute role to have the PacemakerRemote service added to it. type: boolean @@ -267,7 +267,7 @@ outputs: msg: "Prepare pacemaker upgrade for {{ pacemaker_short_node_names_upgraded }}" - name: set pacemaker node ips fact from the names fact set_fact: - # Generate matching IPs for the names, e.g. for these varaible values: + # Generate matching IPs for the names, e.g. 
for these variable values: # pacemaker_node_ips: [ "1", "2", "3" ] # pacemaker_short_node_names: [ "a", "b", "c" ] # pacemaker_short_node_names_override: [ "b" ] diff --git a/deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml b/deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml index 94299f43a0..b2eda64b29 100644 --- a/deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml +++ b/deployment/pacemaker/pacemaker-remote-baremetal-puppet.yaml @@ -133,7 +133,7 @@ outputs: msg: "Prepare pacemaker remote upgrade for {{ pacemaker_remote_short_node_names_upgraded }}" - name: set pacemaker remote node ips fact from the names fact set_fact: - # Generate matching IPs for the names, e.g. for these varaible values: + # Generate matching IPs for the names, e.g. for these variable values: # pacemaker_node_ips: [ "1", "2", "3" ] # pacemaker_short_node_names: [ "a", "b", "c" ] # pacemaker_short_node_names_override: [ "b" ] diff --git a/deployment/podman/podman-baremetal-ansible.yaml b/deployment/podman/podman-baremetal-ansible.yaml index 9cdcae4eec..18b9d159b4 100644 --- a/deployment/podman/podman-baremetal-ansible.yaml +++ b/deployment/podman/podman-baremetal-ansible.yaml @@ -239,7 +239,7 @@ outputs: # https://github.com/containers/podman/commit/a5ad36c65ea07d839fd9bf55a820c8cb9884eed1 # is available in the podman version used by TripleO, the ephemeral # Heat images could be labeled in a way that they could be kept with - # --fitler, and these tasks could be re-enabled on the undercloud. + # --filter, and these tasks could be re-enabled on the undercloud. - name: Purge Podman when: - (step | int) == 3 @@ -256,7 +256,7 @@ outputs: # https://github.com/containers/podman/commit/a5ad36c65ea07d839fd9bf55a820c8cb9884eed1 # is available in the podman version used by TripleO, the ephemeral # Heat images could be labeled in a way that they could be kept with - # --fitler, and these tasks could be re-enabled on the undercloud. + # --filter, and these tasks could be re-enabled on the undercloud. - name: Purge Podman when: - (step | int) == 3 diff --git a/deployment/rabbitmq/rabbitmq-container-puppet.yaml b/deployment/rabbitmq/rabbitmq-container-puppet.yaml index 7c4b53556b..3c82405744 100644 --- a/deployment/rabbitmq/rabbitmq-container-puppet.yaml +++ b/deployment/rabbitmq/rabbitmq-container-puppet.yaml @@ -435,7 +435,7 @@ outputs: - {get_param: CertificateKeySize} ca: ipa host_prep_tasks: - - name: creat fcontext entry for rabbitmq data + - name: create fcontext entry for rabbitmq data community.general.sefcontext: target: "/var/lib/rabbitmq(/.*)?" setype: container_file_t diff --git a/deployment/swift/swift-proxy-container-puppet.yaml b/deployment/swift/swift-proxy-container-puppet.yaml index 02dfa0a5ec..ccc7bc8159 100644 --- a/deployment/swift/swift-proxy-container-puppet.yaml +++ b/deployment/swift/swift-proxy-container-puppet.yaml @@ -76,7 +76,7 @@ parameters: type: boolean SwiftCeilometerIgnoreProjects: default: ['service'] - description: Comma-seperated list of project names to ignore. + description: Comma-separated list of project names to ignore. 
type: comma_delimited_list EnableInternalTLS: type: boolean diff --git a/deployment/tests/test-container-volume.yaml b/deployment/tests/test-container-volume.yaml index 2755b57ef9..b1a9211200 100644 --- a/deployment/tests/test-container-volume.yaml +++ b/deployment/tests/test-container-volume.yaml @@ -58,14 +58,14 @@ outputs: assert: that: - cinder_fake_group_var_one == 'var_one_override' - fail_msg: "cinder_fake_group_var_one was not overriden" - success_msg: "cinder_fake_group_var_one was overriden" + fail_msg: "cinder_fake_group_var_one was not overridden" + success_msg: "cinder_fake_group_var_one was overridden" - name: Test that cinder_fake_group_var_two was not overridden assert: that: - cinder_fake_group_var_two == 'var_two' - fail_msg: "cinder_fake_group_var_two was overriden" - success_msg: "cinder_fake_group_var_two was not overriden" + fail_msg: "cinder_fake_group_var_two was overridden" + success_msg: "cinder_fake_group_var_two was not overridden" - name: Create Test Volume facts set_fact: test_container_cli: {get_param: ContainerCli} diff --git a/deployment/time/ptp-baremetal-ansible.yaml b/deployment/time/ptp-baremetal-ansible.yaml index 3280b41ddf..1e805472a0 100644 --- a/deployment/time/ptp-baremetal-ansible.yaml +++ b/deployment/time/ptp-baremetal-ansible.yaml @@ -69,7 +69,7 @@ resources: outputs: role_data: - description: Role ptp using commposable services. + description: Role ptp using composable services. value: service_name: ptp firewall_rules: diff --git a/deployment/timemaster/timemaster-baremetal-ansible.yaml b/deployment/timemaster/timemaster-baremetal-ansible.yaml index 73063d1e6d..1dc4047b84 100644 --- a/deployment/timemaster/timemaster-baremetal-ansible.yaml +++ b/deployment/timemaster/timemaster-baremetal-ansible.yaml @@ -58,7 +58,7 @@ parameters: description: Specifies whether to enable the iburst option for every NTP peer. If iburst is enabled, when the ntp server is unreachable ntp will send a burst of eight packages instead of one. This - is designed to speed up the initial syncrhonization. + is designed to speed up the initial synchronization. type: boolean MaxPoll: description: Specify maximum poll interval of upstream servers for NTP diff --git a/deployment/timesync/chrony-baremetal-ansible.yaml b/deployment/timesync/chrony-baremetal-ansible.yaml index 5aa1ef936a..9712b3fb67 100644 --- a/deployment/timesync/chrony-baremetal-ansible.yaml +++ b/deployment/timesync/chrony-baremetal-ansible.yaml @@ -58,7 +58,7 @@ parameters: description: Specifies whether to enable the iburst option for every NTP peer. If iburst is enabled, when the ntp server is unreachable ntp will send a burst of eight packages instead of one. This - is designed to speed up the initial syncrhonization. + is designed to speed up the initial synchronization. type: boolean MaxPoll: description: Specify maximum poll interval of upstream servers for NTP diff --git a/deployment/unbound/unbound-container-ansible.yaml b/deployment/unbound/unbound-container-ansible.yaml index 18eb8b0fc7..b942c390f4 100644 --- a/deployment/unbound/unbound-container-ansible.yaml +++ b/deployment/unbound/unbound-container-ansible.yaml @@ -91,7 +91,7 @@ parameters: type: boolean DesignateExternalBindServers: description: > - Used to configure desginate with bind servers managed + Used to configure designate with bind servers managed externally to the overcloud. 
Example format: [ { # entry with minimal required values diff --git a/environments/barbican-backend-pkcs11-atos.yaml b/environments/barbican-backend-pkcs11-atos.yaml index 9563d5174e..0918f060d6 100644 --- a/environments/barbican-backend-pkcs11-atos.yaml +++ b/environments/barbican-backend-pkcs11-atos.yaml @@ -28,10 +28,10 @@ parameter_defaults: # atos_client_iso_location: # atos_client_iso_name: # atos_client_cert_location: - # atos_client_key_loaction: + # atos_client_key_location: # atos_hsms: # -- A list of HSMs. When more than one HSM is specified, # # they will be configured in Load Balancing mode. - # - name: my-hsm-hostanme.example.com + # - name: my-hsm-hostname.example.com # server_cert_location: https://user@PASSWORD:example.com/cert.CRT # ip: 127.0.0.1 diff --git a/environments/enable-stf.yaml b/environments/enable-stf.yaml index fa95b09028..f1313d0547 100644 --- a/environments/enable-stf.yaml +++ b/environments/enable-stf.yaml @@ -1,5 +1,5 @@ # Enable data collection that is compatible with the STF data model. -# Enablement of the collectors and transport are done with separate enviroment files. +# Enablement of the collectors and transport are done with separate environment files. # Recommended: # - environments/metrics/ceilometer-qdr-write.yaml # - environments/metrics/collectd-qdr-write.yaml diff --git a/environments/firewall.yaml b/environments/firewall.yaml index aa8efb1d44..af382041bf 100644 --- a/environments/firewall.yaml +++ b/environments/firewall.yaml @@ -3,7 +3,7 @@ parameter_defaults: -# This firewall rule will autorize 12345/tcp from localhost on all the nodes +# This firewall rule will authorize 12345/tcp from localhost on all the nodes # in the overcloud: # ExtraFirewallRules: # '301 allow arbitrary tcp rule': @@ -12,7 +12,7 @@ parameter_defaults: # source: 127.0.0.1 # action: insert -# This firewall rule will autorize 12345/tcp from localhost on all the +# This firewall rule will authorize 12345/tcp from localhost on all the # compute nodes: # ComputeParameters: # ExtraFirewallRules: diff --git a/environments/ha-redis.yaml b/environments/ha-redis.yaml index 9490789486..cb3a8f3fe6 100644 --- a/environments/ha-redis.yaml +++ b/environments/ha-redis.yaml @@ -1,6 +1,6 @@ # Environment file to deploy the Redis in a pacemaker cluster. 
# As Redis is no longer deployed by default, this file is only -# necessary if you upgrade an existing overcloud or if you implicitely +# necessary if you upgrade an existing overcloud or if you implicitly # rely on Redis resource_registry: OS::TripleO::Services::Redis: ../deployment/database/redis-pacemaker-puppet.yaml diff --git a/environments/manila-cephfsganesha-config.yaml b/environments/manila-cephfsganesha-config.yaml index ba0bc787bc..6da5881f4d 100644 --- a/environments/manila-cephfsganesha-config.yaml +++ b/environments/manila-cephfsganesha-config.yaml @@ -22,7 +22,7 @@ parameter_defaults: ManilaCephFSCephFSProtocolHelperType: 'NFS' # Use ManilaCephFSNFSIdmapConf to change the path of the idmap.conf file consumed by ganesha ManilaCephFSNFSIdmapConf: '/etc/ganesha/idmap.conf' - # Use ManilaCephFSNFSIdmapOverrides to overide ini configuration in the idmap.conf file + # Use ManilaCephFSNFSIdmapOverrides to override ini configuration in the idmap.conf file ManilaCephFSNFSIdmapOverrides: {} # Uncomment the following and set them appropriately if the Ceph cluster is # external and you need to use a specific keyring/key and pool to access the diff --git a/environments/updates/README.md b/environments/updates/README.md index 93714ed838..24e6904fd0 100644 --- a/environments/updates/README.md +++ b/environments/updates/README.md @@ -11,5 +11,5 @@ Contents **update-from-publicvip-on-ctlplane.yaml** To be used if the PublicVirtualIP resource was deployed as an additional VIP on the 'ctlplane'. -**update-from-deloyed-server-newton.yaml** +**update-from-deployed-server-newton.yaml** To be used when updating from the deployed-server template from Newton. diff --git a/extraconfig/post_deploy/undercloud_ctlplane_network.py b/extraconfig/post_deploy/undercloud_ctlplane_network.py index 8835b73c89..564766a23f 100755 --- a/extraconfig/post_deploy/undercloud_ctlplane_network.py +++ b/extraconfig/post_deploy/undercloud_ctlplane_network.py @@ -252,8 +252,8 @@ def _set_network_tags(sdk, network, tags): def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs): - """Create's and updates the ctlplane subnet on the segment that is local to - the underclud. + """Creates and updates the ctlplane subnet on the segment that is local to + the undercloud. """ s = CONF['subnets'][CONF['local_subnet']] # If the subnet is IPv6 we need to start a router so that router @@ -308,7 +308,7 @@ def _local_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs): def _remote_neutron_segments_and_subnets(sdk, ctlplane_id, net_cidrs): - """Create's and updates the ctlplane subnet(s) on segments that is + """Creates and updates the ctlplane subnet(s) on segments that is not local to the undercloud. 
""" diff --git a/extraconfig/post_deploy/undercloud_post.yaml b/extraconfig/post_deploy/undercloud_post.yaml index e781e8498b..1597678b2c 100644 --- a/extraconfig/post_deploy/undercloud_post.yaml +++ b/extraconfig/post_deploy/undercloud_post.yaml @@ -39,7 +39,7 @@ parameters: description: Physical network name for the ctlplane network local to the undercloud UndercloudCtlplaneSubnets: description: > - Dictionary of subnets to configure on the Undercloud ctlplan network + Dictionary of subnets to configure on the Undercloud ctlplane network default: {} type: json UndercloudCtlplaneLocalSubnet: diff --git a/network/ports/from_service_v6.yaml b/network/ports/from_service_v6.yaml index 0aa6fcc043..d8f0ca338d 100644 --- a/network/ports/from_service_v6.yaml +++ b/network/ports/from_service_v6.yaml @@ -8,11 +8,11 @@ parameters: description: Name of the service to lookup default: '' type: string - NetworkName: # Here for compatability with ctlplane_vip.yaml + NetworkName: # Here for compatibility with ctlplane_vip.yaml description: Name of the network where the VIP will be created default: ctlplane type: string - PortName: # Here for compatability with ctlplane_vip.yaml + PortName: # Here for compatibility with ctlplane_vip.yaml description: Name of the port default: '' type: string @@ -20,7 +20,7 @@ parameters: description: DNS name of the port default: '' type: string - ControlPlaneIP: # Here for compatability with ctlplane_vip.yaml + ControlPlaneIP: # Here for compatibility with ctlplane_vip.yaml description: IP address on the control plane default: '' type: string @@ -30,7 +30,7 @@ parameters: The subnet CIDR of the control plane network. (The parameter is automatically resolved from the ctlplane subnet's cidr attribute.) type: string - ControlPlaneNetwork: # Here for compatability with ctlplane_vip.yaml + ControlPlaneNetwork: # Here for compatibility with ctlplane_vip.yaml description: The name of the undercloud Neutron control plane default: ctlplane type: string diff --git a/network/service_net_map.j2.yaml b/network/service_net_map.j2.yaml index 579dce5a33..b5b28f94e7 100644 --- a/network/service_net_map.j2.yaml +++ b/network/service_net_map.j2.yaml @@ -12,7 +12,7 @@ parameters: in the service template, e.g if the service_name is heat_api the key must be either heat_api_network, or optionally HeatApiNetwork (which will be internally converted to - transform captalization to underscores). + transform capitalization to underscores). type: json ControlPlaneSubnet: diff --git a/network_data_routed.yaml b/network_data_routed.yaml index 8f6254aa27..29a83bb9f9 100644 --- a/network_data_routed.yaml +++ b/network_data_routed.yaml @@ -71,7 +71,7 @@ # gateway_ip: '10.0.2.254' # routes: [{'destination':'10.0.0.0/16', 'nexthop':'10.0.2.254'}] # -# To support backward compatility, two versions of the network definitions will +# To support backward compatibility, two versions of the network definitions will # be created, network/.yaml and network/_v6.yaml. Only # one of these files may be used in the deployment at a time, since the # parameters used for configuration are the same in both files. 
In the diff --git a/network_data_subnets_routed.yaml b/network_data_subnets_routed.yaml index 125dad0cf1..fa584032fa 100644 --- a/network_data_subnets_routed.yaml +++ b/network_data_subnets_routed.yaml @@ -59,7 +59,7 @@ # allocation_pools: [{'start': '172.16.1.70', 'end': '172.16.1.110'}] # gateway_ip: '172.16.1.65' # -# To support backward compatility, two versions of the network definitions will +# To support backward compatibility, two versions of the network definitions will # be created, network/.yaml and network/_v6.yaml. Only # one of these files may be used in the deployment at a time, since the # parameters used for configuration are the same in both files. In the diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml index 591007fd3e..962497dabb 100644 --- a/overcloud-resource-registry-puppet.j2.yaml +++ b/overcloud-resource-registry-puppet.j2.yaml @@ -382,7 +382,7 @@ parameter_defaults: RabbitmqManagementNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }} QdrNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }} RedisNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }} - # Guest VMs use GaneshaNetwork and connot reach ctlplane network, + # Guest VMs use GaneshaNetwork and can not reach ctlplane network, # so default to external when storage_nfs is not available. GaneshaNetwork: {{ _service_nets.get('storage_nfs', _service_nets.get('external', 'ctlplane')) }} MysqlNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }} diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml index f1b9c03138..233bdbbb87 100644 --- a/overcloud.j2.yaml +++ b/overcloud.j2.yaml @@ -925,7 +925,7 @@ resources: - - default: true next_hop: {get_attr: [Networks, net_attributes_map, ctlplane, subnets, {get_param: {{role.name}}ControlPlaneSubnet}, gateway_ip]} {%- endif %} - # MTU is not filtered on role.networks, for DVR we need the External MTU on the exteranl_bridge + # MTU is not filtered on role.networks, for DVR we need the External MTU on the external_bridge {% for network in networks if network.enabled|default(true) %} {{network.name_lower}}_mtu: {get_attr: [Networks, net_attributes_map, {{network.name_lower}}, network, mtu]} {%- endfor %} diff --git a/plan-samples/plan-environment-derived-params.yaml b/plan-samples/plan-environment-derived-params.yaml index f66cb59ca0..5d230ef720 100644 --- a/plan-samples/plan-environment-derived-params.yaml +++ b/plan-samples/plan-environment-derived-params.yaml @@ -21,7 +21,7 @@ playbook_parameters: hci_profile: default hci_profile_config: default: - # By default we do not not know expected workload. At leaast by + # By default we do not not know expected workload. 
diff --git a/overcloud-resource-registry-puppet.j2.yaml b/overcloud-resource-registry-puppet.j2.yaml
index 591007fd3e..962497dabb 100644
--- a/overcloud-resource-registry-puppet.j2.yaml
+++ b/overcloud-resource-registry-puppet.j2.yaml
@@ -382,7 +382,7 @@ parameter_defaults:
   RabbitmqManagementNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
   QdrNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
   RedisNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
-  # Guest VMs use GaneshaNetwork and connot reach ctlplane network,
+  # Guest VMs use GaneshaNetwork and can not reach ctlplane network,
   # so default to external when storage_nfs is not available.
   GaneshaNetwork: {{ _service_nets.get('storage_nfs', _service_nets.get('external', 'ctlplane')) }}
   MysqlNetwork: {{ _service_nets.get('internal_api', 'ctlplane') }}
diff --git a/overcloud.j2.yaml b/overcloud.j2.yaml
index f1b9c03138..233bdbbb87 100644
--- a/overcloud.j2.yaml
+++ b/overcloud.j2.yaml
@@ -925,7 +925,7 @@ resources:
             - - default: true
                 next_hop: {get_attr: [Networks, net_attributes_map, ctlplane, subnets, {get_param: {{role.name}}ControlPlaneSubnet}, gateway_ip]}
 {%- endif %}
-          # MTU is not filtered on role.networks, for DVR we need the External MTU on the exteranl_bridge
+          # MTU is not filtered on role.networks, for DVR we need the External MTU on the external_bridge
 {% for network in networks if network.enabled|default(true) %}
           {{network.name_lower}}_mtu: {get_attr: [Networks, net_attributes_map, {{network.name_lower}}, network, mtu]}
 {%- endfor %}
diff --git a/plan-samples/plan-environment-derived-params.yaml b/plan-samples/plan-environment-derived-params.yaml
index f66cb59ca0..5d230ef720 100644
--- a/plan-samples/plan-environment-derived-params.yaml
+++ b/plan-samples/plan-environment-derived-params.yaml
@@ -21,7 +21,7 @@ playbook_parameters:
     hci_profile: default
     hci_profile_config:
       default:
-        # By default we do not not know expected workload. At leaast by
+        # By default we do not not know expected workload. At least by
         # defaulting these values to zero we can reserve memory for OSDs
         average_guest_memory_size_in_mb: 0
         average_guest_cpu_utilization_percentage: 0
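The hci_profile_config block patched above is what the derive-parameters tooling reads to size memory and CPU reservations on hyperconverged nodes; selecting a profile other than 'default' supplies non-zero workload expectations. A minimal sketch, nested under the same playbook_parameters entry as in the sample file; the profile name and numbers are illustrative, not values taken from this patch:

  hci_profile: many_small_vms
  hci_profile_config:
    many_small_vms:
      average_guest_memory_size_in_mb: 1024
      average_guest_cpu_utilization_percentage: 20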
diff --git a/roles/CellController.yaml b/roles/CellController.yaml
index 7809c19e8e..23b76d65f5 100644
--- a/roles/CellController.yaml
+++ b/roles/CellController.yaml
@@ -3,7 +3,7 @@
 ###############################################################################
 - name: CellController
   description: |
-    CellController role for the nova cell_v2 controler services
+    CellController role for the nova cell_v2 controller services
   CountDefault: 1
   tags:
     - primary
diff --git a/roles/Controller.yaml b/roles/Controller.yaml
index 9a1f200caf..7b647c2620 100644
--- a/roles/Controller.yaml
+++ b/roles/Controller.yaml
@@ -3,7 +3,7 @@
 ###############################################################################
 - name: Controller
   description: |
-    Controller role that has all the controler services loaded and handles
+    Controller role that has all the controller services loaded and handles
     Database, Messaging and Network functions.
   CountDefault: 1
   tags:
diff --git a/roles/ControllerSriov.yaml b/roles/ControllerSriov.yaml
index f9412cdc0d..b483027079 100644
--- a/roles/ControllerSriov.yaml
+++ b/roles/ControllerSriov.yaml
@@ -3,7 +3,7 @@
 ###############################################################################
 - name: ControllerSriov
   description: |
-    Controller role that has all the controler services loaded and handles
+    Controller role that has all the controller services loaded and handles
     Database, Messaging and Network functions.
   CountDefault: 1
   tags:
diff --git a/roles/README.rst b/roles/README.rst
index 7e9872320c..4285c51abb 100644
--- a/roles/README.rst
+++ b/roles/README.rst
@@ -94,7 +94,7 @@ Usage
 
 usage: openstack overcloud role list [-h] [--roles-path ]
 
-List availables roles
+List available roles
 
 optional arguments:
   -h, --help            show this help message and exit
diff --git a/roles_data.yaml b/roles_data.yaml
index 9b6e548e5c..9d18546112 100644
--- a/roles_data.yaml
+++ b/roles_data.yaml
@@ -6,7 +6,7 @@
 ###############################################################################
 - name: Controller
   description: |
-    Controller role that has all the controler services loaded and handles
+    Controller role that has all the controller services loaded and handles
     Database, Messaging and Network functions.
   CountDefault: 1
   tags:
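The role files touched above share one schema: a name, a free-form description, a CountDefault, a list of tags and (not shown in these hunks) a ServicesDefault list. A hedged sketch of a minimal custom role; the role name and the deliberately short service list are illustrative only:

  - name: CustomController
    description: |
      Example controller-style role with a trimmed service list.
    CountDefault: 1
    tags:
      - primary
      - controller
    ServicesDefault:
      - OS::TripleO::Services::Keystone
      - OS::TripleO::Services::MySQL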
diff --git a/scripts/check-run-nova-compute b/scripts/check-run-nova-compute
index 58de75f6eb..ad55825063 100755
--- a/scripts/check-run-nova-compute
+++ b/scripts/check-run-nova-compute
@@ -95,7 +95,7 @@ def create_nova_connection(options):
    # Via https://review.opendev.org/#/c/492247/ os_interface has been deprecated in queens
    # and we need to use 'valid_interfaces' which is a:
    # "List of interfaces, in order of preference, for endpoint URL. (list value)"
-   # Since it is not explicitely set in nova.conf we still keep the check for os_interface
+   # Since it is not explicitly set in nova.conf we still keep the check for os_interface
    elif 'valid_interfaces' in options and len(options["valid_interfaces"]) >= 1:
        nova_endpoint_type = options["valid_interfaces"][0]
diff --git a/scripts/undercloud-upgrade-ephemeral-heat.py b/scripts/undercloud-upgrade-ephemeral-heat.py
index 9233fc1582..d67d505807 100755
--- a/scripts/undercloud-upgrade-ephemeral-heat.py
+++ b/scripts/undercloud-upgrade-ephemeral-heat.py
@@ -233,8 +233,8 @@ def export_passwords(heat, stack, stack_dir):
     """Export passwords from an existing stack and write them in Heat
     environment file format to the specified directory.
 
-    :param cloud: Heat client
-    :type cloud: heatclient.client.Client
+    :param heat: Heat client
+    :type heat: heatclient.client.Client
     :param stack: Stack name to query for passwords
     :type stack: str
     :param stack_dir: Directory to save the generated Heat environment
diff --git a/tools/convert_policy_yaml_to_heat_template.py b/tools/convert_policy_yaml_to_heat_template.py
index 81519620f2..18c826fb70 100755
--- a/tools/convert_policy_yaml_to_heat_template.py
+++ b/tools/convert_policy_yaml_to_heat_template.py
@@ -19,7 +19,7 @@ import ruamel.yaml
 from ruamel.yaml import YAML
 
 # Not all policy variables across services in THT are consistent. This mapping
-# assoicates the service name to the right THT variable.
+# associates the service name to the right THT variable.
 _SERVICE_MAP = {
     'barbican': 'BarbicanPolicies',
     'cinder': 'CinderApiPolicies',
diff --git a/tools/yaml-diff.py b/tools/yaml-diff.py
index 7ede12278b..44a706e2f8 100755
--- a/tools/yaml-diff.py
+++ b/tools/yaml-diff.py
@@ -76,7 +76,7 @@ def diff_dict(dict_a, dict_b):
     """Compares two dicts
 
     Converts 2 dicts to strings with pformat and returns
-    a unified diff formated string
+    a unified diff formatted string
     """
     if output_format == "pformat":
         str_a = pformat(dict_a, width=output_width)
diff --git a/tools/yaml-validate.py b/tools/yaml-validate.py
index 0d41a0c9b5..2af20e46b9 100755
--- a/tools/yaml-validate.py
+++ b/tools/yaml-validate.py
@@ -152,7 +152,7 @@ PARAMETER_DEFINITION_EXCLUSIONS = {
     'KeystoneErrorLoggingSource': ['default'],
     'KeystoneAdminAccessLoggingSource': ['default'],
     'KeystoneAdminErrorLoggingSource': ['default'],
-    'KeystoneMainAcccessLoggingSource': ['default'],
+    'KeystoneMainAccessLoggingSource': ['default'],
     'KeystoneMainErrorLoggingSource': ['default'],
     'LibvirtVncCACert': ['description'],
     'NeutronApiLoggingSource': ['default'],
@@ -1017,7 +1017,7 @@ def validate_service_hiera_interpol(f, tpl):
             # Omit apache remoteip proxy_ips
             if 'apache::mod::remoteip::proxy_ips' in path:
                 continue
-            # Omit Designate rndc_allowed_addressses
+            # Omit Designate rndc_allowed_addresses
             if ('rndc_allowed_addresses' in path):
                 continue
             # Omit Neutron ml2 overlay_ip_version
diff --git a/tripleo_heat_templates/tests/test_environment_generator.py b/tripleo_heat_templates/tests/test_environment_generator.py
index b3d951e60f..c5e2b5f93d 100644
--- a/tripleo_heat_templates/tests/test_environment_generator.py
+++ b/tripleo_heat_templates/tests/test_environment_generator.py
@@ -608,8 +608,8 @@ parameter_defaults:
             'input_file': '''environments:
   - name: basic_role_static_param
-    title: Basic Role Static Prams Environment
-    description: Basic Role Static Prams description
+    title: Basic Role Static Params Environment
+    description: Basic Role Static Params description
     files:
       foo.yaml:
         parameters:
@@ -620,9 +620,9 @@ parameter_defaults:
          - FooParam
          - RoleParam
 ''',
-            'expected_output': '''# title: Basic Role Static Prams Environment
+            'expected_output': '''# title: Basic Role Static Params Environment
 # description: |
-# Basic Role Static Prams description
+# Basic Role Static Params description
 parameter_defaults:
 # ******************************************************
 # Static parameters - these are values that must be
@@ -655,9 +655,9 @@ parameter_defaults:
             'nested_output': '',
             'input_file': '''environments:
-  - name: multline_role_static_param
-    title: Multiline Role Static Prams Environment
-    description: Multiline Role Static Prams description
+  - name: multiline_role_static_param
+    title: Multiline Role Static Params Environment
+    description: Multiline Role Static Params description
     files:
       foo.yaml:
         parameters:
@@ -668,9 +668,9 @@ parameter_defaults:
          - FooParam
          - RoleParam
 ''',
-            'expected_output': '''# title: Multiline Role Static Prams Environment
+            'expected_output': '''# title: Multiline Role Static Params Environment
 # description: |
-# Multiline Role Static Prams description
+# Multiline Role Static Params description
 parameter_defaults:
 # ******************************************************
 # Static parameters - these are values that must be
diff --git a/tripleo_heat_templates/tests/test_tht_ansible_syntax.yml b/tripleo_heat_templates/tests/test_tht_ansible_syntax.yml
index 972c32d0d6..fc2ea0b894 100644
--- a/tripleo_heat_templates/tests/test_tht_ansible_syntax.yml
+++ b/tripleo_heat_templates/tests/test_tht_ansible_syntax.yml
@@ -34,7 +34,7 @@ external_upgrade_tasks
       # In the future this list should be extended to support
      # automatically any role definition in t-h-t/roles/*
-      # Currently we have a --all option check allservices
+      # Currently we have a --all option check all services
      # in the resource registry
      roles_list: >
        Compute
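The fixtures above drive the sample-environment generator, whose input is a small YAML document naming each environment and the parameters to pull from the listed template files. A minimal sketch mirroring the fixture format shown in the test; the environment, file and parameter names are made up for illustration:

  environments:
    - name: my_custom_env
      title: My Custom Environment
      description: Example generator input mirroring the fixtures above.
      files:
        foo.yaml:
          parameters:
            - FooParam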