Try a timesync as part of first boot

We're running into issues where, if someone creates a firstboot script
that touches a file that will eventually be mounted into a container, the
mount can fail if the file's timestamp ends up in the future due to a
later timesync. Let's try a basic timesync bootstrap as part of
cloud-init to address the case of configuration changes occurring prior
to the host_prep_tasks where we traditionally configure chrony/ntp.
NOTE: For Rocky, we use ntp instead of chrony.

Change-Id: I294eba826b98c5793336815282f766e3d2e60a51
Related-Bug: #1776869
(cherry picked from commit eafe390853)
Alex Schultz 2019-05-13 15:42:59 -06:00
parent dd46170b74
commit 45dcd0e5a8
3 changed files with 109 additions and 0 deletions
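
The race the commit message describes, as a minimal hypothetical sketch (the
path is illustrative and chronyc makestep stands in for whatever later timesync
actually runs; none of this is part of the change below): a firstboot script
writes a file while the clock is still drifted ahead, a later sync steps the
clock back, and the file's mtime is then in the future when a container
consumes it.

    #!/bin/bash
    # Hypothetical illustration only, not part of this change.
    echo "setting=1" > /var/lib/example/example.conf  # written while the clock is ahead
    chronyc makestep                                  # a later timesync steps the clock back
    now=$(date +%s)
    mtime=$(stat -c %Y /var/lib/example/example.conf)
    if [ "$mtime" -gt "$now" ]; then
      echo "mtime is in the future; a container mounting this file may misbehave"
    fi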


@@ -0,0 +1,97 @@
heat_template_version: rocky

parameters:
  NtpServer:
    default: ['pool.ntp.org']
    description: NTP servers list. Defaulted to pool.ntp.org in order to
                 have a sane default for Pacemaker deployments when
                 not configuring this parameter by default.
    type: comma_delimited_list
  NtpPool:
    default: []
    description: NTP pool list. Defaults to [], so only NtpServer is used by
                 default.
    type: comma_delimited_list
  NtpService:
    default: ntp
    description: NTP Service to use for the timesync bootstrap.
    type: string

description: >
  Uses cloud-init to bootstrap timesync configuration to ensure it is done
  as soon as possible. We do additional and more complex configurations as
  part of the deployment itself.

conditions:
  use_chrony: {equals: [{get_param: NtpService}, 'chrony']}

resources:
  userdata:
    type: OS::Heat::MultipartMime
    properties:
      parts:
      - config: {get_resource: timesync_chrony}
      - config: {get_resource: timesync_sync}

  # chrony sync needs chrony to be configured, if not chrony just exit
  timesync_chrony:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        str_replace:
          template: |
            #!/bin/bash
            if [ "$service" != "chrony" ]; then
              exit 0
            fi
            set -x
            SERVERS="$ntp_servers"
            POOLS="$ntp_pools"
            systemctl is-active --quiet chronyd || systemctl start chronyd
            for server in $SERVERS; do
              chronyc add server "${server}" iburst
            done
            for pool in $POOLS; do
              chronyc add server "${pool}" iburst
            done
            chronyc sources
          params:
            $ntp_servers:
              list_join: [' ', {get_param: NtpServer}]
            $ntp_pools:
              list_join: [' ', {get_param: NtpPool}]
            $service: {get_param: NtpService}

  # attempt a timesync on boot to ensure the time has been synced
  timesync_sync:
    type: OS::Heat::SoftwareConfig
    properties:
      config:
        str_replace:
          template: |
            #!/bin/bash
            set -x
            if [ "$service" = "chrony" ]; then
              if command -v chronyc >/dev/null; then
                chronyc waitsync 20
              else
                echo "No chronyc available, skipping sync"
              fi
            elif [ "$service" = "ntp" ]; then
              if command -v ntpdate >/dev/null; then
                ntpdate -u $ntp_servers
              else
                echo "No ntpdate available, skipping sync"
              fi
            fi
            hwclock --systohc --utc
          params:
            $service: {get_param: NtpService}
            $ntp_servers:
              list_join: [' ', {get_param: NtpServer}]

outputs:
  OS::stack_id:
    value: {get_resource: userdata}
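
The parameters above can be overridden at deploy time through the usual Heat
environment-file mechanism; a minimal sketch (file name and server values are
placeholders, not part of this change):

    parameter_defaults:
      NtpServer: ['0.pool.ntp.org', '1.pool.ntp.org']
      NtpPool: []
      NtpService: chrony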


@@ -44,6 +44,10 @@ resource_registry:
  # To disable, replace with firstboot/userdata_default.yaml
  OS::TripleO::NodeAdminUserData: firstboot/userdata_heat_admin.yaml
  # This bootstraps the timesync configuration for any subsequent deployment
  # operations. To disable, replace with firstboot/userdata_default.yaml
  OS::TripleO::NodeTimesyncUserData: firstboot/userdata_timesync.yaml
  # Hooks for operator extra config
  # NodeUserData == Cloud-init additional user-data, e.g cloud-config
  # role::NodeUserData == Role specific cloud-init additional user-data
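
As the new comment notes, a deployer can opt back out of the timesync
bootstrap by remapping the resource to the no-op userdata in an environment
file; a sketch under that assumption (the environment file itself is the
deployer's, not part of this change):

    resource_registry:
      OS::TripleO::NodeTimesyncUserData: firstboot/userdata_default.yaml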


@@ -401,6 +401,12 @@ resources:
  NodeAdminUserData:
    type: OS::TripleO::NodeAdminUserData

  # Bootstraps an ntp configuration and includes a hardware clock sync for
  # containers.
  # Should return a OS::Heat::MultipartMime reference via OS::stack_id
  NodeTimesyncUserData:
    type: OS::TripleO::NodeTimesyncUserData

  # For optional operator additional userdata
  # Should return a OS::Heat::MultipartMime reference via OS::stack_id
  NodeUserData:
@@ -573,6 +579,8 @@ resources:
      parts:
      - config: {get_resource: NodeAdminUserData}
        type: multipart
      - config: {get_resource: NodeTimesyncUserData}
        type: multipart
      - config: {get_resource: NodeUserData}
        type: multipart
      - config: {get_resource: {{role.name}}RoleUserData}