Merge "Fix no-member for tpm config manifests"
This commit is contained in:
commit
e948a02f29
|
@ -214,8 +214,6 @@ class AgentManager(service.PeriodicService):
|
|||
self._prev_fs = None
|
||||
self._subfunctions = None
|
||||
self._subfunctions_configured = False
|
||||
self._tpmconfig_rpc_failure = False
|
||||
self._tpmconfig_host_first_apply = False
|
||||
self._first_grub_update = False
|
||||
self._inventoried_initial = False
|
||||
self._inventory_reported = set()
|
||||
|
@ -1918,135 +1916,6 @@ class AgentManager(service.PeriodicService):
|
|||
|
||||
return
|
||||
|
||||
def _audit_tpm_device(self, context, host_id):
    """Audit the tpmdevice status on this host and report it to the conductor.

    Queries the conductor for the system TPM configuration and for this
    host's tpmdevice entry, then (when appropriate) calls
    tpm_config_update_by_host() with {'is_configured': bool} so that the
    conductor can raise or clear alarms centrally.

    :param context: request context
    :param host_id: id of this host
    """
    rpcapi = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)
    tpmconfig = None
    tpmdevice = None
    response_dict = {'is_configured': False}  # guilty until proven innocent
    try:
        tpmconfig = rpcapi.get_system_tpmconfig(context)
    except exception.SysinvException:
        pass
    finally:
        # No system TPM configuration -> nothing to audit.
        # NOTE(review): this `return` sits inside a `finally`, so it also
        # swallows any in-flight exception other than SysinvException —
        # confirm that is intentional.
        if not tpmconfig:
            LOG.debug("Sysinv Agent cannot get host system tpmconfig.")
            return

    try:
        tpmdevice = rpcapi.get_tpmdevice_by_host(context, host_id)
        if tpmdevice:
            # if we found a tpmdevice configuration then
            # that implies that a tpmconfig has as already
            # been applied on this host. Set it here since
            # that flag (originally set in apply_tpm_config())
            # would be cleared on Sysinv agent restarts/swacts
            self._tpmconfig_host_first_apply = True
    except exception.SysinvException:
        # it could be that a TPM configuration was attempted before
        # this controller was provisioned in which case we will
        # raise a failure. However it could also be that the agent
        # simply hasn't applied the tpmdevice configuration.
        # Check for both cases.
        if self._tpmconfig_host_first_apply:
            LOG.debug("Sysinv Agent still applying host "
                      "tpmdevice configuration.")
            return
    finally:
        # First apply has not happened yet: report "not configured" now.
        # This finally runs even when the except branch above hit `return`.
        if not self._tpmconfig_host_first_apply:
            rpcapi.tpm_config_update_by_host(context,
                                             host_id,
                                             response_dict)

    if (tpmconfig and tpmdevice and
            (self._tpmconfig_rpc_failure or
             tpmdevice['state'] != constants.TPMCONFIG_APPLYING)):
        # If there is an rpc failure then always send an update
        # If there has been no rpc failure, and TPM is not in
        # applying state and if TPM is configured in the system,
        # then query the tpm path, and inform the conductor
        if os.path.isfile(tpmconfig['tpm_path']):
            response_dict['is_configured'] = True

        LOG.debug("Conductor: config_update_by_host for host (%s), "
                  "response(%s)" % (host_id, response_dict))
        rpcapi.tpm_config_update_by_host(context,
                                         host_id,
                                         response_dict)
|
||||
|
||||
def apply_tpm_config(self, context, tpm_context):
    """Configure or Update TPM device on this node

    :param context: request context
    :param tpm_context: the tpm object context
    """
    # Only a provisioned controller host applies TPM configuration.
    if not (self._ihost_uuid and self._ihost_personality and
            self._ihost_personality == constants.CONTROLLER):
        return

    LOG.info("AgentManager apply_tpm_config: %s" % self._ihost_uuid)

    # Record that this agent has attempted at least one apply; the
    # periodic audit relies on this flag across restarts/swacts.
    self._tpmconfig_host_first_apply = True
    self._tpmconfig_rpc_failure = False

    response_dict = {}
    attribute_dict = {}
    conductor = conductor_rpcapi.ConductorAPI(
        topic=conductor_rpcapi.MANAGER_TOPIC)

    # Invoke tpmdevice-setup on this node, then capture the generated
    # TPM certificate blobs so the conductor can persist them in the DB.
    try:
        utils.execute('tpmdevice-setup',
                      tpm_context['cert_path'],
                      tpm_context['tpm_path'],
                      tpm_context['public_path'],
                      run_as_root=True)
        attribute_dict['tpm_data'] = \
            utils.read_filtered_directory_content(
                os.path.dirname(tpm_context['tpm_path']),
                "*.bin", "*.tpm")
    except exception.ProcessExecutionError as err:
        LOG.exception(err)
        response_dict['is_configured'] = False
    else:
        response_dict['is_configured'] = True
        attribute_dict['state'] = constants.TPMCONFIG_APPLYING

    # Only create/update a TPM device entry when the certificates were
    # successfully generated.
    if response_dict['is_configured']:
        tpmdevice = conductor.tpm_device_update_by_host(context,
                                                        self._ihost_uuid,
                                                        attribute_dict)
        if not tpmdevice:
            response_dict['is_configured'] = False

    # Report back to the conductor immediately rather than waiting for
    # the next agent audit cycle.
    try:
        conductor.tpm_config_update_by_host(context,
                                            self._ihost_uuid,
                                            response_dict)
    except Timeout:
        # The TPM configuration has applied, but if the conductor is
        # unreachable, tpmconfig would be stuck in Applying state. The
        # audit normally skips updates while Applying, so flag this as
        # an rpc failure to force an update even in Applying state.
        LOG.info("tpm_config_update_by_host rpc Timeout.")
        self._tpmconfig_rpc_failure = True

    return
|
||||
|
||||
def device_update_image(self, context, host_uuid, pci_addr, filename, transaction_id,
|
||||
retimer_included):
|
||||
"""Write the device image to the device at the specified address.
|
||||
|
|
|
@ -12882,163 +12882,6 @@ class ConductorManager(service.PeriodicService):
|
|||
"""
|
||||
return self._openstack.region_has_ceph_backend()
|
||||
|
||||
def get_system_tpmconfig(self, context):
    """
    Retrieve the system tpmconfig object

    :param context: request context
    :returns: tpmconfig as a dict, or None when no configuration exists
    """
    try:
        record = self.dbapi.tpmconfig_get_one()
        # A falsy record means no usable configuration.
        return record.as_dict() if record else None
    except exception.NotFound:
        # No TPM configuration found
        return None
|
||||
|
||||
def get_tpmdevice_by_host(self, context, host_id):
    """
    Retrieve the tpmdevice object for this host

    :param context: request context
    :param host_id: id of the host
    :returns: the device as a dict when exactly one entry exists,
              otherwise None
    """
    try:
        devices = self.dbapi.tpmdevice_get_by_host(host_id)
        # Exactly one tpmdevice entry is expected per host; anything
        # else (none, or multiples) yields None.
        if devices and len(devices) == 1:
            return devices[0].as_dict()
    except exception.NotFound:
        # No TPM device found
        return None
|
||||
|
||||
def _set_tpm_config_state(self,
                          ihost, response_dict):
    """Update tpm configuration state. """
    try:
        devices = self.dbapi.tpmdevice_get_by_host(ihost.uuid)
        if len(devices) > 1:
            # Ambiguous: more than one entry — leave state untouched.
            LOG.error("Multiple tpmdevice entries found for host %s" %
                      ihost.uuid)
            return
        if not devices:
            LOG.debug("TPM Audit: No tpmdevice entry found while TPM "
                      "configuration exists.")
            return
        device = devices[0]
    except exception.NotFound:
        # No TPM configuration. No need to update status
        return

    # Map the agent's verdict onto a device state.
    if response_dict['is_configured']:
        new_state = constants.TPMCONFIG_APPLIED
    else:
        new_state = constants.TPMCONFIG_FAILED

    # Persist only on an actual state transition.
    if new_state and new_state != device.state:
        self.dbapi.tpmdevice_update(device.uuid,
                                    {'state': new_state})
|
||||
|
||||
def tpm_config_update_by_host(self, context,
                              host_uuid, response_dict):
    """Get TPM configuration status from Agent host.

    This method allows for alarms to be raised for hosts if TPM
    is not configured properly.

    :param context: an admin context
    :param host_uuid: host unique id
    :param response_dict: configuration status
    :returns: pass or fail
    """
    LOG.debug("Entering tpm_config_update_by_host %s %s" %
              (host_uuid, response_dict))
    # BUG FIX: str.strip() returns a new string; the original call
    # discarded the result, leaving any surrounding whitespace on
    # host_uuid. Rebind so the stripped value is actually used below.
    host_uuid = host_uuid.strip()
    try:
        tpm_host = self.dbapi.ihost_get(host_uuid)
        entity_instance_id = ("%s=%s" %
                              (fm_constants.FM_ENTITY_TYPE_HOST,
                               tpm_host.hostname))
        alarm_id = fm_constants.FM_ALARM_ID_TPM_INIT

        if response_dict['is_configured']:
            tpmdevice = self.get_tpmdevice_by_host(context, host_uuid)
            # apply config manifest for tpm create/update
            if (tpmdevice and
                    tpmdevice['state'] ==
                    constants.TPMCONFIG_APPLYING):
                self.update_tpm_config_manifests(context)
            # update the system configuration state
            self._set_tpm_config_state(tpm_host, response_dict)
            # do a blind clear on any TPM alarm
            # for this host.
            self.fm_api.clear_fault(alarm_id,
                                    entity_instance_id)
        else:
            # update the system configuration state
            self._set_tpm_config_state(tpm_host, response_dict)
            # set an alarm for this host and tell
            # mtce to degrade this node
            if not self.fm_api.get_fault(alarm_id, entity_instance_id):
                fault = fm_api.Fault(
                    alarm_id=alarm_id,
                    alarm_state=fm_constants.FM_ALARM_STATE_SET,
                    entity_type_id=fm_constants.FM_ENTITY_TYPE_HOST,
                    entity_instance_id=entity_instance_id,
                    severity=fm_constants.FM_ALARM_SEVERITY_MAJOR,
                    reason_text="TPM configuration failed "
                                "or device not found.",
                    # equipment
                    alarm_type=fm_constants.FM_ALARM_TYPE_4,
                    # procedural-error
                    probable_cause=fm_constants.ALARM_PROBABLE_CAUSE_64,
                    proposed_repair_action="reinstall HTTPS certificate; "
                                           "if problem persists",
                    service_affecting=False)
                self.fm_api.set_fault(fault)

    except Exception:
        # NOTE(review): this broad catch reports every failure in the
        # block above (including FM/DB errors) as an invalid host_uuid;
        # kept for backward compatibility — consider narrowing.
        raise exception.SysinvException(_(
            "Invalid host_uuid: %s") % host_uuid)
|
||||
|
||||
def tpm_device_update_by_host(self, context,
                              host_uuid, tpmdevice_dict):
    """Synchronously, have the conductor create or update
    a tpmdevice per host.

    :param context: request context.
    :param host_uuid: uuid or id of the host
    :param tpmdevice_dict: a dictionary of tpm device attributes

    :returns: tpmdevice object
    """
    try:
        host = self.dbapi.ihost_get(host_uuid)
    except exception.ServerNotFound:
        LOG.error("Cannot find host by id %s" % host_uuid)
        return

    existing = self.dbapi.tpmdevice_get_by_host(host.id)
    if existing:
        # A device already exists for this host: refresh it with the
        # new attributes (new TPM certs).
        device = self.dbapi.tpmdevice_update(existing[0].uuid,
                                             tpmdevice_dict)
        # update table tpmconfig updated_at as its visible from tpmconfig-show
        try:
            tpm_obj = self.dbapi.tpmconfig_get_one()
            now = timeutils.utcnow()
            self.dbapi.tpmconfig_update(tpm_obj.uuid,
                                        {'updated_at': now})
            LOG.info("TPM config updated at: %s" % now)
        except exception.NotFound:
            LOG.error("tpm_device_update_by_host tpmconfig NotFound")
    else:
        try:
            # create new tpmdevice
            tpmdevice_dict.update({'host_uuid': host['uuid']})
            device = self.dbapi.tpmdevice_create(host['id'],
                                                 tpmdevice_dict)
        except Exception:
            LOG.exception("Cannot create TPM device for host %s" % host_uuid)
            return

    return device
|
||||
|
||||
def cinder_prepare_db_for_volume_restore(self, context):
|
||||
"""
|
||||
Send a request to cinder to remove all volume snapshots and set all
|
||||
|
|
|
@ -1443,55 +1443,6 @@ class ConductorAPI(sysinv.openstack.common.rpc.proxy.RpcProxy):
|
|||
"""
|
||||
return self.call(context, self.make_msg('region_has_ceph_backend'))
|
||||
|
||||
def get_system_tpmconfig(self, context):
    """
    Retrieve the system tpmconfig object

    :param context: request context
    :returns: result of the synchronous conductor call
    """
    msg = self.make_msg('get_system_tpmconfig')
    return self.call(context, msg)
|
||||
|
||||
def get_tpmdevice_by_host(self, context, host_id):
    """
    Retrieve the tpmdevice object for this host

    :param context: request context
    :param host_id: id of the host
    :returns: result of the synchronous conductor call
    """
    msg = self.make_msg('get_tpmdevice_by_host',
                        host_id=host_id)
    return self.call(context, msg)
|
||||
|
||||
def tpm_config_update_by_host(self, context,
                              host_uuid, response_dict):
    """Get TPM configuration status from Agent host.

    This method allows for alarms to be raised for hosts if TPM
    is not configured properly.

    :param context: an admin context
    :param host_uuid: host unique id
    :param response_dict: configuration status
    :returns: pass or fail
    """
    msg = self.make_msg('tpm_config_update_by_host',
                        host_uuid=host_uuid,
                        response_dict=response_dict)
    return self.call(context, msg)
|
||||
|
||||
def tpm_device_update_by_host(self, context,
                              host_uuid, tpmdevice_dict):
    """Synchronously, have the conductor create or update
    a tpmdevice per host.

    :param context: request context.
    :param host_uuid: uuid or id of the host
    :param tpmdevice_dict: a dictionary of tpm device attributes

    :returns: tpmdevice object
    """
    msg = self.make_msg('tpm_device_update_by_host',
                        host_uuid=host_uuid,
                        tpmdevice_dict=tpmdevice_dict)
    return self.call(context, msg)
|
||||
|
||||
def cinder_prepare_db_for_volume_restore(self, context):
|
||||
"""
|
||||
Send a request to cinder to remove all volume snapshots and set all
|
||||
|
|
Loading…
Reference in New Issue