Derive parameters clean up

This change removes the derive parameters code
for OvS DPDK, SR-IOV and HCI parameters.

Change-Id: Iface80568fe3f9db3fa9fbe859eedf73f861e55b
(cherry picked from commit 9fe9f2ee60)
Jaganathan Palanisamy 2022-07-27 14:43:55 +05:30
parent 1533716c2d
commit a14e158b52
37 changed files with 36 additions and 4415 deletions

View File

@ -1,14 +0,0 @@
======================================
Module - tripleo_derive_hci_parameters
======================================
This module provides for the following ansible plugin:
* tripleo_derive_hci_parameters
.. ansibleautoplugin::
:module: tripleo_ansible/ansible_plugins/modules/tripleo_derive_hci_parameters.py
:documentation: true
:examples: true

View File

@ -1,14 +0,0 @@
===================================
Module - tripleo_get_flavor_profile
===================================
This module provides for the following ansible plugin:
* tripleo_get_flavor_profile
.. ansibleautoplugin::
:module: tripleo_ansible/ansible_plugins/modules/tripleo_get_flavor_profile.py
:documentation: true
:examples: true

View File

@ -1,14 +0,0 @@
======================================
Module - tripleo_get_introspected_data
======================================
This module provides for the following ansible plugin:
* tripleo_get_introspected_data
.. ansibleautoplugin::
:module: tripleo_ansible/ansible_plugins/modules/tripleo_get_introspected_data.py
:documentation: true
:examples: true

View File

@ -1,54 +0,0 @@
#!/usr/bin/python
from ansible.parsing.yaml.objects import AnsibleUnicode
class FilterModule(object):
def filters(self):
return {
'number_list': self.number_list
}
# converts range list into number list
# both the input parameter and the return value are lists
# example: ["12-14", "^13", "17"] into [12, 14, 17]
def convert_range_to_number_list(self, range_list):
num_list = []
exclude_num_list = []
try:
for val in range_list:
val = val.strip(' ')
if '^' in val:
exclude_num_list.append(int(val[1:]))
elif '-' in val:
split_list = val.split("-")
range_min = int(split_list[0])
range_max = int(split_list[1])
num_list.extend(range(range_min, (range_max + 1)))
else:
num_list.append(int(val))
except ValueError as exc:
msg = ("Invalid number in input param "
"'range_list': %s" % exc)
raise Exception(msg)
# here, num_list is a list of integers
return [num for num in num_list if num not in exclude_num_list]
def number_list(self, range_list):
try:
if not range_list:
msg = "Input param 'range_list' is blank."
raise Exception(msg)
# converts into python list if range_list is not list type
if not isinstance(range_list, list):
range_list = range_list.split(",")
num_list = self.convert_range_to_number_list(range_list)
except Exception as err:
msg = 'Derive Params Error: %s' % err
raise Exception(msg)
# converts into comma delimited number list as string
return ','.join([str(num) for num in num_list])
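For reference, the filter removed above maps a descriptive range list to an explicit comma-delimited number list (in a template it would be applied as a Jinja filter, e.g. "12-14,^13,17" | number_list). A minimal sketch of the expected behaviour, assuming the FilterModule class above is importable:

fm = FilterModule()
print(fm.number_list("12-14, ^13, 17"))  # expected: "12,14,17" (13 is excluded by the ^ prefix)
print(fm.number_list(["0-3", "8"]))      # a list input is accepted as-is; expected: "0,1,2,3,8"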

View File

@ -1,51 +0,0 @@
#!/usr/bin/python
from ansible.parsing.yaml.objects import AnsibleUnicode
class FilterModule(object):
def filters(self):
return {
'range_list': self.range_list
}
# converts number list into range list.
# both the input parameter and the return value are lists
# example: [12, 13, 14, 17] into ["12-14", "17"]
def _convert_number_to_range_list(self, num_list):
num_list.sort()
range_list = []
range_min = num_list[0]
for num in num_list:
next_val = num + 1
if next_val not in num_list:
if range_min != num:
range_list.append(str(range_min) + '-' + str(num))
else:
range_list.append(str(range_min))
next_index = num_list.index(num) + 1
if next_index < len(num_list):
range_min = num_list[next_index]
# here, range_list is a list of strings
return range_list
def range_list(self, num_list):
if not num_list:
msg = "Input param 'num_list' is blank."
raise Exception(msg)
try:
# splitting a string (comma delimited list) into
# list of numbers
# example: "12,13,14,17" string into [12,13,14,17]
num_list = [int(num.strip(' '))
for num in num_list.split(",")]
except ValueError as exc:
msg = ("Invalid number in input param "
"'num_list': %s" % exc)
raise Exception(msg)
range_list = self._convert_number_to_range_list(num_list)
# converts into comma delimited range list as string
return ','.join(range_list)
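The companion filter above performs the inverse conversion, collapsing a comma-delimited number list back into ranges. A minimal sketch of the expected behaviour, assuming the FilterModule class above is importable:

fm = FilterModule()
print(fm.range_list("12,13,14,17"))  # expected: "12-14,17"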

View File

@ -24,8 +24,11 @@ import os
from glanceclient import client as glanceclient
from heatclient.v1 import client as heatclient
from ironicclient import client as ironicclient
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
from novaclient import client as novaclient
from swiftclient import client as swift_client
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)
from tripleo_common.utils import nodes
from tripleo_common.utils import parameters
@ -41,10 +44,6 @@ except ImportError:
heat = None
class DeriveParamsError(Exception):
"""Error while performing a derive parameters operation"""
class TripleOCommon(object):
def __init__(self, session):
self.sess = session
@ -94,25 +93,6 @@ class TripleOCommon(object):
heatclient.Client(session=self.sess)
return self.client_cache['heatclient']
def get_compute_client(self):
"""Return the compute (nova) client.
This method will return a client object using the legacy library. Upon
the creation of a successful client creation, the client object will
be stored in the `self.client_cache object`, should this method be
called more than once, the cached object will automatically return,
resulting in fewer authentications and faster API interactions.
:returns: Object
"""
if 'novaclient' in self.client_cache:
return self.client_cache['novaclient']
else:
self.client_cache['novaclient'] = \
novaclient.Client(version=2, session=self.sess)
return self.client_cache['novaclient']
def get_baremetal_client(self):
"""Return the baremetal (ironic) client.
@ -192,17 +172,3 @@ class TripleOCommon(object):
client = self.get_ironic_inspector_client()
return client.get_data(node_id=node_id)
def return_flavor_profile(self, flavor_name):
"""Return flavor profile information.
:param flavor_name: Flavor name
:type flavor_name: String
:returns: Object
"""
return parameters.get_profile_of_flavor(
flavor_name=flavor_name,
compute_client=self.get_compute_client()
)

View File

@ -1,638 +0,0 @@
#!/usr/bin/env python
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Derive paramters for HCI (hyper-converged) deployments"""
import os
import re
import yaml
from ansible.module_utils.basic import AnsibleModule
ANSIBLE_METADATA = {
'metadata_version': '0.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = '''
---
module: tripleo_derive_hci_parameters
short_description: Tune Nova scheduler parameters to reserve resources for collocated Ceph OSDs
description:
- "When collocating Ceph OSDs on Nova Compute hosts (hyperconverged or hci) the Nova Scheduler does not take into account the CPU/Memory needs of the OSDs. This module returns recommended NovaReservedHostMemory and NovaCPUAllocationRatio parmaters so that the host reseves memory and CPU for Ceph. The values are based on workload description, deployment configuration, and Ironic data. The workload description is the expected average_guest_cpu_utilization_percentage and average_guest_memory_size_in_mb."
options:
tripleo_environment_parameters:
description: Map of the environment_parameters key from stack_data. Used to determine the number of OSDs in the deployment per role
required: True
type: map
tripleo_role_name:
description: TripleO role name whose parameters are being derived
required: True
type: string
introspection_data:
description: Output of the tripleo_get_introspected_data module. Used to determine available memory and CPU of each instance from any role with the CephOSD service
required: True
type: map
average_guest_cpu_utilization_percentage:
description: Percentage of CPU utilization expected for average guest, e.g. 99 means 99% and 10 means 10%
required: False
type: int
default: 0
average_guest_memory_size_in_mb:
description: Amount of memory in MB required by the average guest
required: False
type: int
default: 0
derived_parameters:
description: any previously derived parameters which should be included in the final result
required: False
type: map
new_heat_environment_path:
description: Path to the new file where the resultant derived parameters will be written; the result will be valid input to the TripleO client, e.g. /home/stack/derived_parameters.yaml
required: False
type: str
append_new_heat_environment_path:
description: If new_heat_environment_path already exists and append_new_heat_environment_path is true, then append new content to the existing new_heat_environment_path instead of overwriting that file.
required: False
type: bool
report_path:
description: Path to the new file where a report on how the HCI parameters were derived will be written, e.g. /home/stack/hci_derived_parameters.txt
required: False
type: str
author:
- John Fulton (fultonj)
'''
EXAMPLES = '''
- name: Add Derived HCI parameters to existing derived parameters for ComputeHCI role
tripleo_derive_hci_parameters:
tripleo_environment_parameters: "{{ tripleo_environment_parameters }}"
introspection_data: "{{ hw_data }}"
derived_parameters: "{{ derived_parameters }}"
tripleo_role_name: "ComputeHCI"
average_guest_cpu_utilization_percentage: 90
average_guest_memory_size_in_mb: 8192
new_heat_environment_path: "/home/stack/hci_result.yaml"
report_path: "/home/stack/hci_report.txt"
register: derived_parameters_result
- name: Show derived HCI Memory result
debug:
msg: "{{ derived_parameters_result['derived_parameters']['ComputeHCIParameters']['NovaReservedHostMemory'] }}"
- name: Show derived HCI CPU result
debug:
msg: "{{ derived_parameters_result['derived_parameters']['ComputeHCIParameters']['NovaCPUAllocationRatio'] }}"
'''
RETURN = '''
message:
description: A description of the HCI derived parameters calculation or an error message
type: str
returned: always
derived_parameters:
description: map with the derived HCI parameters and any previously derived parameters
required: False
type: map
'''
MB_PER_GB = 1024
def derive(mem_gb, vcpus, osds, average_guest_memory_size_in_mb=0,
average_guest_cpu_utilization_percentage=0,
mem_gb_per_osd=5, vcpus_per_osd=1, total_memory_threshold=0.8):
"""
Determines the recommended Nova scheduler values based on Ceph needs
and described average Nova guest workload in CPU and Memory utilization.
If the expected guest utilization is not provided, the result is less accurate.
Returns dictionary containing the keys: cpu_allocation_ratio (float),
nova_reserved_mem_mb (int), message (string), failed (boolean).
"""
gb_overhead_per_guest = 0.5 # based on measurement in test environment
# seed the result
derived = {}
derived['failed'] = False
derived['message'] = ""
messages = []
if average_guest_memory_size_in_mb == 0 and \
average_guest_cpu_utilization_percentage == 0:
workload = False
else:
workload = True
# catch possible errors in parameters
if mem_gb < 1:
messages.append("Unable to determine the amount of physical memory "
"(no 'memory_mb' found in introspection_data).")
derived['failed'] = True
if vcpus < 1:
messages.append("Unable to determine the number of CPU cores. "
"Either no 'cpus' found in introspection_data or "
"NovaVcpuPinSet is not correctly set.")
derived['failed'] = True
if osds < 1:
messages.append("No OSDs were found in the deployment definition. ")
derived['failed'] = True
if average_guest_memory_size_in_mb < 0 and workload:
messages.append("If average_guest_memory_size_in_mb "
"is used it must be greater than 0")
derived['failed'] = True
if average_guest_cpu_utilization_percentage < 0 and workload:
messages.append("If average_guest_cpu_utilization_percentage is "
"used it must be greater than 0")
derived['failed'] = True
left_over_mem = mem_gb - (mem_gb_per_osd * osds)
if left_over_mem < 0:
messages.append(("There is not enough memory to run %d OSDs. "
"%d GB RAM - (%d GB per OSD * %d OSDs) is < 0")
% (osds, mem_gb, mem_gb_per_osd, osds))
derived['failed'] = True
if derived['failed']:
derived['message'] = " ".join(messages)
return derived
# perform the calculation
if workload:
average_guest_size = average_guest_memory_size_in_mb / float(MB_PER_GB)
average_guest_util = average_guest_cpu_utilization_percentage * 0.01
number_of_guests = int(left_over_mem
/ (average_guest_size + gb_overhead_per_guest))
nova_reserved_mem_mb = MB_PER_GB * ((mem_gb_per_osd * osds)
+ (number_of_guests * gb_overhead_per_guest))
nonceph_vcpus = vcpus - (vcpus_per_osd * osds)
guest_vcpus = nonceph_vcpus / average_guest_util
cpu_allocation_ratio = guest_vcpus / vcpus
else:
nova_reserved_mem_mb = MB_PER_GB * (mem_gb_per_osd * osds)
# save calculation results
derived['nova_reserved_mem_mb'] = int(nova_reserved_mem_mb)
if workload:
derived['cpu_allocation_ratio'] = cpu_allocation_ratio
# capture derivation details in message
messages.append(("Derived Parameters results"
"\n Inputs:"
"\n - Total host RAM in GB: %d"
"\n - Total host vCPUs: %d"
"\n - Ceph OSDs per host: %d")
% (mem_gb, vcpus, osds))
if workload:
messages.append(("\n - Average guest memory size in GB: %d"
"\n - Average guest CPU utilization: %.0f%%") %
(average_guest_size, average_guest_cpu_utilization_percentage))
messages.append("\n Outputs:")
if workload:
messages.append(("\n - number of guests allowed based on memory = %d"
"\n - number of guest vCPUs allowed = %d"
"\n - nova.conf cpu_allocation_ratio = %2.2f") %
(number_of_guests, int(guest_vcpus), cpu_allocation_ratio))
messages.append(("\n - nova.conf reserved_host_memory = %d MB"
% nova_reserved_mem_mb))
if workload:
messages.append("\nCompare \"guest vCPUs allowed\" to "
"\"guests allowed based on memory\" "
"for actual guest count.")
warnings = []
if nova_reserved_mem_mb > (MB_PER_GB * mem_gb * total_memory_threshold):
warnings.append(("ERROR: %d GB is not enough memory to "
"run hyperconverged\n") % mem_gb)
derived['failed'] = True
if workload:
if cpu_allocation_ratio < 0.5:
warnings.append("ERROR: %d is not enough vCPU to run hyperconverged\n" % vcpus)
derived['failed'] = True
if cpu_allocation_ratio > 16.0:
warnings.append("WARNING: do not increase vCPU overcommit ratio beyond 16:1\n")
else:
warnings.append("WARNING: the average guest workload was not provided. \n"
"Both average_guest_cpu_utilization_percentage and \n"
"average_guest_memory_size_in_mb are defaulted to 0. \n"
"The HCI derived parameter calculation cannot set the \n"
"Nova cpu_allocation_ratio. The Nova reserved_host_memory_mb \n"
"will be set based on the number of OSDs but the Nova \n"
"guest memory overhead will not be taken into account. \n")
derived['message'] = " ".join(warnings) + " ".join(messages)
return derived
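# Rough worked example of the derive() calculation above, with hypothetical
# inputs (not taken from this change): a node with 256 GB RAM and 56 vCPUs
# running 12 OSDs, and an average guest of 8192 MB at 90% CPU utilization:
#   derive(256, 56, 12, 8192, 90)
#   left_over_mem = 256 - 5*12 = 196 GB -> 23 guests of 8.5 GB each
#   nova_reserved_mem_mb = 1024 * (5*12 + 23*0.5) = 73216
#   cpu_allocation_ratio = ((56 - 12) / 0.9) / 56 ~= 0.87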
def count_osds(tripleo_environment_parameters):
"""
Counts the requested OSDs in the tripleo_environment_parameters.
Returns an integer representing the count.
"""
total = 0
if 'CephAnsibleDisksConfig' in tripleo_environment_parameters:
disks_config = tripleo_environment_parameters['CephAnsibleDisksConfig']
for key in ['devices', 'lvm_volumes']:
if key in disks_config:
total = total + len(disks_config[key])
return total
def count_memory(ironic):
"""
Counts the memory found in the ironic introspection data. If
memory_mb is 0, uses ['inventory']['memory']['total'] in bytes.
Returns integer of memory in GB.
"""
memory = 0
if 'data' in ironic:
if 'memory_mb' in ironic['data']:
if int(ironic['data']['memory_mb']) > 0:
memory = int(ironic['data']['memory_mb']) / float(MB_PER_GB)
elif 'inventory' in ironic['data']:
if 'memory' in ironic['data']['inventory']:
if 'total' in ironic['data']['inventory']['memory']:
memory = int(ironic['data']['inventory']['memory']['total']) \
/ float(MB_PER_GB) / float(MB_PER_GB) / float(MB_PER_GB)
return memory
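# For example, introspection data of {'data': {'memory_mb': 262144}} yields
# 256.0 (GB); if only the byte total is present, e.g.
# {'data': {'inventory': {'memory': {'total': 274877906944}}}}, the same 256.0
# is obtained by dividing by 1024 three times.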
def convert_range_to_number_list(range_list):
"""
Returns list of numbers from descriptive range input list
E.g. ['12-14', '^13', '17'] is converted to [12, 14, 17]
Returns string with error message if unable to parse input
"""
# borrowed from jpalanis@redhat.com
num_list = []
exclude_num_list = []
try:
for val in range_list:
val = val.strip(' ')
if '^' in val:
exclude_num_list.append(int(val[1:]))
elif '-' in val:
split_list = val.split("-")
range_min = int(split_list[0])
range_max = int(split_list[1])
num_list.extend(range(range_min, (range_max + 1)))
else:
num_list.append(int(val))
except ValueError as exc:
return "Parse Error: Invalid number in input param 'num_list': %s" % exc
return [num for num in num_list if num not in exclude_num_list]
def count_nova_vcpu_pins(module):
"""
Returns the number of CPUs defined in NovaVcpuPinSet as set in
the environment or derived parameters. If multiple NovaVcpuPinSet
parameters are defined, priority is given to role, then the default
value for all roles, and then what's in previously derived_parameters
"""
tripleo_role_name = module.params['tripleo_role_name']
tripleo_environment_parameters = module.params['tripleo_environment_parameters']
derived_parameters = module.params['derived_parameters']
# NovaVcpuPinSet can be defined in multiple locations, and it's
# important to select the value in order of precedence:
# 1) User specified value for this role
# 2) User specified default value for all roles
# 3) Value derived by a previous derived parameters playbook run
#
# Set an exclusive prioritized possible_location to get the NovaVcpuPinSet
if tripleo_role_name + 'Parameters' in tripleo_environment_parameters: # 1
possible_location = tripleo_environment_parameters[tripleo_role_name + 'Parameters']
elif 'NovaVcpuPinSet' in tripleo_environment_parameters: # 2
possible_location = tripleo_environment_parameters
elif tripleo_role_name + 'Parameters' in derived_parameters: # 3
possible_location = derived_parameters[tripleo_role_name + 'Parameters']
else: # default the possible_location to an empty dictionary
possible_location = {}
if 'NovaVcpuPinSet' in possible_location:
converted = convert_range_to_number_list(possible_location['NovaVcpuPinSet'])
if isinstance(converted, str):
module.fail_json(converted)
if isinstance(converted, list):
return len(converted)
return 0
def count_vcpus(module):
# only look at ironic data if NovaVcpuPinSet is not used
vcpus = count_nova_vcpu_pins(module)
if vcpus == 0:
try:
vcpus = module.params['introspection_data']['data']['cpus']
except KeyError:
vcpus = 0
return vcpus
def get_vcpus_per_osd_from_ironic(ironic, tripleo_environment_parameters, num_osds):
"""
Dynamically sets the vCPU to OSD ratio based on the OSD type to:
HDD | OSDs per device: 1 | vCPUs per device: 1
SSD | OSDs per device: 1 | vCPUs per device: 4
NVMe | OSDs per device: 4 | vCPUs per device: 3
Gets requested OSD list from tripleo_environment_parameters input
and looks up the device type in ironic input. Returns the vCPUs
per OSD and an explanation message.
"""
cpus = 1
nvme_re = re.compile('.*nvme.*')
type_map = {}
hdd_count = ssd_count = nvme_count = 0
warning = False
messages = []
try:
devices = tripleo_environment_parameters['CephAnsibleDisksConfig']['devices']
except KeyError:
devices = []
messages.append("No devices defined in CephAnsibleDisksConfig")
warning = True
try:
ironic_disks = ironic['data']['inventory']['disks']
except KeyError:
ironic_disks = []
messages.append("No disks found in introspection data inventory")
warning = True
if len(devices) != num_osds:
messages.append("Not all OSDs are in the devices list. Unable to "
"determine hardware type for all OSDs. This might be "
"because lvm_volumes was used to define some OSDs. ")
warning = True
elif len(devices) > 0 and len(ironic_disks) > 0:
disks_config = tripleo_environment_parameters['CephAnsibleDisksConfig']
for osd_dev in disks_config['devices']:
for ironic_dev in ironic_disks:
for key in ('name', 'by_path', 'wwn'):
if key in ironic_dev:
if osd_dev == ironic_dev[key]:
if 'rotational' in ironic_dev:
if ironic_dev['rotational']:
type_map[osd_dev] = 'hdd'
hdd_count += 1
elif nvme_re.search(osd_dev):
type_map[osd_dev] = 'nvme'
nvme_count += 1
else:
type_map[osd_dev] = 'ssd'
ssd_count += 1
messages.append(("HDDs %i | Non-NVMe SSDs %i | NVMe SSDs %i \n" %
(hdd_count, ssd_count, nvme_count)))
if hdd_count > 0 and ssd_count == 0 and nvme_count == 0:
cpus = 1 # default
messages.append(("vCPU to OSD ratio: %i" % cpus))
elif hdd_count == 0 and ssd_count > 0 and nvme_count == 0:
cpus = 4
messages.append(("vCPU to OSD ratio: %i" % cpus))
elif hdd_count == 0 and ssd_count == 0 and nvme_count > 0:
# did they set OSDs per device?
if 'osds_per_device' in disks_config:
osds_per_device = disks_config['osds_per_device']
else:
osds_per_device = 1 # default defined in ceph-ansible
if osds_per_device == 4:
# All NVMe OSDs, so 3 vCPUs per OSD (12 vCPUs per NVMe device) for optimal IO performance
cpus = 3
else:
cpus = 4 # use standard SSD default
messages.append("\nWarning: osds_per_device not set to 4 "
"but all OSDs are of type NVMe. \n"
"Recomentation to improve IO: "
"set osds_per_device to 4 and re-run \n"
"so that vCPU to OSD ratio is 3 "
"for 12 vCPUs per OSD device.")
warning = True
messages.append(("vCPU to OSD ratio: %i"
" (found osds_per_device set to: %i)") %
(cpus, osds_per_device))
elif hdd_count == 0 and ssd_count == 0 and nvme_count == 0:
cpus = 1 # default
messages.append(("vCPU to OSD ratio: %i \nWarning: "
"unable to determine OSD types. "
"Unable to recommend optimal ratio "
"so using default.") % cpus)
warning = True
else:
cpus = 1 # default
messages.append(("vCPU to OSD ratio: %i \nWarning: Requested "
"OSDs are of mixed type. Unable to recommend "
"optimal ratio so using default.") % cpus)
warning = True
msg = "".join(["\nOSD type distribution:\n"] + messages)
if warning:
msg = "WARNING: " + msg
return cpus, msg
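# Illustrative classification with hypothetical inputs: with a single requested
# OSD device '/dev/sda' and a matching ironic disk entry
# {'name': '/dev/sda', 'rotational': True}, the device counts as 'hdd' and the
# function returns (1, <message>); a '/dev/nvme0n1' device whose ironic entry
# has rotational set to False is counted as 'nvme' instead.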
def get_vcpus_per_osd(tripleo_environment_parameters, osd_count, osd_type, osd_spec):
"""
Dynamically sets the vCPU to OSD ratio based on the OSD type to:
HDD | OSDs per device: 1 | vCPUs per device: 1
SSD | OSDs per device: 1 | vCPUs per device: 4
NVMe | OSDs per device: 4 | vCPUs per device: 3
Relies on parameters from tripleo_environment_parameters input.
Returns the vCPUs per OSD and an explanation message.
"""
cpus = 1
messages = []
warning = False
# This module can analyze a THT file even when it is not called from
# within Heat. Thus, we cannot assume THT validations are enforced.
if osd_type not in ['hdd', 'ssd', 'nvme']:
warning = True
messages.append(("'%s' is not a valid osd_type so "
"defaulting to 'hdd'. ") % osd_type)
osd_type = 'hdd'
messages.append(("CephHciOsdType: %s\n") % osd_type)
if osd_type == 'hdd':
cpus = 1
elif osd_type == 'ssd':
cpus = 4
elif osd_type == 'nvme':
# If they set it to NVMe and used a manual spec, then 3 is also valid
cpus = 3
if not isinstance(osd_spec, dict):
messages.append("\nNo valid CephOsdSpec was found. Unable "
"to determine if osds_per_device is being used. "
"osds_per_device: 4 is recommended for 'nvme'. ")
warning = True
if isinstance(osd_spec, dict) and 'osds_per_device' in osd_spec:
if osd_spec['osds_per_device'] == 4:
cpus = 3
else:
cpus = 4
messages.append("\nosds_per_device not set to 4 "
"but all OSDs are of type NVMe. \n"
"Recommendation to improve IO: "
"set osds_per_device to 4 and re-run \n"
"so that vCPU to OSD ratio is 3 "
"for 12 vCPUs per OSD device.")
warning = True
messages.append(("vCPU to OSD ratio: %i\n" % cpus))
if osd_spec != 0 and 'osds_per_device' in osd_spec:
messages.append(" (found osds_per_device set to: %i)" %
osd_spec['osds_per_device'])
msg = "".join(messages)
if warning:
msg = "WARNING: " + msg
return cpus, msg
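# Illustrative outputs of get_vcpus_per_osd() (osd_count and the environment
# map do not affect the ratio itself):
#   osd_type='hdd'                                    -> 1 vCPU per OSD
#   osd_type='ssd'                                    -> 4 vCPUs per OSD
#   osd_type='nvme', osd_spec={'osds_per_device': 4}  -> 3 vCPUs per OSD
#   osd_type='nvme', osd_spec={'osds_per_device': 1}  -> 4 vCPUs per OSD, with a warning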
def find_parameter(env, param, role=""):
"""
Find a parameter in an environment map and return it.
If the parameter is not found, return 0.
Supports role parameters too. E.g. given the following
inside of env, with param=CephHciOsdCount and role="",
this function returns 3. But if role=ComputeHCI, then
it would return 4.
CephHciOsdCount: 3
ComputeHCIParameters:
CephHciOsdCount: 4
"""
role_parameters = role + 'Parameters'
if role_parameters in env and param in env[role_parameters]:
return env[role_parameters][param]
elif param in env:
return env[param]
return 0
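# For example, with the environment sketched in the docstring above:
#   env = {'CephHciOsdCount': 3, 'ComputeHCIParameters': {'CephHciOsdCount': 4}}
#   find_parameter(env, 'CephHciOsdCount')               -> 3
#   find_parameter(env, 'CephHciOsdCount', 'ComputeHCI') -> 4
#   find_parameter(env, 'CephHciOsdType')                -> 0 (not found)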
def main():
"""Main method of Ansible module
"""
result = dict(
changed=False,
message=''
)
module_args = dict(
tripleo_environment_parameters=dict(type=dict, required=True),
tripleo_role_name=dict(type=str, required=True),
introspection_data=dict(type=dict, required=True),
average_guest_cpu_utilization_percentage=dict(type=int, required=False, default=0),
average_guest_memory_size_in_mb=dict(type=int, required=False, default=0),
derived_parameters=dict(type=dict, required=False),
new_heat_environment_path=dict(type=str, required=False),
append_new_heat_environment_path=dict(type=bool, required=False),
report_path=dict(type=str, required=False),
)
module = AnsibleModule(
argument_spec=module_args,
supports_check_mode=True
)
if module.params['derived_parameters'] is None:
module.params['derived_parameters'] = {}
vcpus = count_vcpus(module)
mem_gb = count_memory(module.params['introspection_data'])
num_osds = find_parameter(module.params['tripleo_environment_parameters'],
'CephHciOsdCount', module.params['tripleo_role_name'])
if num_osds > 0:
osd_type = find_parameter(module.params['tripleo_environment_parameters'],
'CephHciOsdType', module.params['tripleo_role_name'])
osd_spec = find_parameter(module.params['tripleo_environment_parameters'],
'CephOsdSpec', module.params['tripleo_role_name'])
vcpu_ratio, vcpu_ratio_msg = get_vcpus_per_osd(
module.params['tripleo_environment_parameters'],
num_osds, osd_type, osd_spec)
else:
num_osds = count_osds(module.params['tripleo_environment_parameters'])
vcpu_ratio, vcpu_ratio_msg = get_vcpus_per_osd_from_ironic(
module.params['introspection_data'],
module.params['tripleo_environment_parameters'],
num_osds)
# Derive HCI parameters
mem_gb_per_osd = 5
derivation = derive(mem_gb, vcpus, num_osds,
module.params['average_guest_memory_size_in_mb'],
module.params['average_guest_cpu_utilization_percentage'],
mem_gb_per_osd, vcpu_ratio)
# directly set failed status and message
result['failed'] = derivation['failed']
result['message'] = derivation['message'] + "\n" + vcpu_ratio_msg
# make a copy of the existing derived_parameters (e.g. perhaps from NFV)
existing_params = module.params['derived_parameters']
# add HCI derived parameters for the Nova scheduler
if not derivation['failed']:
role_derivation = {}
role_derivation['NovaReservedHostMemory'] = derivation['nova_reserved_mem_mb']
if 'cpu_allocation_ratio' in derivation:
role_derivation['NovaCPUAllocationRatio'] = derivation['cpu_allocation_ratio']
role_name_parameters = module.params['tripleo_role_name'] + 'Parameters'
existing_params[role_name_parameters] = role_derivation
# write out to file if requested
if module.params['new_heat_environment_path'] and not module.check_mode:
if module.params['append_new_heat_environment_path'] and \
os.path.exists(module.params['new_heat_environment_path']):
with open(module.params['new_heat_environment_path'], 'r') as stream:
try:
output = yaml.safe_load(stream)
if 'parameter_defaults' in output:
output['parameter_defaults'][role_name_parameters] = \
role_derivation
else:
result['failed'] = True
result['message'] = ("tripleo_derive_hci_parameters module "
"cannot append to environment file %s. "
"It is missing the 'parameter_defaults' "
"key. Try again with the parameter "
"append_new_heat_environment_path set "
"False") \
% module.params['new_heat_environment_path']
except yaml.YAMLError as exc:
result['failed'] = True
result['message'] = str(exc)
else:
output = {}
output['parameter_defaults'] = existing_params
with open(module.params['new_heat_environment_path'], 'w') as outfile:
yaml.safe_dump(output, outfile, default_flow_style=False)
# because we wrote a file we're making a change on the target system
result['changed'] = True
if module.params['report_path'] and not module.check_mode:
with open(module.params['report_path'], 'w') as outfile:
outfile.write(result['message'])
# because we wrote a file we're making a change on the target system
result['changed'] = True
# return existing derived parameters with the new HCI parameters too
result['derived_parameters'] = existing_params
# Exit and pass the key/value results
module.exit_json(**result)
if __name__ == '__main__':
main()

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat, Inc.
@ -162,3 +163,5 @@ def main():
if __name__ == '__main__':
main()
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat, Inc.
@ -278,3 +279,5 @@ def main():
if __name__ == '__main__':
main()
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,180 +0,0 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__metaclass__ = type
from ansible.module_utils.basic import AnsibleModule
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_module_kwargs
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_cloud_from_module
ANSIBLE_METADATA = {
'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'
}
DOCUMENTATION = """
---
module: tripleo_get_dpdk_socket_memory
author:
- Jaganathan Palanisamy <jpalanis@redhat.com>
version_added: '2.8'
short_description: Gets the dpdk socket memory
notes: []
description:
- This module gets the dpdk socket memory
options:
dpdk_nics_numa_info:
description:
- DPDK nics numa details
required: True
type: list
numa_nodes:
description:
- NUMA nodes
required: True
type: list
overhead:
description:
- Overhead value
required: True
type: int
packet_size_in_buffer:
description:
- Packet size in buffer value
required: True
type: int
minimum_socket_memory:
description:
- Minimum socket memory per node
required: False
type: int
default: 1024
"""
EXAMPLES = """
- name: Gets the DPDK socket memory
tripleo_get_dpdk_socket_memory:
dpdk_nics_numa_info: {}
numa_nodes: []
overhead: 800
packet_size_in_buffer: 64
minimum_socket_memory: 1500
"""
RETURN = """
configs:
description:
- DPDK socket memory for each numa node.
returned: always
type: string
"""
import json
import math
import yaml
# Rounds an MTU value up to the next multiple of 1024 bytes
# example: MTU value 9000 becomes 9216 bytes
def _roundup_mtu_bytes(mtu):
max_div_val = int(math.ceil(float(mtu) / float(1024)))
return (max_div_val * 1024)
# Calculates socket memory for a NUMA node
def _calculate_node_socket_memory(numa_node, dpdk_nics_numa_info,
overhead, packet_size_in_buffer,
minimum_socket_memory):
distinct_mtu_per_node = []
socket_memory = 0
# For DPDK numa node
for nics_info in dpdk_nics_numa_info:
if (numa_node == nics_info['numa_node']
and not nics_info['mtu'] in distinct_mtu_per_node):
distinct_mtu_per_node.append(nics_info['mtu'])
roundup_mtu = _roundup_mtu_bytes(nics_info['mtu'])
socket_memory += (((roundup_mtu + overhead) * packet_size_in_buffer)
/ (1024 * 1024))
# For Non DPDK numa node
if socket_memory == 0:
socket_memory = minimum_socket_memory
# For DPDK numa node
else:
socket_memory += 512
socket_memory_in_gb = int(socket_memory / 1024)
if socket_memory % 1024 > 0:
socket_memory_in_gb += 1
return (socket_memory_in_gb * 1024)
# Gets the DPDK Socket Memory List.
# For NUMA node with DPDK nic, socket memory is calculated
# based on MTU, Overhead and Packet size in buffer.
# For NUMA node without DPDK nic, minimum socket memory is
# assigned (recommended 1GB)
def _get_dpdk_socket_memory(dpdk_nics_numa_info, numa_nodes, overhead,
packet_size_in_buffer,
minimum_socket_memory=1024):
dpdk_socket_memory_list = []
for node in numa_nodes:
socket_mem = _calculate_node_socket_memory(
node, dpdk_nics_numa_info, overhead,
packet_size_in_buffer, minimum_socket_memory)
dpdk_socket_memory_list.append(socket_mem)
return ','.join([str(sm) for sm in dpdk_socket_memory_list])
def main():
result = dict(
socket_memory="",
success=False,
error=None
)
module = AnsibleModule(
openstack_full_argument_spec(
**yaml.safe_load(DOCUMENTATION)['options']
),
**openstack_module_kwargs()
)
try:
result['socket_memory'] = _get_dpdk_socket_memory(
module.params["dpdk_nics_numa_info"],
module.params["numa_nodes"],
module.params["overhead"],
module.params["packet_size_in_buffer"],
module.params["minimum_socket_memory"]
)
except Exception as exp:
result['error'] = str(exp)
result['msg'] = 'Error unable to determine DPDK socket memory : {}'.format(
exp
)
module.fail_json(**result)
else:
result['success'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
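For reference, a minimal sketch of the removed socket-memory calculation with hypothetical inputs (one DPDK NIC with MTU 9000 on NUMA node 0, no DPDK NIC on node 1, and the overhead and packet-size values from the EXAMPLES block above):

dpdk_nics = [{'numa_node': 0, 'mtu': 9000}]
result = _get_dpdk_socket_memory(dpdk_nics, [0, 1], 800, 64, 1500)
# node 0: MTU 9000 rounds up to 9216; ((9216 + 800) * 64) / 1024**2 + 512 ~= 513 MB, rounded up to 1024
# node 1: no DPDK NIC, so the 1500 MB minimum applies, rounded up to 2048
print(result)  # expected: "1024,2048"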

View File

@ -1,94 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import tripleo_common_utils as tc
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_module_kwargs
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_cloud_from_module
DOCUMENTATION = """
---
module: tripleo_get_flavor_profile
short_description: Get the flavor profile data
extends_documentation_fragment: openstack
author:
- "Kevin Carter (@cloudnull)"
version_added: "2.10"
description:
- Pull profile from a given flavor
options:
flavor_name:
description:
- Name of flavor
type: str
required: true
requirements: ["openstacksdk", "tripleo-common"]
"""
EXAMPLES = """
- name: Get flavor profile
tripleo_get_flavor_profile:
flavor_name: m1.tiny
register: flavor_profile
"""
import os
import yaml
from tripleo_common import exception
from tripleo_common.utils import stack_parameters as stack_param_utils
def main():
result = dict(
success=False,
changed=False,
error=None,
)
module = AnsibleModule(
openstack_full_argument_spec(
**yaml.safe_load(DOCUMENTATION)['options']
),
**openstack_module_kwargs()
)
_, conn = openstack_cloud_from_module(module)
tripleo = tc.TripleOCommon(session=conn.session)
try:
result['profile'] = tripleo.return_flavor_profile(
module.params["flavor_name"]
)
except exception.DeriveParamsError:
result['profile'] = None
result['success'] = True
module.exit_json(**result)
except Exception as exp:
result['error'] = str(exp)
result['msg'] = 'Error pulling flavor properties for {}: {}'.format(
module.params["flavor_name"],
exp
)
module.fail_json(**result)
else:
result['success'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright 2020 Red Hat, Inc.
@ -148,3 +149,5 @@ def main():
if __name__ == '__main__':
main()
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,89 +0,0 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils import tripleo_common_utils as tc
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_full_argument_spec
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_module_kwargs
from ansible_collections.openstack.cloud.plugins.module_utils.openstack import openstack_cloud_from_module
DOCUMENTATION = """
---
module: tripleo_get_introspected_data
short_description: Retrieve introspection data
extends_documentation_fragment: openstack
author:
- "Kevin Carter (@cloudnull)"
version_added: "2.10"
description:
- Pull introspection data from a baremetal node.
options:
node_id:
description:
- ID of the baremetal node
type: str
required: true
requirements: ["openstacksdk", "tripleo-common"]
"""
EXAMPLES = """
- name: Get introspected data
tripleo_get_introspected_data:
node_id: xxx
register: introspected_data
"""
import os
import yaml
from tripleo_common import exception
def main():
result = dict(
success=False,
changed=False,
error=None,
)
module = AnsibleModule(
openstack_full_argument_spec(
**yaml.safe_load(DOCUMENTATION)['options']
),
**openstack_module_kwargs()
)
_, conn = openstack_cloud_from_module(module)
tripleo = tc.TripleOCommon(session=conn.session)
try:
result['data'] = tripleo.return_introspected_node_data(
node_id=module.params["node_id"]
)
except Exception as exp:
result['error'] = str(exp)
result['msg'] = 'Error pulling introspection data for {}: {}'.format(
module.params["node_id"],
exp
)
module.fail_json(**result)
else:
result['success'] = True
module.exit_json(**result)
if __name__ == "__main__":
main()

View File

@ -1,61 +0,0 @@
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
- name: Derive parameters
connection: "{{ (tripleo_target_host is defined) | ternary('ssh', 'local') }}"
hosts: "{{ tripleo_target_host | default('localhost') }}"
remote_user: "{{ tripleo_target_user | default(lookup('env', 'USER')) }}"
gather_facts: "{{ (tripleo_target_host is defined) | ternary(true, false) }}"
any_errors_fatal: true
vars:
plan: overcloud
pre_tasks:
- name: Set local connection user facts
set_fact:
ansible_home: "{{ lookup('env', 'HOME') }}"
ansible_user: "{{ lookup('env', 'USER') }}"
run_once: true
when:
- (tripleo_target_host is defined) | ternary('ssh', 'local') == 'local'
- name: Fail if stack_data is not defined
when:
- tripleo_get_flatten_params.stack_data is not defined
- stack_data is not defined
fail:
msg: "Missing stack_data"
- name: Fail if role_list is not defined
when:
- tripleo_role_list.roles is not defined
- role_list is not defined
fail:
msg: "Missing valid roles"
- name: Derive params for each role
include_role:
name: tripleo_derived_parameters
vars:
tripleo_plan_name: "{{ plan }}"
tripleo_role_name: "{{ outer_item }}"
tripleo_environment_parameters: "{{ tripleo_get_flatten_params.stack_data.environment_parameters }}"
tripleo_heat_resource_tree: "{{ tripleo_get_flatten_params.stack_data.heat_resource_tree }}"
new_heat_environment_path: "{{ derived_environment_path }}"
append_new_heat_environment_path: True
loop: "{{ tripleo_role_list.roles }}"
loop_control:
loop_var: outer_item

View File

@ -1,27 +0,0 @@
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# All variables intended for modification should be placed in this file.
# All variables within this role should have a prefix of "tripleo_tripleo_derived_parameters"
tripleo_tripleo_derived_parameters_debug: "{{ ((ansible_verbosity | int) >= 2) | bool }}"
tripleo_plan_name: Overcloud
tripleo_role_name: undefined
tripleo_environment_parameters: {}
tripleo_heat_resource_tree: {}
new_heat_environment_path: ""
append_new_heat_environment_path: false

View File

@ -1,43 +0,0 @@
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
galaxy_info:
namespace: openstack
author: OpenStack
description: TripleO OpenStack Role -- tripleo_derived_parameters
company: Red Hat
license: Apache-2.0
min_ansible_version: 2.7
#
# Provide a list of supported platforms, and for each platform a list of versions.
# If you don't wish to enumerate all versions for a particular platform, use 'all'.
# To view available platforms and versions (or releases), visit:
# https://galaxy.ansible.com/api/v1/platforms/
#
platforms:
- name: CentOS
versions:
- 7
- 8
galaxy_tags:
- tripleo
# List your role dependencies here, one per line. Be sure to remove the '[]' above,
# if you add dependencies to this list.
dependencies: []

View File

@ -1,43 +0,0 @@
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
- name: Converge
hosts: all
vars:
tripleo_get_flatten_params: "{{ lookup('file', '../mock_params') | from_yaml }}"
tripleo_role_list: "{{ lookup('file', '../mock_roles') | from_yaml }}"
tripleo_all_nodes: "{{ lookup('file', '../mock_ironic_all') | from_yaml }}"
hci_profile_config: "{{ lookup('file', '../mock_hci_profile_config') | from_yaml }}"
hci_profile: default
num_phy_cores_per_numa_node_for_pmd: 1
hw_data_required: true
tasks:
- name: Derive params for each role
include_role:
name: tripleo_derived_parameters
vars:
tripleo_plan_name: "" # empty string so molecule doesn't try to update plan
tripleo_all_nodes: [] # empty list so molecule doesn't try to talk to ironic
tripleo_role_name: "{{ outer_item }}"
tripleo_environment_parameters: "{{ tripleo_get_flatten_params.stack_data.environment_parameters }}"
tripleo_heat_resource_tree: "{{ tripleo_get_flatten_params.stack_data.heat_resource_tree }}"
baremetal_data: "{{ lookup('file', '../mock_baremetal_{{ outer_item }}') | from_yaml }}"
new_heat_environment_path: ""
append_new_heat_environment_path: false
loop: "{{ tripleo_role_list.roles }}"
loop_control:
loop_var: outer_item

View File

@ -1,33 +0,0 @@
---
driver:
name: podman
provisioner:
name: ansible
inventory:
hosts:
all:
hosts:
centos:
ansible_python_interpreter: /usr/bin/python3
log: true
env:
ANSIBLE_STDOUT_CALLBACK: yaml
ANSIBLE_ROLES_PATH: "${ANSIBLE_ROLES_PATH:-/usr/share/ansible/roles}:${HOME}/zuul-jobs/roles"
ANSIBLE_MODULE_UTILS: "${ANSIBLE_MODULE_UTILS:-/tripleo_ansible/ansible_plugins/module_utils}"
ANSIBLE_LIBRARY: "${ANSIBLE_LIBRARY:-/usr/share/ansible/plugins/modules}"
ANSIBLE_FILTER_PLUGINS: "${ANSIBLE_FILTER_PLUGINS:-/usr/share/ansible/plugins/filter}"
ANSIBLE_ACTION_PLUGINS: "${ANSIBLE_ACTION_PLUGINS:-/usr/share/ansible/plugins/action}"
scenario:
test_sequence:
- destroy
- create
- prepare
- converge
- check
- verify
- destroy
verifier:
name: testinfra

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
@ -19,3 +20,5 @@
hosts: all
roles:
- role: test_deps
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,574 +0,0 @@
---
success: true
changed: false
error: null
data:
inventory:
interfaces:
-
name: "ens3"
mac_address: "24:42:00:68:d0:30"
ipv4_address: "192.168.24.100"
ipv6_address: "fe80::c67a:d622:19ef:abbb%ens3"
has_carrier: true
lldp: []
vendor: "0x1af4"
product: "0x0001"
client_id: null
biosdevname: null
-
name: "ens4"
mac_address: "24:42:00:61:03:eb"
ipv4_address: "192.168.24.101"
ipv6_address: "fe80::2065:a697:4f38:715%ens4"
has_carrier: true
lldp: []
vendor: "0x1af4"
product: "0x0001"
client_id: null
biosdevname: null
cpu:
model_name: "AMD Ryzen 7 1800X Eight-Core Processor"
frequency: "3599.998"
count: 2
architecture: "x86_64"
flags:
- "fpu"
- "vme"
- "de"
- "pse"
- "tsc"
- "msr"
- "pae"
- "mce"
- "cx8"
- "apic"
- "sep"
- "mtrr"
- "pge"
- "mca"
- "cmov"
- "pat"
- "pse36"
- "clflush"
- "mmx"
- "fxsr"
- "sse"
- "sse2"
- "syscall"
- "nx"
- "mmxext"
- "fxsr_opt"
- "pdpe1gb"
- "rdtscp"
- "lm"
- "rep_good"
- "nopl"
- "cpuid"
- "extd_apicid"
- "tsc_known_freq"
- "pni"
- "pclmulqdq"
- "ssse3"
- "fma"
- "cx16"
- "sse4_1"
- "sse4_2"
- "x2apic"
- "movbe"
- "popcnt"
- "tsc_deadline_timer"
- "aes"
- "xsave"
- "avx"
- "f16c"
- "rdrand"
- "hypervisor"
- "lahf_lm"
- "cmp_legacy"
- "svm"
- "cr8_legacy"
- "abm"
- "sse4a"
- "misalignsse"
- "3dnowprefetch"
- "osvw"
- "perfctr_core"
- "cpb"
- "ssbd"
- "vmmcall"
- "fsgsbase"
- "tsc_adjust"
- "bmi1"
- "avx2"
- "smep"
- "bmi2"
- "rdseed"
- "adx"
- "smap"
- "clflushopt"
- "sha_ni"
- "xsaveopt"
- "xsavec"
- "xgetbv1"
- "virt_ssbd"
- "arat"
- "npt"
- "nrip_save"
- "arch_capabilities"
disks:
-
name: "/dev/sda"
model: "QEMU HARDDISK"
size: 53687091200
rotational: true
wwn: null
serial: "drive-scsi0-0-0-5"
vendor: "QEMU"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "1:0:0:5"
by_path: "/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:5"
-
name: "/dev/sdb"
model: "QEMU HARDDISK"
size: 53687091200
rotational: true
wwn: null
serial: "drive-scsi0-0-0-4"
vendor: "QEMU"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "1:0:0:4"
by_path: "/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:4"
-
name: "/dev/sdc"
model: "QEMU HARDDISK"
size: 53687091200
rotational: true
wwn: null
serial: "drive-scsi0-0-0-3"
vendor: "QEMU"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "1:0:0:3"
by_path: "/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:3"
-
name: "/dev/sdd"
model: "QEMU HARDDISK"
size: 53687091200
rotational: true
wwn: null
serial: "drive-scsi0-0-0-2"
vendor: "QEMU"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "1:0:0:2"
by_path: "/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:2"
-
name: "/dev/sde"
model: "QEMU HARDDISK"
size: 107374182400
rotational: true
wwn: null
serial: "QM00001"
vendor: "ATA"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "0:0:0:0"
by_path: "/dev/disk/by-path/pci-0000:00:01.1-ata-1"
memory:
total: 274877766206
physical_mb: 256000
bmc_address: "0.0.0.0"
bmc_v6address: "::/0"
system_vendor:
product_name: "KVM"
serial_number: ""
manufacturer: "Red Hat"
boot:
current_boot_mode: "bios"
pxe_interface: "24:42:00:68:d0:30"
hostname: "localhost.localdomain"
root_disk:
name: "/dev/sde"
model: "QEMU HARDDISK"
size: 107374182400
rotational: true
wwn: null
serial: "QM00001"
vendor: "ATA"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "0:0:0:0"
by_path: "/dev/disk/by-path/pci-0000:00:01.1-ata-1"
boot_interface: "24:42:00:68:d0:30"
configuration:
collectors:
- "default"
- "extra-hardware"
- "numa-topology"
- "logs"
managers:
-
name: "generic_hardware_manager"
version: "1.1"
numa_topology:
ram:
-
numa_node: 0
size_kb: 5812392
cpus:
-
thread_siblings:
- 1
- 0
cpu: 0
numa_node: 0
nics: []
error: null
ipmi_address: null
ipmi_v6address: null
all_interfaces:
ens3:
ip: "192.168.24.100"
mac: "24:42:00:68:d0:30"
client_id: null
pxe: true
ens4:
ip: "192.168.24.101"
mac: "24:42:00:61:03:eb"
client_id: null
pxe: false
interfaces:
ens3:
ip: "192.168.24.100"
mac: "24:42:00:68:d0:30"
client_id: null
pxe: true
macs:
- "24:42:00:68:d0:30"
local_gb: 99
cpus: 2
cpu_arch: "x86_64"
memory_mb: 256000
extra:
disk:
logical:
count: 5
sdd:
size: 53
vendor: "QEMU"
model: "QEMU HARDDISK"
rev: "2.5+"
optimal_io_size: 0
physical_block_size: 512
rotational: 1
nr_requests: 256
scheduler: "mq-deadline"
Write Cache Enable: 1
Read Cache Disable: 0
scsi-id: "scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-2"
SMART/vendor: "QEMU"
SMART/product: "QEMU HARDDISK"
sdb:
size: 53
vendor: "QEMU"
model: "QEMU HARDDISK"
rev: "2.5+"
optimal_io_size: 0
physical_block_size: 512
rotational: 1
nr_requests: 256
scheduler: "mq-deadline"
Write Cache Enable: 1
Read Cache Disable: 0
scsi-id: "scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-4"
SMART/vendor: "QEMU"
SMART/product: "QEMU HARDDISK"
sde:
size: 107
vendor: "ATA"
model: "QEMU HARDDISK"
rev: "2.5+"
optimal_io_size: 0
physical_block_size: 512
rotational: 1
nr_requests: 2
scheduler: "mq-deadline"
Write Cache Enable: 1
Read Cache Disable: 0
id: "ata-QEMU_HARDDISK_QM00001"
SMART/device_model: "QEMU HARDDISK"
SMART/serial_number: "QM00001"
SMART/firmware_version: "2.5+"
SMART/Raw_Read_Error_Rate(1)/value: 100
SMART/Raw_Read_Error_Rate(1)/worst: 100
SMART/Raw_Read_Error_Rate(1)/thresh: 6
SMART/Raw_Read_Error_Rate(1)/when_failed: "NEVER"
SMART/Raw_Read_Error_Rate(1)/raw: 0
SMART/Spin_Up_Time(3)/value: 100
SMART/Spin_Up_Time(3)/worst: 100
SMART/Spin_Up_Time(3)/thresh: 0
SMART/Spin_Up_Time(3)/when_failed: "NEVER"
SMART/Spin_Up_Time(3)/raw: 16
SMART/Start_Stop_Count(4)/value: 100
SMART/Start_Stop_Count(4)/worst: 100
SMART/Start_Stop_Count(4)/thresh: 20
SMART/Start_Stop_Count(4)/when_failed: "NEVER"
SMART/Start_Stop_Count(4)/raw: 100
SMART/Reallocated_Sector_Ct(5)/value: 100
SMART/Reallocated_Sector_Ct(5)/worst: 100
SMART/Reallocated_Sector_Ct(5)/thresh: 36
SMART/Reallocated_Sector_Ct(5)/when_failed: "NEVER"
SMART/Reallocated_Sector_Ct(5)/raw: 0
SMART/Power_On_Hours(9)/value: 100
SMART/Power_On_Hours(9)/worst: 100
SMART/Power_On_Hours(9)/thresh: 0
SMART/Power_On_Hours(9)/when_failed: "NEVER"
SMART/Power_On_Hours(9)/raw: 1
SMART/Power_Cycle_Count(12)/value: 100
SMART/Power_Cycle_Count(12)/worst: 100
SMART/Power_Cycle_Count(12)/thresh: 0
SMART/Power_Cycle_Count(12)/when_failed: "NEVER"
SMART/Power_Cycle_Count(12)/raw: 0
SMART/Airflow_Temperature_Cel(190)/value: 69
SMART/Airflow_Temperature_Cel(190)/worst: 69
SMART/Airflow_Temperature_Cel(190)/thresh: 50
SMART/Airflow_Temperature_Cel(190)/when_failed: "NEVER"
SMART/Airflow_Temperature_Cel(190)/raw: " 31 (Min/Max 31/31)"
sdc:
size: 53
vendor: "QEMU"
model: "QEMU HARDDISK"
rev: "2.5+"
optimal_io_size: 0
physical_block_size: 512
rotational: 1
nr_requests: 256
scheduler: "mq-deadline"
Write Cache Enable: 1
Read Cache Disable: 0
scsi-id: "scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-3"
SMART/vendor: "QEMU"
SMART/product: "QEMU HARDDISK"
sda:
size: 53
vendor: "QEMU"
model: "QEMU HARDDISK"
rev: "2.5+"
optimal_io_size: 0
physical_block_size: 512
rotational: 1
nr_requests: 256
scheduler: "mq-deadline"
Write Cache Enable: 1
Read Cache Disable: 0
scsi-id: "scsi-0QEMU_QEMU_HARDDISK_drive-scsi0-0-0-5"
SMART/vendor: "QEMU"
SMART/product: "QEMU HARDDISK"
system:
product:
name: "KVM"
vendor: "Red Hat"
version: "RHEL 7.6.0 PC (i440FX + PIIX, 1996)"
uuid: "d1eaed04-8530-4592-89af-b82487eda835"
kernel:
version: "4.18.0-147.8.1.el8_1.x86_64"
arch: "x86_64"
cmdline: "ipa-inspection-callback-url=http://192.168.24.1:5050/v1/continue ipa-inspection-collectors=default,extra-hardware,numa-topology,logs systemd.journald.forward_to_console=yes BOOTIF=24:42:00:68:d0:30 ipa-debug=1 ipa-inspection-dhcp-all-interfaces=1 ipa-collect-lldp=1 initrd=agent.ramdisk"
rtc:
utc: "no"
firmware:
bios:
version: "1.11.0-2.el7"
date: "04/01/2014"
vendor: "SeaBIOS"
memory:
total:
size: 6144655360
network:
ens3:
businfo: "virtio@0"
ipv4: "192.168.24.100"
ipv4-netmask: "255.255.255.0"
ipv4-cidr: 24
ipv4-network: "192.168.24.0"
link: "yes"
driver: "virtio_net"
autonegotiation: "off"
serial: "24:42:00:68:d0:30"
rx-checksumming: "on [fixed]"
tx-checksumming: "on"
tx-checksumming/tx-checksum-ipv4: "off [fixed]"
tx-checksumming/tx-checksum-ip-generic: "on"
tx-checksumming/tx-checksum-ipv6: "off [fixed]"
tx-checksumming/tx-checksum-fcoe-crc: "off [fixed]"
tx-checksumming/tx-checksum-sctp: "off [fixed]"
scatter-gather: "on"
scatter-gather/tx-scatter-gather: "on"
scatter-gather/tx-scatter-gather-fraglist: "off [fixed]"
tcp-segmentation-offload: "on"
tcp-segmentation-offload/tx-tcp-segmentation: "on"
tcp-segmentation-offload/tx-tcp-ecn-segmentation: "on"
tcp-segmentation-offload/tx-tcp-mangleid-segmentation: "off"
tcp-segmentation-offload/tx-tcp6-segmentation: "on"
generic-segmentation-offload: "on"
generic-receive-offload: "on"
large-receive-offload: "off [fixed]"
rx-vlan-offload: "off [fixed]"
tx-vlan-offload: "off [fixed]"
ntuple-filters: "off [fixed]"
receive-hashing: "off [fixed]"
highdma: "on [fixed]"
rx-vlan-filter: "on [fixed]"
vlan-challenged: "off [fixed]"
tx-lockless: "off [fixed]"
netns-local: "off [fixed]"
tx-gso-robust: "on [fixed]"
tx-fcoe-segmentation: "off [fixed]"
tx-gre-segmentation: "off [fixed]"
tx-gre-csum-segmentation: "off [fixed]"
tx-ipxip4-segmentation: "off [fixed]"
tx-ipxip6-segmentation: "off [fixed]"
tx-udp_tnl-segmentation: "off [fixed]"
tx-udp_tnl-csum-segmentation: "off [fixed]"
tx-gso-partial: "off [fixed]"
tx-sctp-segmentation: "off [fixed]"
tx-esp-segmentation: "off [fixed]"
tx-udp-segmentation: "off [fixed]"
tls-hw-rx-offload: "off [fixed]"
fcoe-mtu: "off [fixed]"
tx-nocache-copy: "off"
loopback: "off [fixed]"
rx-fcs: "off [fixed]"
rx-all: "off [fixed]"
tx-vlan-stag-hw-insert: "off [fixed]"
rx-vlan-stag-hw-parse: "off [fixed]"
rx-vlan-stag-filter: "off [fixed]"
l2-fwd-offload: "off [fixed]"
hw-tc-offload: "off [fixed]"
esp-hw-offload: "off [fixed]"
esp-tx-csum-hw-offload: "off [fixed]"
rx-udp_tunnel-port-offload: "off [fixed]"
tls-hw-tx-offload: "off [fixed]"
rx-gro-hw: "off [fixed]"
tls-hw-record: "off [fixed]"
ens4:
businfo: "virtio@1"
ipv4: "192.168.24.101"
ipv4-netmask: "255.255.255.0"
ipv4-cidr: 24
ipv4-network: "192.168.24.0"
link: "yes"
driver: "virtio_net"
autonegotiation: "off"
serial: "24:42:00:61:03:eb"
rx-checksumming: "on [fixed]"
tx-checksumming: "on"
tx-checksumming/tx-checksum-ipv4: "off [fixed]"
tx-checksumming/tx-checksum-ip-generic: "on"
tx-checksumming/tx-checksum-ipv6: "off [fixed]"
tx-checksumming/tx-checksum-fcoe-crc: "off [fixed]"
tx-checksumming/tx-checksum-sctp: "off [fixed]"
scatter-gather: "on"
scatter-gather/tx-scatter-gather: "on"
scatter-gather/tx-scatter-gather-fraglist: "off [fixed]"
tcp-segmentation-offload: "on"
tcp-segmentation-offload/tx-tcp-segmentation: "on"
tcp-segmentation-offload/tx-tcp-ecn-segmentation: "on"
tcp-segmentation-offload/tx-tcp-mangleid-segmentation: "off"
tcp-segmentation-offload/tx-tcp6-segmentation: "on"
generic-segmentation-offload: "on"
generic-receive-offload: "on"
large-receive-offload: "off [fixed]"
rx-vlan-offload: "off [fixed]"
tx-vlan-offload: "off [fixed]"
ntuple-filters: "off [fixed]"
receive-hashing: "off [fixed]"
highdma: "on [fixed]"
rx-vlan-filter: "on [fixed]"
vlan-challenged: "off [fixed]"
tx-lockless: "off [fixed]"
netns-local: "off [fixed]"
tx-gso-robust: "on [fixed]"
tx-fcoe-segmentation: "off [fixed]"
tx-gre-segmentation: "off [fixed]"
tx-gre-csum-segmentation: "off [fixed]"
tx-ipxip4-segmentation: "off [fixed]"
tx-ipxip6-segmentation: "off [fixed]"
tx-udp_tnl-segmentation: "off [fixed]"
tx-udp_tnl-csum-segmentation: "off [fixed]"
tx-gso-partial: "off [fixed]"
tx-sctp-segmentation: "off [fixed]"
tx-esp-segmentation: "off [fixed]"
tx-udp-segmentation: "off [fixed]"
tls-hw-rx-offload: "off [fixed]"
fcoe-mtu: "off [fixed]"
tx-nocache-copy: "off"
loopback: "off [fixed]"
rx-fcs: "off [fixed]"
rx-all: "off [fixed]"
tx-vlan-stag-hw-insert: "off [fixed]"
rx-vlan-stag-hw-parse: "off [fixed]"
rx-vlan-stag-filter: "off [fixed]"
l2-fwd-offload: "off [fixed]"
hw-tc-offload: "off [fixed]"
esp-hw-offload: "off [fixed]"
esp-tx-csum-hw-offload: "off [fixed]"
rx-udp_tunnel-port-offload: "off [fixed]"
tls-hw-tx-offload: "off [fixed]"
rx-gro-hw: "off [fixed]"
tls-hw-record: "off [fixed]"
cpu:
physical:
number: 2
smt: "notsupported"
physical_0:
vendor: "AuthenticAMD"
product: "AMD Ryzen 7 1800X Eight-Core Processor"
cores: 1
threads: 1
family: 23
model: 1
stepping: 1
architecture: "x86_64"
l1d cache: "64K"
l1i cache: "64K"
l2 cache: "512K"
l3 cache: "16384K"
current_Mhz: 3599
flags: "fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm rep_good nopl cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw perfctr_core cpb ssbd vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 virt_ssbd arat npt nrip_save arch_capabilities"
threads_per_core: 1
physical_1:
vendor: "AuthenticAMD"
product: "AMD Ryzen 7 1800X Eight-Core Processor"
cores: 1
threads: 1
family: 23
model: 1
stepping: 1
architecture: "x86_64"
l1d cache: "64K"
l1i cache: "64K"
l2 cache: "512K"
l3 cache: "16384K"
current_Mhz: 3599
flags: "fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm rep_good nopl cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw perfctr_core cpb ssbd vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 virt_ssbd arat npt nrip_save arch_capabilities"
threads_per_core: 1
logical:
number: 2
numa:
nodes:
count: 1
node_0:
cpu_count: 2
cpu_mask: 3
hw:
auxv:
hwcap: "78bfbff"
pagesz: 4096
flags: "0x0"
hwcap2: "0x0"
platform: "x86_64"
failed: false

View File

@ -1,466 +0,0 @@
---
success: true
changed: false
error: null
data:
inventory:
interfaces:
-
name: "ens3"
mac_address: "24:42:00:9a:63:09"
ipv4_address: "192.168.24.100"
ipv6_address: "fe80::b60d:bb3a:8f88:1ef5%ens3"
has_carrier: true
lldp: []
vendor: "0x1af4"
product: "0x0001"
client_id: null
biosdevname: null
-
name: "ens4"
mac_address: "24:42:00:5c:86:35"
ipv4_address: "192.168.24.101"
ipv6_address: "fe80::bf16:96cc:eac2:a36a%ens4"
has_carrier: true
lldp: []
vendor: "0x1af4"
product: "0x0001"
client_id: null
biosdevname: null
cpu:
model_name: "AMD Ryzen 7 1800X Eight-Core Processor"
frequency: "3599.998"
count: 2
architecture: "x86_64"
flags:
- "fpu"
- "vme"
- "de"
- "pse"
- "tsc"
- "msr"
- "pae"
- "mce"
- "cx8"
- "apic"
- "sep"
- "mtrr"
- "pge"
- "mca"
- "cmov"
- "pat"
- "pse36"
- "clflush"
- "mmx"
- "fxsr"
- "sse"
- "sse2"
- "syscall"
- "nx"
- "mmxext"
- "fxsr_opt"
- "pdpe1gb"
- "rdtscp"
- "lm"
- "rep_good"
- "nopl"
- "cpuid"
- "extd_apicid"
- "tsc_known_freq"
- "pni"
- "pclmulqdq"
- "ssse3"
- "fma"
- "cx16"
- "sse4_1"
- "sse4_2"
- "x2apic"
- "movbe"
- "popcnt"
- "tsc_deadline_timer"
- "aes"
- "xsave"
- "avx"
- "f16c"
- "rdrand"
- "hypervisor"
- "lahf_lm"
- "cmp_legacy"
- "svm"
- "cr8_legacy"
- "abm"
- "sse4a"
- "misalignsse"
- "3dnowprefetch"
- "osvw"
- "perfctr_core"
- "cpb"
- "ssbd"
- "vmmcall"
- "fsgsbase"
- "tsc_adjust"
- "bmi1"
- "avx2"
- "smep"
- "bmi2"
- "rdseed"
- "adx"
- "smap"
- "clflushopt"
- "sha_ni"
- "xsaveopt"
- "xsavec"
- "xgetbv1"
- "virt_ssbd"
- "arat"
- "npt"
- "nrip_save"
- "arch_capabilities"
disks:
-
name: "/dev/sda"
model: "QEMU HARDDISK"
size: 107374182400
rotational: true
wwn: null
serial: "QM00001"
vendor: "ATA"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "0:0:0:0"
by_path: "/dev/disk/by-path/pci-0000:00:01.1-ata-1"
memory:
total: 3935551488
physical_mb: 3907
bmc_address: "0.0.0.0"
bmc_v6address: "::/0"
system_vendor:
product_name: "KVM"
serial_number: ""
manufacturer: "Red Hat"
boot:
current_boot_mode: "bios"
pxe_interface: "24:42:00:9a:63:09"
hostname: "localhost.localdomain"
root_disk:
name: "/dev/sda"
model: "QEMU HARDDISK"
size: 107374182400
rotational: true
wwn: null
serial: "QM00001"
vendor: "ATA"
wwn_with_extension: null
wwn_vendor_extension: null
hctl: "0:0:0:0"
by_path: "/dev/disk/by-path/pci-0000:00:01.1-ata-1"
boot_interface: "24:42:00:9a:63:09"
configuration:
collectors:
- "default"
- "extra-hardware"
- "numa-topology"
- "logs"
managers:
-
name: "generic_hardware_manager"
version: "1.1"
numa_topology:
ram:
-
numa_node: 0
size_kb: 3843312
cpus:
-
thread_siblings:
- 1
- 0
cpu: 0
numa_node: 0
nics: []
error: null
ipmi_address: null
ipmi_v6address: null
all_interfaces:
ens3:
ip: "192.168.24.100"
mac: "24:42:00:9a:63:09"
client_id: null
pxe: true
ens4:
ip: "192.168.24.101"
mac: "24:42:00:5c:86:35"
client_id: null
pxe: false
interfaces:
ens3:
ip: "192.168.24.100"
mac: "24:42:00:9a:63:09"
client_id: null
pxe: true
macs:
- "24:42:00:9a:63:09"
local_gb: 99
cpus: 2
cpu_arch: "x86_64"
memory_mb: 3907
extra:
disk:
logical:
count: 1
sda:
size: 107
vendor: "ATA"
model: "QEMU HARDDISK"
rev: "2.5+"
optimal_io_size: 0
physical_block_size: 512
rotational: 1
nr_requests: 2
scheduler: "mq-deadline"
Write Cache Enable: 1
Read Cache Disable: 0
id: "ata-QEMU_HARDDISK_QM00001"
SMART/device_model: "QEMU HARDDISK"
SMART/serial_number: "QM00001"
SMART/firmware_version: "2.5+"
SMART/Raw_Read_Error_Rate(1)/value: 100
SMART/Raw_Read_Error_Rate(1)/worst: 100
SMART/Raw_Read_Error_Rate(1)/thresh: 6
SMART/Raw_Read_Error_Rate(1)/when_failed: "NEVER"
SMART/Raw_Read_Error_Rate(1)/raw: 0
SMART/Spin_Up_Time(3)/value: 100
SMART/Spin_Up_Time(3)/worst: 100
SMART/Spin_Up_Time(3)/thresh: 0
SMART/Spin_Up_Time(3)/when_failed: "NEVER"
SMART/Spin_Up_Time(3)/raw: 16
SMART/Start_Stop_Count(4)/value: 100
SMART/Start_Stop_Count(4)/worst: 100
SMART/Start_Stop_Count(4)/thresh: 20
SMART/Start_Stop_Count(4)/when_failed: "NEVER"
SMART/Start_Stop_Count(4)/raw: 100
SMART/Reallocated_Sector_Ct(5)/value: 100
SMART/Reallocated_Sector_Ct(5)/worst: 100
SMART/Reallocated_Sector_Ct(5)/thresh: 36
SMART/Reallocated_Sector_Ct(5)/when_failed: "NEVER"
SMART/Reallocated_Sector_Ct(5)/raw: 0
SMART/Power_On_Hours(9)/value: 100
SMART/Power_On_Hours(9)/worst: 100
SMART/Power_On_Hours(9)/thresh: 0
SMART/Power_On_Hours(9)/when_failed: "NEVER"
SMART/Power_On_Hours(9)/raw: 1
SMART/Power_Cycle_Count(12)/value: 100
SMART/Power_Cycle_Count(12)/worst: 100
SMART/Power_Cycle_Count(12)/thresh: 0
SMART/Power_Cycle_Count(12)/when_failed: "NEVER"
SMART/Power_Cycle_Count(12)/raw: 0
SMART/Airflow_Temperature_Cel(190)/value: 69
SMART/Airflow_Temperature_Cel(190)/worst: 69
SMART/Airflow_Temperature_Cel(190)/thresh: 50
SMART/Airflow_Temperature_Cel(190)/when_failed: "NEVER"
SMART/Airflow_Temperature_Cel(190)/raw: " 31 (Min/Max 31/31)"
system:
product:
name: "KVM"
vendor: "Red Hat"
version: "RHEL 7.6.0 PC (i440FX + PIIX, 1996)"
uuid: "9da21fb0-062b-4264-96a3-4b5896181898"
kernel:
version: "4.18.0-147.8.1.el8_1.x86_64"
arch: "x86_64"
cmdline: "ipa-inspection-callback-url=http://192.168.24.1:5050/v1/continue ipa-inspection-collectors=default,extra-hardware,numa-topology,logs systemd.journald.forward_to_console=yes BOOTIF=24:42:00:9a:63:09 ipa-debug=1 ipa-inspection-dhcp-all-interfaces=1 ipa-collect-lldp=1 initrd=agent.ramdisk"
rtc:
utc: "no"
firmware:
bios:
version: "1.11.0-2.el7"
date: "04/01/2014"
vendor: "SeaBIOS"
memory:
total:
size: 4096786432
network:
ens3:
businfo: "virtio@0"
ipv4: "192.168.24.100"
ipv4-netmask: "255.255.255.0"
ipv4-cidr: 24
ipv4-network: "192.168.24.0"
link: "yes"
driver: "virtio_net"
autonegotiation: "off"
serial: "24:42:00:9a:63:09"
rx-checksumming: "on [fixed]"
tx-checksumming: "on"
tx-checksumming/tx-checksum-ipv4: "off [fixed]"
tx-checksumming/tx-checksum-ip-generic: "on"
tx-checksumming/tx-checksum-ipv6: "off [fixed]"
tx-checksumming/tx-checksum-fcoe-crc: "off [fixed]"
tx-checksumming/tx-checksum-sctp: "off [fixed]"
scatter-gather: "on"
scatter-gather/tx-scatter-gather: "on"
scatter-gather/tx-scatter-gather-fraglist: "off [fixed]"
tcp-segmentation-offload: "on"
tcp-segmentation-offload/tx-tcp-segmentation: "on"
tcp-segmentation-offload/tx-tcp-ecn-segmentation: "on"
tcp-segmentation-offload/tx-tcp-mangleid-segmentation: "off"
tcp-segmentation-offload/tx-tcp6-segmentation: "on"
generic-segmentation-offload: "on"
generic-receive-offload: "on"
large-receive-offload: "off [fixed]"
rx-vlan-offload: "off [fixed]"
tx-vlan-offload: "off [fixed]"
ntuple-filters: "off [fixed]"
receive-hashing: "off [fixed]"
highdma: "on [fixed]"
rx-vlan-filter: "on [fixed]"
vlan-challenged: "off [fixed]"
tx-lockless: "off [fixed]"
netns-local: "off [fixed]"
tx-gso-robust: "on [fixed]"
tx-fcoe-segmentation: "off [fixed]"
tx-gre-segmentation: "off [fixed]"
tx-gre-csum-segmentation: "off [fixed]"
tx-ipxip4-segmentation: "off [fixed]"
tx-ipxip6-segmentation: "off [fixed]"
tx-udp_tnl-segmentation: "off [fixed]"
tx-udp_tnl-csum-segmentation: "off [fixed]"
tx-gso-partial: "off [fixed]"
tx-sctp-segmentation: "off [fixed]"
tx-esp-segmentation: "off [fixed]"
tx-udp-segmentation: "off [fixed]"
tls-hw-rx-offload: "off [fixed]"
fcoe-mtu: "off [fixed]"
tx-nocache-copy: "off"
loopback: "off [fixed]"
rx-fcs: "off [fixed]"
rx-all: "off [fixed]"
tx-vlan-stag-hw-insert: "off [fixed]"
rx-vlan-stag-hw-parse: "off [fixed]"
rx-vlan-stag-filter: "off [fixed]"
l2-fwd-offload: "off [fixed]"
hw-tc-offload: "off [fixed]"
esp-hw-offload: "off [fixed]"
esp-tx-csum-hw-offload: "off [fixed]"
rx-udp_tunnel-port-offload: "off [fixed]"
tls-hw-tx-offload: "off [fixed]"
rx-gro-hw: "off [fixed]"
tls-hw-record: "off [fixed]"
ens4:
businfo: "virtio@1"
ipv4: "192.168.24.101"
ipv4-netmask: "255.255.255.0"
ipv4-cidr: 24
ipv4-network: "192.168.24.0"
link: "yes"
driver: "virtio_net"
autonegotiation: "off"
serial: "24:42:00:5c:86:35"
rx-checksumming: "on [fixed]"
tx-checksumming: "on"
tx-checksumming/tx-checksum-ipv4: "off [fixed]"
tx-checksumming/tx-checksum-ip-generic: "on"
tx-checksumming/tx-checksum-ipv6: "off [fixed]"
tx-checksumming/tx-checksum-fcoe-crc: "off [fixed]"
tx-checksumming/tx-checksum-sctp: "off [fixed]"
scatter-gather: "on"
scatter-gather/tx-scatter-gather: "on"
scatter-gather/tx-scatter-gather-fraglist: "off [fixed]"
tcp-segmentation-offload: "on"
tcp-segmentation-offload/tx-tcp-segmentation: "on"
tcp-segmentation-offload/tx-tcp-ecn-segmentation: "on"
tcp-segmentation-offload/tx-tcp-mangleid-segmentation: "off"
tcp-segmentation-offload/tx-tcp6-segmentation: "on"
generic-segmentation-offload: "on"
generic-receive-offload: "on"
large-receive-offload: "off [fixed]"
rx-vlan-offload: "off [fixed]"
tx-vlan-offload: "off [fixed]"
ntuple-filters: "off [fixed]"
receive-hashing: "off [fixed]"
highdma: "on [fixed]"
rx-vlan-filter: "on [fixed]"
vlan-challenged: "off [fixed]"
tx-lockless: "off [fixed]"
netns-local: "off [fixed]"
tx-gso-robust: "on [fixed]"
tx-fcoe-segmentation: "off [fixed]"
tx-gre-segmentation: "off [fixed]"
tx-gre-csum-segmentation: "off [fixed]"
tx-ipxip4-segmentation: "off [fixed]"
tx-ipxip6-segmentation: "off [fixed]"
tx-udp_tnl-segmentation: "off [fixed]"
tx-udp_tnl-csum-segmentation: "off [fixed]"
tx-gso-partial: "off [fixed]"
tx-sctp-segmentation: "off [fixed]"
tx-esp-segmentation: "off [fixed]"
tx-udp-segmentation: "off [fixed]"
tls-hw-rx-offload: "off [fixed]"
fcoe-mtu: "off [fixed]"
tx-nocache-copy: "off"
loopback: "off [fixed]"
rx-fcs: "off [fixed]"
rx-all: "off [fixed]"
tx-vlan-stag-hw-insert: "off [fixed]"
rx-vlan-stag-hw-parse: "off [fixed]"
rx-vlan-stag-filter: "off [fixed]"
l2-fwd-offload: "off [fixed]"
hw-tc-offload: "off [fixed]"
esp-hw-offload: "off [fixed]"
esp-tx-csum-hw-offload: "off [fixed]"
rx-udp_tunnel-port-offload: "off [fixed]"
tls-hw-tx-offload: "off [fixed]"
rx-gro-hw: "off [fixed]"
tls-hw-record: "off [fixed]"
cpu:
physical:
number: 2
smt: "notsupported"
physical_0:
vendor: "AuthenticAMD"
product: "AMD Ryzen 7 1800X Eight-Core Processor"
cores: 1
threads: 1
family: 23
model: 1
stepping: 1
architecture: "x86_64"
l1d cache: "64K"
l1i cache: "64K"
l2 cache: "512K"
l3 cache: "16384K"
current_Mhz: 3599
flags: "fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm rep_good nopl cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw perfctr_core cpb ssbd vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 virt_ssbd arat npt nrip_save arch_capabilities"
threads_per_core: 1
physical_1:
vendor: "AuthenticAMD"
product: "AMD Ryzen 7 1800X Eight-Core Processor"
cores: 1
threads: 1
family: 23
model: 1
stepping: 1
architecture: "x86_64"
l1d cache: "64K"
l1i cache: "64K"
l2 cache: "512K"
l3 cache: "16384K"
current_Mhz: 3599
flags: "fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm rep_good nopl cpuid extd_apicid tsc_known_freq pni pclmulqdq ssse3 fma cx16 sse4_1 sse4_2 x2apic movbe popcnt tsc_deadline_timer aes xsave avx f16c rdrand hypervisor lahf_lm cmp_legacy svm cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw perfctr_core cpb ssbd vmmcall fsgsbase tsc_adjust bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 virt_ssbd arat npt nrip_save arch_capabilities"
threads_per_core: 1
logical:
number: 2
numa:
nodes:
count: 1
node_0:
cpu_count: 2
cpu_mask: 3
hw:
auxv:
hwcap: "78bfbff"
pagesz: 4096
flags: "0x0"
hwcap2: "0x0"
platform: "x86_64"
failed: false

View File

@ -1,12 +0,0 @@
default:
average_guest_memory_size_in_mb: 2048
average_guest_cpu_utilization_percentage: 50
many_small_vms:
average_guest_memory_size_in_mb: 1024
average_guest_cpu_utilization_percentage: 20
few_large_vms:
average_guest_memory_size_in_mb: 4096
average_guest_cpu_utilization_percentage: 80
nfv_default:
average_guest_memory_size_in_mb: 8192
average_guest_cpu_utilization_percentage: 90

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
changed: false
error: null
failed: false
@ -2509,3 +2510,5 @@ stack_data:
- EndpointMap
type: OS::TripleO::Services::ContainerImagePrepare
success: true
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,7 +0,0 @@
changed: false
error: null
failed: false
roles:
- Controller
- ComputeHCI
success: true

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
@ -109,3 +110,5 @@
default({})) |
combine({(tripleo_role_name + 'Parameters'): ((dpdk_parameters | default({})) | combine(host_parameters))})
}}"
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
@ -156,3 +157,5 @@
- name: Update dpdk parameters in derived parameters dictionary
set_fact:
derived_parameters: "{{ (derived_parameters | default({})) | combine({(tripleo_role_name + 'Parameters'): dpdk_parameters}) }}"
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
@ -22,3 +23,5 @@
- name: Convert host cpus in range list
set_fact:
host_cpus: "{{ host_cpus_list.host_cpus_list | range_list }}"
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
---
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
@ -216,3 +217,5 @@
when:
- derived_parameters_result is defined
- derived_parameters_result|length >0
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,231 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test the derive and get_vcpus_per_osd methods of the HCI module"""
import yaml
from tripleo_ansible.ansible_plugins.modules import tripleo_derive_hci_parameters as derive_params
from tripleo_ansible.tests import base as tests_base
class TestTripleoDeriveHciParameters(tests_base.TestCase):
"""Test the derive method of the HCI module"""
def test_derive_positive(self):
"""Test the derive method with valid input and confirm expected result
"""
der = derive_params.derive(mem_gb=256, vcpus=4, osds=1,
average_guest_memory_size_in_mb=2048,
average_guest_cpu_utilization_percentage=20)
self.assertFalse(der['failed'])
self.assertEqual(der['nova_reserved_mem_mb'], 56320)
self.assertEqual(der['cpu_allocation_ratio'], 3.75)
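# Hedged worked example (the constants below are assumptions based on the
# historical HCI rules of thumb -- roughly 5 GB and one vCPU reserved per
# OSD, 0.5 GB overhead per guest -- not values quoted from the module):
#   guests   = int((256 - 5*1) / (2048/1024 + 0.5))  = 100
#   reserved = 1024 * (5*1 + 100*0.5)                = 56320 MB
#   ratio    = ((4 - 1*1) / 0.20) / 4                = 3.75
# which is consistent with the assertions above.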
def test_derive_negative(self):
"""Test the derive method with invalid input
"""
der = derive_params.derive(mem_gb=2, vcpus=1, osds=1,
average_guest_memory_size_in_mb=0,
average_guest_cpu_utilization_percentage=0)
self.assertTrue(der['failed'])
def test_vcpu_ratio(self):
"""Test the get_vcpus_per_osd method and confirm expected result
"""
def mock_ironic():
"""Return a dictionary with partial disks section of introspection
"""
return {'data':
{'inventory':
{'disks':
[
{'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:5',
'name': '/dev/sda',
'rotational': True,
'wwn': None},
{'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:4',
'name': '/dev/sdb',
'rotational': True,
'wwn': None},
{'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:3',
'name': '/dev/sdc',
'rotational': True,
'wwn': None},
{'by_path': '/dev/disk/by-path/pci-0000:00:07.0-scsi-0:0:0:2',
'name': '/dev/sdd',
'rotational': True,
'wwn': None},
{'by_path': '/dev/disk/by-path/pci-0000:00:01.1-ata-1',
'name': '/dev/sde',
'rotational': True,
'wwn': None}
]
}
}
}
def get_ironic(flavor='hdd'):
"""Returns a dictionary which mocks ironic introspection
data. Uses mock introspection data as the source but then
applies flavor variations to make it look like the system
which was introspected has SSD or NVMe SSDs.
"""
ironic = mock_ironic()
if flavor in 'ssd':
for dev in ironic['data']['inventory']['disks']:
dev['rotational'] = False
if flavor in 'nvme':
i = 1
for dev in ironic['data']['inventory']['disks']:
nvm_name = "/dev/nvme0n%i" % i
dev['name'] = nvm_name
dev['rotational'] = False
i += 1
return ironic
def get_env_ceph_ansible(flavor='hdd', osds_per_device=1):
"""Returns a dictionary which mocks the content of the
tripleo_environment_parameters CephAnsibleDisksConfig
where the deployer requests four OSDs via a
ceph-ansible device list of differing flavor types.
The flavor may be set to one of hdd, ssd, by_path,
or nvme and it is also possible to set the
osds_per_device (usually used with NVMe). Uses mock
introspection data in molecule to build the device
list with flavor variations.
"""
ironic = mock_ironic()
devices = []
i = 1
for dev in ironic['data']['inventory']['disks']:
if flavor in ('hdd', 'ssd'):
devices.append(dev['name'])
elif flavor in 'by_path':
devices.append(dev['by_path'])
elif flavor in 'nvme':
nvm_name = "/dev/nvme0n%i" % i
devices.append(nvm_name)
i += 1
if i > 4:
break
disks_config = {
"osd_objectstore": "bluestore",
"osd_scenario": "lvm",
"devices": devices
}
if osds_per_device > 1:
disks_config['osds_per_device'] = osds_per_device
env = {
"CephAnsibleDisksConfig": disks_config
}
return env
def get_env_cephadm(flavor='hdd', osds_per_device=1):
"""Returns a dictionary which mocks the content of the
tripleo_environment_parameters CephHciOsd{Count,Type} and CephOsdSpec
where the deployer requests a number of OSDs of differing
flavor types. The flavor may be set to one of hdd, ssd,
or nvme and it is also possible to set the osds_per_device
(usually used with NVMe).
"""
if osds_per_device == 0:
osds_per_device = 1
env_cephadm = {
"CephHciOsdCount": 5,
"CephHciOsdType": flavor,
"CephOsdSpec": {
"data_devices": {
"all": True,
},
"osds_per_device": osds_per_device
}
}
return env_cephadm
ratio_map = {
'hdd': 1,
'ssd': 4,
'by_path': 1,
'nvme': 3
}
for flavor in ratio_map:
envs = []
if flavor == 'nvme':
osds_per_device = 4
else:
osds_per_device = 0
envs.append(get_env_ceph_ansible(flavor, osds_per_device))
if flavor != 'by_path':
envs.append(get_env_cephadm(flavor, osds_per_device))
ironic = get_ironic(flavor)
for env in envs:
if "CephHciOsdCount" in env and "CephHciOsdType" in env:
vcpu_ratio, vcpu_msg = derive_params\
.get_vcpus_per_osd(env,
env['CephHciOsdCount'],
env['CephHciOsdType'],
env['CephOsdSpec'])
else:
num_osds = derive_params.count_osds(env)
vcpu_ratio, vcpu_msg = \
derive_params.get_vcpus_per_osd_from_ironic(ironic,
env,
num_osds)
self.assertEqual(vcpu_ratio, ratio_map[flavor])
self.assertIsNotNone(vcpu_msg)
def test_derive_without_workload(self):
"""Test the derive method without passing the expected average
guest cpu and mem utilization and confirm expected result
"""
der = derive_params.derive(mem_gb=256, vcpus=56, osds=16)
self.assertFalse(der['failed'])
self.assertEqual(der['nova_reserved_mem_mb'], 81920)
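# With no workload profile the reservation appears to fall back to the
# per-OSD constant alone: 16 OSDs * 5 GB * 1024 = 81920 MB (the 5 GB/OSD
# figure is an assumption inferred from the assertion, not quoted code).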
def test_count_memory(self):
"""Test that the count_memory method can the right number
regardless of which value ironic might provide.
"""
mock_ironic_memory_mb = {'data':
{'memory_mb': 262144}}
mock_ironic_memory_bytes = {'data':
{'memory_mb': 0,
'inventory':
{'memory':
{'total': 274877906944}}}}
gb_from_mb = derive_params.count_memory(mock_ironic_memory_mb)
gb_from_bytes = derive_params.count_memory(mock_ironic_memory_bytes)
self.assertEqual(gb_from_mb, gb_from_bytes)
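# Both mocks describe the same 256 GiB node: 274877906944 bytes / 1024**2
# = 262144 MB, so count_memory should report the same size from either field.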
def test_find_parameter(self):
"""Tests that the find_parameter method returns the
expected output for particular inputs.
"""
env = {'CephHciOsdCount': 3,
'CephHciOsdType': 'ssd',
'ComputeHCIParameters': {
'CephHciOsdCount': 4
},
}
value = derive_params.find_parameter(env, 'CephHciOsdCount', 'ComputeHCI')
self.assertEqual(value, 4)
value = derive_params.find_parameter(env, 'CephHciOsdCount')
self.assertEqual(value, 3)
value = derive_params.find_parameter(env, 'CephOsdSpec')
self.assertEqual(value, 0)
value = derive_params.find_parameter(env, 'CephHciOsdType')
self.assertEqual(value, 'ssd')
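# The assertions above imply a lookup precedence (the role-specific
# 'ComputeHCIParameters' entry wins over the top-level key) and that a
# missing parameter falls back to 0; both points are inferred from this
# test rather than from the module's documentation.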

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
@ -73,3 +74,5 @@ class TestTripleoGetDpdkCoreList(tests_base.TestCase):
self.assertRaises(tc.DeriveParamsError,
derive_params._get_dpdk_core_list,
inspect_data, numa_nodes_cores_count)
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,270 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
try:
from ansible.module_utils import tripleo_common_utils as tc
except ImportError:
from tripleo_ansible.ansible_plugins.module_utils import tripleo_common_utils as tc
from tripleo_ansible.ansible_plugins.modules import tripleo_get_dpdk_nics_numa_info as derive_params
from tripleo_ansible.tests import base as tests_base
class TestTripleoGetDpdkNicsNumaInfo(tests_base.TestCase):
"""Test the _get_dpdk_nics_numa_info method of the OvS DPDK module"""
def test_run_dpdk_port(self):
network_configs = [{
"members": [{
"members": [{"name": "nic5", "type": "interface"}],
"name": "dpdk0",
"type": "ovs_dpdk_port",
"mtu": 8192,
"rx_queue": 4}],
"name": "br-link",
"type": "ovs_user_bridge",
"addresses": [{"ip_netmask": ""}]}]
inspect_data = {
"numa_topology": {
"nics": [{"name": "ens802f1", "numa_node": 1},
{"name": "ens802f0", "numa_node": 1},
{"name": "eno1", "numa_node": 0},
{"name": "eno2", "numa_node": 0},
{"name": "enp12s0f1", "numa_node": 0},
{"name": "enp12s0f0", "numa_node": 0},
{"name": "enp13s0f0", "numa_node": 0},
{"name": "enp13s0f1", "numa_node": 0}]
},
"inventory": {
"interfaces": [{"has_carrier": True,
"name": "ens802f1"},
{"has_carrier": True,
"name": "ens802f0"},
{"has_carrier": True,
"name": "eno1"},
{"has_carrier": True,
"name": "eno2"},
{"has_carrier": True,
"name": "enp12s0f0"},
{"has_carrier": False,
"name": "enp13s0f0"},
{"has_carrier": False,
"name": "enp13s0f1"}]
}
}
expected_result = [{'bridge_name': 'br-link', 'name': 'ens802f1',
'mtu': 8192, 'numa_node': 1,
'addresses': [{'ip_netmask': ''}]}]
result = derive_params._get_dpdk_nics_numa_info(network_configs,
inspect_data)
self.assertEqual(result, expected_result)
def test_run_dpdk_bond(self):
network_configs = [{
"members": [{"type": "ovs_dpdk_bond", "name": "dpdkbond0",
"mtu": 9000, "rx_queue": 4,
"members": [{"type": "ovs_dpdk_port",
"name": "dpdk0",
"members": [{"type": "interface",
"name": "nic4"}]},
{"type": "ovs_dpdk_port",
"name": "dpdk1",
"members": [{"type": "interface",
"name": "nic5"}]}]}],
"name": "br-link",
"type": "ovs_user_bridge",
"addresses": [{"ip_netmask": "172.16.10.0/24"}]}]
inspect_data = {
"numa_topology": {
"nics": [{"name": "ens802f1", "numa_node": 1},
{"name": "ens802f0", "numa_node": 1},
{"name": "eno1", "numa_node": 0},
{"name": "eno2", "numa_node": 0},
{"name": "enp12s0f1", "numa_node": 0},
{"name": "enp12s0f0", "numa_node": 0},
{"name": "enp13s0f0", "numa_node": 0},
{"name": "enp13s0f1", "numa_node": 0}]
},
"inventory": {
"interfaces": [{"has_carrier": True,
"name": "ens802f1"},
{"has_carrier": True,
"name": "ens802f0"},
{"has_carrier": True,
"name": "eno1"},
{"has_carrier": True,
"name": "eno2"},
{"has_carrier": True,
"name": "enp12s0f0"},
{"has_carrier": False,
"name": "enp13s0f0"},
{"has_carrier": False,
"name": "enp13s0f1"}]
}
}
expected_result = [{'bridge_name': 'br-link', 'mtu': 9000,
'numa_node': 1, 'name': 'ens802f0',
'addresses': [{'ip_netmask': '172.16.10.0/24'}]},
{'bridge_name': 'br-link', 'mtu': 9000,
'numa_node': 1, 'name': 'ens802f1',
'addresses': [{'ip_netmask': '172.16.10.0/24'}]}]
result = derive_params._get_dpdk_nics_numa_info(network_configs,
inspect_data)
self.assertEqual(result, expected_result)
def test_run_no_inspect_nics(self):
network_configs = [{
"members": [{
"members": [{"name": "nic5", "type": "interface"}],
"name": "dpdk0",
"type": "ovs_dpdk_port",
"mtu": 8192,
"rx_queue": 4}],
"name": "br-link",
"type": "ovs_user_bridge"}]
inspect_data = {
"numa_topology": {
"nics": []
},
"inventory": {
"interfaces": [{"has_carrier": True,
"name": "ens802f1"},
{"has_carrier": True,
"name": "ens802f0"},
{"has_carrier": True,
"name": "eno1"},
{"has_carrier": True,
"name": "eno2"},
{"has_carrier": True,
"name": "enp12s0f0"},
{"has_carrier": False,
"name": "enp13s0f0"},
{"has_carrier": False,
"name": "enp13s0f1"}]
}
}
self.assertRaises(tc.DeriveParamsError,
derive_params._get_dpdk_nics_numa_info,
network_configs, inspect_data)
def test_run_no_inspect_interfaces(self):
network_configs = [{
"members": [{
"members": [{"name": "nic5", "type": "interface"}],
"name": "dpdk0",
"type": "ovs_dpdk_port",
"mtu": 8192,
"rx_queue": 4}],
"name": "br-link",
"type": "ovs_user_bridge"}]
inspect_data = {
"numa_topology": {
"nics": []
},
"inventory": {
"interfaces": []
}
}
self.assertRaises(tc.DeriveParamsError,
derive_params._get_dpdk_nics_numa_info,
network_configs, inspect_data)
def test_run_no_inspect_active_interfaces(self):
network_configs = [{
"members": [{
"members": [{"name": "nic5", "type": "interface"}],
"name": "dpdk0",
"type": "ovs_dpdk_port",
"mtu": 8192,
"rx_queue": 4}],
"name": "br-link",
"type": "ovs_user_bridge"}]
inspect_data = {
"numa_topology": {
"nics": [{"name": "ens802f1", "numa_node": 1},
{"name": "ens802f0", "numa_node": 1},
{"name": "eno1", "numa_node": 0},
{"name": "eno2", "numa_node": 0},
{"name": "enp12s0f1", "numa_node": 0},
{"name": "enp12s0f0", "numa_node": 0},
{"name": "enp13s0f0", "numa_node": 0},
{"name": "enp13s0f1", "numa_node": 0}]
},
"inventory": {
"interfaces": [{"has_carrier": False,
"name": "enp13s0f0"},
{"has_carrier": False,
"name": "enp13s0f1"}]
}
}
self.assertRaises(tc.DeriveParamsError,
derive_params._get_dpdk_nics_numa_info,
network_configs, inspect_data)
def test_run_no_numa_node(self):
network_configs = [{
"members": [{
"members": [{"name": "nic5", "type": "interface"}],
"name": "dpdk0",
"type": "ovs_dpdk_port",
"mtu": 8192,
"rx_queue": 4}],
"name": "br-link",
"type": "ovs_user_bridge"}]
inspect_data = {
"numa_topology": {
"nics": [{"name": "ens802f1"},
{"name": "ens802f0", "numa_node": 1},
{"name": "eno1", "numa_node": 0},
{"name": "eno2", "numa_node": 0},
{"name": "enp12s0f1", "numa_node": 0},
{"name": "enp12s0f0", "numa_node": 0},
{"name": "enp13s0f0", "numa_node": 0},
{"name": "enp13s0f1", "numa_node": 0}]
},
"inventory": {
"interfaces": [{"has_carrier": True,
"name": "ens802f1"},
{"has_carrier": True,
"name": "ens802f0"},
{"has_carrier": True,
"name": "eno1"},
{"has_carrier": True,
"name": "eno2"},
{"has_carrier": True,
"name": "enp12s0f0"},
{"has_carrier": False,
"name": "enp13s0f0"},
{"has_carrier": False,
"name": "enp13s0f1"}]
}
}
self.assertRaises(tc.DeriveParamsError,
derive_params._get_dpdk_nics_numa_info,
network_configs, inspect_data)

View File

@ -1,77 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import yaml
from tripleo_ansible.ansible_plugins.modules import tripleo_get_dpdk_socket_memory as derive_params
from tripleo_ansible.tests import base as tests_base
class TestTripleoGetDpdkSocketMemory(tests_base.TestCase):
"""Test the _get_dpdk_socket_memory method of the OvS DPDK module"""
def test_run_valid_dpdk_nics_numa_info(self):
dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
"mtu": 8192}]
numa_nodes = [0, 1]
overhead = 800
packet_size_in_buffer = (4096 * 64)
expected_result = "1024,3072"
result = derive_params._get_dpdk_socket_memory(
dpdk_nics_numa_info, numa_nodes, overhead,
packet_size_in_buffer)
self.assertEqual(result, expected_result)
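# The expected "1024,3072" is consistent with a calculation along these
# lines (the 512 MB headroom and 1024 MB per-node minimum are assumptions
# inferred from the expected values, not quoted from the module):
#   node 1: (8192 + 800) * 262144 / 1024**2 = 2248 MB; + 512 = 2760 MB,
#           rounded up to a whole GiB -> 3072 MB
#   node 0: no DPDK NIC -> 1024 MB minimum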
def test_run_multiple_mtu_in_same_numa_node(self):
dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
"mtu": 1500},
{"name": "ens802f2", "numa_node": 1,
"mtu": 2048}]
numa_nodes = [0, 1]
overhead = 800
packet_size_in_buffer = (4096 * 64)
expected_result = "1024,2048"
result = derive_params._get_dpdk_socket_memory(
dpdk_nics_numa_info, numa_nodes, overhead, packet_size_in_buffer)
self.assertEqual(result, expected_result)
def test_run_duplicate_mtu_in_same_numa_node(self):
dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
"mtu": 4096},
{"name": "ens802f2", "numa_node": 1,
"mtu": 4096}]
numa_nodes = [0, 1]
overhead = 800
packet_size_in_buffer = (4096 * 64)
expected_result = "1024,2048"
result = derive_params._get_dpdk_socket_memory(
dpdk_nics_numa_info, numa_nodes, overhead, packet_size_in_buffer)
self.assertEqual(result, expected_result)
def test_run_valid_roundup_mtu(self):
dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
"mtu": 1200}]
numa_nodes = [0, 1]
overhead = 800
packet_size_in_buffer = (4096 * 64)
expected_result = "1024,2048"
result = derive_params._get_dpdk_socket_memory(
dpdk_nics_numa_info, numa_nodes, overhead,
packet_size_in_buffer)
self.assertEqual(result, expected_result)
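# The round-up case is consistent with the MTU being raised to the next
# power of two before the buffer-size math (an assumption inferred from the
# expected value): 1200 -> 2048, then (2048 + 800) * 262144 / 1024**2 =
# 712 MB, + 512 = 1224 MB, rounded up to a whole GiB -> 2048 MB on node 1.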

View File

@ -1,3 +1,4 @@
<<<<<<< HEAD (153371 Merge "Mark B&R job as voting" into stable/wallaby)
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
@ -50,3 +51,5 @@ class TestTripleoGetHostCpus(tests_base.TestCase):
self.assertRaises(tc.DeriveParamsError,
derive_params._get_host_cpus_list,
inspect_data)
=======
>>>>>>> CHANGE (9fe9f2 Derive parameters clean up)

View File

@ -1,66 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tripleo_ansible.ansible_plugins.filter import number_list
from tripleo_ansible.tests import base as tests_base
class TestNumberListFilters(tests_base.TestCase):
def setUp(self):
super(TestNumberListFilters, self).setUp()
self.filters = number_list.FilterModule()
def test_run_with_ranges_in_comma_delimited_str(self):
range_list = "24-27,60,65-67"
expected_result = "24,25,26,27,60,65,66,67"
result = self.filters.number_list(range_list)
self.assertEqual(result, expected_result)
def test_run_with_ranges_in_comma_delimited_list(self):
range_list = ['24-27', '60', '65-67']
expected_result = "24,25,26,27,60,65,66,67"
result = self.filters.number_list(range_list)
self.assertEqual(result, expected_result)
def test_run_with_ranges_exclude_num(self):
range_list = "24-27,^25,60,65-67"
expected_result = "24,26,27,60,65,66,67"
result = self.filters.number_list(range_list)
self.assertEqual(result, expected_result)
def test_run_with_no_ranges(self):
range_list = "24,25,26,27,60,65,66,67"
expected_result = "24,25,26,27,60,65,66,67"
result = self.filters.number_list(range_list)
self.assertEqual(result, expected_result)
def test_run_with_empty_input(self):
range_list = ""
self.assertRaises(Exception,
self.filters.number_list,
range_list)
def test_run_with_invalid_input(self):
range_list = ",d"
self.assertRaises(Exception,
self.filters.number_list,
range_list)
def test_run_with_invalid_exclude_number(self):
range_list = "12-15,^17"
expected_result = "12,13,14,15"
result = self.filters.number_list(range_list)
self.assertEqual(result, expected_result)
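# Hypothetical playbook usage of the filter (not part of this test file),
# consistent with the cases above:
#   "{{ '24-27,^25,60' | number_list }}"  ->  "24,26,27,60"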

View File

@ -1,46 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tripleo_ansible.ansible_plugins.filter import range_list
from tripleo_ansible.tests import base as tests_base
class TestRangeListFilters(tests_base.TestCase):
def setUp(self):
super(TestRangeListFilters, self).setUp()
self.filters = range_list.FilterModule()
def test_run_with_ranges(self):
num_list = "0,22,23,24,25,60,65,66,67"
expected_result = "0,22-25,60,65-67"
result = self.filters.range_list(num_list)
self.assertEqual(result, expected_result)
def test_run_with_no_range(self):
num_list = "0,22,24,60,65,67"
expected_result = "0,22,24,60,65,67"
result = self.filters.range_list(num_list)
self.assertEqual(result, expected_result)
def test_run_with_empty_input(self):
num_list = ""
self.assertRaises(Exception,
self.filters.range_list, num_list)
def test_run_with_invalid_input(self):
num_list = ",d"
self.assertRaises(Exception,
self.filters.range_list, num_list)
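# Hypothetical playbook usage of the filter (not part of this test file),
# consistent with the cases above:
#   "{{ '0,22,23,24,25,60' | range_list }}"  ->  "0,22-25,60"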

View File

@ -28,7 +28,6 @@
- tripleo-ansible-centos-stream-molecule-tripleo_container_stop
- tripleo-ansible-centos-stream-molecule-tripleo_container_tag
- tripleo-ansible-centos-stream-molecule-tripleo_create_admin
- tripleo-ansible-centos-stream-molecule-tripleo_derived_parameters
- tripleo-ansible-centos-stream-molecule-tripleo_firewall
- tripleo-ansible-centos-stream-molecule-tripleo_ha_wrapper
- tripleo-ansible-centos-stream-molecule-tripleo_hieradata
@ -93,7 +92,6 @@
- tripleo-ansible-centos-stream-molecule-tripleo_container_stop
- tripleo-ansible-centos-stream-molecule-tripleo_container_tag
- tripleo-ansible-centos-stream-molecule-tripleo_create_admin
- tripleo-ansible-centos-stream-molecule-tripleo_derived_parameters
- tripleo-ansible-centos-stream-molecule-tripleo_firewall
- tripleo-ansible-centos-stream-molecule-tripleo_ha_wrapper
- tripleo-ansible-centos-stream-molecule-tripleo_hieradata
@ -389,13 +387,6 @@
parent: tripleo-ansible-centos-stream-base
vars:
tripleo_role_name: tripleo_create_admin
- job:
files:
- ^tripleo_ansible/roles/tripleo_derived_parameters/(?!meta).*
name: tripleo-ansible-centos-stream-molecule-tripleo_derived_parameters
parent: tripleo-ansible-centos-stream-base
vars:
tripleo_role_name: tripleo_derived_parameters
- job:
files:
- ^tripleo_ansible/roles/tripleo_firewall/(?!meta).*