NetApp cDOT controller utilization metrics
The NetApp cDOT drivers now include the cluster node utilization metrics for each pool reported to the manila scheduler. These values are designed to be included in the filter & goodness functions used by the scheduler, so the cDOT drivers now also report those functions to the scheduler for each pool. Implements: blueprint netapp-cdot-goodness-functions Change-Id: I3fca5c0ece1819eb4b3b98ed3fd9471cc5045977
This commit is contained in:
parent
16c522bf5e
commit
1ce2473f3f
@ -59,9 +59,13 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
||||
|
||||
ontapi_version = self.get_ontapi_version(cached=True)
|
||||
ontapi_1_20 = ontapi_version >= (1, 20)
|
||||
ontapi_1_2x = (1, 20) <= ontapi_version < (1, 30)
|
||||
ontapi_1_30 = ontapi_version >= (1, 30)
|
||||
|
||||
self.features.add_feature('SNAPMIRROR_V2', supported=ontapi_1_20)
|
||||
self.features.add_feature('SYSTEM_METRICS', supported=ontapi_1_2x)
|
||||
self.features.add_feature('SYSTEM_CONSTITUENT_METRICS',
|
||||
supported=ontapi_1_30)
|
||||
self.features.add_feature('BROADCAST_DOMAINS', supported=ontapi_1_30)
|
||||
self.features.add_feature('IPSPACES', supported=ontapi_1_30)
|
||||
self.features.add_feature('SUBNETS', supported=ontapi_1_30)
|
||||
@ -1048,6 +1052,105 @@ class NetAppCmodeClient(client_base.NetAppBaseClient):
|
||||
else:
|
||||
return result.get_child_by_name('attributes-list').get_children()
|
||||
|
||||
def get_performance_instance_uuids(self, object_name, node_name):
    """Return UUIDs of a node's instances of a performance object.

    Issues the 'perf-object-instance-list-info-iter' ZAPI for the
    given performance object, restricting results to instances whose
    UUIDs begin with the node name (cDOT instance UUIDs are prefixed
    with the owning node).
    """
    request_args = {
        'objectname': object_name,
        'query': {
            'instance-info': {
                # Instance UUIDs look like '<node>:<...>'.
                'uuid': node_name + ':*',
            }
        }
    }

    response = self.send_request(
        'perf-object-instance-list-info-iter', request_args)

    attributes = (response.get_child_by_name('attributes-list') or
                  netapp_api.NaElement('None'))

    return [instance_info.get_child_content('uuid')
            for instance_info in attributes.get_children()]
|
||||
|
||||
def get_performance_counter_info(self, object_name, counter_name):
    """Get info about a named Data ONTAP performance counter.

    Returns a dict holding the counter name, its array labels (empty
    for scalar counters), and the name of its base counter (or None).
    Raises NotFound when the performance object does not define the
    requested counter.
    """
    response = self.send_request('perf-object-counter-list-info',
                                 {'objectname': object_name})

    counter_elems = (response.get_child_by_name('counters') or
                     netapp_api.NaElement('None'))

    for counter_elem in counter_elems.get_children():
        if counter_elem.get_child_content('name') != counter_name:
            continue

        # Array counters carry one or more comma-separated label strings.
        labels = []
        label_elems = (counter_elem.get_child_by_name('labels') or
                       netapp_api.NaElement('None'))
        for label_elem in label_elems.get_children():
            labels.extend(label_elem.get_content().split(','))

        return {
            'name': counter_name,
            'labels': labels,
            'base-counter': counter_elem.get_child_content('base-counter'),
        }

    raise exception.NotFound(_('Counter %s not found') % counter_name)
|
||||
|
||||
def get_performance_counters(self, object_name, instance_uuids,
                             counter_names):
    """Get one or more cDOT performance counters.

    Returns a flat list of dicts, one per (instance, counter) pair,
    each tagged with the instance name, instance UUID, owning node
    name, and the response timestamp.
    """
    request_args = {
        'objectname': object_name,
        'instance-uuids': [{'instance-uuid': uuid}
                           for uuid in instance_uuids],
        'counters': [{'counter': name} for name in counter_names],
    }

    response = self.send_request('perf-object-get-instances', request_args)

    timestamp = response.get_child_content('timestamp')
    instance_elems = (response.get_child_by_name('instances') or
                      netapp_api.NaElement('None'))

    records = []
    for instance_elem in instance_elems.get_children():

        instance_name = instance_elem.get_child_content('name')
        instance_uuid = instance_elem.get_child_content('uuid')
        # Instance UUIDs are prefixed with the owning node's name.
        node_name = instance_uuid.split(':')[0]

        counter_elems = (instance_elem.get_child_by_name('counters') or
                         netapp_api.NaElement('None'))
        for counter_elem in counter_elems.get_children():

            record = {
                'instance-name': instance_name,
                'instance-uuid': instance_uuid,
                'node-name': node_name,
                'timestamp': timestamp,
            }
            record[counter_elem.get_child_content('name')] = (
                counter_elem.get_child_content('value'))
            records.append(record)

    return records
|
||||
|
||||
@na_utils.trace
|
||||
def setup_security_services(self, security_services, vserver_client,
|
||||
vserver_name):
|
||||
|
@ -107,10 +107,18 @@ class NetAppCmodeMultiSvmShareDriver(driver.ShareDriver):
|
||||
delete_rules, **kwargs)
|
||||
|
||||
def _update_share_stats(self, data=None):
    """Gather pool stats from the library and report them upward.

    The scheduler's filter and goodness functions are included so
    they are reported per pool along with utilization metrics.
    """
    stats = self.library.get_share_stats(
        filter_function=self.get_filter_function(),
        goodness_function=self.get_goodness_function())
    super(NetAppCmodeMultiSvmShareDriver, self)._update_share_stats(
        data=stats)
|
||||
|
||||
def get_default_filter_function(self):
    """Delegate the default scheduler filter function to the library."""
    return self.library.get_default_filter_function()
|
||||
|
||||
def get_default_goodness_function(self):
    """Delegate the default scheduler goodness function to the library."""
    return self.library.get_default_goodness_function()
|
||||
|
||||
def get_share_server_pools(self, share_server):
    """Delegate pool lookup for a share server to the library."""
    return self.library.get_share_server_pools(share_server)
|
||||
|
||||
|
@ -107,10 +107,18 @@ class NetAppCmodeSingleSvmShareDriver(driver.ShareDriver):
|
||||
delete_rules, **kwargs)
|
||||
|
||||
def _update_share_stats(self, data=None):
    """Gather pool stats from the library and report them upward.

    The scheduler's filter and goodness functions are included so
    they are reported per pool along with utilization metrics.
    """
    stats = self.library.get_share_stats(
        filter_function=self.get_filter_function(),
        goodness_function=self.get_goodness_function())
    super(NetAppCmodeSingleSvmShareDriver, self)._update_share_stats(
        data=stats)
|
||||
|
||||
def get_default_filter_function(self):
    """Delegate the default scheduler filter function to the library."""
    return self.library.get_default_filter_function()
|
||||
|
||||
def get_default_goodness_function(self):
    """Delegate the default scheduler goodness function to the library."""
    return self.library.get_default_goodness_function()
|
||||
|
||||
def get_share_server_pools(self, share_server):
    """Delegate pool lookup for a share server to the library."""
    return self.library.get_share_server_pools(share_server)
|
||||
|
||||
|
@ -36,6 +36,7 @@ from manila.i18n import _, _LE, _LI, _LW
|
||||
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
|
||||
from manila.share.drivers.netapp.dataontap.client import client_cmode
|
||||
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
|
||||
from manila.share.drivers.netapp.dataontap.cluster_mode import performance
|
||||
from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
|
||||
from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
|
||||
from manila.share.drivers.netapp import options as na_opts
|
||||
@ -55,6 +56,9 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
|
||||
SUPPORTED_PROTOCOLS = ('nfs', 'cifs')
|
||||
|
||||
DEFAULT_FILTER_FUNCTION = 'capabilities.utilization < 70'
|
||||
DEFAULT_GOODNESS_FUNCTION = '100 - capabilities.utilization'
|
||||
|
||||
# Maps NetApp qualified extra specs keys to corresponding backend API
|
||||
# client library argument keywords. When we expose more backend
|
||||
# capabilities here, we will add them to this map.
|
||||
@ -110,6 +114,9 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
self._client = self._get_api_client()
|
||||
self._have_cluster_creds = self._client.check_for_cluster_credentials()
|
||||
|
||||
# Performance monitoring library
|
||||
self._perf_library = performance.PerformanceLibrary(self._client)
|
||||
|
||||
@na_utils.trace
|
||||
def check_for_setup_error(self):
|
||||
self._licenses = self._get_licenses()
|
||||
@ -214,8 +221,16 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
else:
|
||||
return None
|
||||
|
||||
def get_default_filter_function(self):
    """Return the driver's default scheduler filter_function string."""
    return self.DEFAULT_FILTER_FUNCTION
|
||||
|
||||
def get_default_goodness_function(self):
    """Return the driver's default scheduler goodness_function string."""
    return self.DEFAULT_GOODNESS_FUNCTION
|
||||
|
||||
@na_utils.trace
|
||||
def get_share_stats(self):
|
||||
def get_share_stats(self, filter_function=None, goodness_function=None):
|
||||
"""Retrieve stats info from Data ONTAP backend."""
|
||||
|
||||
data = {
|
||||
@ -226,7 +241,8 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
'netapp_storage_family': 'ontap_cluster',
|
||||
'storage_protocol': 'NFS_CIFS',
|
||||
'consistency_group_support': 'host',
|
||||
'pools': self._get_pools(),
|
||||
'pools': self._get_pools(filter_function=filter_function,
|
||||
goodness_function=goodness_function),
|
||||
}
|
||||
|
||||
if (self.configuration.replication_domain and
|
||||
@ -249,28 +265,35 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
return self._get_pools()
|
||||
|
||||
@na_utils.trace
|
||||
def _get_pools(self):
|
||||
def _get_pools(self, filter_function=None, goodness_function=None):
|
||||
"""Retrieve list of pools available to this backend."""
|
||||
|
||||
pools = []
|
||||
aggr_space = self._get_aggregate_space()
|
||||
aggregates = aggr_space.keys()
|
||||
|
||||
for aggr_name in sorted(aggr_space.keys()):
|
||||
# Get up-to-date node utilization metrics just once.
|
||||
if self._have_cluster_creds:
|
||||
self._perf_library.update_performance_cache({}, self._ssc_stats)
|
||||
|
||||
for aggr_name in sorted(aggregates):
|
||||
|
||||
reserved_percentage = self.configuration.reserved_share_percentage
|
||||
|
||||
total_capacity_gb = na_utils.round_down(float(
|
||||
aggr_space[aggr_name].get('total', 0)) / units.Gi, '0.01')
|
||||
aggr_space[aggr_name].get('total', 0)) / units.Gi)
|
||||
free_capacity_gb = na_utils.round_down(float(
|
||||
aggr_space[aggr_name].get('available', 0)) / units.Gi, '0.01')
|
||||
aggr_space[aggr_name].get('available', 0)) / units.Gi)
|
||||
allocated_capacity_gb = na_utils.round_down(float(
|
||||
aggr_space[aggr_name].get('used', 0)) / units.Gi, '0.01')
|
||||
aggr_space[aggr_name].get('used', 0)) / units.Gi)
|
||||
|
||||
if total_capacity_gb == 0.0:
|
||||
total_capacity_gb = 'unknown'
|
||||
|
||||
pool = {
|
||||
'pool_name': aggr_name,
|
||||
'filter_function': filter_function,
|
||||
'goodness_function': goodness_function,
|
||||
'total_capacity_gb': total_capacity_gb,
|
||||
'free_capacity_gb': free_capacity_gb,
|
||||
'allocated_capacity_gb': allocated_capacity_gb,
|
||||
@ -279,7 +302,6 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
'dedupe': [True, False],
|
||||
'compression': [True, False],
|
||||
'thin_provisioning': [True, False],
|
||||
'netapp_aggregate': aggr_name,
|
||||
}
|
||||
|
||||
# Add storage service catalog data.
|
||||
@ -287,6 +309,11 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
if pool_ssc_stats:
|
||||
pool.update(pool_ssc_stats)
|
||||
|
||||
# Add utilization info, or nominal value if not available.
|
||||
utilization = self._perf_library.get_node_utilization_for_pool(
|
||||
aggr_name)
|
||||
pool['utilization'] = na_utils.round_down(utilization)
|
||||
|
||||
pools.append(pool)
|
||||
|
||||
return pools
|
||||
@ -1160,7 +1187,9 @@ class NetAppCmodeFileStorageLibrary(object):
|
||||
# Initialize entries for each aggregate.
|
||||
for aggregate_name in aggregate_names:
|
||||
if aggregate_name not in ssc_stats:
|
||||
ssc_stats[aggregate_name] = {}
|
||||
ssc_stats[aggregate_name] = {
|
||||
'netapp_aggregate': aggregate_name,
|
||||
}
|
||||
|
||||
if aggregate_names:
|
||||
self._update_ssc_aggr_info(aggregate_names, ssc_stats)
|
||||
|
@ -0,0 +1,405 @@
|
||||
# Copyright (c) 2016 Clinton Knight
|
||||
# All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
"""
|
||||
Performance metrics functions and cache for NetApp systems.
|
||||
"""
|
||||
|
||||
import copy
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
from manila import exception
|
||||
from manila.i18n import _, _LE
|
||||
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
DEFAULT_UTILIZATION = 50
|
||||
|
||||
|
||||
class PerformanceLibrary(object):
    """Computes cluster node utilization metrics for the cDOT drivers.

    Keeps a short rolling history of raw performance counters per
    cluster node and derives a 0-100 utilization figure for each pool
    (utilization of the node owning the pool's aggregate).  The drivers
    report these values to the scheduler for use in filter/goodness
    functions.
    """

    def __init__(self, zapi_client):
        """Initialize the library with a cDOT API client.

        :param zapi_client: client used to issue performance ZAPIs; its
            'features' attribute gates which counters are available.
        """

        self.zapi_client = zapi_client
        # Maps node name -> list of up to 10 most recent counter sets.
        self.performance_counters = {}
        # Maps pool name -> most recently computed utilization value.
        self.pool_utilization = {}
        self._init_counter_info()

    def _init_counter_info(self):
        """Set a few counter names based on Data ONTAP version."""

        self.system_object_name = None
        self.avg_processor_busy_base_counter_name = None

        try:
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                # ONTAPI >= 1.30: per-constituent system object.
                self.system_object_name = 'system:constituent'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system:constituent',
                                                'avg_processor_busy'))
            elif self.zapi_client.features.SYSTEM_METRICS:
                self.system_object_name = 'system'
                self.avg_processor_busy_base_counter_name = (
                    self._get_base_counter_name('system',
                                                'avg_processor_busy'))
        except netapp_api.NaApiError:
            # Fall back to the historically-known base counter names if
            # the counter metadata lookup fails.
            if self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time'
            else:
                self.avg_processor_busy_base_counter_name = 'cpu_elapsed_time1'
            LOG.exception(_LE('Could not get performance base counter '
                              'name. Performance-based scheduler '
                              'functions may not be available.'))

    def update_performance_cache(self, flexvol_pools, aggregate_pools):
        """Called periodically to update per-pool node utilization metrics.

        :param flexvol_pools: dict of flexvol-based pool info dicts, each
            expected to carry a 'netapp_aggregate' key.
        :param aggregate_pools: dict of aggregate-based pool info dicts.
        """

        # Nothing to do on older systems
        if not (self.zapi_client.features.SYSTEM_METRICS or
                self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS):
            return

        # Get aggregates and nodes for all known pools
        aggr_names = self._get_aggregates_for_pools(flexvol_pools,
                                                    aggregate_pools)
        node_names, aggr_node_map = self._get_nodes_for_aggregates(aggr_names)

        # Update performance counter cache for each node
        node_utilization = {}
        for node_name in node_names:
            if node_name not in self.performance_counters:
                self.performance_counters[node_name] = []

            # Get new performance counters and save only the last 10
            counters = self._get_node_utilization_counters(node_name)
            if not counters:
                # Counter collection failed; keep any previous history
                # and skip this node for this cycle.
                continue

            self.performance_counters[node_name].append(counters)
            self.performance_counters[node_name] = (
                self.performance_counters[node_name][-10:])

            # Update utilization for each node using newest & oldest sample
            counters = self.performance_counters[node_name]
            if len(counters) < 2:
                # Utilization is a delta; need at least two samples.
                node_utilization[node_name] = DEFAULT_UTILIZATION
            else:
                node_utilization[node_name] = self._get_node_utilization(
                    counters[0], counters[-1], node_name)

        # Update pool utilization map atomically
        pool_utilization = {}
        all_pools = copy.deepcopy(flexvol_pools)
        all_pools.update(aggregate_pools)
        for pool_name, pool_info in all_pools.items():
            aggr_name = pool_info.get('netapp_aggregate', 'unknown')
            node_name = aggr_node_map.get(aggr_name)
            if node_name:
                pool_utilization[pool_name] = node_utilization.get(
                    node_name, DEFAULT_UTILIZATION)
            else:
                pool_utilization[pool_name] = DEFAULT_UTILIZATION

        # Single assignment so readers never see a half-built map.
        self.pool_utilization = pool_utilization

    def get_node_utilization_for_pool(self, pool_name):
        """Get the node utilization for the specified pool, if available."""

        return self.pool_utilization.get(pool_name, DEFAULT_UTILIZATION)

    def update_for_failover(self, zapi_client, flexvol_pools, aggregate_pools):
        """Change API client after a whole-backend failover event."""

        self.zapi_client = zapi_client
        self.update_performance_cache(flexvol_pools, aggregate_pools)

    def _get_aggregates_for_pools(self, flexvol_pools, aggregate_pools):
        """Get the set of aggregates that contain the specified pools."""

        aggr_names = set()
        for pool_name, pool_info in aggregate_pools.items():
            aggr_names.add(pool_info.get('netapp_aggregate'))
        for pool_name, pool_info in flexvol_pools.items():
            aggr_names.add(pool_info.get('netapp_aggregate'))
        return list(aggr_names)

    def _get_nodes_for_aggregates(self, aggr_names):
        """Get the cluster nodes that own the specified aggregates.

        Returns (node name list, aggregate-name -> node-name map).
        Aggregates whose owner cannot be determined are omitted from
        the map.
        """

        node_names = set()
        aggr_node_map = {}

        for aggr_name in aggr_names:
            node_name = self.zapi_client.get_node_for_aggregate(aggr_name)
            if node_name:
                node_names.add(node_name)
                aggr_node_map[aggr_name] = node_name

        return list(node_names), aggr_node_map

    def _get_node_utilization(self, counters_t1, counters_t2, node_name):
        """Get node utilization from two sets of performance counters.

        Returns a value clamped to [0, 100]; falls back to
        DEFAULT_UTILIZATION if any counter math fails (missing counter,
        zero denominator, etc.).
        """

        try:
            # Time spent in the single-threaded Kahuna domain
            kahuna_percent = self._get_kahuna_utilization(counters_t1,
                                                          counters_t2)

            # If Kahuna is using >60% of the CPU, the controller is fully busy
            if kahuna_percent > 60:
                return 100.0

            # Average CPU busyness across all processors
            avg_cpu_percent = 100.0 * self._get_average_cpu_utilization(
                counters_t1, counters_t2)

            # Total Consistency Point (CP) time
            total_cp_time_msec = self._get_total_consistency_point_time(
                counters_t1, counters_t2)

            # Time spent in CP Phase 2 (buffer flush)
            p2_flush_time_msec = self._get_consistency_point_p2_flush_time(
                counters_t1, counters_t2)

            # Wall-clock time between the two counter sets
            poll_time_msec = self._get_total_time(counters_t1,
                                                  counters_t2,
                                                  'total_cp_msecs')

            # If two polls happened in quick succession, use CPU utilization
            if total_cp_time_msec == 0 or poll_time_msec == 0:
                return max(min(100.0, avg_cpu_percent), 0)

            # Adjusted Consistency Point time
            adjusted_cp_time_msec = self._get_adjusted_consistency_point_time(
                total_cp_time_msec, p2_flush_time_msec)
            adjusted_cp_percent = (100.0 *
                                   adjusted_cp_time_msec / poll_time_msec)

            # Utilization is the greater of CPU busyness & CP time
            node_utilization = max(avg_cpu_percent, adjusted_cp_percent)
            return max(min(100.0, node_utilization), 0)

        except Exception:
            LOG.exception(_LE('Could not calculate node utilization for '
                              'node %s.'), node_name)
            return DEFAULT_UTILIZATION

    def _get_kahuna_utilization(self, counters_t1, counters_t2):
        """Get time spent in the single-threaded Kahuna domain."""

        # Note(cknight): Because Kahuna is single-threaded, running only on
        # one CPU at a time, we can safely sum the Kahuna CPU usage
        # percentages across all processors in a node.
        return sum(self._get_performance_counter_average_multi_instance(
            counters_t1, counters_t2, 'domain_busy:kahuna',
            'processor_elapsed_time')) * 100.0

    def _get_average_cpu_utilization(self, counters_t1, counters_t2):
        """Get average CPU busyness across all processors."""

        return self._get_performance_counter_average(
            counters_t1, counters_t2, 'avg_processor_busy',
            self.avg_processor_busy_base_counter_name)

    def _get_total_consistency_point_time(self, counters_t1, counters_t2):
        """Get time spent in Consistency Points in msecs."""

        return float(self._get_performance_counter_delta(
            counters_t1, counters_t2, 'total_cp_msecs'))

    def _get_consistency_point_p2_flush_time(self, counters_t1, counters_t2):
        """Get time spent in CP Phase 2 (buffer flush) in msecs."""

        return float(self._get_performance_counter_delta(
            counters_t1, counters_t2, 'cp_phase_times:p2_flush'))

    def _get_total_time(self, counters_t1, counters_t2, counter_name):
        """Get wall clock time between two successive counters in msecs."""

        # The 'timestamp' field carried by each counter record is in
        # seconds, hence the * 1000.0 conversion.
        timestamp_t1 = float(self._find_performance_counter_timestamp(
            counters_t1, counter_name))
        timestamp_t2 = float(self._find_performance_counter_timestamp(
            counters_t2, counter_name))
        return (timestamp_t2 - timestamp_t1) * 1000.0

    def _get_adjusted_consistency_point_time(self, total_cp_time,
                                             p2_flush_time):
        """Get adjusted CP time by limiting CP phase 2 flush time to 20%."""

        # NOTE(review): the docstring says "limit P2 flush time to 20%",
        # but the formula scales (total - p2_flush) by 1.20 instead of
        # capping p2_flush at 20% of the total — confirm this is intended.
        return (total_cp_time - p2_flush_time) * 1.20

    def _get_performance_counter_delta(self, counters_t1, counters_t2,
                                       counter_name):
        """Calculate a delta value from two performance counters."""

        counter_t1 = int(
            self._find_performance_counter_value(counters_t1, counter_name))
        counter_t2 = int(
            self._find_performance_counter_value(counters_t2, counter_name))

        return counter_t2 - counter_t1

    def _get_performance_counter_average(self, counters_t1, counters_t2,
                                         counter_name, base_counter_name,
                                         instance_name=None):
        """Calculate an average value from two performance counters.

        May raise ZeroDivisionError if the base counter did not advance
        between samples; callers handle failures.
        """

        counter_t1 = float(self._find_performance_counter_value(
            counters_t1, counter_name, instance_name))
        counter_t2 = float(self._find_performance_counter_value(
            counters_t2, counter_name, instance_name))
        base_counter_t1 = float(self._find_performance_counter_value(
            counters_t1, base_counter_name, instance_name))
        base_counter_t2 = float(self._find_performance_counter_value(
            counters_t2, base_counter_name, instance_name))

        return (counter_t2 - counter_t1) / (base_counter_t2 - base_counter_t1)

    def _get_performance_counter_average_multi_instance(self, counters_t1,
                                                        counters_t2,
                                                        counter_name,
                                                        base_counter_name):
        """Calculate an average value from multiple counter instances."""

        averages = []
        # Collect the instance names present in the first sample that
        # carry the requested counter.
        instance_names = []
        for counter in counters_t1:
            if counter_name in counter:
                instance_names.append(counter['instance-name'])

        for instance_name in instance_names:
            average = self._get_performance_counter_average(
                counters_t1, counters_t2, counter_name, base_counter_name,
                instance_name)
            averages.append(average)

        return averages

    def _find_performance_counter_value(self, counters, counter_name,
                                        instance_name=None):
        """Given a counter set, return the value of a named instance.

        :raises exception.NotFound: if no record matches both the
            counter name and (when given) the instance name.
        """

        for counter in counters:
            if counter_name in counter:
                if (instance_name is None
                        or counter['instance-name'] == instance_name):
                    return counter[counter_name]
        else:
            raise exception.NotFound(_('Counter %s not found') % counter_name)

    def _find_performance_counter_timestamp(self, counters, counter_name,
                                            instance_name=None):
        """Given a counter set, return the timestamp of a named instance.

        :raises exception.NotFound: if no record matches both the
            counter name and (when given) the instance name.
        """

        for counter in counters:
            if counter_name in counter:
                if (instance_name is None
                        or counter['instance-name'] == instance_name):
                    return counter['timestamp']
        else:
            raise exception.NotFound(_('Counter %s not found') % counter_name)

    def _expand_performance_array(self, object_name, counter_name, counter):
        """Get array labels and expand counter data array.

        Mutates 'counter' in place, adding one
        '<counter_name>:<label>' key per array element.
        """

        # Get array labels for counter value
        counter_info = self.zapi_client.get_performance_counter_info(
            object_name, counter_name)

        array_labels = [counter_name + ':' + label.lower()
                        for label in counter_info['labels']]
        array_values = counter[counter_name].split(',')

        # Combine labels and values, and then mix into existing counter
        array_data = dict(zip(array_labels, array_values))
        counter.update(array_data)

    def _get_base_counter_name(self, object_name, counter_name):
        """Get the name of the base counter for the specified counter."""

        counter_info = self.zapi_client.get_performance_counter_info(
            object_name, counter_name)
        return counter_info['base-counter']

    def _get_node_utilization_counters(self, node_name):
        """Get all performance counters for calculating node utilization.

        Returns None (rather than raising) if any counter query fails,
        so the caller can skip the node for this polling cycle.
        """

        try:
            return (self._get_node_utilization_system_counters(node_name) +
                    self._get_node_utilization_wafl_counters(node_name) +
                    self._get_node_utilization_processor_counters(node_name))
        except netapp_api.NaApiError:
            LOG.exception(_LE('Could not get utilization counters from node '
                              '%s'), node_name)
            return None

    def _get_node_utilization_system_counters(self, node_name):
        """Get the system counters for calculating node utilization."""

        system_instance_uuids = (
            self.zapi_client.get_performance_instance_uuids(
                self.system_object_name, node_name))

        system_counter_names = [
            'avg_processor_busy',
            self.avg_processor_busy_base_counter_name,
        ]
        # 'cpu_elapsed_time1' implies the legacy fallback path; also
        # fetch 'cpu_elapsed_time' so a timestamp source is available.
        if 'cpu_elapsed_time1' in system_counter_names:
            system_counter_names.append('cpu_elapsed_time')

        system_counters = self.zapi_client.get_performance_counters(
            self.system_object_name, system_instance_uuids,
            system_counter_names)

        return system_counters

    def _get_node_utilization_wafl_counters(self, node_name):
        """Get the WAFL counters for calculating node utilization."""

        wafl_instance_uuids = self.zapi_client.get_performance_instance_uuids(
            'wafl', node_name)

        wafl_counter_names = ['total_cp_msecs', 'cp_phase_times']
        wafl_counters = self.zapi_client.get_performance_counters(
            'wafl', wafl_instance_uuids, wafl_counter_names)

        # Expand array data so we can use wafl:cp_phase_times[P2_FLUSH]
        for counter in wafl_counters:
            if 'cp_phase_times' in counter:
                self._expand_performance_array(
                    'wafl', 'cp_phase_times', counter)

        return wafl_counters

    def _get_node_utilization_processor_counters(self, node_name):
        """Get the processor counters for calculating node utilization."""

        processor_instance_uuids = (
            self.zapi_client.get_performance_instance_uuids('processor',
                                                            node_name))

        processor_counter_names = ['domain_busy', 'processor_elapsed_time']
        processor_counters = self.zapi_client.get_performance_counters(
            'processor', processor_instance_uuids, processor_counter_names)

        # Expand array data so we can use processor:domain_busy[kahuna]
        for counter in processor_counters:
            if 'domain_busy' in counter:
                self._expand_performance_array(
                    'processor', 'domain_busy', counter)

        return processor_counters
|
@ -56,7 +56,7 @@ def check_flags(required_flags, configuration):
|
||||
raise exception.InvalidInput(reason=msg)
|
||||
|
||||
|
||||
def round_down(value, precision):
|
||||
def round_down(value, precision='0.00'):
|
||||
"""Round a number downward using a specified level of precision.
|
||||
|
||||
Example: round_down(float(total_space_in_bytes) / units.Gi, '0.01')
|
||||
|
@ -32,7 +32,8 @@ REMOTE_CLUSTER_NAME = 'fake_cluster_2'
|
||||
CLUSTER_ADDRESS_1 = 'fake_cluster_address'
|
||||
CLUSTER_ADDRESS_2 = 'fake_cluster_address_2'
|
||||
VERSION = 'NetApp Release 8.2.1 Cluster-Mode: Fri Mar 21 14:25:07 PDT 2014'
|
||||
NODE_NAME = 'fake_node'
|
||||
NODE_NAME = 'fake_node1'
|
||||
NODE_NAMES = ('fake_node1', 'fake_node2')
|
||||
VSERVER_NAME = 'fake_vserver'
|
||||
VSERVER_NAME_2 = 'fake_vserver_2'
|
||||
ADMIN_VSERVER_NAME = 'fake_admin_vserver'
|
||||
@ -2058,6 +2059,131 @@ SNAPMIRROR_INITIALIZE_RESULT = etree.XML("""
|
||||
</results>
|
||||
""")
|
||||
|
||||
PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS = [
|
||||
'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA', 'IPU_DISK_ADD',
|
||||
'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI', 'P2V_FSINFO', 'P2V_DLOG1',
|
||||
'P2V_DLOG2', 'P2V_REFCOUNT', 'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM',
|
||||
'P2V_SNAP', 'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
|
||||
'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
|
||||
'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA', 'P2A_HYABC', 'P2A_BM',
|
||||
'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH', 'P2_FINISH', 'P3_WAIT',
|
||||
'P3V_VOLINFO', 'P3A_VOLINFO', 'P3_FINISH', 'P4_FINISH', 'P5_FINISH',
|
||||
]
|
||||
|
||||
PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE = etree.XML("""
|
||||
<results status="passed">
|
||||
<counters>
|
||||
<counter-info>
|
||||
<desc>No. of times 8.3 names are accessed per second.</desc>
|
||||
<name>access_8_3_names</name>
|
||||
<privilege-level>diag</privilege-level>
|
||||
<properties>rate</properties>
|
||||
<unit>per_sec</unit>
|
||||
</counter-info>
|
||||
<counter-info>
|
||||
<desc>Array of counts of different types of CPs</desc>
|
||||
<labels>
|
||||
<label-info>wafl_timer generated CP</label-info>
|
||||
<label-info>snapshot generated CP</label-info>
|
||||
<label-info>wafl_avail_bufs generated CP</label-info>
|
||||
<label-info>dirty_blk_cnt generated CP</label-info>
|
||||
<label-info>full NV-log generated CP,back-to-back CP</label-info>
|
||||
<label-info>flush generated CP,sync generated CP</label-info>
|
||||
<label-info>deferred back-to-back CP</label-info>
|
||||
<label-info>low mbufs generated CP</label-info>
|
||||
<label-info>low datavecs generated CP</label-info>
|
||||
<label-info>nvlog replay takeover time limit CP</label-info>
|
||||
</labels>
|
||||
<name>cp_count</name>
|
||||
<privilege-level>diag</privilege-level>
|
||||
<properties>delta</properties>
|
||||
<type>array</type>
|
||||
<unit>none</unit>
|
||||
</counter-info>
|
||||
<counter-info>
|
||||
<base-counter>total_cp_msecs</base-counter>
|
||||
<desc>Array of percentage time spent in different phases of CP</desc>
|
||||
<labels>
|
||||
<label-info>%(labels)s</label-info>
|
||||
</labels>
|
||||
<name>cp_phase_times</name>
|
||||
<privilege-level>diag</privilege-level>
|
||||
<properties>percent</properties>
|
||||
<type>array</type>
|
||||
<unit>percent</unit>
|
||||
</counter-info>
|
||||
</counters>
|
||||
</results>
|
||||
""" % {'labels': ','.join(PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS)})
|
||||
|
||||
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE = etree.XML("""
|
||||
<results status="passed">
|
||||
<instances>
|
||||
<instance-data>
|
||||
<counters>
|
||||
<counter-data>
|
||||
<name>avg_processor_busy</name>
|
||||
<value>5674745133134</value>
|
||||
</counter-data>
|
||||
</counters>
|
||||
<name>system</name>
|
||||
<uuid>%(node1)s:kernel:system</uuid>
|
||||
</instance-data>
|
||||
<instance-data>
|
||||
<counters>
|
||||
<counter-data>
|
||||
<name>avg_processor_busy</name>
|
||||
<value>4077649009234</value>
|
||||
</counter-data>
|
||||
</counters>
|
||||
<name>system</name>
|
||||
<uuid>%(node2)s:kernel:system</uuid>
|
||||
</instance-data>
|
||||
</instances>
|
||||
<timestamp>1453412013</timestamp>
|
||||
</results>
|
||||
""" % {'node1': NODE_NAMES[0], 'node2': NODE_NAMES[1]})
|
||||
|
||||
PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_7MODE = etree.XML("""
|
||||
<results status="passed">
|
||||
<timestamp>1454146292</timestamp>
|
||||
<instances>
|
||||
<instance-data>
|
||||
<name>system</name>
|
||||
<counters>
|
||||
<counter-data>
|
||||
<name>avg_processor_busy</name>
|
||||
<value>13215732322</value>
|
||||
</counter-data>
|
||||
</counters>
|
||||
</instance-data>
|
||||
</instances>
|
||||
</results>""")
|
||||
|
||||
PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE = etree.XML("""
|
||||
<results status="passed">
|
||||
<attributes-list>
|
||||
<instance-info>
|
||||
<name>system</name>
|
||||
<uuid>%(node)s:kernel:system</uuid>
|
||||
</instance-info>
|
||||
</attributes-list>
|
||||
<num-records>1</num-records>
|
||||
</results>
|
||||
""" % {'node': NODE_NAME})
|
||||
|
||||
PERF_OBJECT_INSTANCE_LIST_INFO_RESPONSE = etree.XML("""
|
||||
<results status="passed">
|
||||
<instances>
|
||||
<instance-info>
|
||||
<name>processor0</name>
|
||||
</instance-info>
|
||||
<instance-info>
|
||||
<name>processor1</name>
|
||||
</instance-info>
|
||||
</instances>
|
||||
</results>""")
|
||||
|
||||
FAKE_VOL_XML = """<volume-info xmlns='http://www.netapp.com/filer/admin'>
|
||||
<name>open123</name>
|
||||
<state>online</state>
|
||||
|
@ -1848,6 +1848,115 @@ class NetAppClientCmodeTestCase(test.TestCase):
|
||||
mock.call('aggr-get-iter', {})])
|
||||
self.assertListEqual([], result)
|
||||
|
||||
def test_get_performance_instance_uuids(self):
|
||||
|
||||
api_response = netapp_api.NaElement(
|
||||
fake.PERF_OBJECT_INSTANCE_LIST_INFO_ITER_RESPONSE)
|
||||
self.mock_object(self.client,
|
||||
'send_request',
|
||||
mock.Mock(return_value=api_response))
|
||||
|
||||
result = self.client.get_performance_instance_uuids(
|
||||
'system', fake.NODE_NAME)
|
||||
|
||||
expected = [fake.NODE_NAME + ':kernel:system']
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
perf_object_instance_list_info_iter_args = {
|
||||
'objectname': 'system',
|
||||
'query': {
|
||||
'instance-info': {
|
||||
'uuid': fake.NODE_NAME + ':*',
|
||||
}
|
||||
}
|
||||
}
|
||||
self.client.send_request.assert_called_once_with(
|
||||
'perf-object-instance-list-info-iter',
|
||||
perf_object_instance_list_info_iter_args)
|
||||
|
||||
def test_get_performance_counter_info(self):
|
||||
|
||||
api_response = netapp_api.NaElement(
|
||||
fake.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE)
|
||||
self.mock_object(self.client,
|
||||
'send_request',
|
||||
mock.Mock(return_value=api_response))
|
||||
|
||||
result = self.client.get_performance_counter_info('wafl',
|
||||
'cp_phase_times')
|
||||
|
||||
expected = {
|
||||
'name': 'cp_phase_times',
|
||||
'base-counter': 'total_cp_msecs',
|
||||
'labels': fake.PERF_OBJECT_COUNTER_TOTAL_CP_MSECS_LABELS,
|
||||
}
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
perf_object_counter_list_info_args = {'objectname': 'wafl'}
|
||||
self.client.send_request.assert_called_once_with(
|
||||
'perf-object-counter-list-info',
|
||||
perf_object_counter_list_info_args)
|
||||
|
||||
def test_get_performance_counter_info_not_found(self):
|
||||
|
||||
api_response = netapp_api.NaElement(
|
||||
fake.PERF_OBJECT_COUNTER_LIST_INFO_WAFL_RESPONSE)
|
||||
self.mock_object(self.client,
|
||||
'send_request',
|
||||
mock.Mock(return_value=api_response))
|
||||
|
||||
self.assertRaises(exception.NotFound,
|
||||
self.client.get_performance_counter_info,
|
||||
'wafl',
|
||||
'invalid')
|
||||
|
||||
def test_get_performance_counters(self):
|
||||
|
||||
api_response = netapp_api.NaElement(
|
||||
fake.PERF_OBJECT_GET_INSTANCES_SYSTEM_RESPONSE_CMODE)
|
||||
self.mock_object(self.client,
|
||||
'send_request',
|
||||
mock.Mock(return_value=api_response))
|
||||
|
||||
instance_uuids = [
|
||||
fake.NODE_NAMES[0] + ':kernel:system',
|
||||
fake.NODE_NAMES[1] + ':kernel:system',
|
||||
]
|
||||
counter_names = ['avg_processor_busy']
|
||||
result = self.client.get_performance_counters('system',
|
||||
instance_uuids,
|
||||
counter_names)
|
||||
|
||||
expected = [
|
||||
{
|
||||
'avg_processor_busy': '5674745133134',
|
||||
'instance-name': 'system',
|
||||
'instance-uuid': instance_uuids[0],
|
||||
'node-name': fake.NODE_NAMES[0],
|
||||
'timestamp': '1453412013',
|
||||
}, {
|
||||
'avg_processor_busy': '4077649009234',
|
||||
'instance-name': 'system',
|
||||
'instance-uuid': instance_uuids[1],
|
||||
'node-name': fake.NODE_NAMES[1],
|
||||
'timestamp': '1453412013'
|
||||
},
|
||||
]
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
perf_object_get_instances_args = {
|
||||
'objectname': 'system',
|
||||
'instance-uuids': [
|
||||
{'instance-uuid': instance_uuid}
|
||||
for instance_uuid in instance_uuids
|
||||
],
|
||||
'counters': [
|
||||
{'counter': counter} for counter in counter_names
|
||||
],
|
||||
}
|
||||
self.client.send_request.assert_called_once_with(
|
||||
'perf-object-get-instances', perf_object_get_instances_args)
|
||||
|
||||
def test_setup_security_services_ldap(self):
|
||||
|
||||
self.mock_object(self.client, 'send_request')
|
||||
|
@ -35,6 +35,7 @@ from manila.share.drivers.netapp.dataontap.client import api as netapp_api
|
||||
from manila.share.drivers.netapp.dataontap.client import client_cmode
|
||||
from manila.share.drivers.netapp.dataontap.cluster_mode import data_motion
|
||||
from manila.share.drivers.netapp.dataontap.cluster_mode import lib_base
|
||||
from manila.share.drivers.netapp.dataontap.cluster_mode import performance
|
||||
from manila.share.drivers.netapp.dataontap.protocols import cifs_cmode
|
||||
from manila.share.drivers.netapp.dataontap.protocols import nfs_cmode
|
||||
from manila.share.drivers.netapp import utils as na_utils
|
||||
@ -81,6 +82,7 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
self.library = lib_base.NetAppCmodeFileStorageLibrary(fake.DRIVER_NAME,
|
||||
**kwargs)
|
||||
self.library._client = mock.Mock()
|
||||
self.library._perf_library = mock.Mock()
|
||||
self.client = self.library._client
|
||||
self.context = mock.Mock()
|
||||
self.fake_replica = copy.deepcopy(fake.SHARE)
|
||||
@ -104,12 +106,16 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
|
||||
def test_do_setup(self):
|
||||
mock_get_api_client = self.mock_object(self.library, '_get_api_client')
|
||||
self.mock_object(
|
||||
performance, 'PerformanceLibrary',
|
||||
mock.Mock(return_value='fake_perf_library'))
|
||||
|
||||
self.library.do_setup(self.context)
|
||||
|
||||
mock_get_api_client.assert_called_once_with()
|
||||
self.library._client.check_for_cluster_credentials.\
|
||||
assert_called_once_with()
|
||||
self.assertEqual('fake_perf_library', self.library._perf_library)
|
||||
|
||||
def test_check_for_setup_error(self):
|
||||
|
||||
@ -330,13 +336,26 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
self.assertFalse(self.library._client.get_node_for_aggregate.called)
|
||||
self.assertIsNone(result)
|
||||
|
||||
def test_get_default_filter_function(self):
|
||||
|
||||
result = self.library.get_default_filter_function()
|
||||
|
||||
self.assertEqual(self.library.DEFAULT_FILTER_FUNCTION, result)
|
||||
|
||||
def test_get_default_goodness_function(self):
|
||||
|
||||
result = self.library.get_default_goodness_function()
|
||||
|
||||
self.assertEqual(self.library.DEFAULT_GOODNESS_FUNCTION, result)
|
||||
|
||||
def test_get_share_stats(self):
|
||||
|
||||
self.mock_object(self.library,
|
||||
'_get_pools',
|
||||
mock.Mock(return_value=fake.POOLS))
|
||||
mock_get_pools = self.mock_object(
|
||||
self.library, '_get_pools',
|
||||
mock.Mock(return_value=fake.POOLS))
|
||||
|
||||
result = self.library.get_share_stats()
|
||||
result = self.library.get_share_stats(filter_function='filter',
|
||||
goodness_function='goodness')
|
||||
|
||||
expected = {
|
||||
'share_backend_name': fake.BACKEND_NAME,
|
||||
@ -349,15 +368,18 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
'pools': fake.POOLS,
|
||||
}
|
||||
self.assertDictEqual(expected, result)
|
||||
mock_get_pools.assert_called_once_with(filter_function='filter',
|
||||
goodness_function='goodness')
|
||||
|
||||
def test_get_share_stats_with_replication(self):
|
||||
|
||||
self.library.configuration.replication_domain = "fake_domain"
|
||||
self.mock_object(self.library,
|
||||
'_get_pools',
|
||||
mock.Mock(return_value=fake.POOLS))
|
||||
mock_get_pools = self.mock_object(
|
||||
self.library, '_get_pools',
|
||||
mock.Mock(return_value=fake.POOLS))
|
||||
|
||||
result = self.library.get_share_stats()
|
||||
result = self.library.get_share_stats(filter_function='filter',
|
||||
goodness_function='goodness')
|
||||
|
||||
expected = {
|
||||
'share_backend_name': fake.BACKEND_NAME,
|
||||
@ -372,6 +394,8 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
'pools': fake.POOLS,
|
||||
}
|
||||
self.assertDictEqual(expected, result)
|
||||
mock_get_pools.assert_called_once_with(filter_function='filter',
|
||||
goodness_function='goodness')
|
||||
|
||||
def test_get_share_server_pools(self):
|
||||
|
||||
@ -390,8 +414,11 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
mock.Mock(return_value=fake.AGGREGATE_CAPACITIES))
|
||||
self.library._have_cluster_creds = True
|
||||
self.library._ssc_stats = fake.SSC_INFO
|
||||
self.library._perf_library.get_node_utilization_for_pool = (
|
||||
mock.Mock(side_effect=[30.0, 42.0]))
|
||||
|
||||
result = self.library._get_pools()
|
||||
result = self.library._get_pools(filter_function='filter',
|
||||
goodness_function='goodness')
|
||||
|
||||
self.assertListEqual(fake.POOLS, result)
|
||||
|
||||
@ -401,6 +428,9 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
self.library, '_get_aggregate_space',
|
||||
mock.Mock(return_value=fake.AGGREGATE_CAPACITIES_VSERVER_CREDS))
|
||||
self.library._have_cluster_creds = False
|
||||
self.library._ssc_stats = fake.SSC_INFO_VSERVER_CREDS
|
||||
self.library._perf_library.get_node_utilization_for_pool = (
|
||||
mock.Mock(side_effect=[50.0, 50.0]))
|
||||
|
||||
result = self.library._get_pools()
|
||||
|
||||
@ -2149,8 +2179,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
self.library._update_ssc_info()
|
||||
|
||||
expected = {
|
||||
fake.AGGREGATES[0]: {},
|
||||
fake.AGGREGATES[1]: {}
|
||||
fake.AGGREGATES[0]: {
|
||||
'netapp_aggregate': fake.AGGREGATES[0],
|
||||
},
|
||||
fake.AGGREGATES[1]: {
|
||||
'netapp_aggregate': fake.AGGREGATES[1],
|
||||
}
|
||||
}
|
||||
|
||||
self.assertDictEqual(expected, self.library._ssc_stats)
|
||||
@ -2179,8 +2213,12 @@ class NetAppFileStorageLibraryTestCase(test.TestCase):
|
||||
self.client, 'get_aggregate_disk_types',
|
||||
mock.Mock(side_effect=fake.SSC_DISK_TYPES))
|
||||
ssc_stats = {
|
||||
fake.AGGREGATES[0]: {},
|
||||
fake.AGGREGATES[1]: {},
|
||||
fake.AGGREGATES[0]: {
|
||||
'netapp_aggregate': fake.AGGREGATES[0],
|
||||
},
|
||||
fake.AGGREGATES[1]: {
|
||||
'netapp_aggregate': fake.AGGREGATES[1],
|
||||
},
|
||||
}
|
||||
|
||||
self.library._update_ssc_aggr_info(fake.AGGREGATES, ssc_stats)
|
||||
|
@ -0,0 +1,808 @@
|
||||
# Copyright (c) 2016 Clinton Knight
|
||||
# All rights reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import mock
|
||||
|
||||
from manila import exception
|
||||
from manila.share.drivers.netapp.dataontap.client import api as netapp_api
|
||||
from manila.share.drivers.netapp.dataontap.cluster_mode import performance
|
||||
from manila import test
|
||||
from manila.tests.share.drivers.netapp.dataontap import fakes as fake
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class PerformanceLibraryTestCase(test.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(PerformanceLibraryTestCase, self).setUp()
|
||||
|
||||
with mock.patch.object(performance.PerformanceLibrary,
|
||||
'_init_counter_info'):
|
||||
self.zapi_client = mock.Mock()
|
||||
self.perf_library = performance.PerformanceLibrary(
|
||||
self.zapi_client)
|
||||
self.perf_library.system_object_name = 'system'
|
||||
self.perf_library.avg_processor_busy_base_counter_name = (
|
||||
'cpu_elapsed_time1')
|
||||
|
||||
self._set_up_fake_pools()
|
||||
|
||||
def _set_up_fake_pools(self):
|
||||
|
||||
self.fake_volumes = {
|
||||
'pool1': {
|
||||
'netapp_aggregate': 'aggr1',
|
||||
},
|
||||
'pool2': {
|
||||
'netapp_aggregate': 'aggr2',
|
||||
},
|
||||
'pool3': {
|
||||
'netapp_aggregate': 'aggr2',
|
||||
},
|
||||
}
|
||||
self.fake_aggregates = {
|
||||
'pool4': {
|
||||
'netapp_aggregate': 'aggr3',
|
||||
}
|
||||
}
|
||||
|
||||
self.fake_aggr_names = ['aggr1', 'aggr2', 'aggr3']
|
||||
self.fake_nodes = ['node1', 'node2']
|
||||
self.fake_aggr_node_map = {
|
||||
'aggr1': 'node1',
|
||||
'aggr2': 'node2',
|
||||
'aggr3': 'node2',
|
||||
}
|
||||
|
||||
def _get_fake_counters(self):
|
||||
|
||||
return {
|
||||
'node1': list(range(11, 21)),
|
||||
'node2': list(range(21, 31)),
|
||||
}
|
||||
|
||||
def test_init(self):
|
||||
|
||||
mock_zapi_client = mock.Mock()
|
||||
mock_init_counter_info = self.mock_object(
|
||||
performance.PerformanceLibrary, '_init_counter_info')
|
||||
|
||||
library = performance.PerformanceLibrary(mock_zapi_client)
|
||||
|
||||
self.assertEqual(mock_zapi_client, library.zapi_client)
|
||||
mock_init_counter_info.assert_called_once_with()
|
||||
|
||||
def test_init_counter_info_not_supported(self):
|
||||
|
||||
self.zapi_client.features.SYSTEM_METRICS = False
|
||||
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
|
||||
mock_get_base_counter_name = self.mock_object(
|
||||
self.perf_library, '_get_base_counter_name')
|
||||
|
||||
self.perf_library._init_counter_info()
|
||||
|
||||
self.assertIsNone(self.perf_library.system_object_name)
|
||||
self.assertIsNone(
|
||||
self.perf_library.avg_processor_busy_base_counter_name)
|
||||
self.assertFalse(mock_get_base_counter_name.called)
|
||||
|
||||
@ddt.data({
|
||||
'system_constituent': False,
|
||||
'base_counter': 'cpu_elapsed_time1',
|
||||
}, {
|
||||
'system_constituent': True,
|
||||
'base_counter': 'cpu_elapsed_time',
|
||||
})
|
||||
@ddt.unpack
|
||||
def test_init_counter_info_api_error(self, system_constituent,
|
||||
base_counter):
|
||||
|
||||
self.zapi_client.features.SYSTEM_METRICS = True
|
||||
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = (
|
||||
system_constituent)
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_base_counter_name',
|
||||
mock.Mock(side_effect=netapp_api.NaApiError))
|
||||
|
||||
self.perf_library._init_counter_info()
|
||||
|
||||
self.assertEqual(
|
||||
base_counter,
|
||||
self.perf_library.avg_processor_busy_base_counter_name)
|
||||
|
||||
def test_init_counter_info_system(self):
|
||||
|
||||
self.zapi_client.features.SYSTEM_METRICS = True
|
||||
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
|
||||
mock_get_base_counter_name = self.mock_object(
|
||||
self.perf_library, '_get_base_counter_name',
|
||||
mock.Mock(return_value='cpu_elapsed_time1'))
|
||||
|
||||
self.perf_library._init_counter_info()
|
||||
|
||||
self.assertEqual('system', self.perf_library.system_object_name)
|
||||
self.assertEqual(
|
||||
'cpu_elapsed_time1',
|
||||
self.perf_library.avg_processor_busy_base_counter_name)
|
||||
mock_get_base_counter_name.assert_called_once_with(
|
||||
'system', 'avg_processor_busy')
|
||||
|
||||
def test_init_counter_info_system_constituent(self):
|
||||
|
||||
self.zapi_client.features.SYSTEM_METRICS = False
|
||||
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = True
|
||||
mock_get_base_counter_name = self.mock_object(
|
||||
self.perf_library, '_get_base_counter_name',
|
||||
mock.Mock(return_value='cpu_elapsed_time'))
|
||||
|
||||
self.perf_library._init_counter_info()
|
||||
|
||||
self.assertEqual('system:constituent',
|
||||
self.perf_library.system_object_name)
|
||||
self.assertEqual(
|
||||
'cpu_elapsed_time',
|
||||
self.perf_library.avg_processor_busy_base_counter_name)
|
||||
mock_get_base_counter_name.assert_called_once_with(
|
||||
'system:constituent', 'avg_processor_busy')
|
||||
|
||||
def test_update_performance_cache(self):
|
||||
|
||||
self.perf_library.performance_counters = self._get_fake_counters()
|
||||
mock_get_aggregates_for_pools = self.mock_object(
|
||||
self.perf_library, '_get_aggregates_for_pools',
|
||||
mock.Mock(return_value=self.fake_aggr_names))
|
||||
mock_get_nodes_for_aggregates = self.mock_object(
|
||||
self.perf_library, '_get_nodes_for_aggregates',
|
||||
mock.Mock(return_value=(self.fake_nodes,
|
||||
self.fake_aggr_node_map)))
|
||||
mock_get_node_utilization_counters = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization_counters',
|
||||
mock.Mock(side_effect=[21, 31]))
|
||||
mock_get_node_utilization = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization',
|
||||
mock.Mock(side_effect=[25, 75]))
|
||||
|
||||
self.perf_library.update_performance_cache(self.fake_volumes,
|
||||
self.fake_aggregates)
|
||||
|
||||
expected_performance_counters = {
|
||||
'node1': list(range(12, 22)),
|
||||
'node2': list(range(22, 32)),
|
||||
}
|
||||
self.assertEqual(expected_performance_counters,
|
||||
self.perf_library.performance_counters)
|
||||
|
||||
expected_pool_utilization = {
|
||||
'pool1': 25,
|
||||
'pool2': 75,
|
||||
'pool3': 75,
|
||||
'pool4': 75,
|
||||
}
|
||||
self.assertEqual(expected_pool_utilization,
|
||||
self.perf_library.pool_utilization)
|
||||
|
||||
mock_get_aggregates_for_pools.assert_called_once_with(
|
||||
self.fake_volumes, self.fake_aggregates)
|
||||
mock_get_nodes_for_aggregates.assert_called_once_with(
|
||||
self.fake_aggr_names)
|
||||
mock_get_node_utilization_counters.assert_has_calls([
|
||||
mock.call('node1'), mock.call('node2')])
|
||||
mock_get_node_utilization.assert_has_calls([
|
||||
mock.call(12, 21, 'node1'), mock.call(22, 31, 'node2')])
|
||||
|
||||
def test_update_performance_cache_first_pass(self):
|
||||
|
||||
mock_get_aggregates_for_pools = self.mock_object(
|
||||
self.perf_library, '_get_aggregates_for_pools',
|
||||
mock.Mock(return_value=self.fake_aggr_names))
|
||||
mock_get_nodes_for_aggregates = self.mock_object(
|
||||
self.perf_library, '_get_nodes_for_aggregates',
|
||||
mock.Mock(return_value=(self.fake_nodes,
|
||||
self.fake_aggr_node_map)))
|
||||
mock_get_node_utilization_counters = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization_counters',
|
||||
mock.Mock(side_effect=[11, 21]))
|
||||
mock_get_node_utilization = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization',
|
||||
mock.Mock(side_effect=[25, 75]))
|
||||
|
||||
self.perf_library.update_performance_cache(self.fake_volumes,
|
||||
self.fake_aggregates)
|
||||
|
||||
expected_performance_counters = {'node1': [11], 'node2': [21]}
|
||||
self.assertEqual(expected_performance_counters,
|
||||
self.perf_library.performance_counters)
|
||||
|
||||
expected_pool_utilization = {
|
||||
'pool1': performance.DEFAULT_UTILIZATION,
|
||||
'pool2': performance.DEFAULT_UTILIZATION,
|
||||
'pool3': performance.DEFAULT_UTILIZATION,
|
||||
'pool4': performance.DEFAULT_UTILIZATION,
|
||||
}
|
||||
self.assertEqual(expected_pool_utilization,
|
||||
self.perf_library.pool_utilization)
|
||||
|
||||
mock_get_aggregates_for_pools.assert_called_once_with(
|
||||
self.fake_volumes, self.fake_aggregates)
|
||||
mock_get_nodes_for_aggregates.assert_called_once_with(
|
||||
self.fake_aggr_names)
|
||||
mock_get_node_utilization_counters.assert_has_calls([
|
||||
mock.call('node1'), mock.call('node2')])
|
||||
self.assertFalse(mock_get_node_utilization.called)
|
||||
|
||||
def test_update_performance_cache_unknown_nodes(self):
|
||||
|
||||
self.perf_library.performance_counters = self._get_fake_counters()
|
||||
mock_get_aggregates_for_pools = self.mock_object(
|
||||
self.perf_library, '_get_aggregates_for_pools',
|
||||
mock.Mock(return_value=self.fake_aggr_names))
|
||||
mock_get_nodes_for_aggregates = self.mock_object(
|
||||
self.perf_library, '_get_nodes_for_aggregates',
|
||||
mock.Mock(return_value=([], {})))
|
||||
mock_get_node_utilization_counters = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization_counters',
|
||||
mock.Mock(side_effect=[11, 21]))
|
||||
mock_get_node_utilization = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization',
|
||||
mock.Mock(side_effect=[25, 75]))
|
||||
|
||||
self.perf_library.update_performance_cache(self.fake_volumes,
|
||||
self.fake_aggregates)
|
||||
|
||||
self.assertEqual(self._get_fake_counters(),
|
||||
self.perf_library.performance_counters)
|
||||
|
||||
expected_pool_utilization = {
|
||||
'pool1': performance.DEFAULT_UTILIZATION,
|
||||
'pool2': performance.DEFAULT_UTILIZATION,
|
||||
'pool3': performance.DEFAULT_UTILIZATION,
|
||||
'pool4': performance.DEFAULT_UTILIZATION,
|
||||
}
|
||||
self.assertEqual(expected_pool_utilization,
|
||||
self.perf_library.pool_utilization)
|
||||
|
||||
mock_get_aggregates_for_pools.assert_called_once_with(
|
||||
self.fake_volumes, self.fake_aggregates)
|
||||
mock_get_nodes_for_aggregates.assert_called_once_with(
|
||||
self.fake_aggr_names)
|
||||
self.assertFalse(mock_get_node_utilization_counters.called)
|
||||
self.assertFalse(mock_get_node_utilization.called)
|
||||
|
||||
def test_update_performance_cache_counters_unavailable(self):
|
||||
|
||||
self.perf_library.performance_counters = self._get_fake_counters()
|
||||
mock_get_aggregates_for_pools = self.mock_object(
|
||||
self.perf_library, '_get_aggregates_for_pools',
|
||||
mock.Mock(return_value=self.fake_aggr_names))
|
||||
mock_get_nodes_for_aggregates = self.mock_object(
|
||||
self.perf_library, '_get_nodes_for_aggregates',
|
||||
mock.Mock(return_value=(self.fake_nodes,
|
||||
self.fake_aggr_node_map)))
|
||||
mock_get_node_utilization_counters = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization_counters',
|
||||
mock.Mock(side_effect=[None, None]))
|
||||
mock_get_node_utilization = self.mock_object(
|
||||
self.perf_library, '_get_node_utilization',
|
||||
mock.Mock(side_effect=[25, 75]))
|
||||
|
||||
self.perf_library.update_performance_cache(self.fake_volumes,
|
||||
self.fake_aggregates)
|
||||
|
||||
self.assertEqual(self._get_fake_counters(),
|
||||
self.perf_library.performance_counters)
|
||||
|
||||
expected_pool_utilization = {
|
||||
'pool1': performance.DEFAULT_UTILIZATION,
|
||||
'pool2': performance.DEFAULT_UTILIZATION,
|
||||
'pool3': performance.DEFAULT_UTILIZATION,
|
||||
'pool4': performance.DEFAULT_UTILIZATION,
|
||||
}
|
||||
self.assertEqual(expected_pool_utilization,
|
||||
self.perf_library.pool_utilization)
|
||||
|
||||
mock_get_aggregates_for_pools.assert_called_once_with(
|
||||
self.fake_volumes, self.fake_aggregates)
|
||||
mock_get_nodes_for_aggregates.assert_called_once_with(
|
||||
self.fake_aggr_names)
|
||||
mock_get_node_utilization_counters.assert_has_calls([
|
||||
mock.call('node1'), mock.call('node2')])
|
||||
self.assertFalse(mock_get_node_utilization.called)
|
||||
|
||||
def test_update_performance_cache_not_supported(self):
|
||||
|
||||
self.zapi_client.features.SYSTEM_METRICS = False
|
||||
self.zapi_client.features.SYSTEM_CONSTITUENT_METRICS = False
|
||||
|
||||
mock_get_aggregates_for_pools = self.mock_object(
|
||||
self.perf_library, '_get_aggregates_for_pools')
|
||||
|
||||
self.perf_library.update_performance_cache(self.fake_volumes,
|
||||
self.fake_aggregates)
|
||||
|
||||
expected_performance_counters = {}
|
||||
self.assertEqual(expected_performance_counters,
|
||||
self.perf_library.performance_counters)
|
||||
|
||||
expected_pool_utilization = {}
|
||||
self.assertEqual(expected_pool_utilization,
|
||||
self.perf_library.pool_utilization)
|
||||
|
||||
self.assertFalse(mock_get_aggregates_for_pools.called)
|
||||
|
||||
@ddt.data({'pool': 'pool1', 'expected': 10.0},
|
||||
{'pool': 'pool3', 'expected': performance.DEFAULT_UTILIZATION})
|
||||
@ddt.unpack
|
||||
def test_get_node_utilization_for_pool(self, pool, expected):
|
||||
|
||||
self.perf_library.pool_utilization = {'pool1': 10.0, 'pool2': 15.0}
|
||||
|
||||
result = self.perf_library.get_node_utilization_for_pool(pool)
|
||||
|
||||
self.assertAlmostEqual(expected, result)
|
||||
|
||||
def test__update_for_failover(self):
|
||||
self.mock_object(self.perf_library, 'update_performance_cache')
|
||||
mock_client = mock.Mock(name='FAKE_ZAPI_CLIENT')
|
||||
|
||||
self.perf_library.update_for_failover(mock_client,
|
||||
self.fake_volumes,
|
||||
self.fake_aggregates)
|
||||
|
||||
self.assertEqual(mock_client, self.perf_library.zapi_client)
|
||||
self.perf_library.update_performance_cache.assert_called_once_with(
|
||||
self.fake_volumes, self.fake_aggregates)
|
||||
|
||||
def test_get_aggregates_for_pools(self):
|
||||
|
||||
result = self.perf_library._get_aggregates_for_pools(
|
||||
self.fake_volumes, self.fake_aggregates)
|
||||
|
||||
expected_aggregate_names = ['aggr1', 'aggr2', 'aggr3']
|
||||
self.assertItemsEqual(expected_aggregate_names, result)
|
||||
|
||||
def test_get_nodes_for_aggregates(self):
|
||||
|
||||
aggregate_names = ['aggr1', 'aggr2', 'aggr3']
|
||||
aggregate_nodes = ['node1', 'node2', 'node2']
|
||||
|
||||
mock_get_node_for_aggregate = self.mock_object(
|
||||
self.zapi_client, 'get_node_for_aggregate',
|
||||
mock.Mock(side_effect=aggregate_nodes))
|
||||
|
||||
result = self.perf_library._get_nodes_for_aggregates(aggregate_names)
|
||||
|
||||
self.assertEqual(2, len(result))
|
||||
result_node_names, result_aggr_node_map = result
|
||||
|
||||
expected_node_names = ['node1', 'node2']
|
||||
expected_aggr_node_map = dict(zip(aggregate_names, aggregate_nodes))
|
||||
self.assertItemsEqual(expected_node_names, result_node_names)
|
||||
self.assertEqual(expected_aggr_node_map, result_aggr_node_map)
|
||||
mock_get_node_for_aggregate.assert_has_calls([
|
||||
mock.call('aggr1'), mock.call('aggr2'), mock.call('aggr3')])
|
||||
|
||||
def test_get_node_utilization_kahuna_overutilized(self):
|
||||
|
||||
mock_get_kahuna_utilization = self.mock_object(
|
||||
self.perf_library, '_get_kahuna_utilization',
|
||||
mock.Mock(return_value=61.0))
|
||||
mock_get_average_cpu_utilization = self.mock_object(
|
||||
self.perf_library, '_get_average_cpu_utilization',
|
||||
mock.Mock(return_value=25.0))
|
||||
|
||||
result = self.perf_library._get_node_utilization('fake1',
|
||||
'fake2',
|
||||
'fake_node')
|
||||
|
||||
self.assertAlmostEqual(100.0, result)
|
||||
mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
|
||||
self.assertFalse(mock_get_average_cpu_utilization.called)
|
||||
|
||||
@ddt.data({'cpu': -0.01, 'cp_time': 10000, 'poll_time': 0},
|
||||
{'cpu': 1.01, 'cp_time': 0, 'poll_time': 1000},
|
||||
{'cpu': 0.50, 'cp_time': 0, 'poll_time': 0})
|
||||
@ddt.unpack
|
||||
def test_get_node_utilization_zero_time(self, cpu, cp_time, poll_time):
|
||||
|
||||
mock_get_kahuna_utilization = self.mock_object(
|
||||
self.perf_library, '_get_kahuna_utilization',
|
||||
mock.Mock(return_value=59.0))
|
||||
mock_get_average_cpu_utilization = self.mock_object(
|
||||
self.perf_library, '_get_average_cpu_utilization',
|
||||
mock.Mock(return_value=cpu))
|
||||
mock_get_total_consistency_point_time = self.mock_object(
|
||||
self.perf_library, '_get_total_consistency_point_time',
|
||||
mock.Mock(return_value=cp_time))
|
||||
mock_get_consistency_point_p2_flush_time = self.mock_object(
|
||||
self.perf_library, '_get_consistency_point_p2_flush_time',
|
||||
mock.Mock(return_value=cp_time))
|
||||
mock_get_total_time = self.mock_object(
|
||||
self.perf_library, '_get_total_time',
|
||||
mock.Mock(return_value=poll_time))
|
||||
mock_get_adjusted_consistency_point_time = self.mock_object(
|
||||
self.perf_library, '_get_adjusted_consistency_point_time')
|
||||
|
||||
result = self.perf_library._get_node_utilization('fake1',
|
||||
'fake2',
|
||||
'fake_node')
|
||||
|
||||
expected = max(min(100.0, 100.0 * cpu), 0)
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
|
||||
mock_get_average_cpu_utilization.assert_called_once_with('fake1',
|
||||
'fake2')
|
||||
mock_get_total_consistency_point_time.assert_called_once_with('fake1',
|
||||
'fake2')
|
||||
mock_get_consistency_point_p2_flush_time.assert_called_once_with(
|
||||
'fake1', 'fake2')
|
||||
mock_get_total_time.assert_called_once_with('fake1',
|
||||
'fake2',
|
||||
'total_cp_msecs')
|
||||
self.assertFalse(mock_get_adjusted_consistency_point_time.called)
|
||||
|
||||
@ddt.data({'cpu': 0.75, 'adjusted_cp_time': 8000, 'expected': 80},
|
||||
{'cpu': 0.80, 'adjusted_cp_time': 7500, 'expected': 80},
|
||||
{'cpu': 0.50, 'adjusted_cp_time': 11000, 'expected': 100})
|
||||
@ddt.unpack
|
||||
def test_get_node_utilization(self, cpu, adjusted_cp_time, expected):
|
||||
|
||||
mock_get_kahuna_utilization = self.mock_object(
|
||||
self.perf_library, '_get_kahuna_utilization',
|
||||
mock.Mock(return_value=59.0))
|
||||
mock_get_average_cpu_utilization = self.mock_object(
|
||||
self.perf_library, '_get_average_cpu_utilization',
|
||||
mock.Mock(return_value=cpu))
|
||||
mock_get_total_consistency_point_time = self.mock_object(
|
||||
self.perf_library, '_get_total_consistency_point_time',
|
||||
mock.Mock(return_value=90.0))
|
||||
mock_get_consistency_point_p2_flush_time = self.mock_object(
|
||||
self.perf_library, '_get_consistency_point_p2_flush_time',
|
||||
mock.Mock(return_value=50.0))
|
||||
mock_get_total_time = self.mock_object(
|
||||
self.perf_library, '_get_total_time',
|
||||
mock.Mock(return_value=10000))
|
||||
mock_get_adjusted_consistency_point_time = self.mock_object(
|
||||
self.perf_library, '_get_adjusted_consistency_point_time',
|
||||
mock.Mock(return_value=adjusted_cp_time))
|
||||
|
||||
result = self.perf_library._get_node_utilization('fake1',
|
||||
'fake2',
|
||||
'fake_node')
|
||||
|
||||
self.assertEqual(expected, result)
|
||||
|
||||
mock_get_kahuna_utilization.assert_called_once_with('fake1', 'fake2')
|
||||
mock_get_average_cpu_utilization.assert_called_once_with('fake1',
|
||||
'fake2')
|
||||
mock_get_total_consistency_point_time.assert_called_once_with('fake1',
|
||||
'fake2')
|
||||
mock_get_consistency_point_p2_flush_time.assert_called_once_with(
|
||||
'fake1', 'fake2')
|
||||
mock_get_total_time.assert_called_once_with('fake1',
|
||||
'fake2',
|
||||
'total_cp_msecs')
|
||||
mock_get_adjusted_consistency_point_time.assert_called_once_with(
|
||||
90.0, 50.0)
|
||||
|
||||
def test_get_node_utilization_calculation_error(self):
|
||||
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_kahuna_utilization',
|
||||
mock.Mock(return_value=59.0))
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_average_cpu_utilization',
|
||||
mock.Mock(return_value=25.0))
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_total_consistency_point_time',
|
||||
mock.Mock(return_value=90.0))
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_consistency_point_p2_flush_time',
|
||||
mock.Mock(return_value=50.0))
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_total_time',
|
||||
mock.Mock(return_value=10000))
|
||||
self.mock_object(self.perf_library,
|
||||
'_get_adjusted_consistency_point_time',
|
||||
mock.Mock(side_effect=ZeroDivisionError))
|
||||
|
||||
result = self.perf_library._get_node_utilization('fake1',
|
||||
'fake2',
|
||||
'fake_node')
|
||||
|
||||
self.assertEqual(performance.DEFAULT_UTILIZATION, result)
|
||||
(self.perf_library._get_adjusted_consistency_point_time.
|
||||
assert_called_once_with(mock.ANY, mock.ANY))
|
||||
|
||||
def test_get_kahuna_utilization(self):
|
||||
|
||||
mock_get_performance_counter = self.mock_object(
|
||||
self.perf_library,
|
||||
'_get_performance_counter_average_multi_instance',
|
||||
mock.Mock(return_value=[0.2, 0.3]))
|
||||
|
||||
result = self.perf_library._get_kahuna_utilization('fake_t1',
|
||||
'fake_t2')
|
||||
|
||||
self.assertAlmostEqual(50.0, result)
|
||||
mock_get_performance_counter.assert_called_once_with(
|
||||
'fake_t1', 'fake_t2', 'domain_busy:kahuna',
|
||||
'processor_elapsed_time')
|
||||
|
||||
def test_get_average_cpu_utilization(self):
|
||||
|
||||
mock_get_performance_counter_average = self.mock_object(
|
||||
self.perf_library, '_get_performance_counter_average',
|
||||
mock.Mock(return_value=0.45))
|
||||
|
||||
result = self.perf_library._get_average_cpu_utilization('fake_t1',
|
||||
'fake_t2')
|
||||
|
||||
self.assertAlmostEqual(0.45, result)
|
||||
mock_get_performance_counter_average.assert_called_once_with(
|
||||
'fake_t1', 'fake_t2', 'avg_processor_busy', 'cpu_elapsed_time1')
|
||||
|
||||
def test_get_total_consistency_point_time(self):
    """Total CP time is the delta of the total_cp_msecs counter."""
    mock_counter_delta = self.mock_object(
        self.perf_library, '_get_performance_counter_delta',
        mock.Mock(return_value=500))

    result = self.perf_library._get_total_consistency_point_time(
        'fake_t1', 'fake_t2')

    self.assertEqual(500, result)
    mock_counter_delta.assert_called_once_with(
        'fake_t1', 'fake_t2', 'total_cp_msecs')
|
||||
|
||||
def test_get_consistency_point_p2_flush_time(self):
    """P2-flush time is the delta of the cp_phase_times:p2_flush counter."""
    mock_counter_delta = self.mock_object(
        self.perf_library, '_get_performance_counter_delta',
        mock.Mock(return_value=500))

    result = self.perf_library._get_consistency_point_p2_flush_time(
        'fake_t1', 'fake_t2')

    self.assertEqual(500, result)
    mock_counter_delta.assert_called_once_with(
        'fake_t1', 'fake_t2', 'cp_phase_times:p2_flush')
|
||||
|
||||
def test_get_total_time(self):
    """Elapsed time between two counter sets is reported in milliseconds."""
    mock_find_timestamp = self.mock_object(
        self.perf_library, '_find_performance_counter_timestamp',
        mock.Mock(side_effect=[100, 105]))

    result = self.perf_library._get_total_time(
        'fake_t1', 'fake_t2', 'fake_counter')

    # (105 - 100) seconds -> 5000 ms
    self.assertEqual(5000, result)
    mock_find_timestamp.assert_has_calls([
        mock.call('fake_t1', 'fake_counter'),
        mock.call('fake_t2', 'fake_counter'),
    ])
|
||||
|
||||
def test_get_adjusted_consistency_point_time(self):
    """Spot-check the CP-time adjustment for a known input pair."""
    adjusted = self.perf_library._get_adjusted_consistency_point_time(
        500, 200)

    # total_cp_time=500, p2_flush_time=200 -> 360.0
    self.assertAlmostEqual(360.0, adjusted)
|
||||
|
||||
def test_get_performance_counter_delta(self):
    """Delta of total_cp_msecs between the two fake counter sets."""
    delta = self.perf_library._get_performance_counter_delta(
        fake.COUNTERS_T1, fake.COUNTERS_T2, 'total_cp_msecs')

    # 33311106 - 33309624 == 1482
    self.assertEqual(1482, delta)
|
||||
|
||||
def test_get_performance_counter_average(self):
    """Average of a counter relative to its base, for a single instance."""
    average = self.perf_library._get_performance_counter_average(
        fake.COUNTERS_T1,
        fake.COUNTERS_T2,
        'domain_busy:kahuna',
        'processor_elapsed_time',
        'processor0')

    self.assertAlmostEqual(0.00281954360981, average)
|
||||
|
||||
def test_get_performance_counter_average_multi_instance(self):
    """Each processor instance yields its own busy:elapsed ratio."""
    result = (
        self.perf_library._get_performance_counter_average_multi_instance(
            fake.COUNTERS_T1, fake.COUNTERS_T2, 'domain_busy:kahuna',
            'processor_elapsed_time'))

    expected = [0.002819543609809441, 0.0033421611147606135]

    # NOTE: assertAlmostEqual does not compare sequences element-wise; on
    # two lists it only passes when they compare exactly equal, defeating
    # the approximate check.  Compare each element explicitly instead.
    self.assertEqual(len(expected), len(result))
    for expected_value, actual_value in zip(expected, result):
        self.assertAlmostEqual(expected_value, actual_value)
|
||||
|
||||
def test_find_performance_counter_value(self):
    """Look up one instance's counter value by counter name."""
    value = self.perf_library._find_performance_counter_value(
        fake.COUNTERS_T1, 'domain_busy:kahuna',
        instance_name='processor0')

    # Counter values are reported as strings by the API.
    self.assertEqual('2712467226', value)
|
||||
|
||||
def test_find_performance_counter_value_not_found(self):
    """An unknown counter name raises NotFound."""
    with self.assertRaises(exception.NotFound):
        self.perf_library._find_performance_counter_value(
            fake.COUNTERS_T1, 'invalid', instance_name='processor0')
|
||||
|
||||
def test_find_performance_counter_timestamp(self):
    """Look up the timestamp recorded alongside a counter."""
    timestamp = self.perf_library._find_performance_counter_timestamp(
        fake.COUNTERS_T1, 'domain_busy')

    # Timestamps are reported as epoch-second strings.
    self.assertEqual('1453573777', timestamp)
|
||||
|
||||
def test_find_performance_counter_timestamp_not_found(self):
    """An unknown counter name raises NotFound when seeking a timestamp."""
    with self.assertRaises(exception.NotFound):
        self.perf_library._find_performance_counter_timestamp(
            fake.COUNTERS_T1, 'invalid', instance_name='processor0')
|
||||
|
||||
def test_expand_performance_array(self):
    """An array counter is expanded in place into one key per label."""
    counter_info = {
        'labels': ['idle', 'kahuna', 'storage', 'exempt'],
        'name': 'domain_busy',
    }
    self.zapi_client.get_performance_counter_info = mock.Mock(
        return_value=counter_info)

    base_counter = {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'domain_busy': '969142314286,2567571412,2131582146,5383861579',
        'instance-name': 'processor0',
        'timestamp': '1453512244',
    }
    counter = dict(base_counter)

    self.perf_library._expand_performance_array(
        'wafl', 'domain_busy', counter)

    # Expected: the original keys plus 'domain_busy:<label>' entries, one
    # per label, each holding the positional value from the CSV string.
    expected = dict(base_counter)
    values = base_counter['domain_busy'].split(',')
    for label, value in zip(counter_info['labels'], values):
        expected['domain_busy:' + label] = value

    self.assertEqual(expected, counter)
|
||||
|
||||
def test_get_base_counter_name(self):
    """The base counter is read from the counter's metadata."""
    counter_info = {
        'base-counter': 'cpu_elapsed_time',
        'labels': [],
        'name': 'avg_processor_busy',
    }
    self.zapi_client.get_performance_counter_info = mock.Mock(
        return_value=counter_info)

    base_counter = self.perf_library._get_base_counter_name(
        'system:constituent', 'avg_processor_busy')

    self.assertEqual('cpu_elapsed_time', base_counter)
|
||||
|
||||
def test_get_node_utilization_counters(self):
    """Node counters are the concatenated system/wafl/processor lists."""
    mock_system = self.mock_object(
        self.perf_library, '_get_node_utilization_system_counters',
        mock.Mock(return_value=['A', 'B', 'C']))
    mock_wafl = self.mock_object(
        self.perf_library, '_get_node_utilization_wafl_counters',
        mock.Mock(return_value=['D', 'E', 'F']))
    mock_processor = self.mock_object(
        self.perf_library, '_get_node_utilization_processor_counters',
        mock.Mock(return_value=['G', 'H', 'I']))

    result = self.perf_library._get_node_utilization_counters(fake.NODE)

    # system + wafl + processor, in that order
    self.assertEqual(list('ABCDEFGHI'), result)

    mock_system.assert_called_once_with(fake.NODE)
    mock_wafl.assert_called_once_with(fake.NODE)
    mock_processor.assert_called_once_with(fake.NODE)
|
||||
|
||||
def test_get_node_utilization_counters_api_error(self):
    """An API failure while gathering counters yields None, not a raise."""
    self.mock_object(
        self.perf_library, '_get_node_utilization_system_counters',
        mock.Mock(side_effect=netapp_api.NaApiError))

    result = self.perf_library._get_node_utilization_counters(fake.NODE)

    self.assertIsNone(result)
|
||||
|
||||
def test_get_node_utilization_system_counters(self):
    """System counters are fetched for the node's 'system' instances."""
    mock_instance_uuids = self.mock_object(
        self.zapi_client, 'get_performance_instance_uuids',
        mock.Mock(return_value=fake.SYSTEM_INSTANCE_UUIDS))
    mock_counters = self.mock_object(
        self.zapi_client, 'get_performance_counters',
        mock.Mock(return_value=fake.SYSTEM_COUNTERS))

    result = self.perf_library._get_node_utilization_system_counters(
        fake.NODE)

    self.assertEqual(fake.SYSTEM_COUNTERS, result)

    mock_instance_uuids.assert_called_once_with('system', fake.NODE)
    mock_counters.assert_called_once_with(
        'system', fake.SYSTEM_INSTANCE_UUIDS,
        ['avg_processor_busy', 'cpu_elapsed_time1', 'cpu_elapsed_time'])
|
||||
|
||||
def test_get_node_utilization_wafl_counters(self):
    """WAFL counters are gathered and array counters expanded by label."""
    mock_instance_uuids = self.mock_object(
        self.zapi_client, 'get_performance_instance_uuids',
        mock.Mock(return_value=fake.WAFL_INSTANCE_UUIDS))
    mock_counters = self.mock_object(
        self.zapi_client, 'get_performance_counters',
        mock.Mock(return_value=fake.WAFL_COUNTERS))
    mock_counter_info = self.mock_object(
        self.zapi_client, 'get_performance_counter_info',
        mock.Mock(return_value=fake.WAFL_CP_PHASE_TIMES_COUNTER_INFO))

    result = self.perf_library._get_node_utilization_wafl_counters(
        fake.NODE)

    self.assertEqual(fake.EXPANDED_WAFL_COUNTERS, result)

    mock_instance_uuids.assert_called_once_with('wafl', fake.NODE)
    mock_counters.assert_called_once_with(
        'wafl', fake.WAFL_INSTANCE_UUIDS,
        ['total_cp_msecs', 'cp_phase_times'])
    mock_counter_info.assert_called_once_with('wafl', 'cp_phase_times')
|
||||
|
||||
def test_get_node_utilization_processor_counters(self):
    """Processor counters are gathered with domain_busy expanded."""
    mock_instance_uuids = self.mock_object(
        self.zapi_client, 'get_performance_instance_uuids',
        mock.Mock(return_value=fake.PROCESSOR_INSTANCE_UUIDS))
    mock_counters = self.mock_object(
        self.zapi_client, 'get_performance_counters',
        mock.Mock(return_value=fake.PROCESSOR_COUNTERS))
    self.mock_object(
        self.zapi_client, 'get_performance_counter_info',
        mock.Mock(return_value=fake.PROCESSOR_DOMAIN_BUSY_COUNTER_INFO))

    result = self.perf_library._get_node_utilization_processor_counters(
        fake.NODE)

    self.assertEqual(fake.EXPANDED_PROCESSOR_COUNTERS, result)

    mock_instance_uuids.assert_called_once_with('processor', fake.NODE)
    mock_counters.assert_called_once_with(
        'processor', fake.PROCESSOR_INSTANCE_UUIDS,
        ['domain_busy', 'processor_elapsed_time'])
|
@ -513,11 +513,22 @@ SSC_INFO = {
|
||||
'netapp_raid_type': 'raid4',
|
||||
'netapp_disk_type': 'FCAL',
|
||||
'netapp_hybrid_aggregate': 'false',
|
||||
'netapp_aggregate': AGGREGATES[0],
|
||||
},
|
||||
AGGREGATES[1]: {
|
||||
'netapp_raid_type': 'raid_dp',
|
||||
'netapp_disk_type': ['SATA', 'SSD'],
|
||||
'netapp_hybrid_aggregate': 'true',
|
||||
'netapp_aggregate': AGGREGATES[1],
|
||||
}
|
||||
}
|
||||
|
||||
# With vserver-scoped credentials only the aggregate name is known; none of
# the physical SSC attributes (raid type, disk type, ...) are visible.
SSC_INFO_VSERVER_CREDS = {
    aggregate: {'netapp_aggregate': aggregate}
    for aggregate in (AGGREGATES[0], AGGREGATES[1])
}
|
||||
|
||||
@ -535,6 +546,9 @@ POOLS = [
|
||||
'netapp_raid_type': 'raid4',
|
||||
'netapp_disk_type': 'FCAL',
|
||||
'netapp_hybrid_aggregate': 'false',
|
||||
'utilization': 30.0,
|
||||
'filter_function': 'filter',
|
||||
'goodness_function': 'goodness',
|
||||
},
|
||||
{'pool_name': AGGREGATES[1],
|
||||
'netapp_aggregate': AGGREGATES[1],
|
||||
@ -549,6 +563,9 @@ POOLS = [
|
||||
'netapp_raid_type': 'raid_dp',
|
||||
'netapp_disk_type': ['SATA', 'SSD'],
|
||||
'netapp_hybrid_aggregate': 'true',
|
||||
'utilization': 42.0,
|
||||
'filter_function': 'filter',
|
||||
'goodness_function': 'goodness',
|
||||
},
|
||||
]
|
||||
|
||||
@ -563,6 +580,9 @@ POOLS_VSERVER_CREDS = [
|
||||
'dedupe': [True, False],
|
||||
'compression': [True, False],
|
||||
'thin_provisioning': [True, False],
|
||||
'utilization': 50.0,
|
||||
'filter_function': None,
|
||||
'goodness_function': None,
|
||||
},
|
||||
{'pool_name': AGGREGATES[1],
|
||||
'netapp_aggregate': AGGREGATES[1],
|
||||
@ -574,6 +594,9 @@ POOLS_VSERVER_CREDS = [
|
||||
'dedupe': [True, False],
|
||||
'compression': [True, False],
|
||||
'thin_provisioning': [True, False],
|
||||
'utilization': 50.0,
|
||||
'filter_function': None,
|
||||
'goodness_function': None,
|
||||
},
|
||||
]
|
||||
|
||||
@ -592,6 +615,538 @@ SSC_AGGREGATES = [
|
||||
|
||||
SSC_DISK_TYPES = ['FCAL', ['SATA', 'SSD']]
|
||||
|
||||
# Name of the cluster node to which the fake performance counters below belong.
NODE = 'cluster1-01'
|
||||
|
||||
COUNTERS_T1 = [
|
||||
{
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:system',
|
||||
'avg_processor_busy': '29078861388',
|
||||
'instance-name': 'system',
|
||||
'timestamp': '1453573776',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:system',
|
||||
'cpu_elapsed_time': '1063283283681',
|
||||
'instance-name': 'system',
|
||||
'timestamp': '1453573776',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:system',
|
||||
'cpu_elapsed_time1': '1063283283681',
|
||||
'instance-name': 'system',
|
||||
'timestamp': '1453573776',
|
||||
}, {
|
||||
'cp_phase_times:p2a_snap': '714',
|
||||
'cp_phase_times:p4_finish': '14897',
|
||||
'cp_phase_times:setup': '581',
|
||||
'cp_phase_times:p2a_dlog1': '6019',
|
||||
'cp_phase_times:p2a_dlog2': '2328',
|
||||
'cp_phase_times:p2v_cont': '2479',
|
||||
'cp_phase_times:p2v_volinfo': '1138',
|
||||
'cp_phase_times:p2v_bm': '3484',
|
||||
'cp_phase_times:p2v_fsinfo': '2031',
|
||||
'cp_phase_times:p2a_inofile': '356',
|
||||
'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,'
|
||||
'427,1058,354,3484,5135,1460,1138,2479,356,1373'
|
||||
',6019,9,2328,2257,229,493,1275,0,6059,714,530215,'
|
||||
'21603833,0,0,3286,11075940,22001,14897,36',
|
||||
'cp_phase_times:p2v_dlog2': '377',
|
||||
'instance-name': 'wafl',
|
||||
'cp_phase_times:p3_wait': '0',
|
||||
'cp_phase_times:p2a_bm': '6059',
|
||||
'cp_phase_times:p1_quota': '498',
|
||||
'cp_phase_times:p2v_inofile': '839',
|
||||
'cp_phase_times:p2a_refcount': '493',
|
||||
'cp_phase_times:p2a_fsinfo': '2257',
|
||||
'cp_phase_times:p2a_hyabc': '0',
|
||||
'cp_phase_times:p2a_volinfo': '530215',
|
||||
'cp_phase_times:pre_p0': '5007',
|
||||
'cp_phase_times:p2a_hya': '9',
|
||||
'cp_phase_times:p0_snap_del': '1840',
|
||||
'cp_phase_times:p2a_ino': '1373',
|
||||
'cp_phase_times:p2v_df_scores_sub': '354',
|
||||
'cp_phase_times:p2v_ino_pub': '799',
|
||||
'cp_phase_times:p2a_ipu_bitmap_grow': '229',
|
||||
'cp_phase_times:p2v_refcount': '427',
|
||||
'timestamp': '1453573776',
|
||||
'cp_phase_times:p2v_dlog1': '0',
|
||||
'cp_phase_times:p2_finish': '0',
|
||||
'cp_phase_times:p1_clean': '9832',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:wafl',
|
||||
'cp_phase_times:p3a_volinfo': '11075940',
|
||||
'cp_phase_times:p2a_topaa': '1275',
|
||||
'cp_phase_times:p2_flush': '21603833',
|
||||
'cp_phase_times:p2v_df_scores': '1460',
|
||||
'cp_phase_times:ipu_disk_add': '0',
|
||||
'cp_phase_times:p2v_snap': '5135',
|
||||
'cp_phase_times:p5_finish': '36',
|
||||
'cp_phase_times:p2v_ino_pri': '1336',
|
||||
'cp_phase_times:p3v_volinfo': '3286',
|
||||
'cp_phase_times:p2v_topaa': '1058',
|
||||
'cp_phase_times:p3_finish': '22001',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:wafl',
|
||||
'total_cp_msecs': '33309624',
|
||||
'instance-name': 'wafl',
|
||||
'timestamp': '1453573776',
|
||||
}, {
|
||||
'domain_busy:kahuna': '2712467226',
|
||||
'timestamp': '1453573777',
|
||||
'domain_busy:cifs': '434036',
|
||||
'domain_busy:raid_exempt': '28',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor0',
|
||||
'domain_busy:target': '6460782',
|
||||
'domain_busy:nwk_exempt': '20',
|
||||
'domain_busy:raid': '722094140',
|
||||
'domain_busy:storage': '2253156562',
|
||||
'instance-name': 'processor0',
|
||||
'domain_busy:cluster': '34',
|
||||
'domain_busy:wafl_xcleaner': '51275254',
|
||||
'domain_busy:wafl_exempt': '1243553699',
|
||||
'domain_busy:protocol': '54',
|
||||
'domain_busy': '1028851855595,2712467226,2253156562,5688808118,'
|
||||
'722094140,28,6460782,59,434036,1243553699,51275254,'
|
||||
'61237441,34,54,11,20,5254181873,13656398235,452215',
|
||||
'domain_busy:nwk_legacy': '5254181873',
|
||||
'domain_busy:dnscache': '59',
|
||||
'domain_busy:exempt': '5688808118',
|
||||
'domain_busy:hostos': '13656398235',
|
||||
'domain_busy:sm_exempt': '61237441',
|
||||
'domain_busy:nwk_exclusive': '11',
|
||||
'domain_busy:idle': '1028851855595',
|
||||
'domain_busy:ssan_exempt': '452215',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor0',
|
||||
'processor_elapsed_time': '1063283843318',
|
||||
'instance-name': 'processor0',
|
||||
'timestamp': '1453573777',
|
||||
}, {
|
||||
'domain_busy:kahuna': '1978024846',
|
||||
'timestamp': '1453573777',
|
||||
'domain_busy:cifs': '318584',
|
||||
'domain_busy:raid_exempt': '0',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor1',
|
||||
'domain_busy:target': '3330956',
|
||||
'domain_busy:nwk_exempt': '0',
|
||||
'domain_busy:raid': '722235930',
|
||||
'domain_busy:storage': '1498890708',
|
||||
'instance-name': 'processor1',
|
||||
'domain_busy:cluster': '0',
|
||||
'domain_busy:wafl_xcleaner': '50122685',
|
||||
'domain_busy:wafl_exempt': '1265921369',
|
||||
'domain_busy:protocol': '0',
|
||||
'domain_busy': '1039557880852,1978024846,1498890708,3734060289,'
|
||||
'722235930,0,3330956,0,318584,1265921369,50122685,'
|
||||
'36417362,0,0,0,0,2815252976,10274810484,393451',
|
||||
'domain_busy:nwk_legacy': '2815252976',
|
||||
'domain_busy:dnscache': '0',
|
||||
'domain_busy:exempt': '3734060289',
|
||||
'domain_busy:hostos': '10274810484',
|
||||
'domain_busy:sm_exempt': '36417362',
|
||||
'domain_busy:nwk_exclusive': '0',
|
||||
'domain_busy:idle': '1039557880852',
|
||||
'domain_busy:ssan_exempt': '393451',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor1',
|
||||
'processor_elapsed_time': '1063283843321',
|
||||
'instance-name': 'processor1',
|
||||
'timestamp': '1453573777',
|
||||
}
|
||||
]
|
||||
|
||||
COUNTERS_T2 = [
|
||||
{
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:system',
|
||||
'avg_processor_busy': '29081228905',
|
||||
'instance-name': 'system',
|
||||
'timestamp': '1453573834',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:system',
|
||||
'cpu_elapsed_time': '1063340792148',
|
||||
'instance-name': 'system',
|
||||
'timestamp': '1453573834',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:system',
|
||||
'cpu_elapsed_time1': '1063340792148',
|
||||
'instance-name': 'system',
|
||||
'timestamp': '1453573834',
|
||||
}, {
|
||||
'cp_phase_times:p2a_snap': '714',
|
||||
'cp_phase_times:p4_finish': '14897',
|
||||
'cp_phase_times:setup': '581',
|
||||
'cp_phase_times:p2a_dlog1': '6019',
|
||||
'cp_phase_times:p2a_dlog2': '2328',
|
||||
'cp_phase_times:p2v_cont': '2479',
|
||||
'cp_phase_times:p2v_volinfo': '1138',
|
||||
'cp_phase_times:p2v_bm': '3484',
|
||||
'cp_phase_times:p2v_fsinfo': '2031',
|
||||
'cp_phase_times:p2a_inofile': '356',
|
||||
'cp_phase_times': '581,5007,1840,9832,498,0,839,799,1336,2031,0,377,'
|
||||
'427,1058,354,3484,5135,1460,1138,2479,356,1373,'
|
||||
'6019,9,2328,2257,229,493,1275,0,6059,714,530215,'
|
||||
'21604863,0,0,3286,11076392,22001,14897,36',
|
||||
'cp_phase_times:p2v_dlog2': '377',
|
||||
'instance-name': 'wafl',
|
||||
'cp_phase_times:p3_wait': '0',
|
||||
'cp_phase_times:p2a_bm': '6059',
|
||||
'cp_phase_times:p1_quota': '498',
|
||||
'cp_phase_times:p2v_inofile': '839',
|
||||
'cp_phase_times:p2a_refcount': '493',
|
||||
'cp_phase_times:p2a_fsinfo': '2257',
|
||||
'cp_phase_times:p2a_hyabc': '0',
|
||||
'cp_phase_times:p2a_volinfo': '530215',
|
||||
'cp_phase_times:pre_p0': '5007',
|
||||
'cp_phase_times:p2a_hya': '9',
|
||||
'cp_phase_times:p0_snap_del': '1840',
|
||||
'cp_phase_times:p2a_ino': '1373',
|
||||
'cp_phase_times:p2v_df_scores_sub': '354',
|
||||
'cp_phase_times:p2v_ino_pub': '799',
|
||||
'cp_phase_times:p2a_ipu_bitmap_grow': '229',
|
||||
'cp_phase_times:p2v_refcount': '427',
|
||||
'timestamp': '1453573834',
|
||||
'cp_phase_times:p2v_dlog1': '0',
|
||||
'cp_phase_times:p2_finish': '0',
|
||||
'cp_phase_times:p1_clean': '9832',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:wafl',
|
||||
'cp_phase_times:p3a_volinfo': '11076392',
|
||||
'cp_phase_times:p2a_topaa': '1275',
|
||||
'cp_phase_times:p2_flush': '21604863',
|
||||
'cp_phase_times:p2v_df_scores': '1460',
|
||||
'cp_phase_times:ipu_disk_add': '0',
|
||||
'cp_phase_times:p2v_snap': '5135',
|
||||
'cp_phase_times:p5_finish': '36',
|
||||
'cp_phase_times:p2v_ino_pri': '1336',
|
||||
'cp_phase_times:p3v_volinfo': '3286',
|
||||
'cp_phase_times:p2v_topaa': '1058',
|
||||
'cp_phase_times:p3_finish': '22001',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:wafl',
|
||||
'total_cp_msecs': '33311106',
|
||||
'instance-name': 'wafl',
|
||||
'timestamp': '1453573834',
|
||||
}, {
|
||||
'domain_busy:kahuna': '2712629374',
|
||||
'timestamp': '1453573834',
|
||||
'domain_busy:cifs': '434036',
|
||||
'domain_busy:raid_exempt': '28',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor0',
|
||||
'domain_busy:target': '6461082',
|
||||
'domain_busy:nwk_exempt': '20',
|
||||
'domain_busy:raid': '722136824',
|
||||
'domain_busy:storage': '2253260824',
|
||||
'instance-name': 'processor0',
|
||||
'domain_busy:cluster': '34',
|
||||
'domain_busy:wafl_xcleaner': '51277506',
|
||||
'domain_busy:wafl_exempt': '1243637154',
|
||||
'domain_busy:protocol': '54',
|
||||
'domain_busy': '1028906640232,2712629374,2253260824,5689093500,'
|
||||
'722136824,28,6461082,59,434036,1243637154,51277506,'
|
||||
'61240335,34,54,11,20,5254491236,13657992139,452215',
|
||||
'domain_busy:nwk_legacy': '5254491236',
|
||||
'domain_busy:dnscache': '59',
|
||||
'domain_busy:exempt': '5689093500',
|
||||
'domain_busy:hostos': '13657992139',
|
||||
'domain_busy:sm_exempt': '61240335',
|
||||
'domain_busy:nwk_exclusive': '11',
|
||||
'domain_busy:idle': '1028906640232',
|
||||
'domain_busy:ssan_exempt': '452215',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor0',
|
||||
'processor_elapsed_time': '1063341351916',
|
||||
'instance-name': 'processor0',
|
||||
'timestamp': '1453573834',
|
||||
}, {
|
||||
'domain_busy:kahuna': '1978217049',
|
||||
'timestamp': '1453573834',
|
||||
'domain_busy:cifs': '318584',
|
||||
'domain_busy:raid_exempt': '0',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor1',
|
||||
'domain_busy:target': '3331147',
|
||||
'domain_busy:nwk_exempt': '0',
|
||||
'domain_busy:raid': '722276805',
|
||||
'domain_busy:storage': '1498984059',
|
||||
'instance-name': 'processor1',
|
||||
'domain_busy:cluster': '0',
|
||||
'domain_busy:wafl_xcleaner': '50126176',
|
||||
'domain_busy:wafl_exempt': '1266039846',
|
||||
'domain_busy:protocol': '0',
|
||||
'domain_busy': '1039613222253,1978217049,1498984059,3734279672,'
|
||||
'722276805,0,3331147,0,318584,1266039846,50126176,'
|
||||
'36419297,0,0,0,0,2815435865,10276068104,393451',
|
||||
'domain_busy:nwk_legacy': '2815435865',
|
||||
'domain_busy:dnscache': '0',
|
||||
'domain_busy:exempt': '3734279672',
|
||||
'domain_busy:hostos': '10276068104',
|
||||
'domain_busy:sm_exempt': '36419297',
|
||||
'domain_busy:nwk_exclusive': '0',
|
||||
'domain_busy:idle': '1039613222253',
|
||||
'domain_busy:ssan_exempt': '393451',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor1',
|
||||
'processor_elapsed_time': '1063341351919',
|
||||
'instance-name': 'processor1',
|
||||
'timestamp': '1453573834',
|
||||
},
|
||||
]
|
||||
|
||||
# Perf instances of the 'system' object on NODE.  Instance UUIDs take the
# form '<node>:kernel:<instance-name>'.
SYSTEM_INSTANCE_UUIDS = ['cluster1-01:kernel:system']
SYSTEM_INSTANCE_NAMES = ['system']
|
||||
|
||||
# Raw counters returned for the 'system' perf object: one record per
# requested counter (avg_processor_busy plus both elapsed-time bases).
# All values are strings, as delivered by the API.
SYSTEM_COUNTERS = [
    {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'avg_processor_busy': '27877641199',
        'instance-name': 'system',
        'timestamp': '1453524928',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time': '1014438541279',
        'instance-name': 'system',
        'timestamp': '1453524928',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:system',
        'cpu_elapsed_time1': '1014438541279',
        'instance-name': 'system',
        'timestamp': '1453524928',
    },
]
|
||||
|
||||
|
||||
# Perf instances of the 'wafl' object on NODE.
WAFL_INSTANCE_UUIDS = ['cluster1-01:kernel:wafl']
WAFL_INSTANCE_NAMES = ['wafl']
|
||||
|
||||
# Raw counters returned for the 'wafl' perf object.  'cp_phase_times' is an
# array counter delivered as one comma-separated string; the meaning of each
# position is given by WAFL_CP_PHASE_TIMES_COUNTER_INFO['labels'].
WAFL_COUNTERS = [
    {
        'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,'
                          '418,1048,344,3344,4867,1397,1101,2380,356,1318,'
                          '5954,9,2236,2190,228,476,1221,0,5838,696,515588,'
                          '20542954,0,0,3122,10567367,20696,13982,36',
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'instance-name': 'wafl',
        'timestamp': '1453523339',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:wafl',
        'total_cp_msecs': '31721222',
        'instance-name': 'wafl',
        'timestamp': '1453523339',
    },
]
|
||||
|
||||
# Metadata for the 'cp_phase_times' array counter: each label names the
# value at the same position in the comma-separated counter string.
WAFL_CP_PHASE_TIMES_COUNTER_INFO = {
    'labels': [
        'SETUP', 'PRE_P0', 'P0_SNAP_DEL', 'P1_CLEAN', 'P1_QUOTA',
        'IPU_DISK_ADD', 'P2V_INOFILE', 'P2V_INO_PUB', 'P2V_INO_PRI',
        'P2V_FSINFO', 'P2V_DLOG1', 'P2V_DLOG2', 'P2V_REFCOUNT',
        'P2V_TOPAA', 'P2V_DF_SCORES_SUB', 'P2V_BM', 'P2V_SNAP',
        'P2V_DF_SCORES', 'P2V_VOLINFO', 'P2V_CONT', 'P2A_INOFILE',
        'P2A_INO', 'P2A_DLOG1', 'P2A_HYA', 'P2A_DLOG2', 'P2A_FSINFO',
        'P2A_IPU_BITMAP_GROW', 'P2A_REFCOUNT', 'P2A_TOPAA',
        'P2A_HYABC', 'P2A_BM', 'P2A_SNAP', 'P2A_VOLINFO', 'P2_FLUSH',
        'P2_FINISH', 'P3_WAIT', 'P3V_VOLINFO', 'P3A_VOLINFO',
        'P3_FINISH', 'P4_FINISH', 'P5_FINISH',
    ],
    'name': 'cp_phase_times',
}
|
||||
|
||||
EXPANDED_WAFL_COUNTERS = [
|
||||
{
|
||||
'cp_phase_times:p2a_snap': '696',
|
||||
'cp_phase_times:p4_finish': '13982',
|
||||
'cp_phase_times:setup': '563',
|
||||
'cp_phase_times:p2a_dlog1': '5954',
|
||||
'cp_phase_times:p2a_dlog2': '2236',
|
||||
'cp_phase_times:p2v_cont': '2380',
|
||||
'cp_phase_times:p2v_volinfo': '1101',
|
||||
'cp_phase_times:p2v_bm': '3344',
|
||||
'cp_phase_times:p2v_fsinfo': '1937',
|
||||
'cp_phase_times:p2a_inofile': '356',
|
||||
'cp_phase_times': '563,4844,1731,9676,469,0,821,763,1282,1937,0,359,'
|
||||
'418,1048,344,3344,4867,1397,1101,2380,356,1318,'
|
||||
'5954,9,2236,2190,228,476,1221,0,5838,696,515588,'
|
||||
'20542954,0,0,3122,10567367,20696,13982,36',
|
||||
'cp_phase_times:p2v_dlog2': '359',
|
||||
'instance-name': 'wafl',
|
||||
'cp_phase_times:p3_wait': '0',
|
||||
'cp_phase_times:p2a_bm': '5838',
|
||||
'cp_phase_times:p1_quota': '469',
|
||||
'cp_phase_times:p2v_inofile': '821',
|
||||
'cp_phase_times:p2a_refcount': '476',
|
||||
'cp_phase_times:p2a_fsinfo': '2190',
|
||||
'cp_phase_times:p2a_hyabc': '0',
|
||||
'cp_phase_times:p2a_volinfo': '515588',
|
||||
'cp_phase_times:pre_p0': '4844',
|
||||
'cp_phase_times:p2a_hya': '9',
|
||||
'cp_phase_times:p0_snap_del': '1731',
|
||||
'cp_phase_times:p2a_ino': '1318',
|
||||
'cp_phase_times:p2v_df_scores_sub': '344',
|
||||
'cp_phase_times:p2v_ino_pub': '763',
|
||||
'cp_phase_times:p2a_ipu_bitmap_grow': '228',
|
||||
'cp_phase_times:p2v_refcount': '418',
|
||||
'timestamp': '1453523339',
|
||||
'cp_phase_times:p2v_dlog1': '0',
|
||||
'cp_phase_times:p2_finish': '0',
|
||||
'cp_phase_times:p1_clean': '9676',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:wafl',
|
||||
'cp_phase_times:p3a_volinfo': '10567367',
|
||||
'cp_phase_times:p2a_topaa': '1221',
|
||||
'cp_phase_times:p2_flush': '20542954',
|
||||
'cp_phase_times:p2v_df_scores': '1397',
|
||||
'cp_phase_times:ipu_disk_add': '0',
|
||||
'cp_phase_times:p2v_snap': '4867',
|
||||
'cp_phase_times:p5_finish': '36',
|
||||
'cp_phase_times:p2v_ino_pri': '1282',
|
||||
'cp_phase_times:p3v_volinfo': '3122',
|
||||
'cp_phase_times:p2v_topaa': '1048',
|
||||
'cp_phase_times:p3_finish': '20696',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:wafl',
|
||||
'total_cp_msecs': '31721222',
|
||||
'instance-name': 'wafl',
|
||||
'timestamp': '1453523339',
|
||||
},
|
||||
]
|
||||
|
||||
# Perf instances of the 'processor' object on NODE, one per CPU.
PROCESSOR_INSTANCE_UUIDS = [
    'cluster1-01:kernel:processor0',
    'cluster1-01:kernel:processor1',
]
PROCESSOR_INSTANCE_NAMES = ['processor0', 'processor1']
|
||||
|
||||
# Raw counters returned for the 'processor' perf object: per CPU, the
# 'domain_busy' array counter (comma-separated; positions named by
# PROCESSOR_DOMAIN_BUSY_COUNTER_INFO['labels']) and its elapsed-time base.
PROCESSOR_COUNTERS = [
    {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'domain_busy': '980648687811,2597164534,2155400686,5443901498,'
                       '690280568,28,6180773,59,413895,1190100947,48989575,'
                       '58549809,34,54,11,20,5024141791,13136260754,452215',
        'instance-name': 'processor0',
        'timestamp': '1453524150',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor0',
        'processor_elapsed_time': '1013660714257',
        'instance-name': 'processor0',
        'timestamp': '1453524150',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'domain_busy': '990957980543,1891766637,1433411516,3572427934,'
                       '691372324,0,3188648,0,305947,1211235777,47954620,'
                       '34832715,0,0,0,0,2692084482,9834648927,393451',
        'instance-name': 'processor1',
        'timestamp': '1453524150',
    }, {
        'node-name': 'cluster1-01',
        'instance-uuid': 'cluster1-01:kernel:processor1',
        'processor_elapsed_time': '1013660714261',
        'instance-name': 'processor1',
        'timestamp': '1453524150',
    },
]
|
||||
|
||||
# Metadata for the 'domain_busy' array counter: each label names the value
# at the same position in the comma-separated counter string.
PROCESSOR_DOMAIN_BUSY_COUNTER_INFO = {
    'labels': [
        'idle', 'kahuna', 'storage', 'exempt', 'raid', 'raid_exempt',
        'target', 'dnscache', 'cifs', 'wafl_exempt', 'wafl_xcleaner',
        'sm_exempt', 'cluster', 'protocol', 'nwk_exclusive', 'nwk_exempt',
        'nwk_legacy', 'hostOS', 'ssan_exempt',
    ],
    'name': 'domain_busy',
}
|
||||
|
||||
EXPANDED_PROCESSOR_COUNTERS = [
|
||||
{
|
||||
'domain_busy:kahuna': '2597164534',
|
||||
'timestamp': '1453524150',
|
||||
'domain_busy:cifs': '413895',
|
||||
'domain_busy:raid_exempt': '28',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor0',
|
||||
'domain_busy:target': '6180773',
|
||||
'domain_busy:nwk_exempt': '20',
|
||||
'domain_busy:raid': '690280568',
|
||||
'domain_busy:storage': '2155400686',
|
||||
'instance-name': 'processor0',
|
||||
'domain_busy:cluster': '34',
|
||||
'domain_busy:wafl_xcleaner': '48989575',
|
||||
'domain_busy:wafl_exempt': '1190100947',
|
||||
'domain_busy:protocol': '54',
|
||||
'domain_busy': '980648687811,2597164534,2155400686,5443901498,'
|
||||
'690280568,28,6180773,59,413895,1190100947,48989575,'
|
||||
'58549809,34,54,11,20,5024141791,13136260754,452215',
|
||||
'domain_busy:nwk_legacy': '5024141791',
|
||||
'domain_busy:dnscache': '59',
|
||||
'domain_busy:exempt': '5443901498',
|
||||
'domain_busy:hostos': '13136260754',
|
||||
'domain_busy:sm_exempt': '58549809',
|
||||
'domain_busy:nwk_exclusive': '11',
|
||||
'domain_busy:idle': '980648687811',
|
||||
'domain_busy:ssan_exempt': '452215',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor0',
|
||||
'processor_elapsed_time': '1013660714257',
|
||||
'instance-name': 'processor0',
|
||||
'timestamp': '1453524150',
|
||||
}, {
|
||||
'domain_busy:kahuna': '1891766637',
|
||||
'timestamp': '1453524150',
|
||||
'domain_busy:cifs': '305947',
|
||||
'domain_busy:raid_exempt': '0',
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor1',
|
||||
'domain_busy:target': '3188648',
|
||||
'domain_busy:nwk_exempt': '0',
|
||||
'domain_busy:raid': '691372324',
|
||||
'domain_busy:storage': '1433411516',
|
||||
'instance-name': 'processor1',
|
||||
'domain_busy:cluster': '0',
|
||||
'domain_busy:wafl_xcleaner': '47954620',
|
||||
'domain_busy:wafl_exempt': '1211235777',
|
||||
'domain_busy:protocol': '0',
|
||||
'domain_busy': '990957980543,1891766637,1433411516,3572427934,'
|
||||
'691372324,0,3188648,0,305947,1211235777,47954620,'
|
||||
'34832715,0,0,0,0,2692084482,9834648927,393451',
|
||||
'domain_busy:nwk_legacy': '2692084482',
|
||||
'domain_busy:dnscache': '0',
|
||||
'domain_busy:exempt': '3572427934',
|
||||
'domain_busy:hostos': '9834648927',
|
||||
'domain_busy:sm_exempt': '34832715',
|
||||
'domain_busy:nwk_exclusive': '0',
|
||||
'domain_busy:idle': '990957980543',
|
||||
'domain_busy:ssan_exempt': '393451',
|
||||
}, {
|
||||
'node-name': 'cluster1-01',
|
||||
'instance-uuid': 'cluster1-01:kernel:processor1',
|
||||
'processor_elapsed_time': '1013660714261',
|
||||
'instance-name': 'processor1',
|
||||
'timestamp': '1453524150',
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
def get_config_cmode():
|
||||
config = na_fakes.create_configuration_cmode()
|
||||
|
@ -44,6 +44,17 @@ class NetAppDriverUtilsTestCase(test.TestCase):
|
||||
|
||||
na_utils.setup_tracing(None)
|
||||
|
||||
def test_round_down(self):
    """round_down rounds toward zero at the precision given as a string."""
    cases = [
        ((5.567,), 5.56),           # default precision behaves like '0.00'
        ((5.567, '0.00'), 5.56),
        ((5.567, '0.0'), 5.5),
        ((5.567, '0'), 5),
        ((0, '0.00'), 0),
        ((-5.567,), -5.56),
        ((-5.567, '0.00'), -5.56),
        ((-5.567, '0.0'), -5.5),
        ((-5.567, '0'), -5),
    ]
    for args, expected in cases:
        self.assertAlmostEqual(na_utils.round_down(*args), expected)
|
||||
|
||||
def test_setup_tracing(self):
|
||||
na_utils.setup_tracing(None)
|
||||
self.assertFalse(na_utils.TRACE_API)
|
||||
|
@ -0,0 +1,7 @@
|
||||
---
|
||||
features:
|
||||
- The NetApp cDOT drivers now include the cluster node utilization
|
||||
metrics for each pool reported to the manila scheduler. These
|
||||
values are designed to be included in the filter & goodness functions
|
||||
used by the scheduler, so the cDOT drivers now also report those
|
||||
functions to the scheduler for each pool.
|
Loading…
Reference in New Issue
Block a user