Merge "Fix issues with aggregate and granularity attributes"

This commit is contained in:
Zuul
2018-02-06 06:05:50 +00:00
committed by Gerrit Code Review
21 changed files with 204 additions and 534 deletions

View File

@@ -57,6 +57,12 @@ class DataSourceBase(object):
),
)
@abc.abstractmethod
def statistic_aggregation(self, resource_id=None, meter_name=None,
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
pass
@abc.abstractmethod
def list_metrics(self):
pass
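Every datasource helper in the files that follow now has to satisfy this one signature. Below is a minimal runnable sketch of the contract, written with Python 3's abc.ABC shorthand (the real module goes through six, as the strategy files further down show); FakeDataSource and its return values are invented for illustration only:

import abc


class DataSourceBase(abc.ABC):
    @abc.abstractmethod
    def statistic_aggregation(self, resource_id=None, meter_name=None,
                              period=300, granularity=300, dimensions=None,
                              aggregation='avg', group_by='*'):
        pass

    @abc.abstractmethod
    def list_metrics(self):
        pass


class FakeDataSource(DataSourceBase):
    """Invented stand-in that shows the contract, not a real backend."""

    def statistic_aggregation(self, resource_id=None, meter_name=None,
                              period=300, granularity=300, dimensions=None,
                              aggregation='avg', group_by='*'):
        return 42.0  # a real helper would query its telemetry service here

    def list_metrics(self):
        return {'cpu_util'}


assert FakeDataSource().statistic_aggregation(meter_name='cpu_util') == 42.0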

View File

@@ -145,24 +145,28 @@ class CeilometerHelper(base.DataSourceBase):
else:
return meters
def statistic_aggregation(self,
resource_id,
meter_name,
period,
aggregate='avg'):
def statistic_aggregation(self, resource_id=None, meter_name=None,
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
"""Representing a statistic aggregate by operators
:param resource_id: id of resource to list statistics for.
:param meter_name: Name of meter to list statistics for.
:param period: Period in seconds over which to group samples.
:param aggregate: Available aggregates are: count, cardinality,
min, max, sum, stddev, avg. Defaults to avg.
:param granularity: interval between metric data points, in seconds.
This param isn't used in the Ceilometer datasource.
:param dimensions: dimensions (dict). This param isn't used in the
Ceilometer datasource.
:param aggregation: Available aggregates are: count, cardinality,
min, max, sum, stddev, avg. Defaults to avg.
:param group_by: list of columns to group the metrics to be returned.
This param isn't used in the Ceilometer datasource.
:return: The latest aggregated value, or None if no data.
"""
end_time = datetime.datetime.utcnow()
if aggregate == 'mean':
aggregate = 'avg'
if aggregation == 'mean':
aggregation = 'avg'
start_time = end_time - datetime.timedelta(seconds=int(period))
query = self.build_query(
resource_id=resource_id, start_time=start_time, end_time=end_time)
@@ -171,11 +175,11 @@ class CeilometerHelper(base.DataSourceBase):
q=query,
period=period,
aggregates=[
{'func': aggregate}])
{'func': aggregation}])
item_value = None
if statistic:
item_value = statistic[-1]._info.get('aggregate').get(aggregate)
item_value = statistic[-1]._info.get('aggregate').get(aggregation)
return item_value
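The 'mean' alias handling and the query window above are self-contained; a runnable sketch of just that slice of the logic, with no Ceilometer client involved (the build_window name is invented, and the string period matches the "7300" used by the unit test further down):

import datetime


def build_window(period, aggregation):
    # Ceilometer only understands 'avg'; 'mean' is accepted for
    # symmetry with the Gnocchi helper and mapped over.
    if aggregation == 'mean':
        aggregation = 'avg'
    end_time = datetime.datetime.utcnow()
    start_time = end_time - datetime.timedelta(seconds=int(period))
    return start_time, end_time, aggregation


start, end, agg = build_window('7300', 'mean')
assert agg == 'avg' and (end - start).total_seconds() == 7300.0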
def get_last_sample_values(self, resource_id, meter_name, limit=1):
@@ -204,64 +208,64 @@ class CeilometerHelper(base.DataSourceBase):
granularity=None):
meter_name = self.METRIC_MAP.get('host_cpu_usage')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_instance_cpu_usage(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('instance_cpu_usage')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_host_memory_usage(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('host_memory_usage')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_instance_memory_usage(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('instance_ram_usage')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_instance_l3_cache_usage(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('instance_l3_cache_usage')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_instance_ram_allocated(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('instance_ram_allocated')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_instance_root_disk_allocated(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('instance_root_disk_size')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_host_outlet_temperature(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('host_outlet_temp')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_host_inlet_temperature(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('host_inlet_temp')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_host_airflow(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('host_airflow')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
def get_host_power(self, resource_id, period, aggregate,
granularity=None):
meter_name = self.METRIC_MAP.get('host_power')
return self.statistic_aggregation(resource_id, meter_name, period,
aggregate=aggregate)
granularity, aggregation=aggregate)
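All eleven wrappers above follow the same forwarding pattern: resolve the meter through METRIC_MAP, pass granularity positionally even though Ceilometer ignores it, and hand the aggregate through as the aggregation keyword. A self-contained sketch of the pattern (the METRIC_MAP entry and MiniHelper are invented; the real map lives on the helper class):

METRIC_MAP = {'host_cpu_usage': 'compute.node.cpu.percent'}  # invented subset


class MiniHelper(object):
    def statistic_aggregation(self, resource_id, meter_name, period,
                              granularity, aggregation='avg'):
        return resource_id, meter_name, period, granularity, aggregation

    def get_host_cpu_usage(self, resource_id, period, aggregate,
                           granularity=None):
        meter_name = METRIC_MAP.get('host_cpu_usage')
        return self.statistic_aggregation(resource_id, meter_name, period,
                                          granularity, aggregation=aggregate)


assert MiniHelper().get_host_cpu_usage('compute1', 600, 'mean') == (
    'compute1', 'compute.node.cpu.percent', 600, None, 'mean')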

View File

@@ -58,32 +58,35 @@ class GnocchiHelper(base.DataSourceBase):
return 'not available'
return 'available'
def _statistic_aggregation(self,
resource_id,
metric,
granularity,
start_time=None,
stop_time=None,
aggregation='mean'):
def list_metrics(self):
"""List the user's meters."""
try:
response = self.query_retry(f=self.gnocchi.metric.list)
except Exception:
return set()
else:
return set([metric['name'] for metric in response])
def statistic_aggregation(self, resource_id=None, meter_name=None,
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
"""Representing a statistic aggregate by operators
:param metric: metric name of which we want the statistics
:param resource_id: id of resource to list statistics for
:param start_time: Start datetime from which metrics will be used
:param stop_time: End datetime from which metrics will be used
:param granularity: frequency of marking metric point, in seconds
:param resource_id: id of resource to list statistics for.
:param meter_name: meter name of which we want the statistics.
:param period: Period in seconds over which to group samples.
:param granularity: interval between metric data points, in seconds.
:param dimensions: dimensions (dict). This param isn't used in the
Gnocchi datasource.
:param aggregation: Should be chosen in accordance with policy
aggregations
aggregations.
:param group_by: list of columns to group the metrics to be returned.
This param isn't used in the Gnocchi datasource.
:return: value of aggregated metric
"""
if start_time is not None and not isinstance(start_time, datetime):
raise exception.InvalidParameter(parameter='start_time',
parameter_type=datetime)
if stop_time is not None and not isinstance(stop_time, datetime):
raise exception.InvalidParameter(parameter='stop_time',
parameter_type=datetime)
stop_time = datetime.utcnow()
start_time = stop_time - timedelta(seconds=(int(period)))
if not common_utils.is_uuid_like(resource_id):
kwargs = dict(query={"=": {"original_resource_id": resource_id}},
@@ -97,7 +100,7 @@ class GnocchiHelper(base.DataSourceBase):
resource_id = resources[0]['id']
raw_kwargs = dict(
metric=metric,
metric=meter_name,
start=start_time,
stop=stop_time,
resource_id=resource_id,
@@ -115,27 +118,6 @@ class GnocchiHelper(base.DataSourceBase):
# measure has structure [time, granularity, value]
return statistics[-1][2]
def list_metrics(self):
"""List the user's meters."""
try:
response = self.query_retry(f=self.gnocchi.metric.list)
except Exception:
return set()
else:
return set([metric['name'] for metric in response])
def statistic_aggregation(self, resource_id, metric, period, granularity,
aggregation='mean'):
stop_time = datetime.utcnow()
start_time = stop_time - timedelta(seconds=(int(period)))
return self._statistic_aggregation(
resource_id=resource_id,
metric=metric,
granularity=granularity,
start_time=start_time,
stop_time=stop_time,
aggregation=aggregation)
def get_host_cpu_usage(self, resource_id, period, aggregate,
granularity=300):
meter_name = self.METRIC_MAP.get('host_cpu_usage')
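Gnocchi returns measures as [timestamp, granularity, value] triples, and the helper keeps only the newest one, which is why the method ends with statistics[-1][2]. A runnable illustration with invented sample data shaped like the fixture in the Gnocchi unit tests further down:

# Each measure is [timestamp, granularity, value]; sample data invented.
statistics = [
    ['2017-02-02T09:00:00+00:00', 360, 5.5],
    ['2017-02-02T09:06:00+00:00', 360, 6.1],
]
latest = statistics[-1][2]  # newest measure's value
assert latest == 6.1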

View File

@@ -21,6 +21,7 @@ import datetime
from monascaclient import exc
from watcher.common import clients
from watcher.common import exception
from watcher.datasource import base
@@ -97,41 +98,42 @@ class MonascaHelper(base.DataSourceBase):
return statistics
def statistic_aggregation(self,
meter_name,
dimensions,
start_time=None,
end_time=None,
period=None,
aggregate='avg',
group_by='*'):
def statistic_aggregation(self, resource_id=None, meter_name=None,
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
"""Representing a statistic aggregate by operators
:param meter_name: meter names of which we want the statistics
:param dimensions: dimensions (dict)
:param start_time: Start datetime from which metrics will be used
:param end_time: End datetime from which metrics will be used
:param resource_id: id of resource to list statistics for.
This param isn't used in Monasca datasource.
:param meter_name: meter names of which we want the statistics.
:param period: sampling period, in seconds. If no period is given,
only one aggregate statistic is returned. If given, a
faceted result is returned, divided into the given
periods. Periods with no data are ignored.
:param aggregate: Should be either 'avg', 'count', 'min' or 'max'
:param granularity: interval between metric data points, in seconds.
This param isn't used in the Monasca datasource.
:param dimensions: dimensions (dict).
:param aggregation: Should be either 'avg', 'count', 'min' or 'max'.
:param group_by: list of columns to group the metrics to be returned.
:return: The averaged value of the requested statistic, or None if no data was returned.
"""
start_timestamp, end_timestamp, period = self._format_time_params(
start_time, end_time, period
)
if aggregate == 'mean':
aggregate = 'avg'
if dimensions is None:
raise exception.UnsupportedDataSource(datasource='Monasca')
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(seconds=(int(period)))
if aggregation == 'mean':
aggregation = 'avg'
raw_kwargs = dict(
name=meter_name,
start_time=start_timestamp,
end_time=end_timestamp,
start_time=start_time.isoformat(),
end_time=stop_time.isoformat(),
dimensions=dimensions,
period=period,
statistics=aggregate,
statistics=aggregation,
group_by=group_by,
)
@@ -140,45 +142,36 @@ class MonascaHelper(base.DataSourceBase):
statistics = self.query_retry(
f=self.monasca.metrics.list_statistics, **kwargs)
return statistics
cpu_usage = None
for stat in statistics:
avg_col_idx = stat['columns'].index(aggregation)
values = [r[avg_col_idx] for r in stat['statistics']]
value = float(sum(values)) / len(values)
cpu_usage = value
return cpu_usage
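This reduction now lives inside the Monasca helper itself rather than being repeated in every get_* wrapper: it finds the column named after the aggregation and averages that column over the rows of each result, keeping the last. A runnable sketch against the same fixture shape the tests below use (the reduce_statistics name is invented):

def reduce_statistics(statistics, aggregation='avg'):
    # Mirrors the loop above: locate the aggregation column, then
    # average its values across the rows of each result row set.
    value = None
    for stat in statistics:
        col = stat['columns'].index(aggregation)
        values = [row[col] for row in stat['statistics']]
        value = float(sum(values)) / len(values)
    return value


stats = [{'columns': ['timestamp', 'avg'],
          'statistics': [['2016-07-29T12:45:00Z', 0.0],
                         ['2016-07-29T12:50:00Z', 0.9],
                         ['2016-07-29T12:55:00Z', 0.9]]}]
assert reduce_statistics(stats) == 0.6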
def get_host_cpu_usage(self, resource_id, period, aggregate,
granularity=None):
metric_name = self.METRIC_MAP.get('host_cpu_usage')
node_uuid = resource_id.split('_')[0]
statistics = self.statistic_aggregation(
return self.statistic_aggregation(
meter_name=metric_name,
dimensions=dict(hostname=node_uuid),
period=period,
aggregate=aggregate
aggregation=aggregate
)
cpu_usage = None
for stat in statistics:
avg_col_idx = stat['columns'].index('avg')
values = [r[avg_col_idx] for r in stat['statistics']]
value = float(sum(values)) / len(values)
cpu_usage = value
return cpu_usage
def get_instance_cpu_usage(self, resource_id, period, aggregate,
granularity=None):
metric_name = self.METRIC_MAP.get('instance_cpu_usage')
statistics = self.statistic_aggregation(
return self.statistic_aggregation(
meter_name=metric_name,
dimensions=dict(resource_id=resource_id),
period=period,
aggregate=aggregate
aggregation=aggregate
)
cpu_usage = None
for stat in statistics:
avg_col_idx = stat['columns'].index('avg')
values = [r[avg_col_idx] for r in stat['statistics']]
value = float(sum(values)) / len(values)
cpu_usage = value
return cpu_usage
def get_host_memory_usage(self, resource_id, period, aggregate,
granularity=None):

View File

@@ -28,15 +28,11 @@ Outlet (Exhaust Air) Temperature is one of the important thermal
telemetries to measure thermal/workload status of server.
"""
import datetime
from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.datasource import ceilometer as ceil
from watcher.datasource import gnocchi as gnoc
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -95,8 +91,6 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
:type osc: :py:class:`~.OpenStackClients` instance, optional
"""
super(OutletTempControl, self).__init__(config, osc)
self._ceilometer = None
self._gnocchi = None
@classmethod
def get_name(cls):
@@ -139,26 +133,6 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
},
}
@property
def ceilometer(self):
if self._ceilometer is None:
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
return self._ceilometer
@ceilometer.setter
def ceilometer(self, c):
self._ceilometer = c
@property
def gnocchi(self):
if self._gnocchi is None:
self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
return self._gnocchi
@gnocchi.setter
def gnocchi(self, g):
self._gnocchi = g
@property
def granularity(self):
return self.input_parameters.get('granularity', 300)
@@ -208,25 +182,13 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
resource_id = node.uuid
outlet_temp = None
if self.config.datasource == "ceilometer":
outlet_temp = self.ceilometer.statistic_aggregation(
resource_id=resource_id,
meter_name=metric_name,
period=self.period,
aggregate='avg'
)
elif self.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int(self.period))
outlet_temp = self.gnocchi.statistic_aggregation(
resource_id=resource_id,
metric=metric_name,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean'
)
outlet_temp = self.datasource_backend.statistic_aggregation(
resource_id=resource_id,
meter_name=metric_name,
period=self.period,
granularity=self.granularity,
)
# some hosts may not have outlet temp meters, remove from target
if outlet_temp is None:
LOG.warning("%s: no outlet temp data", resource_id)
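With the branches gone, the strategy no longer cares which backend it talks to: anything reachable through datasource_backend that answers the unified signature works. A stub-based sketch of the reduced call (StubBackend and its return value are invented; the meter name and 30-second period come from the strategy's own tests below):

class StubBackend(object):
    """Invented stand-in for whatever datasource_backend resolves to."""

    def statistic_aggregation(self, resource_id=None, meter_name=None,
                              period=300, granularity=300, dimensions=None,
                              aggregation='avg', group_by='*'):
        return 30.5  # pretend outlet temperature reading


outlet_temp = StubBackend().statistic_aggregation(
    resource_id='Node_0',
    meter_name='hardware.ipmi.node.outlet_temperature',
    period=30,
    granularity=300)
assert outlet_temp == 30.5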

View File

@@ -42,15 +42,11 @@ airflow is higher than the specified threshold.
- It assumes that live migrations are possible.
"""
import datetime
from oslo_config import cfg
from oslo_log import log
from watcher._i18n import _
from watcher.common import exception as wexc
from watcher.datasource import ceilometer as ceil
from watcher.datasource import gnocchi as gnoc
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -125,30 +121,8 @@ class UniformAirflow(base.BaseStrategy):
self.config.datasource]['host_inlet_temp']
self.meter_name_power = self.METRIC_NAMES[
self.config.datasource]['host_power']
self._ceilometer = None
self._gnocchi = None
self._period = self.PERIOD
@property
def ceilometer(self):
if self._ceilometer is None:
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
return self._ceilometer
@ceilometer.setter
def ceilometer(self, c):
self._ceilometer = c
@property
def gnocchi(self):
if self._gnocchi is None:
self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
return self._gnocchi
@gnocchi.setter
def gnocchi(self, g):
self._gnocchi = g
@classmethod
def get_name(cls):
return "uniform_airflow"
@@ -247,35 +221,16 @@ class UniformAirflow(base.BaseStrategy):
source_instances = self.compute_model.get_node_instances(
source_node)
if source_instances:
if self.config.datasource == "ceilometer":
inlet_t = self.ceilometer.statistic_aggregation(
resource_id=source_node.uuid,
meter_name=self.meter_name_inlet_t,
period=self._period,
aggregate='avg')
power = self.ceilometer.statistic_aggregation(
resource_id=source_node.uuid,
meter_name=self.meter_name_power,
period=self._period,
aggregate='avg')
elif self.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int(self._period))
inlet_t = self.gnocchi.statistic_aggregation(
resource_id=source_node.uuid,
metric=self.meter_name_inlet_t,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean')
power = self.gnocchi.statistic_aggregation(
resource_id=source_node.uuid,
metric=self.meter_name_power,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean')
inlet_t = self.datasource_backend.statistic_aggregation(
resource_id=source_node.uuid,
meter_name=self.meter_name_inlet_t,
period=self._period,
granularity=self.granularity)
power = self.datasource_backend.statistic_aggregation(
resource_id=source_node.uuid,
meter_name=self.meter_name_power,
period=self._period,
granularity=self.granularity)
if (power < self.threshold_power and
inlet_t < self.threshold_inlet_t):
# hardware issue, migrate all instances from this node
@@ -353,23 +308,11 @@ class UniformAirflow(base.BaseStrategy):
node = self.compute_model.get_node_by_uuid(
node_id)
resource_id = node.uuid
if self.config.datasource == "ceilometer":
airflow = self.ceilometer.statistic_aggregation(
resource_id=resource_id,
meter_name=self.meter_name_airflow,
period=self._period,
aggregate='avg')
elif self.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int(self._period))
airflow = self.gnocchi.statistic_aggregation(
resource_id=resource_id,
metric=self.meter_name_airflow,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean')
airflow = self.datasource_backend.statistic_aggregation(
resource_id=resource_id,
meter_name=self.meter_name_airflow,
period=self._period,
granularity=self.granularity)
# some hosts may not have airflow meter, remove from target
if airflow is None:
LOG.warning("%s: no airflow data", resource_id)

View File

@@ -52,7 +52,6 @@ correctly on all compute nodes within the cluster.
This strategy assumes it is possible to live migrate any VM from
an active compute node to any other active compute node.
"""
import datetime
from oslo_config import cfg
from oslo_log import log
@@ -60,8 +59,6 @@ import six
from watcher._i18n import _
from watcher.common import exception
from watcher.datasource import ceilometer as ceil
from watcher.datasource import gnocchi as gnoc
from watcher.decision_engine.model import element
from watcher.decision_engine.strategy.strategies import base
@@ -118,26 +115,6 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
def period(self):
return self.input_parameters.get('period', 3600)
@property
def ceilometer(self):
if self._ceilometer is None:
self.ceilometer = ceil.CeilometerHelper(osc=self.osc)
return self._ceilometer
@ceilometer.setter
def ceilometer(self, ceilometer):
self._ceilometer = ceilometer
@property
def gnocchi(self):
if self._gnocchi is None:
self.gnocchi = gnoc.GnocchiHelper(osc=self.osc)
return self._gnocchi
@gnocchi.setter
def gnocchi(self, gnocchi):
self._gnocchi = gnocchi
@property
def granularity(self):
return self.input_parameters.get('granularity', 300)
@@ -315,57 +292,28 @@ class VMWorkloadConsolidation(base.ServerConsolidationBaseStrategy):
disk_alloc_metric = self.METRIC_NAMES[
self.config.datasource]['disk_alloc_metric']
if self.config.datasource == "ceilometer":
instance_cpu_util = self.ceilometer.statistic_aggregation(
resource_id=instance.uuid, meter_name=cpu_util_metric,
period=self.period, aggregate='avg')
instance_ram_util = self.ceilometer.statistic_aggregation(
resource_id=instance.uuid, meter_name=ram_util_metric,
period=self.period, aggregate='avg')
if not instance_ram_util:
instance_ram_util = self.ceilometer.statistic_aggregation(
resource_id=instance.uuid, meter_name=ram_alloc_metric,
period=self.period, aggregate='avg')
instance_disk_util = self.ceilometer.statistic_aggregation(
resource_id=instance.uuid, meter_name=disk_alloc_metric,
period=self.period, aggregate='avg')
elif self.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int(self.period))
instance_cpu_util = self.gnocchi.statistic_aggregation(
instance_cpu_util = self.datasource_backend.statistic_aggregation(
resource_id=instance.uuid,
meter_name=cpu_util_metric,
period=self.period,
granularity=self.granularity)
instance_ram_util = self.datasource_backend.statistic_aggregation(
resource_id=instance.uuid,
meter_name=ram_util_metric,
period=self.period,
granularity=self.granularity)
if not instance_ram_util:
instance_ram_util = self.datasource_backend.statistic_aggregation(
resource_id=instance.uuid,
metric=cpu_util_metric,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean'
)
instance_ram_util = self.gnocchi.statistic_aggregation(
resource_id=instance.uuid,
metric=ram_util_metric,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean'
)
if not instance_ram_util:
instance_ram_util = self.gnocchi.statistic_aggregation(
resource_id=instance.uuid,
metric=ram_alloc_metric,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean'
)
instance_disk_util = self.gnocchi.statistic_aggregation(
resource_id=instance.uuid,
metric=disk_alloc_metric,
granularity=self.granularity,
start_time=start_time,
stop_time=stop_time,
aggregation='mean'
)
meter_name=ram_alloc_metric,
period=self.period,
granularity=self.granularity)
instance_disk_util = self.datasource_backend.statistic_aggregation(
resource_id=instance.uuid,
meter_name=disk_alloc_metric,
period=self.period,
granularity=self.granularity)
if instance_cpu_util:
total_cpu_utilization = (
instance.vcpus * (instance_cpu_util / 100.0))
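The RAM fallback above survives the refactor: if the utilization meter comes back empty, the allocation meter is queried through the same unified call. A tiny runnable sketch of just that fallback (the fetch helper, its meter keys, and the values are all invented):

def fetch(meter):
    # Invented stand-in for one statistic_aggregation call.
    samples = {'ram_util_meter': None, 'ram_alloc_meter': 512.0}
    return samples.get(meter)


instance_ram_util = fetch('ram_util_meter')  # utilization meter: no data
if not instance_ram_util:
    instance_ram_util = fetch('ram_alloc_meter')  # fall back to allocation
assert instance_ram_util == 512.0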

View File

@@ -290,8 +290,9 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
util = None
try:
util = self.datasource_backend.statistic_aggregation(
instance.uuid, self._meter, self._period, 'mean',
granularity=self.granularity)
instance.uuid, self._meter, self._period,
self._granularity, aggregation='mean',
dimensions=dict(resource_id=instance.uuid))
except Exception as exc:
LOG.exception(exc)
LOG.error("Can not get %s from %s", self._meter,
@@ -352,6 +353,7 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
self.threshold = self.input_parameters.threshold
self._period = self.input_parameters.period
self._meter = self.input_parameters.metrics
self._granularity = self.input_parameters.granularity
source_nodes, target_nodes, avg_workload, workload_cache = (
self.group_hosts_by_cpu_or_ram_util())
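Granularity now arrives through input_parameters just like period and metrics do, then rides along in every statistic_aggregation call. A compact sketch of that plumbing, assuming a dict-backed container like the utils.Struct the tests use (the Params class here is invented):

class Params(dict):  # invented stand-in for the strategy's input_parameters
    __getattr__ = dict.__getitem__


params = Params(metrics='cpu_util', threshold=25.0, period=300,
                granularity=300)
_meter = params.metrics
_period = params.period
_granularity = params.granularity  # newly threaded through
assert (_meter, _period, _granularity) == ('cpu_util', 300, 300)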

View File

@@ -198,8 +198,8 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
instance_load = {'uuid': instance.uuid, 'vcpus': instance.vcpus}
for meter in self.metrics:
avg_meter = self.datasource_backend.statistic_aggregation(
instance.uuid, meter, self.periods['instance'], 'mean',
granularity=self.granularity)
instance.uuid, meter, self.periods['instance'],
self.granularity, aggregation='mean')
if avg_meter is None:
LOG.warning(
"No values returned by %(resource_id)s "
@@ -242,8 +242,7 @@ class WorkloadStabilization(base.WorkloadStabilizationBaseStrategy):
resource_id = node_id
avg_meter = self.datasource_backend.statistic_aggregation(
resource_id, self.instance_metrics[metric],
self.periods['node'], 'mean', granularity=self.granularity)
self.periods['node'], self.granularity, aggregation='mean')
if avg_meter is None:
LOG.warning('No values returned by node %s for %s',
node_id, meter_name)

View File

@@ -55,7 +55,8 @@ class TestCeilometerHelper(base.BaseTestCase):
val = cm.statistic_aggregation(
resource_id="INSTANCE_ID",
meter_name="cpu_util",
period="7300"
period="7300",
granularity=None
)
self.assertEqual(expected_result, val)
@@ -100,7 +101,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_host_cpu_usage('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['host_cpu_usage'], 600,
'compute1', helper.METRIC_MAP['host_cpu_usage'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -109,7 +110,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_instance_cpu_usage('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['instance_cpu_usage'], 600,
'compute1', helper.METRIC_MAP['instance_cpu_usage'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -118,7 +119,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_host_memory_usage('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['host_memory_usage'], 600,
'compute1', helper.METRIC_MAP['host_memory_usage'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -128,7 +129,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_instance_memory_usage('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['instance_ram_usage'], 600,
'compute1', helper.METRIC_MAP['instance_ram_usage'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -139,7 +140,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper.get_instance_l3_cache_usage('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['instance_l3_cache_usage'], 600,
aggregate='mean')
None, aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
'statistic_aggregation')
@@ -148,7 +149,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_instance_ram_allocated('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['instance_ram_allocated'], 600,
'compute1', helper.METRIC_MAP['instance_ram_allocated'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -159,7 +160,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper.get_instance_root_disk_allocated('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['instance_root_disk_size'], 600,
aggregate='mean')
None, aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
'statistic_aggregation')
@@ -168,7 +169,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_host_outlet_temperature('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['host_outlet_temp'], 600,
'compute1', helper.METRIC_MAP['host_outlet_temp'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -178,7 +179,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_host_inlet_temperature('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['host_inlet_temp'], 600,
'compute1', helper.METRIC_MAP['host_inlet_temp'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -187,7 +188,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_host_airflow('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['host_airflow'], 600,
'compute1', helper.METRIC_MAP['host_airflow'], 600, None,
aggregation='mean')
@mock.patch.object(ceilometer_helper.CeilometerHelper,
@@ -196,7 +197,7 @@ class TestCeilometerHelper(base.BaseTestCase):
helper = ceilometer_helper.CeilometerHelper()
helper.get_host_power('compute1', 600, 'mean')
mock_aggregation.assert_called_once_with(
'compute1', helper.METRIC_MAP['host_power'], 600,
'compute1', helper.METRIC_MAP['host_power'], 600, None,
aggregation='mean')
def test_check_availability(self, mock_ceilometer):

View File

@@ -16,10 +16,8 @@
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from watcher.common import clients
from watcher.common import exception
from watcher.datasource import gnocchi as gnocchi_helper
from watcher.tests import base
@@ -39,34 +37,17 @@ class TestGnocchiHelper(base.BaseTestCase):
mock_gnocchi.return_value = gnocchi
helper = gnocchi_helper.GnocchiHelper()
result = helper._statistic_aggregation(
result = helper.statistic_aggregation(
resource_id='16a86790-327a-45f9-bc82-45839f062fdc',
metric='cpu_util',
meter_name='cpu_util',
period=300,
granularity=360,
start_time=timeutils.parse_isotime("2017-02-02T09:00:00.000000"),
stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"),
aggregation='mean'
dimensions=None,
aggregation='mean',
group_by='*'
)
self.assertEqual(expected_result, result)
def test_gnocchi_wrong_datetime(self, mock_gnocchi):
gnocchi = mock.MagicMock()
expected_measures = [["2017-02-02T09:00:00.000000", 360, 5.5]]
gnocchi.metric.get_measures.return_value = expected_measures
mock_gnocchi.return_value = gnocchi
helper = gnocchi_helper.GnocchiHelper()
self.assertRaises(
exception.InvalidParameter, helper._statistic_aggregation,
resource_id='16a86790-327a-45f9-bc82-45839f062fdc',
metric='cpu_util',
granularity=360,
start_time="2017-02-02T09:00:00.000000",
stop_time=timeutils.parse_isotime("2017-02-02T10:00:00.000000"),
aggregation='mean')
@mock.patch.object(gnocchi_helper.GnocchiHelper, 'statistic_aggregation')
def test_get_host_cpu_usage(self, mock_aggregation, mock_gnocchi):
helper = gnocchi_helper.GnocchiHelper()

View File

@@ -16,7 +16,6 @@
import mock
from oslo_config import cfg
from oslo_utils import timeutils
from watcher.common import clients
from watcher.datasource import monasca as monasca_helper
@@ -30,7 +29,7 @@ class TestMonascaHelper(base.BaseTestCase):
def test_monasca_statistic_aggregation(self, mock_monasca):
monasca = mock.MagicMock()
expected_result = [{
expected_stat = [{
'columns': ['timestamp', 'avg'],
'dimensions': {
'hostname': 'rdev-indeedsrv001',
@@ -39,23 +38,23 @@ class TestMonascaHelper(base.BaseTestCase):
'name': 'cpu.percent',
'statistics': [
['2016-07-29T12:45:00Z', 0.0],
['2016-07-29T12:50:00Z', 0.9100000000000001],
['2016-07-29T12:55:00Z', 0.9111111111111112]]}]
['2016-07-29T12:50:00Z', 0.9],
['2016-07-29T12:55:00Z', 0.9]]}]
monasca.metrics.list_statistics.return_value = expected_result
monasca.metrics.list_statistics.return_value = expected_stat
mock_monasca.return_value = monasca
helper = monasca_helper.MonascaHelper()
result = helper.statistic_aggregation(
resource_id=None,
meter_name='cpu.percent',
dimensions={'hostname': 'NODE_UUID'},
start_time=timeutils.parse_isotime("2016-06-06T10:33:22.063176"),
end_time=None,
period=7200,
aggregate='avg',
granularity=300,
dimensions={'hostname': 'NODE_UUID'},
aggregation='avg',
group_by='*',
)
self.assertEqual(expected_result, result)
self.assertEqual(0.6, result)
def test_check_availability(self, mock_monasca):
monasca = mock.MagicMock()
@@ -117,34 +116,14 @@ class TestMonascaHelper(base.BaseTestCase):
@mock.patch.object(monasca_helper.MonascaHelper, 'statistic_aggregation')
def test_get_host_cpu_usage(self, mock_aggregation, mock_monasca):
node = "compute1_compute1"
mock_aggregation.return_value = [{
'columns': ['timestamp', 'avg'],
'dimensions': {
'hostname': 'rdev-indeedsrv001',
'service': 'monasca'},
'id': '0',
'name': 'cpu.percent',
'statistics': [
['2016-07-29T12:45:00Z', 0.0],
['2016-07-29T12:50:00Z', 0.9],
['2016-07-29T12:55:00Z', 0.9]]}]
mock_aggregation.return_value = 0.6
helper = monasca_helper.MonascaHelper()
cpu_usage = helper.get_host_cpu_usage(node, 600, 'mean')
self.assertEqual(0.6, cpu_usage)
@mock.patch.object(monasca_helper.MonascaHelper, 'statistic_aggregation')
def test_get_instance_cpu_usage(self, mock_aggregation, mock_monasca):
mock_aggregation.return_value = [{
'columns': ['timestamp', 'avg'],
'dimensions': {
'name': 'vm1',
'service': 'monasca'},
'id': '0',
'name': 'cpu.percent',
'statistics': [
['2016-07-29T12:45:00Z', 0.0],
['2016-07-29T12:50:00Z', 0.9],
['2016-07-29T12:55:00Z', 0.9]]}]
mock_aggregation.return_value = 0.6
helper = monasca_helper.MonascaHelper()
cpu_usage = helper.get_instance_cpu_usage('vm1', 600, 'mean')
self.assertEqual(0.6, cpu_usage)

View File

@@ -26,14 +26,9 @@ class FakeCeilometerMetrics(object):
def empty_one_metric(self, emptytype):
self.emptytype = emptytype
# TODO(alexchadin): This method is added as temporary solution until
# all strategies use datasource_backend property.
def temp_mock_get_statistics(self, resource_id, meter_name, period,
aggregate, granularity=300):
return self.mock_get_statistics(resource_id, meter_name, period)
def mock_get_statistics(self, resource_id, meter_name, period,
aggregate='avg'):
def mock_get_statistics(self, resource_id=None, meter_name=None,
period=None, granularity=None, dimensions=None,
aggregation='avg', group_by='*'):
result = 0
if meter_name == "hardware.cpu.util":
result = self.get_usage_node_cpu(resource_id)
@@ -56,7 +51,8 @@ class FakeCeilometerMetrics(object):
return result
def mock_get_statistics_wb(self, resource_id, meter_name, period,
aggregate, granularity=300):
granularity, dimensions=None,
aggregation='avg', group_by='*'):
result = 0.0
if meter_name == "cpu_util":
result = self.get_average_usage_instance_cpu_wb(resource_id)

View File

@@ -84,8 +84,9 @@ class FakeCeilometerMetrics(object):
def __init__(self, model):
self.model = model
def mock_get_statistics(self, resource_id, meter_name, period=3600,
aggregate='avg'):
def mock_get_statistics(self, resource_id=None, meter_name=None,
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
if meter_name == "compute.node.cpu.percent":
return self.get_node_cpu_util(resource_id)
elif meter_name == "cpu_util":
@@ -166,15 +167,16 @@ class FakeGnocchiMetrics(object):
def __init__(self, model):
self.model = model
def mock_get_statistics(self, resource_id, metric, granularity,
start_time, stop_time, aggregation='mean'):
if metric == "compute.node.cpu.percent":
def mock_get_statistics(self, resource_id=None, meter_name=None,
period=300, granularity=300, dimensions=None,
aggregation='avg', group_by='*'):
if meter_name == "compute.node.cpu.percent":
return self.get_node_cpu_util(resource_id)
elif metric == "cpu_util":
elif meter_name == "cpu_util":
return self.get_instance_cpu_util(resource_id)
elif metric == "memory.resident":
elif meter_name == "memory.resident":
return self.get_instance_ram_util(resource_id)
elif metric == "disk.root.size":
elif meter_name == "disk.root.size":
return self.get_instance_disk_root_size(resource_id)
def get_node_cpu_util(self, r_id):

View File

@@ -21,17 +21,10 @@ class FakeGnocchiMetrics(object):
def empty_one_metric(self, emptytype):
self.emptytype = emptytype
# TODO(alexchadin): This method is added as temporary solution until
# all strategies use datasource_backend property.
def temp_mock_get_statistics(self, resource_id, metric, period, aggregate,
granularity=300):
return self.mock_get_statistics(resource_id, metric, granularity,
0, 0, aggregation='mean')
def mock_get_statistics(self, resource_id, metric, granularity,
start_time, stop_time, aggregation='mean'):
def mock_get_statistics(self, resource_id=None, meter_name=None,
period=None, granularity=None, dimensions=None,
aggregation='avg', group_by='*'):
result = 0
meter_name = metric
if meter_name == "hardware.cpu.util":
result = self.get_usage_node_cpu(resource_id)
elif meter_name == "compute.node.cpu.percent":
@@ -87,12 +80,13 @@ class FakeGnocchiMetrics(object):
mock[uuid] = 25 * oslo_utils.units.Ki
return mock[str(uuid)]
def mock_get_statistics_wb(self, resource_id, metric, period, aggregate,
granularity=300):
def mock_get_statistics_wb(self, resource_id, meter_name, period,
granularity, dimensions=None,
aggregation='avg', group_by='*'):
result = 0.0
if metric == "cpu_util":
if meter_name == "cpu_util":
result = self.get_average_usage_instance_cpu_wb(resource_id)
elif metric == "memory.resident":
elif meter_name == "memory.resident":
result = self.get_average_usage_instance_memory_wb(resource_id)
return result

View File

@@ -26,15 +26,9 @@ class FakeMonascaMetrics(object):
def empty_one_metric(self, emptytype):
self.emptytype = emptytype
# This method is added as temporary solution until all strategies use
# datasource_backend property
def temp_mock_get_statistics(self, metric, dimensions, period,
aggregate='avg', granularity=300):
return self.mock_get_statistics(metric, dimensions,
period, aggregate='avg')
def mock_get_statistics(self, meter_name, dimensions, period,
aggregate='avg'):
def mock_get_statistics(self, resource_id=None, meter_name=None,
period=None, granularity=None, dimensions=None,
aggregation='avg', group_by='*'):
resource_id = dimensions.get(
"resource_id") or dimensions.get("hostname")
result = 0.0

View File

@@ -17,7 +17,6 @@
# limitations under the License.
#
import collections
import datetime
import mock
from watcher.applier.loading import default
@@ -57,7 +56,7 @@ class TestOutletTempControl(base.TestCase):
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.OutletTempControl, self.datasource,
strategies.OutletTempControl, 'datasource_backend',
new_callable=mock.PropertyMock)
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
@@ -164,44 +163,3 @@ class TestOutletTempControl(base.TestCase):
loaded_action = loader.load(action['action_type'])
loaded_action.input_parameters = action['input_parameters']
loaded_action.validate_parameters()
def test_periods(self):
model = self.fake_cluster.generate_scenario_3_with_2_nodes()
self.m_model.return_value = model
p_ceilometer = mock.patch.object(
strategies.OutletTempControl, "ceilometer")
m_ceilometer = p_ceilometer.start()
self.addCleanup(p_ceilometer.stop)
p_gnocchi = mock.patch.object(strategies.OutletTempControl, "gnocchi")
m_gnocchi = p_gnocchi.start()
self.addCleanup(p_gnocchi.stop)
datetime_patcher = mock.patch.object(
datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.utcnow.return_value = datetime.datetime(
2017, 3, 19, 18, 53, 11, 657417)
self.addCleanup(datetime_patcher.stop)
m_ceilometer.statistic_aggregation = mock.Mock(
side_effect=self.fake_metrics.mock_get_statistics)
m_gnocchi.statistic_aggregation = mock.Mock(
side_effect=self.fake_metrics.mock_get_statistics)
node = model.get_node_by_uuid('Node_0')
self.strategy.input_parameters.update({'threshold': 35.0})
self.strategy.threshold = 35.0
self.strategy.group_hosts_by_outlet_temp()
if self.strategy.config.datasource == "ceilometer":
m_ceilometer.statistic_aggregation.assert_any_call(
aggregate='avg',
meter_name='hardware.ipmi.node.outlet_temperature',
period=30, resource_id=node.uuid)
elif self.strategy.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int('30'))
m_gnocchi.statistic_aggregation.assert_called_with(
resource_id=mock.ANY,
metric='hardware.ipmi.node.outlet_temperature',
granularity=300, start_time=start_time, stop_time=stop_time,
aggregation='mean')

View File

@@ -17,7 +17,6 @@
# limitations under the License.
#
import collections
import datetime
import mock
from watcher.applier.loading import default
@@ -56,7 +55,7 @@ class TestUniformAirflow(base.TestCase):
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.UniformAirflow, self.datasource,
strategies.UniformAirflow, 'datasource_backend',
new_callable=mock.PropertyMock)
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
@@ -211,39 +210,3 @@ class TestUniformAirflow(base.TestCase):
loaded_action = loader.load(action['action_type'])
loaded_action.input_parameters = action['input_parameters']
loaded_action.validate_parameters()
def test_periods(self):
model = self.fake_cluster.generate_scenario_7_with_2_nodes()
self.m_model.return_value = model
p_ceilometer = mock.patch.object(
strategies.UniformAirflow, "ceilometer")
m_ceilometer = p_ceilometer.start()
self.addCleanup(p_ceilometer.stop)
p_gnocchi = mock.patch.object(strategies.UniformAirflow, "gnocchi")
m_gnocchi = p_gnocchi.start()
self.addCleanup(p_gnocchi.stop)
datetime_patcher = mock.patch.object(
datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.utcnow.return_value = datetime.datetime(
2017, 3, 19, 18, 53, 11, 657417)
self.addCleanup(datetime_patcher.stop)
m_ceilometer.statistic_aggregation = mock.Mock(
side_effect=self.fake_metrics.mock_get_statistics)
m_gnocchi.statistic_aggregation = mock.Mock(
side_effect=self.fake_metrics.mock_get_statistics)
self.strategy.group_hosts_by_airflow()
if self.strategy.config.datasource == "ceilometer":
m_ceilometer.statistic_aggregation.assert_any_call(
aggregate='avg', meter_name='hardware.ipmi.node.airflow',
period=300, resource_id=mock.ANY)
elif self.strategy.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int('300'))
m_gnocchi.statistic_aggregation.assert_called_with(
resource_id=mock.ANY, metric='hardware.ipmi.node.airflow',
granularity=300, start_time=start_time, stop_time=stop_time,
aggregation='mean')

View File

@@ -18,7 +18,6 @@
# limitations under the License.
#
import datetime
import mock
from watcher.common import exception
@@ -55,7 +54,7 @@ class TestVMWorkloadConsolidation(base.TestCase):
self.addCleanup(p_model.stop)
p_datasource = mock.patch.object(
strategies.VMWorkloadConsolidation, self.datasource,
strategies.VMWorkloadConsolidation, 'datasource_backend',
new_callable=mock.PropertyMock)
self.m_datasource = p_datasource.start()
self.addCleanup(p_datasource.stop)
@@ -333,41 +332,3 @@ class TestVMWorkloadConsolidation(base.TestCase):
del expected[3]
del expected[1]
self.assertEqual(expected, self.strategy.solution.actions)
def test_periods(self):
model = self.fake_cluster.generate_scenario_1()
self.m_model.return_value = model
p_ceilometer = mock.patch.object(
strategies.VMWorkloadConsolidation, "ceilometer")
m_ceilometer = p_ceilometer.start()
self.addCleanup(p_ceilometer.stop)
p_gnocchi = mock.patch.object(
strategies.VMWorkloadConsolidation, "gnocchi")
m_gnocchi = p_gnocchi.start()
self.addCleanup(p_gnocchi.stop)
datetime_patcher = mock.patch.object(
datetime, 'datetime',
mock.Mock(wraps=datetime.datetime)
)
mocked_datetime = datetime_patcher.start()
mocked_datetime.utcnow.return_value = datetime.datetime(
2017, 3, 19, 18, 53, 11, 657417)
self.addCleanup(datetime_patcher.stop)
m_ceilometer.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.mock_get_statistics)
m_gnocchi.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.mock_get_statistics)
instance0 = model.get_instance_by_uuid("INSTANCE_0")
self.strategy.get_instance_utilization(instance0)
if self.strategy.config.datasource == "ceilometer":
m_ceilometer.statistic_aggregation.assert_any_call(
aggregate='avg', meter_name='disk.root.size',
period=3600, resource_id=instance0.uuid)
elif self.strategy.config.datasource == "gnocchi":
stop_time = datetime.datetime.utcnow()
start_time = stop_time - datetime.timedelta(
seconds=int('3600'))
m_gnocchi.statistic_aggregation.assert_called_with(
resource_id=instance0.uuid, metric='disk.root.size',
granularity=300, start_time=start_time, stop_time=stop_time,
aggregation='mean')

View File

@@ -75,10 +75,12 @@ class TestWorkloadBalance(base.TestCase):
self.strategy.input_parameters = utils.Struct()
self.strategy.input_parameters.update({'metrics': 'cpu_util',
'threshold': 25.0,
'period': 300})
'period': 300,
'granularity': 300})
self.strategy.threshold = 25.0
self.strategy._period = 300
self.strategy._meter = "cpu_util"
self.strategy._granularity = 300
def test_calc_used_resource(self):
model = self.fake_cluster.generate_scenario_6_with_2_nodes()

View File

@@ -83,7 +83,7 @@ class TestWorkloadStabilization(base.TestCase):
self.m_model.return_value = model_root.ModelRoot()
self.m_audit_scope.return_value = mock.Mock()
self.m_datasource.return_value = mock.Mock(
statistic_aggregation=self.fake_metrics.temp_mock_get_statistics)
statistic_aggregation=self.fake_metrics.mock_get_statistics)
self.strategy = strategies.WorkloadStabilization(
config=mock.Mock(datasource=self.datasource))