Refactor the code for getting used and free resources
Functions to get a node's used and free resources are now provided in the ModelRoot class, so strategies can invoke those shared functions instead of each computing the values themselves. Change-Id: I3c74d56539ac6c6eb16b0d254a76260bc791567c
This commit is contained in:
parent
a7b24ac6a5
commit
689ae25ef5
|
@ -201,19 +201,13 @@ class BasicConsolidation(base.ServerConsolidationBaseStrategy):
|
|||
LOG.debug('Migrate instance %s from %s to %s',
|
||||
instance_to_migrate, source_node, destination_node)
|
||||
|
||||
total_cores = 0
|
||||
total_disk = 0
|
||||
total_mem = 0
|
||||
for instance in self.compute_model.get_node_instances(
|
||||
destination_node):
|
||||
total_cores += instance.vcpus
|
||||
total_disk += instance.disk
|
||||
total_mem += instance.memory
|
||||
used_resources = self.compute_model.get_node_used_resources(
|
||||
destination_node)
|
||||
|
||||
# capacity requested by the compute node
|
||||
total_cores += instance_to_migrate.vcpus
|
||||
total_disk += instance_to_migrate.disk
|
||||
total_mem += instance_to_migrate.memory
|
||||
total_cores = used_resources['vcpu'] + instance_to_migrate.vcpus
|
||||
total_disk = used_resources['disk'] + instance_to_migrate.disk
|
||||
total_mem = used_resources['memory'] + instance_to_migrate.memory
|
||||
|
||||
return self.check_threshold(destination_node, total_cores, total_disk,
|
||||
total_mem)
|
||||
|
|
|
@ -137,37 +137,6 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
|||
ram=node.memory_mb_capacity,
|
||||
disk=node.disk_gb_capacity)
|
||||
|
||||
def get_node_used(self, node):
|
||||
"""Collect cpu, ram and disk used of a node.
|
||||
|
||||
:param node: node object
|
||||
:return: dict(cpu(cores), ram(MB), disk(B))
|
||||
"""
|
||||
vcpus_used = 0
|
||||
memory_used = 0
|
||||
disk_used = 0
|
||||
for instance in self.compute_model.get_node_instances(node):
|
||||
vcpus_used += instance.vcpus
|
||||
memory_used += instance.memory
|
||||
disk_used += instance.disk
|
||||
|
||||
return dict(cpu=vcpus_used,
|
||||
ram=memory_used,
|
||||
disk=disk_used)
|
||||
|
||||
def get_node_free(self, node):
|
||||
"""Collect cpu, ram and disk free of a node.
|
||||
|
||||
:param node: node object
|
||||
:return: dict(cpu(cores), ram(MB), disk(B))
|
||||
"""
|
||||
node_capacity = self.get_node_capacity(node)
|
||||
node_used = self.get_node_used(node)
|
||||
return dict(cpu=node_capacity['cpu']-node_used['cpu'],
|
||||
ram=node_capacity['ram']-node_used['ram'],
|
||||
disk=node_capacity['disk']-node_used['disk'],
|
||||
)
|
||||
|
||||
def host_fits(self, source_node, destination_node):
|
||||
"""check host fits
|
||||
|
||||
|
@ -175,9 +144,11 @@ class HostMaintenance(base.HostMaintenanceBaseStrategy):
|
|||
from source_node to destination_node.
|
||||
"""
|
||||
|
||||
source_node_used = self.get_node_used(source_node)
|
||||
destination_node_free = self.get_node_free(destination_node)
|
||||
metrics = ['cpu', 'ram']
|
||||
source_node_used = self.compute_model.get_node_used_resources(
|
||||
source_node)
|
||||
destination_node_free = self.compute_model.get_node_free_resources(
|
||||
destination_node)
|
||||
metrics = ['vcpu', 'memory']
|
||||
for m in metrics:
|
||||
if source_node_used[m] > destination_node_free[m]:
|
||||
return False
|
||||
|
|
|
@ -208,19 +208,6 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
|
|||
|
||||
return hosts_need_release, hosts_target
|
||||
|
||||
def calc_used_resource(self, node):
|
||||
"""Calculate the used vcpus, memory and disk based on VM flavors"""
|
||||
instances = self.compute_model.get_node_instances(node)
|
||||
vcpus_used = 0
|
||||
memory_mb_used = 0
|
||||
disk_gb_used = 0
|
||||
for instance in instances:
|
||||
vcpus_used += instance.vcpus
|
||||
memory_mb_used += instance.memory
|
||||
disk_gb_used += instance.disk
|
||||
|
||||
return vcpus_used, memory_mb_used, disk_gb_used
|
||||
|
||||
def filter_dest_servers(self, hosts, instance_to_migrate):
|
||||
required_cores = instance_to_migrate.vcpus
|
||||
required_disk = instance_to_migrate.disk
|
||||
|
@ -228,12 +215,9 @@ class NoisyNeighbor(base.NoisyNeighborBaseStrategy):
|
|||
|
||||
dest_servers = []
|
||||
for host in hosts:
|
||||
cores_used, mem_used, disk_used = self.calc_used_resource(host)
|
||||
cores_available = host.vcpu_capacity - cores_used
|
||||
disk_available = host.disk_gb_capacity - disk_used
|
||||
mem_available = host.memory_mb_capacity - mem_used
|
||||
if (cores_available >= required_cores and disk_available >=
|
||||
required_disk and mem_available >= required_memory):
|
||||
free_res = self.compute_model.get_node_free_resources(host)
|
||||
if (free_res['vcpu'] >= required_cores and free_res['disk'] >=
|
||||
required_disk and free_res['memory'] >= required_memory):
|
||||
dest_servers.append(host)
|
||||
|
||||
return dest_servers
|
||||
|
|
|
@ -140,19 +140,6 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||
if cn.state == element.ServiceState.ONLINE.value and
|
||||
cn.status in default_node_scope}
|
||||
|
||||
def calc_used_resource(self, node):
|
||||
"""Calculate the used vcpus, memory and disk based on VM flavors"""
|
||||
instances = self.compute_model.get_node_instances(node)
|
||||
vcpus_used = 0
|
||||
memory_mb_used = 0
|
||||
disk_gb_used = 0
|
||||
for instance in instances:
|
||||
vcpus_used += instance.vcpus
|
||||
memory_mb_used += instance.memory
|
||||
disk_gb_used += instance.disk
|
||||
|
||||
return vcpus_used, memory_mb_used, disk_gb_used
|
||||
|
||||
def group_hosts_by_outlet_temp(self):
|
||||
"""Group hosts based on outlet temp meters"""
|
||||
nodes = self.get_available_compute_nodes()
|
||||
|
@ -222,13 +209,9 @@ class OutletTempControl(base.ThermalOptimizationBaseStrategy):
|
|||
for instance_data in hosts:
|
||||
host = instance_data['compute_node']
|
||||
# available
|
||||
cores_used, mem_used, disk_used = self.calc_used_resource(host)
|
||||
cores_available = host.vcpu_capacity - cores_used
|
||||
disk_available = host.disk_gb_capacity - disk_used
|
||||
mem_available = host.memory_mb_capacity - mem_used
|
||||
if cores_available >= required_cores \
|
||||
and disk_available >= required_disk \
|
||||
and mem_available >= required_memory:
|
||||
free_res = self.compute_model.get_node_free_resources(host)
|
||||
if (free_res['vcpu'] >= required_cores and free_res['disk'] >=
|
||||
required_disk and free_res['memory'] >= required_memory):
|
||||
dest_servers.append(instance_data)
|
||||
|
||||
return dest_servers
|
||||
|
|
|
@ -135,16 +135,9 @@ class UniformAirflow(base.BaseStrategy):
|
|||
|
||||
def calculate_used_resource(self, node):
|
||||
"""Compute the used vcpus, memory and disk based on instance flavors"""
|
||||
instances = self.compute_model.get_node_instances(node)
|
||||
vcpus_used = 0
|
||||
memory_mb_used = 0
|
||||
disk_gb_used = 0
|
||||
for instance in instances:
|
||||
vcpus_used += instance.vcpus
|
||||
memory_mb_used += instance.memory
|
||||
disk_gb_used += instance.disk
|
||||
used_res = self.compute_model.get_node_used_resources(node)
|
||||
|
||||
return vcpus_used, memory_mb_used, disk_gb_used
|
||||
return used_res['vcpu'], used_res['memory'], used_res['disk']
|
||||
|
||||
def choose_instance_to_migrate(self, hosts):
|
||||
"""Pick up an active instance instance to migrate from provided hosts
|
||||
|
|
|
@ -132,19 +132,6 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||
if cn.state == element.ServiceState.ONLINE.value and
|
||||
cn.status in default_node_scope}
|
||||
|
||||
def calculate_used_resource(self, node):
|
||||
"""Calculate the used vcpus, memory and disk based on VM flavors"""
|
||||
instances = self.compute_model.get_node_instances(node)
|
||||
vcpus_used = 0
|
||||
memory_mb_used = 0
|
||||
disk_gb_used = 0
|
||||
for instance in instances:
|
||||
vcpus_used += instance.vcpus
|
||||
memory_mb_used += instance.memory
|
||||
disk_gb_used += instance.disk
|
||||
|
||||
return vcpus_used, memory_mb_used, disk_gb_used
|
||||
|
||||
def choose_instance_to_migrate(self, hosts, avg_workload, workload_cache):
|
||||
"""Pick up an active instance instance to migrate from provided hosts
|
||||
|
||||
|
@ -203,14 +190,10 @@ class WorkloadBalance(base.WorkloadStabilizationBaseStrategy):
|
|||
host = instance_data['compute_node']
|
||||
workload = instance_data['workload']
|
||||
# calculate the available resources
|
||||
cores_used, mem_used, disk_used = self.calculate_used_resource(
|
||||
host)
|
||||
cores_available = host.vcpu_capacity - cores_used
|
||||
disk_available = host.disk_gb_capacity - disk_used
|
||||
mem_available = host.memory_mb_capacity - mem_used
|
||||
if (cores_available >= required_cores and
|
||||
mem_available >= required_mem and
|
||||
disk_available >= required_disk):
|
||||
free_res = self.compute_model.get_node_free_resources(host)
|
||||
if (free_res['vcpu'] >= required_cores and
|
||||
free_res['memory'] >= required_mem and
|
||||
free_res['disk'] >= required_disk):
|
||||
if (self._meter == 'instance_cpu_usage' and
|
||||
((src_instance_workload + workload) <
|
||||
self.threshold / 100 * host.vcpus)):
|
||||
|
|
|
@ -70,22 +70,6 @@ class TestHostMaintenance(TestBaseStrategy):
|
|||
self.assertEqual(node_capacity,
|
||||
self.strategy.get_node_capacity(node_0))
|
||||
|
||||
def test_get_node_used(self):
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
self.m_c_model.return_value = model
|
||||
node_0 = model.get_node_by_uuid("Node_0")
|
||||
node_used = dict(cpu=20, ram=4, disk=40)
|
||||
self.assertEqual(node_used,
|
||||
self.strategy.get_node_used(node_0))
|
||||
|
||||
def test_get_node_free(self):
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
self.m_c_model.return_value = model
|
||||
node_0 = model.get_node_by_uuid("Node_0")
|
||||
node_free = dict(cpu=20, ram=128, disk=210)
|
||||
self.assertEqual(node_free,
|
||||
self.strategy.get_node_free(node_0))
|
||||
|
||||
def test_host_fits(self):
|
||||
model = self.fake_c_cluster.generate_scenario_1()
|
||||
self.m_c_model.return_value = model
|
||||
|
|
|
@ -60,15 +60,6 @@ class TestNoisyNeighbor(TestBaseStrategy):
|
|||
self.strategy.input_parameters.update({'period': 100})
|
||||
self.strategy.threshold = 100
|
||||
|
||||
def test_calc_used_resource(self):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
|
||||
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
||||
node)
|
||||
|
||||
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
|
||||
|
||||
def test_group_hosts(self):
|
||||
self.strategy.cache_threshold = 35
|
||||
self.strategy.period = 100
|
||||
|
|
|
@ -60,15 +60,6 @@ class TestOutletTempControl(TestBaseStrategy):
|
|||
self.strategy.input_parameters.update({'threshold': 34.3})
|
||||
self.strategy.threshold = 34.3
|
||||
|
||||
def test_calc_used_resource(self):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
node = model.get_node_by_uuid("fa69c544-906b-4a6a-a9c6-c1f7a8078c73")
|
||||
cores_used, mem_used, disk_used = self.strategy.calc_used_resource(
|
||||
node)
|
||||
|
||||
self.assertEqual((10, 2, 20), (cores_used, mem_used, disk_used))
|
||||
|
||||
def test_group_hosts_by_outlet_temp(self):
|
||||
model = self.fake_c_cluster.generate_scenario_3_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
|
|
|
@ -64,15 +64,6 @@ class TestWorkloadBalance(TestBaseStrategy):
|
|||
self.strategy._meter = 'instance_cpu_usage'
|
||||
self.strategy._granularity = 300
|
||||
|
||||
def test_calc_used_resource(self):
|
||||
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
node = model.get_node_by_uuid('Node_0')
|
||||
cores_used, mem_used, disk_used = (
|
||||
self.strategy.calculate_used_resource(node))
|
||||
|
||||
self.assertEqual((cores_used, mem_used, disk_used), (20, 64, 40))
|
||||
|
||||
def test_group_hosts_by_cpu_util(self):
|
||||
model = self.fake_c_cluster.generate_scenario_6_with_2_nodes()
|
||||
self.m_c_model.return_value = model
|
||||
|
|
Loading…
Reference in New Issue