Get disk info from compute_node

Part of blueprint limit-container-diskspace

Change-Id: Iddbde43a417b873d6dd8c52f02c0d2165aba2c31
This commit is contained in:
Feng Shengqin 2018-03-05 16:36:13 +08:00
parent 22ead31e77
commit db118391c5
7 changed files with 58 additions and 9 deletions

View File

@ -41,6 +41,10 @@ class NopClaim(object):
def cpu(self):
    """A no-op claim holds no CPU resources."""
    return 0
@property
def disk(self):
    """A no-op claim holds no disk resources."""
    return 0
def __enter__(self):
    """Enter the context manager; the claim object itself is the context."""
    return self
@ -93,6 +97,10 @@ class Claim(NopClaim):
def cpu(self):
    """CPUs requested by the container, defaulting to 0 when unset."""
    requested = self.container.cpu
    return requested if requested else 0
@property
def disk(self):
    """Disk space requested by the container, defaulting to 0 when unset."""
    requested = self.container.disk
    return requested if requested else 0
def abort(self):
"""Requiring claimed resources has failed or been aborted."""
LOG.debug("Aborting claim: %s", self)
@ -116,13 +124,15 @@ class Claim(NopClaim):
# unlimited:
memory_limit = limits.get('memory')
cpu_limit = limits.get('cpu')
disk_limit = limits.get('disk')
LOG.info('Attempting claim: memory %(memory)s, '
'cpu %(cpu).02f CPU',
{'memory': self.memory, 'cpu': self.cpu})
'cpu %(cpu).02f CPU, disk %(disk)s',
{'memory': self.memory, 'cpu': self.cpu, 'disk': self.disk})
reasons = [self._test_memory(resources, memory_limit),
self._test_cpu(resources, cpu_limit),
self._test_disk(resources, disk_limit),
self._test_pci()]
# TODO(Shunli): test numa here
reasons = [r for r in reasons if r is not None]
@ -156,6 +166,14 @@ class Claim(NopClaim):
return self._test(type_, unit, total, used, requested, limit)
def _test_disk(self, resources, limit):
    """Check whether the claimed disk fits into *resources*.

    Delegates to the generic ``_test`` helper with the node's disk
    totals; returns ``None`` on success or a failure reason string.
    """
    return self._test(_("disk"), "GB",
                      resources.disk_total,
                      resources.disk_used,
                      self.disk,
                      limit)
def _test(self, type_, unit, total, used, requested, limit):
"""Test if the type resource needed for a claim can be allocated."""

View File

@ -92,10 +92,10 @@ class ComputeNodeTracker(object):
be used to revert the resource usage if an error occurs
during the container build.
"""
# No memory, cpu, or pci_request specified, no need to claim resource
# now.
if not (container.memory or container.cpu or pci_requests):
self._set_container_host(context, container)
# No memory, cpu, disk or pci_request specified, no need to claim
# resource now.
if not (container.memory or container.cpu or pci_requests or
container.disk):
return claims.NopClaim()
# We should have the compute node created here, just get it.
@ -213,6 +213,7 @@ class ComputeNodeTracker(object):
cn.mem_free = cn.mem_total
cn.mem_used = 0
cn.running_containers = 0
cn.disk_used = 0
for cnt in containers:
self._update_usage_from_container(context, cnt)
@ -222,10 +223,12 @@ class ComputeNodeTracker(object):
def _update_usage(self, usage, sign=1):
mem_usage = usage['memory']
cpus_usage = usage.get('cpu', 0)
disk_usage = usage['disk']
cn = self.compute_node
cn.mem_used += sign * mem_usage
cn.cpu_used += sign * cpus_usage
cn.disk_used += sign * disk_usage
# free ram may be negative, depending on policy:
cn.mem_free = cn.mem_total - cn.mem_used
@ -294,8 +297,8 @@ class ComputeNodeTracker(object):
if container.memory:
memory = int(container.memory[:-1])
usage = {'memory': memory,
'cpu': container.cpu or 0}
'cpu': container.cpu or 0,
'disk': container.disk or 0}
# update numa usage here
return usage

View File

@ -21,6 +21,10 @@ compute_opts = [
default=True,
help='restart the containers which are running'
'before the host reboots.'),
cfg.FloatOpt(
'reserve_disk_for_image',
default=0.2,
help='reserve disk for docker images'),
]
service_opts = [

View File

@ -53,6 +53,9 @@ docker_opts = [
default=5,
help='Timeout in seconds for executing a command in a docker '
'container.'),
cfg.StrOpt('docker_data_root',
default='/var/lib/docker',
help='Root directory of persistent Docker state.'),
]
ALL_OPTS = (docker_opts)

View File

@ -892,6 +892,18 @@ class DockerDriver(driver.ContainerDriver):
return (total, running, paused, stopped, cpus,
architecture, os_type, os, kernel_version, labels)
def get_total_disk_for_container(self):
    """Return the disk space (in GB) usable by containers.

    Runs ``df -B 1G`` against the configured Docker data root and
    subtracts the fraction reserved for images
    (``CONF.compute.reserve_disk_for_image``).

    :returns: usable disk space in whole GB (int)
    :raises exception.CommandError: if ``df`` could not be executed
    """
    try:
        (output, err) = utils.execute('df', '-B', '1G',
                                      CONF.docker.docker_data_root,
                                      run_as_root=True)
    except exception.CommandError:
        # Log with the traceback so the root cause of the failure is not
        # lost when we re-raise a fresh CommandError for the caller
        # (previously this was an INFO-level message with no traceback).
        LOG.exception('There was a problem while executing df -B 1G %s',
                      CONF.docker.docker_data_root)
        raise exception.CommandError(cmd='df')
    # df prints a header line followed by one data row; the second field
    # of the data row is the filesystem's total size in 1G blocks.
    total_disk = int(output.split('\n')[1].split()[1])
    return int(total_disk * (1 - CONF.compute.reserve_disk_for_image))
def get_cpu_used(self):
cpu_used = 0
with docker_utils.docker_client() as docker:

View File

@ -192,6 +192,9 @@ class ContainerDriver(object):
def get_host_info(self):
    """Return information about the host; concrete drivers must override."""
    raise NotImplementedError()
def get_total_disk_for_container(self):
    """Return total disk space (GB) available for containers.

    Concrete drivers must override this method.

    :raises NotImplementedError: always, in this base implementation
    """
    # Bug fix: this stub previously *returned* a NotImplementedError
    # instance instead of raising it, silently handing callers an
    # exception object rather than signaling the missing override.
    raise NotImplementedError()
def get_cpu_used(self):
    """Return CPU usage on the host; concrete drivers must override."""
    raise NotImplementedError()
@ -241,6 +244,8 @@ class ContainerDriver(object):
cpu_used = self.get_cpu_used()
node.cpu_used = cpu_used
node.labels = labels
disk_total = self.get_total_disk_for_container()
node.disk_total = disk_total
def node_is_available(self, nodename):
"""Return whether this compute service manages a particular node."""

View File

@ -711,13 +711,15 @@ class TestDockerDriver(base.DriverTestCase):
security_groups=test_sec_group_id)
@mock.patch('zun.common.utils.execute')
@mock.patch('zun.container.docker.driver.DockerDriver'
'.get_total_disk_for_container')
@mock.patch('zun.container.driver.ContainerDriver.get_host_mem')
@mock.patch(
'zun.container.docker.driver.DockerDriver.get_host_info')
@mock.patch(
'zun.container.docker.driver.DockerDriver.get_cpu_used')
def test_get_available_resources(self, mock_cpu_used, mock_info, mock_mem,
mock_output):
mock_disk, mock_output):
self.driver = DockerDriver()
mock_output.return_value = LSCPU_ON
conf.CONF.set_override('floating_cpu_set', "0")
@ -727,6 +729,7 @@ class TestDockerDriver(base.DriverTestCase):
'CentOS', '3.10.0-123',
{'dev.type': 'product'})
mock_cpu_used.return_value = 1.0
mock_disk.return_value = 80
node_obj = objects.ComputeNode()
self.driver.get_available_resources(node_obj)
self.assertEqual(_numa_topo_spec, node_obj.numa_topology.to_list())
@ -744,3 +747,4 @@ class TestDockerDriver(base.DriverTestCase):
self.assertEqual('CentOS', node_obj.os)
self.assertEqual('3.10.0-123', node_obj.kernel_version)
self.assertEqual({'dev.type': 'product'}, node_obj.labels)
self.assertEqual(80, node_obj.disk_total)