Add support for cache tier management
This change adds two new actions to the ceph-mon charm that let the user create and remove cache tiers on existing pools. Both writeback and read-only modes are supported. The patch does not yet provide fine-grained control over cache tier properties; settings such as hit_set_count, bloom filter tuning, and cache sizing are not supported.

Change-Id: I5a37e79d0d23d35295a8ae97177c940af66b0485
parent 42233da15e
commit 38c868b048
.gitignore (vendored): 1 line changed
@@ -4,3 +4,4 @@ bin
 .tox
 *.sw[nop]
 *.pyc
+.idea
Makefile: 2 lines changed
@@ -3,7 +3,7 @@ PYTHON := /usr/bin/env python

 lint:
 	@flake8 --exclude hooks/charmhelpers,tests/charmhelpers \
-	    hooks tests unit_tests
+	    actions hooks tests unit_tests
 	@charm proof

 test:
actions.yaml: 37 lines changed
@@ -2,3 +2,40 @@ pause-health:
  description: Pause ceph health operations across the entire ceph cluster
resume-health:
  description: Resume ceph health operations across the entire ceph cluster
create-cache-tier:
  description: Create a new cache tier
  params:
    backer-pool:
      type: string
      description: |
        The name of the pool that will back the cache tier. Also known as
        the cold pool
    cache-pool:
      type: string
      description: |
        The name of the pool that will be the cache pool. Also known
        as the hot pool
    cache-mode:
      type: string
      default: writeback
      enum: [writeback, readonly]
      description: |
        The mode of the caching tier. Please refer to the Ceph docs for more
        information
  required: [backer-pool, cache-pool]
  additionalProperties: false
remove-cache-tier:
  description: Remove an existing cache tier
  params:
    backer-pool:
      type: string
      description: |
        The name of the pool that backs the cache tier. Also known as
        the cold pool
    cache-pool:
      type: string
      description: |
        The name of the pool that is the cache pool. Also known
        as the hot pool
  required: [backer-pool, cache-pool]
  additionalProperties: false
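Once these actions are defined, an operator drives them through the normal Juju action workflow; with the Juju 1.x action syntax that would look something like `juju action do ceph-mon/0 create-cache-tier backer-pool=cold cache-pool=hot cache-mode=writeback`, and later `juju action do ceph-mon/0 remove-cache-tier backer-pool=cold cache-pool=hot`. The unit name `ceph-mon/0` and the pool names are illustrative; the command form mirrors what the amulet `run_action` helper further down in this change constructs.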
actions/__init__.py (new file): 1 line
@@ -0,0 +1 @@
__author__ = 'chris'
actions/create-cache-tier (symbolic link): 1 line
@@ -0,0 +1 @@
create-cache-tier.py
actions/create-cache-tier.py (new executable file): 41 lines
@@ -0,0 +1,41 @@
#!/usr/bin/python
__author__ = 'chris'
from subprocess import CalledProcessError
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
from charmhelpers.core.hookenv import action_get, log, action_fail


def make_cache_tier():
    backer_pool = action_get("backer-pool")
    cache_pool = action_get("cache-pool")
    cache_mode = action_get("cache-mode")

    # Pre flight checks
    if not pool_exists('admin', backer_pool):
        log("Please create {} pool before calling create-cache-tier".format(
            backer_pool))
        action_fail("create-cache-tier failed. Backer pool {} must exist "
                    "before calling this".format(backer_pool))
        return  # action_fail() does not abort the script, so stop explicitly

    if not pool_exists('admin', cache_pool):
        log("Please create {} pool before calling create-cache-tier".format(
            cache_pool))
        action_fail("create-cache-tier failed. Cache pool {} must exist "
                    "before calling this".format(cache_pool))
        return

    pool = Pool(service='admin', name=backer_pool)
    try:
        pool.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)
    except CalledProcessError as err:
        log("Add cache tier failed with message: {}".format(
            err.message))
        action_fail("create-cache-tier failed. Add cache tier failed with "
                    "message: {}".format(err.message))


if __name__ == '__main__':
    make_cache_tier()
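The heavy lifting is delegated to `Pool.add_cache_tier` from charmhelpers, which is not part of this diff. As a rough sketch of what that helper boils down to (the name `add_cache_tier_sketch` and the exact command sequence are illustrative, based on the standard `ceph osd tier` commands rather than the vendored charmhelpers source):

# Illustrative sketch only; not the verbatim charmhelpers implementation.
from subprocess import check_call


def add_cache_tier_sketch(service, backer_pool, cache_pool, mode):
    # Attach cache_pool as a tier of backer_pool, then set its cache mode.
    check_call(['ceph', '--id', service, 'osd', 'tier', 'add',
                backer_pool, cache_pool])
    check_call(['ceph', '--id', service, 'osd', 'tier', 'cache-mode',
                cache_pool, mode])
    if mode == 'writeback':
        # Writeback tiers also need an overlay so client I/O is routed
        # through the hot pool.
        check_call(['ceph', '--id', service, 'osd', 'tier', 'set-overlay',
                    backer_pool, cache_pool])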
actions/remove-cache-tier (symbolic link): 1 line
@@ -0,0 +1 @@
remove-cache-tier.py
actions/remove-cache-tier.py (new executable file): 41 lines
@@ -0,0 +1,41 @@
#!/usr/bin/python
from subprocess import CalledProcessError
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import Pool, pool_exists
from charmhelpers.core.hookenv import action_get, log, action_fail

__author__ = 'chris'


def delete_cache_tier():
    backer_pool = action_get("backer-pool")
    cache_pool = action_get("cache-pool")

    # Pre flight checks
    if not pool_exists('admin', backer_pool):
        log("Backer pool {} must exist before calling this".format(
            backer_pool))
        action_fail("remove-cache-tier failed. Backer pool {} must exist "
                    "before calling this".format(backer_pool))
        return  # action_fail() does not abort the script, so stop explicitly

    if not pool_exists('admin', cache_pool):
        log("Cache pool {} must exist before calling this".format(
            cache_pool))
        action_fail("remove-cache-tier failed. Cache pool {} must exist "
                    "before calling this".format(cache_pool))
        return

    pool = Pool(service='admin', name=backer_pool)
    try:
        pool.remove_cache_tier(cache_pool=cache_pool)
    except CalledProcessError as err:
        log("Removing the cache tier failed with message: {}".format(
            err.message))
        action_fail("remove-cache-tier failed. Removing the cache tier failed "
                    "with message: {}".format(err.message))


if __name__ == '__main__':
    delete_cache_tier()
@@ -163,7 +163,7 @@ class Pool(object):
         :return: None
         """
         # read-only is easy, writeback is much harder
-        mode = get_cache_mode(cache_pool)
+        mode = get_cache_mode(self.service, cache_pool)
         if mode == 'readonly':
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -171,7 +171,7 @@ class Pool(object):
         elif mode == 'writeback':
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'forward'])
             # Flush the cache and wait for it to return
-            check_call(['ceph', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
+            check_call(['rados', '--id', self.service, '-p', cache_pool, 'cache-flush-evict-all'])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
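The first hunk above also fixes the call to `get_cache_mode` so the Ceph client id is passed alongside the cache pool name. `get_cache_mode` itself is not shown in this diff; conceptually it asks Ceph which caching mode, if any, the pool currently has, roughly along these lines (illustrative sketch, not the charmhelpers source):

# Illustrative sketch of a get_cache_mode(service, pool_name) helper.
import json
from subprocess import check_output


def get_cache_mode_sketch(service, pool_name):
    out = check_output(['ceph', '--id', service, 'osd', 'dump',
                        '--format=json'])
    for pool in json.loads(out)['pools']:
        if pool['pool_name'] == pool_name:
            return pool.get('cache_mode')
    return None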
@@ -1,6 +1,7 @@
 #!/usr/bin/python

 import amulet
+import re
 import time
 from charmhelpers.contrib.openstack.amulet.deployment import (
     OpenStackAmuletDeployment
@@ -9,7 +10,7 @@ from charmhelpers.contrib.openstack.amulet.utils import (  # noqa
     OpenStackAmuletUtils,
     DEBUG,
     # ERROR
-    )
+)

 # Use DEBUG to turn on debug logging
 u = OpenStackAmuletUtils(DEBUG)
@@ -457,6 +458,75 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
        if 'nodown' in output or 'noout' in output:
            amulet.raise_status(amulet.FAIL, msg="Still has noout,nodown")

    @staticmethod
    def find_pool(sentry_unit, pool_name):
        """
        This will do a ceph osd dump and search for the pool you specify
        :param sentry_unit: The unit to run this command from.
        :param pool_name: str. The name of the Ceph pool to query
        :return: str or None. The ceph osd dump line for the pool, or None
            if not found
        """
        output, dump_code = sentry_unit.run("ceph osd dump")
        if dump_code is not 0:
            amulet.raise_status(
                amulet.FAIL,
                msg="ceph osd dump failed with output: {}".format(
                    output))
        for line in output.split('\n'):
            match = re.search(r"pool\s+\d+\s+'(?P<pool_name>.*)'", line)
            if match:
                name = match.group('pool_name')
                if name == pool_name:
                    return line
        return None

    def test_403_cache_tier_actions(self):
        """Verify that cache tier add/remove works"""
        u.log.debug("Testing cache tiering")

        sentry_unit = self.ceph0_sentry
        # Create our backer pool
        output, code = sentry_unit.run("ceph osd pool create cold 128 128 ")
        if code is not 0:
            amulet.raise_status(
                amulet.FAIL,
                msg="ceph osd pool create cold failed with output: {}".format(
                    output))

        # Create our cache pool
        output, code = sentry_unit.run("ceph osd pool create hot 128 128 ")
        if code is not 0:
            amulet.raise_status(
                amulet.FAIL,
                msg="ceph osd pool create hot failed with output: {}".format(
                    output))

        action_id = u.run_action(sentry_unit,
                                 'create-cache-tier',
                                 params={
                                     'backer-pool': 'cold',
                                     'cache-pool': 'hot',
                                     'cache-mode': 'writeback'})
        assert u.wait_on_action(action_id), \
            "Create cache tier action failed."

        pool_line = self.find_pool(
            sentry_unit=sentry_unit,
            pool_name='hot')

        assert "cache_mode writeback" in pool_line, \
            "cache_mode writeback not found in cache pool"

        remove_action_id = u.run_action(sentry_unit,
                                        'remove-cache-tier',
                                        params={
                                            'backer-pool': 'cold',
                                            'cache-pool': 'hot'})
        assert u.wait_on_action(remove_action_id), \
            "Remove cache tier action failed"

        pool_line = self.find_pool(sentry_unit=sentry_unit, pool_name='hot')
        assert "cache_mode" not in pool_line, \
            "cache_mode is still enabled on cache pool"

    def test_410_ceph_cinder_vol_create(self):
        """Create and confirm a ceph-backed cinder volume, and inspect
        ceph cinder pool object count as the volume is created
@@ -592,5 +662,5 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)

-       # FYI: No restart check as ceph services do not restart
-       # when charm config changes, unless monitor count increases.
+       # FYI: No restart check as ceph services do not restart
+       # when charm config changes, unless monitor count increases.
@@ -781,16 +781,20 @@ class AmuletUtils(object):
         return '[{}-{}]'.format(uuid.uuid4(), time.time())

     # amulet juju action helpers:
-    def run_action(self, unit_sentry, action,
+    def run_action(self, unit_sentry, action, params=None,
                    _check_output=subprocess.check_output):
         """Run the named action on a given unit sentry.

+        params a dict of parameters to use
         _check_output parameter is used for dependency injection.

         @return action_id.
         """
         unit_id = unit_sentry.info["unit_name"]
         command = ["juju", "action", "do", "--format=json", unit_id, action]
+        if params is not None:
+            for key, value in params.iteritems():
+                command.append("{}={}".format(key, value))
         self.log.info("Running command: %s\n" % " ".join(command))
         output = _check_output(command, universal_newlines=True)
         data = json.loads(output)
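For reference, a minimal sketch of how the extended helper is intended to be used (the unit and pool names are placeholders; the pattern mirrors test_403_cache_tier_actions above):

# Placeholder usage; 'u' and 'sentry_unit' are set up as in the test above.
action_id = u.run_action(sentry_unit, 'create-cache-tier',
                         params={'backer-pool': 'cold',
                                 'cache-pool': 'hot',
                                 'cache-mode': 'writeback'})
# The params dict is expanded into key=value arguments, so the command run is
# roughly: juju action do --format=json ceph-mon/0 create-cache-tier \
#          backer-pool=cold cache-pool=hot cache-mode=writeback
assert u.wait_on_action(action_id), "create-cache-tier did not complete"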
tox.ini: 2 lines changed
@@ -19,7 +19,7 @@ deps = -r{toxinidir}/requirements.txt
 basepython = python2.7
 deps = -r{toxinidir}/requirements.txt
        -r{toxinidir}/test-requirements.txt
-commands = flake8 {posargs} hooks unit_tests tests
+commands = flake8 {posargs} actions hooks unit_tests tests
            charm proof

 [testenv:venv]