Latest charmhelpers audit update fixes file permission check
Change-Id: I5c96f030a74c1aca83eadf1c12944c3fd2fd2ab5
parent 25ec688145
commit b3ffdaa3f7
--- charmhelpers/cli/unitdata.py
+++ charmhelpers/cli/unitdata.py
@@ -19,9 +19,16 @@ from charmhelpers.core import unitdata
 @cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
 def unitdata_cmd(subparser):
     nested = subparser.add_subparsers()
+
     get_cmd = nested.add_parser('get', help='Retrieve data')
     get_cmd.add_argument('key', help='Key to retrieve the value of')
     get_cmd.set_defaults(action='get', value=None)
+
+    getrange_cmd = nested.add_parser('getrange', help='Retrieve multiple data')
+    getrange_cmd.add_argument('key', metavar='prefix',
+                              help='Prefix of the keys to retrieve')
+    getrange_cmd.set_defaults(action='getrange', value=None)
+
     set_cmd = nested.add_parser('set', help='Store data')
     set_cmd.add_argument('key', help='Key to set')
     set_cmd.add_argument('value', help='Value to store')
@@ -30,6 +37,8 @@ def unitdata_cmd(subparser):
 def _unitdata_cmd(action, key, value):
     if action == 'get':
         return unitdata.kv().get(key)
+    elif action == 'getrange':
+        return unitdata.kv().getrange(key)
     elif action == 'set':
         unitdata.kv().set(key, value)
         unitdata.kv().flush()
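Note (illustration, not part of the diff): the new `getrange` action is a thin wrapper over the kv store's prefix query. A minimal sketch, assuming a charm environment where `charmhelpers` is importable; the keys are invented:

```python
# Sketch: exercising the kv getrange() that backs the new CLI action.
from charmhelpers.core import unitdata

kv = unitdata.kv()
kv.set('config.vip', '10.0.0.10')
kv.set('config.port', 8080)
kv.flush()

# 'get' returns a single value; 'getrange' returns every key under a prefix.
print(kv.get('config.vip'))    # 10.0.0.10
print(kv.getrange('config.'))  # {'config.vip': '10.0.0.10', 'config.port': 8080}
```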
--- charmhelpers/contrib/openstack/audits/openstack_security_guide.py
+++ charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -30,14 +30,20 @@ from charmhelpers.core.hookenv import (
     cached,
 )
 
+"""
+The Security Guide suggests a specific list of files inside the
+config directory for the service having 640 specifically, but
+by ensuring the containing directory is 750, only the owner can
+write, and only the group can read files within the directory.
+
+By restricting access to the containing directory, we can more
+effectively ensure that there is no accidental leakage if a new
+file is added to the service without being added to the security
+guide, and to this check.
+"""
 FILE_ASSERTIONS = {
     'barbican': {
-        # From security guide
-        '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
-        '/etc/barbican/barbican-api-paste.ini':
-            {'group': 'barbican', 'mode': '640'},
-        '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
+        '/etc/barbican': {'group': 'barbican', 'mode': '750'},
     },
     'ceph-mon': {
         '/var/lib/charm/ceph-mon/ceph.conf':
@@ -60,82 +66,29 @@ FILE_ASSERTIONS = {
         {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
     },
     'cinder': {
-        # From security guide
-        '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
-        '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
-        '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
+        '/etc/cinder': {'group': 'cinder', 'mode': '750'},
     },
     'glance': {
-        # From security guide
-        '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-registry-paste.ini':
-            {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/glance-swift-store.conf':
-            {'group': 'glance', 'mode': '640'},
-        '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
-        '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
+        '/etc/glance': {'group': 'glance', 'mode': '750'},
     },
     'keystone': {
-        # From security guide
-        '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/keystone-paste.ini':
-            {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/ssl/certs/signing_cert.pem':
-            {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/ssl/private/signing_key.pem':
-            {'group': 'keystone', 'mode': '640'},
-        '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone':
+            {'owner': 'keystone', 'group': 'keystone', 'mode': '750'},
     },
     'manilla': {
-        # From security guide
-        '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'},
-        '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'},
-        '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'},
-        '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'},
+        '/etc/manila': {'group': 'manilla', 'mode': '750'},
     },
     'neutron-gateway': {
-        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
-        '/etc/neutron/rootwrap.conf': {'mode': '640'},
-        '/etc/neutron/rootwrap.d': {'mode': '755'},
-        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
+        '/etc/neutron': {'group': 'neutron', 'mode': '750'},
     },
     'neutron-api': {
-        # From security guide
-        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
-        '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
-        '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
-        # Additional validations
-        '/etc/neutron/rootwrap.d': {'mode': '755'},
-        '/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
-        '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
-        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
+        '/etc/neutron/': {'group': 'neutron', 'mode': '750'},
     },
     'nova-cloud-controller': {
-        # From security guide
-        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
-        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
-        # Additional validations
-        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
+        '/etc/nova': {'group': 'nova', 'mode': '750'},
     },
     'nova-compute': {
-        # From security guide
-        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
-        # Additional Validations
-        '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
-        '/etc/nova/nm.conf': {'mode': '644'},
-        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/': {'group': 'nova', 'mode': '750'},
     },
     'openstack-dashboard': {
         # From security guide
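Note (illustration, not part of the diff): the intent of the 750/640 split above is that the directory blocks traversal for "other" while files stay group-readable. A rough sketch of what the audit's mode comparison amounts to; the real code shells out to `stat` via its own `_stat()` helper, and `mode_string` plus the path are hypothetical here:

```python
# Illustration only: comparing an octal mode string against a path's mode.
import os
import stat

def mode_string(path):
    # Python 3: 0o750 -> '0o750' -> '750', matching FILE_ASSERTIONS notation
    return oct(stat.S_IMODE(os.stat(path).st_mode))[2:]

assert mode_string('/etc/barbican') == '750', 'directory should be 750'
```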
@@ -178,7 +131,7 @@ def _config_ini(path):
     return dict(conf)
 
 
-def _validate_file_ownership(owner, group, file_name):
+def _validate_file_ownership(owner, group, file_name, optional=False):
     """
     Validate that a specified file is owned by `owner:group`.
 
@@ -188,12 +141,16 @@ def _validate_file_ownership(owner, group, file_name):
     :type group: str
     :param file_name: Path to the file to verify
     :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
     """
     try:
         ownership = _stat(file_name)
     except subprocess.CalledProcessError as e:
         print("Error reading file: {}".format(e))
-        assert False, "Specified file does not exist: {}".format(file_name)
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
     assert owner == ownership.owner, \
         "{} has an incorrect owner: {} should be {}".format(
             file_name, ownership.owner, owner)
@@ -203,7 +160,7 @@ def _validate_file_ownership(owner, group, file_name):
     print("Validate ownership of {}: PASS".format(file_name))
 
 
-def _validate_file_mode(mode, file_name):
+def _validate_file_mode(mode, file_name, optional=False):
     """
     Validate that a specified file has the specified permissions.
 
@@ -211,12 +168,16 @@ def _validate_file_mode(mode, file_name):
     :type owner: str
     :param file_name: Path to the file to verify
     :type file_name: str
+    :param optional: Is this file optional,
+                     ie: Should this test fail when it's missing
+    :type optional: bool
     """
     try:
         ownership = _stat(file_name)
     except subprocess.CalledProcessError as e:
         print("Error reading file: {}".format(e))
-        assert False, "Specified file does not exist: {}".format(file_name)
+        if not optional:
+            assert False, "Specified file does not exist: {}".format(file_name)
     assert mode == ownership.mode, \
         "{} has an incorrect mode: {} should be {}".format(
             file_name, ownership.mode, mode)
@@ -243,14 +204,15 @@ def validate_file_ownership(config):
                 "Invalid ownership configuration: {}".format(key))
         owner = options.get('owner', config.get('owner', 'root'))
         group = options.get('group', config.get('group', 'root'))
+        optional = options.get('optional', config.get('optional', 'False'))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
                     if os.path.isfile(file):
-                        _validate_file_ownership(owner, group, file)
+                        _validate_file_ownership(owner, group, file, optional)
         else:
             if os.path.isfile(file_name):
-                _validate_file_ownership(owner, group, file_name)
+                _validate_file_ownership(owner, group, file_name, optional)
 
 
 @audit(is_audit_type(AuditType.OpenStackSecurityGuide),
@@ -264,14 +226,15 @@ def validate_file_permissions(config):
             raise RuntimeError(
                 "Invalid ownership configuration: {}".format(key))
         mode = options.get('mode', config.get('permissions', '600'))
+        optional = options.get('optional', config.get('optional', 'False'))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
                     if os.path.isfile(file):
-                        _validate_file_mode(mode, file)
+                        _validate_file_mode(mode, file, optional)
         else:
             if os.path.isfile(file_name):
-                _validate_file_mode(mode, file_name)
+                _validate_file_mode(mode, file_name, optional)
 
 
 @audit(is_audit_type(AuditType.OpenStackSecurityGuide))
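Note (illustration, not part of the diff): with the `optional` plumbing in place, an assertion entry can presumably opt out of the existence check. The shape below is an assumption inferred from how `options.get('optional', ...)` is read above, not an entry taken from this commit:

```python
# Hypothetical assertion entry: a file that may legitimately be absent.
# When present it must be mode 644; when missing, the audit passes because
# optional=True suppresses the "does not exist" assert.
FILE_ASSERTIONS_EXAMPLE = {
    'nova-compute': {
        '/etc/nova/nm.conf': {'mode': '644', 'optional': True},
    },
}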
--- charmhelpers/contrib/openstack/ip.py
+++ charmhelpers/contrib/openstack/ip.py
@@ -159,7 +159,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
             if is_address_in_network(bound_cidr, vip):
                 resolved_address = vip
                 break
-        except NotImplementedError:
+        except (NotImplementedError, NoNetworkBinding):
             # If no net-splits configured and no support for extra
             # bindings/network spaces so we expect a single vip
             resolved_address = vips[0]
--- charmhelpers/contrib/openstack/utils.py
+++ charmhelpers/contrib/openstack/utils.py
@@ -194,7 +194,7 @@ SWIFT_CODENAMES = OrderedDict([
     ('rocky',
      ['2.18.0', '2.19.0']),
     ('stein',
-     ['2.19.0']),
+     ['2.20.0']),
 ])
 
 # >= Liberty version->codename mapping
@@ -656,7 +656,7 @@ def openstack_upgrade_available(package):
     else:
         avail_vers = get_os_version_install_source(src)
     apt.init()
-    return apt.version_compare(avail_vers, cur_vers) == 1
+    return apt.version_compare(avail_vers, cur_vers) >= 1
 
 
 def ensure_block_device(block_device):
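Note (illustration, not part of the diff): context for the `== 1` to `>= 1` change is that python-apt's comparator follows C conventions and may return any positive integer when the first version is newer, so testing for exactly 1 could miss an available upgrade. A sketch, assuming python-apt is installed:

```python
# Sketch: apt_pkg.version_compare returns <0, 0, or >0 -- not strictly -1/0/1.
import apt_pkg

apt_pkg.init()
result = apt_pkg.version_compare('2:19.0.0', '2:18.1.0')
print(result > 0)   # True: first version is newer
print(result == 1)  # not guaranteed, which is why the check is now >= 1
```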
--- charmhelpers/contrib/storage/linux/ceph.py
+++ charmhelpers/contrib/storage/linux/ceph.py
@@ -186,7 +186,7 @@ class Pool(object):
         elif mode == 'writeback':
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
                                 'cache-mode', cache_pool, 'forward']
-            if cmp_pkgrevno('ceph', '10.1') >= 0:
+            if cmp_pkgrevno('ceph-common', '10.1') >= 0:
                 # Jewel added a mandatory flag
                 pool_forward_cmd.append('--yes-i-really-mean-it')
 
@@ -582,21 +582,24 @@ def remove_pool_snapshot(service, pool_name, snapshot_name):
         raise
 
 
-# max_bytes should be an int or long
-def set_pool_quota(service, pool_name, max_bytes):
+def set_pool_quota(service, pool_name, max_bytes=None, max_objects=None):
     """
-    :param service: six.string_types. The Ceph user name to run the command under
-    :param pool_name: six.string_types
-    :param max_bytes: int or long
-    :return: None.  Can raise CalledProcessError
+    :param service: The Ceph user name to run the command under
+    :type service: str
+    :param pool_name: Name of pool
+    :type pool_name: str
+    :param max_bytes: Maximum bytes quota to apply
+    :type max_bytes: int
+    :param max_objects: Maximum objects quota to apply
+    :type max_objects: int
+    :raises: subprocess.CalledProcessError
     """
-    # Set a byte quota on a RADOS pool in ceph.
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
-           'max_bytes', str(max_bytes)]
-    try:
-        check_call(cmd)
-    except CalledProcessError:
-        raise
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name]
+    if max_bytes:
+        cmd = cmd + ['max_bytes', str(max_bytes)]
+    if max_objects:
+        cmd = cmd + ['max_objects', str(max_objects)]
+    check_call(cmd)
 
 
 def remove_pool_quota(service, pool_name):
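Note (illustration, not part of the diff): usage of the reworked helper, where either or both quota dimensions can now be set in one call. Pool and user names are invented:

```python
# Sketch: applying byte and object quotas to a pool via the new signature.
from charmhelpers.contrib.storage.linux.ceph import set_pool_quota

set_pool_quota(service='admin', pool_name='mypool',
               max_bytes=10 * 1024 ** 3,  # 10 GiB
               max_objects=1000000)
```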
@@ -661,7 +664,7 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
-    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
     # failure_domain changed in luminous
     if luminous_or_later:
         cmd.append('crush-failure-domain=' + failure_domain)
@@ -766,7 +769,7 @@ def get_osds(service, device_class=None):
     :param device_class: Class of storage device for OSD's
     :type device_class: str
     """
-    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    luminous_or_later = cmp_pkgrevno('ceph-common', '12.0.0') >= 0
     if luminous_or_later and device_class:
         out = check_output(['ceph', '--id', service,
                             'osd', 'crush', 'class',
@@ -832,7 +835,7 @@ def set_app_name_for_pool(client, pool, name):
 
     :raises: CalledProcessError if ceph call fails
     """
-    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
+    if cmp_pkgrevno('ceph-common', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
@@ -1153,19 +1156,46 @@ class CephBrokerRq(object):
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
                            weight=None, group=None, namespace=None,
-                           app_name=None):
-        """Adds an operation to create a pool.
-
-        @param pg_num setting: optional setting. If not provided, this value
-        will be calculated by the broker based on how many OSDs are in the
-        cluster at the time of creation. Note that, if provided, this value
-        will be capped at the current available maximum.
-        @param weight: the percentage of data the pool makes up
+                           app_name=None, max_bytes=None, max_objects=None):
+        """DEPRECATED: Use ``add_op_create_replicated_pool()`` or
+        ``add_op_create_erasure_pool()`` instead.
+        """
+        return self.add_op_create_replicated_pool(
+            name, replica_count=replica_count, pg_num=pg_num, weight=weight,
+            group=group, namespace=namespace, app_name=app_name,
+            max_bytes=max_bytes, max_objects=max_objects)
+
+    def add_op_create_replicated_pool(self, name, replica_count=3, pg_num=None,
+                                      weight=None, group=None, namespace=None,
+                                      app_name=None, max_bytes=None,
+                                      max_objects=None):
+        """Adds an operation to create a replicated pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param replica_count: Number of copies Ceph should keep of your data.
+        :type replica_count: int
+        :param pg_num: Request specific number of Placement Groups to create
+                       for pool.
+        :type pg_num: int
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+                       Used to calculate number of Placement Groups to create
+                       for pool.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param namespace: Group namespace
+        :type namespace: str
         :param app_name: (Optional) Tag pool with application name. Note that
                          there is certain protocols emerging upstream with
                          regard to meaningful application names to use.
                          Examples are ``rbd`` and ``rgw``.
         :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1173,7 +1203,41 @@ class CephBrokerRq(object):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace, 'app-name': app_name})
+                         'group-namespace': namespace, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
+
+    def add_op_create_erasure_pool(self, name, erasure_profile=None,
+                                   weight=None, group=None, app_name=None,
+                                   max_bytes=None, max_objects=None):
+        """Adds an operation to create a erasure coded pool.
+
+        :param name: Name of pool to create
+        :type name: str
+        :param erasure_profile: Name of erasure code profile to use. If not
+                                set the ceph-mon unit handling the broker
+                                request will set its default value.
+        :type erasure_profile: str
+        :param weight: The percentage of data that is expected to be contained
+                       in the pool from the total available space on the OSDs.
+        :type weight: float
+        :param group: Group to add pool to
+        :type group: str
+        :param app_name: (Optional) Tag pool with application name. Note that
+                         there is certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
+        :param max_bytes: Maximum bytes quota to apply
+        :type max_bytes: int
+        :param max_objects: Maximum objects quota to apply
+        :type max_objects: int
+        """
+        self.ops.append({'op': 'create-pool', 'name': name,
+                         'pool-type': 'erasure',
+                         'erasure-profile': erasure_profile,
+                         'weight': weight,
+                         'group': group, 'app-name': app_name,
+                         'max-bytes': max_bytes, 'max-objects': max_objects})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
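Note (illustration, not part of the diff): a sketch of how a charm might use the two new request builders; pool and profile names are invented, and `add_op_create_pool()` keeps working but now just delegates:

```python
# Sketch: building a broker request with the new pool-creation ops.
from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

rq = CephBrokerRq()
rq.add_op_create_replicated_pool(name='glance', replica_count=3,
                                 app_name='rbd', max_bytes=100 * 1024 ** 3)
rq.add_op_create_erasure_pool(name='objects', erasure_profile='myprofile',
                              app_name='rgw', max_objects=5000000)
# rq.request is the JSON payload sent over the ceph client relation.
print(rq.request)
```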
--- charmhelpers/contrib/storage/linux/utils.py
+++ charmhelpers/contrib/storage/linux/utils.py
@@ -17,12 +17,53 @@ import re
 from stat import S_ISBLK
 
 from subprocess import (
+    CalledProcessError,
     check_call,
     check_output,
     call
 )
 
 
+def _luks_uuid(dev):
+    """
+    Check to see if dev is a LUKS encrypted volume, returning the UUID
+    of volume if it is.
+
+    :param: dev: path to block device to check.
+    :returns: str. UUID of LUKS device or None if not a LUKS device
+    """
+    try:
+        cmd = ['cryptsetup', 'luksUUID', dev]
+        return check_output(cmd).decode('UTF-8').strip()
+    except CalledProcessError:
+        return None
+
+
+def is_luks_device(dev):
+    """
+    Determine if dev is a LUKS-formatted block device.
+
+    :param: dev: A full path to a block device to check for LUKS header
+    presence
+    :returns: boolean: indicates whether a device is used based on LUKS header.
+    """
+    return True if _luks_uuid(dev) else False
+
+
+def is_mapped_luks_device(dev):
+    """
+    Determine if dev is a mapped LUKS device
+    :param: dev: A full path to a block device to be checked
+    :returns: boolean: indicates whether a device is mapped
+    """
+    _, dirs, _ = next(os.walk(
+        '/sys/class/block/{}/holders/'
+        .format(os.path.basename(os.path.realpath(dev))))
+    )
+    is_held = len(dirs) > 0
+    return is_held and is_luks_device(dev)
+
+
 def is_block_device(path):
     '''
     Confirm device at path is a valid block device node.
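Note (illustration, not part of the diff): the three helpers compose; `_luks_uuid()` shells out to cryptsetup, `is_luks_device()` tests that result, and `is_mapped_luks_device()` additionally checks the /sys holders directory. A usage sketch, with an invented device path and cryptsetup assumed installed:

```python
# Sketch: deciding whether a block device holds an unopened LUKS container.
from charmhelpers.contrib.storage.linux.utils import (
    is_luks_device,
    is_mapped_luks_device,
)

dev = '/dev/vdb'  # illustrative device
if is_luks_device(dev) and not is_mapped_luks_device(dev):
    print('{} has a LUKS header but is not open/mapped'.format(dev))
```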
--- charmhelpers/core/host.py
+++ charmhelpers/core/host.py
@@ -47,6 +47,7 @@ if __platform__ == "ubuntu":
         cmp_pkgrevno,
         CompareHostReleases,
         get_distrib_codename,
+        arch
     )  # flake8: noqa -- ignore F401 for this import
 elif __platform__ == "centos":
     from charmhelpers.core.host_factory.centos import (  # NOQA:F401
--- charmhelpers/core/host_factory/ubuntu.py
+++ charmhelpers/core/host_factory/ubuntu.py
@@ -1,5 +1,6 @@
 import subprocess
 
+from charmhelpers.core.hookenv import cached
 from charmhelpers.core.strutils import BasicStringComparator
 
 
@@ -97,3 +98,16 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
         pkgcache = apt_cache()
     pkg = pkgcache[package]
     return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@cached
+def arch():
+    """Return the package architecture as a string.
+
+    :returns: the architecture
+    :rtype: str
+    :raises: subprocess.CalledProcessError if dpkg command fails
+    """
+    return subprocess.check_output(
+        ['dpkg', '--print-architecture']
+    ).rstrip().decode('UTF-8')
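Note (illustration, not part of the diff): on Ubuntu the new helper is re-exported through `charmhelpers.core.host` (see the host.py hunk above) and `@cached` memoizes it per hook invocation. For example:

```python
# Sketch: consuming the new cached arch() helper.
from charmhelpers.core.host import arch

if arch() in ('amd64', 'arm64'):
    print('64-bit userspace: {}'.format(arch()))  # second call hits the cache
```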
--- charmhelpers/core/sysctl.py
+++ charmhelpers/core/sysctl.py
@@ -28,7 +28,7 @@ from charmhelpers.core.hookenv import (
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 
 
-def create(sysctl_dict, sysctl_file):
+def create(sysctl_dict, sysctl_file, ignore=False):
     """Creates a sysctl.conf file from a YAML associative array
 
     :param sysctl_dict: a dict or YAML-formatted string of sysctl
@@ -36,6 +36,8 @@ def create(sysctl_dict, sysctl_file):
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
+    :param ignore: If True, ignore "unknown variable" errors.
+    :type ignore: bool
     :returns: None
     """
     if type(sysctl_dict) is not dict:
@@ -52,7 +54,12 @@ def create(sysctl_dict, sysctl_file):
         for key, value in sysctl_dict_parsed.items():
             fd.write("{}={}\n".format(key, value))
 
-    log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
+    log("Updating sysctl_file: {} values: {}".format(sysctl_file,
+                                                     sysctl_dict_parsed),
         level=DEBUG)
 
-    check_call(["sysctl", "-p", sysctl_file])
+    call = ["sysctl", "-p", sysctl_file]
+    if ignore:
+        call.append("-e")
+
+    check_call(call)
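Note (illustration, not part of the diff): with `ignore=True` the helper appends `-e`, so `sysctl -p` skips keys unknown to the running kernel, which helps in containers where some keys are not exposed. An illustrative call, with an invented file path:

```python
# Sketch: writing and applying sysctl settings, tolerating unknown keys.
from charmhelpers.core.sysctl import create

create({'net.ipv4.ip_forward': 1, 'vm.swappiness': 10},
       '/etc/sysctl.d/50-mycharm.conf',  # illustrative path
       ignore=True)
```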
--- charmhelpers/fetch/ubuntu.py
+++ charmhelpers/fetch/ubuntu.py
@@ -20,10 +20,8 @@ import six
 import time
 import subprocess
 
-from charmhelpers.core.host import (
-    get_distrib_codename,
-    CompareHostReleases,
-)
+from charmhelpers.core.host import get_distrib_codename
+
 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
@@ -362,14 +360,8 @@ def _get_keyid_by_gpg_key(key_material):
     :returns: A GPG key fingerprint
     :rtype: str
     """
-    # trusty, xenial and bionic handling differs due to gpg 1.x to 2.x change
-    release = get_distrib_codename()
-    is_gpgv2_distro = CompareHostReleases(release) >= "bionic"
-    if is_gpgv2_distro:
-        # --import is mandatory, otherwise fingerprint is not printed
-        cmd = 'gpg --with-colons --import-options show-only --import --dry-run'
-    else:
-        cmd = 'gpg --with-colons --with-fingerprint'
+    # Use the same gpg command for both Xenial and Bionic
+    cmd = 'gpg --with-colons --with-fingerprint'
     ps = subprocess.Popen(cmd.split(),
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE,