diff --git a/charmhelpers/contrib/openstack/audits/__init__.py b/charmhelpers/contrib/openstack/audits/__init__.py
new file mode 100644
index 00000000..9fde7b26
--- /dev/null
+++ b/charmhelpers/contrib/openstack/audits/__init__.py
@@ -0,0 +1,134 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OpenStack Security Audit code"""
+
+import collections
+from enum import Enum
+import traceback
+
+from charmhelpers.core.host import cmp_pkgrevno
+
+import charmhelpers.core.hookenv as hookenv
+
+
+class AuditType(Enum):
+    OpenStackSecurityGuide = 1
+
+
+_audits = {}
+
+Audit = collections.namedtuple('Audit', 'func filters')
+
+
+def audit(*args):
+    """Decorator to register an audit.
+
+    These are used to generate audits that can be run on a
+    deployed system that matches the given configuration
+
+    :param args: List of functions to filter tests against
+    :type args: List[Callable(Config)]
+    """
+    def wrapper(f):
+        test_name = f.__name__
+        if _audits.get(test_name):
+            raise RuntimeError(
+                "Test name '{}' used more than once"
+                .format(test_name))
+        non_callables = [fn for fn in args if not callable(fn)]
+        if non_callables:
+            raise RuntimeError(
+                "Configuration includes non-callable filters: {}"
+                .format(non_callables))
+        _audits[test_name] = Audit(func=f, filters=args)
+        return f
+    return wrapper
+
+
+def is_audit_type(*args):
+    """This audit is included in the specified kinds of audits."""
+    def should_run(audit_options):
+        if audit_options.get('audit_type') in args:
+            return True
+        else:
+            return False
+    return should_run
+
+
+def since_package(pkg, pkg_version):
+    """This audit should be run after the specified package version (incl)."""
+    return lambda audit_options=None: cmp_pkgrevno(pkg, pkg_version) >= 0
+
+
+def before_package(pkg, pkg_version):
+    """This audit should be run before the specified package version (excl)."""
+    return lambda audit_options=None: not since_package(pkg, pkg_version)()
+
+
+def it_has_config(config_key):
+    """This audit should be run based on specified config keys."""
+    return lambda audit_options: audit_options.get(config_key) is not None
+
+
+def run(audit_options):
+    """Run the configured audits with the specified audit_options.
+
+    :param audit_options: Configuration for the audit
+    :type audit_options: Config
+    """
+    errors = {}
+    results = {}
+    for name, audit in sorted(_audits.items()):
+        result_name = name.replace('_', '-')
+        if all(p(audit_options) for p in audit.filters):
+            try:
+                audit.func(audit_options)
+                print("{}: PASS".format(name))
+                results[result_name] = {
+                    'success': True,
+                }
+            except AssertionError as e:
+                print("{}: FAIL ({})".format(name, e))
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+            except Exception as e:
+                print("{}: ERROR ({})".format(name, e))
+                errors[name] = e
+                results[result_name] = {
+                    'success': False,
+                    'message': e,
+                }
+    for name, error in errors.items():
+        print("=" * 20)
+        print("Error in {}: ".format(name))
+        traceback.print_tb(error.__traceback__)
+        print()
+    return results
+
+
+def action_parse_results(result):
+    """Parse the result of `run` in the context of an action."""
+    passed = True
+    for test, result in result.items():
+        if result['success']:
+            hookenv.action_set({test: 'PASS'})
+        else:
+            hookenv.action_set({test: 'FAIL - {}'.format(result['message'])})
+            passed = False
+    if not passed:
+        hookenv.action_fail("One or more tests failed")
+    return 0 if passed else 1
diff --git a/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
new file mode 100644
index 00000000..ba5e2486
--- /dev/null
+++ b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py
@@ -0,0 +1,303 @@
+# Copyright 2019 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import configparser
+import glob
+import os.path
+import subprocess
+
+from charmhelpers.contrib.openstack.audits import (
+    audit,
+    AuditType,
+    # filters
+    is_audit_type,
+    it_has_config,
+)
+
+from charmhelpers.core.hookenv import (
+    cached,
+)
+
+
+FILE_ASSERTIONS = {
+    'barbican': {
+        # From security guide
+        '/etc/barbican/barbican.conf': {'group': 'barbican', 'mode': '640'},
+        '/etc/barbican/barbican-api-paste.ini':
+            {'group': 'barbican', 'mode': '640'},
+        '/etc/barbican/policy.json': {'group': 'barbican', 'mode': '640'},
+    },
+    'ceph-mon': {
+        '/var/lib/charm/ceph-mon/ceph.conf':
+            {'owner': 'root', 'group': 'root', 'mode': '644'},
+        '/etc/ceph/ceph.client.admin.keyring':
+            {'owner': 'ceph', 'group': 'ceph'},
+        '/etc/ceph/rbdmap': {'mode': '644'},
+        '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
+        '/var/lib/ceph/bootstrap-*/ceph.keyring':
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '600'}
+    },
+    'ceph-osd': {
+        '/var/lib/charm/ceph-osd/ceph.conf':
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '644'},
+        '/var/lib/ceph': {'owner': 'ceph', 'group': 'ceph', 'mode': '750'},
+        '/var/lib/ceph/*': {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
+        '/var/lib/ceph/bootstrap-*/ceph.keyring':
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '600'},
+        '/var/lib/ceph/radosgw':
+            {'owner': 'ceph', 'group': 'ceph', 'mode': '755'},
+    },
+    'cinder': {
+        # From security guide
+        '/etc/cinder/cinder.conf': {'group': 'cinder', 'mode': '640'},
+        '/etc/cinder/api-paste.conf': {'group': 'cinder', 'mode': '640'},
+        '/etc/cinder/rootwrap.conf': {'group': 'cinder', 'mode': '640'},
+    },
+    'glance': {
+        # From security guide
+        '/etc/glance/glance-api-paste.ini': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-api.conf': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-cache.conf': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-manage.conf': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-registry-paste.ini':
+            {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-registry.conf': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-scrubber.conf': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/glance-swift-store.conf':
+            {'group': 'glance', 'mode': '640'},
+        '/etc/glance/policy.json': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/schema-image.json': {'group': 'glance', 'mode': '640'},
+        '/etc/glance/schema.json': {'group': 'glance', 'mode': '640'},
+    },
+    'keystone': {
+        # From security guide
+        '/etc/keystone/keystone.conf': {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone/keystone-paste.ini':
+            {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone/policy.json': {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone/logging.conf': {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone/ssl/certs/signing_cert.pem':
+            {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone/ssl/private/signing_key.pem':
+            {'group': 'keystone', 'mode': '640'},
+        '/etc/keystone/ssl/certs/ca.pem': {'group': 'keystone', 'mode': '640'},
+    },
+    'manilla': {
+        # From security guide
+        '/etc/manila/manila.conf': {'group': 'manilla', 'mode': '640'},
+        '/etc/manila/api-paste.ini': {'group': 'manilla', 'mode': '640'},
+        '/etc/manila/policy.json': {'group': 'manilla', 'mode': '640'},
+        '/etc/manila/rootwrap.conf': {'group': 'manilla', 'mode': '640'},
+    },
+    'neutron-gateway': {
+        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
+        '/etc/neutron/rootwrap.conf': {'mode': '640'},
+        '/etc/neutron/rootwrap.d': {'mode': '755'},
+        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
+    },
+    'neutron-api': {
+        # From security guide
+        '/etc/neutron/neutron.conf': {'group': 'neutron', 'mode': '640'},
+        '/etc/nova/api-paste.ini': {'group': 'neutron', 'mode': '640'},
+        '/etc/neutron/rootwrap.conf': {'group': 'neutron', 'mode': '640'},
+        # Additional validations
+        '/etc/neutron/rootwrap.d': {'mode': '755'},
+        '/etc/neutron/neutron_lbaas.conf': {'mode': '644'},
+        '/etc/neutron/neutron_vpnaas.conf': {'mode': '644'},
+        '/etc/neutron/*': {'group': 'neutron', 'mode': '644'},
+    },
+    'nova-cloud-controller': {
+        # From security guide
+        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '750'},
+        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
+        # Additional validations
+        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
+    },
+    'nova-compute': {
+        # From security guide
+        '/etc/nova/nova.conf': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/api-paste.ini': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/rootwrap.conf': {'group': 'nova', 'mode': '640'},
+        # Additional Validations
+        '/etc/nova/nova-compute.conf': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/logging.conf': {'group': 'nova', 'mode': '640'},
+        '/etc/nova/nm.conf': {'mode': '644'},
+        '/etc/nova/*': {'group': 'nova', 'mode': '640'},
+    },
+    'openstack-dashboard': {
+        # From security guide
+        '/etc/openstack-dashboard/local_settings.py':
+            {'group': 'horizon', 'mode': '640'},
+    },
+}
+
+Ownership = collections.namedtuple('Ownership', 'owner group mode')
+
+
+@cached
+def _stat(file):
+    """
+    Get the Ownership information from a file.
+
+    :param file: The path to a file to stat
+    :type file: str
+    :returns: owner, group, and mode of the specified file
+    :rtype: Ownership
+    :raises subprocess.CalledProcessError: If the underlying stat fails
+    """
+    out = subprocess.check_output(
+        ['stat', '-c', '%U %G %a', file]).decode('utf-8')
+    return Ownership(*out.strip().split(' '))
+
+
+@cached
+def _config_ini(path):
+    """
+    Parse an ini file
+
+    :param path: The path to a file to parse
+    :type path: str
+    :returns: Configuration contained in path
+    :rtype: Dict
+    """
+    conf = configparser.ConfigParser()
+    conf.read(path)
+    return dict(conf)
+
+
+def _validate_file_ownership(owner, group, file_name):
+    """
+    Validate that a specified file is owned by `owner:group`.
+
+    :param owner: Name of the owner
+    :type owner: str
+    :param group: Name of the group
+    :type group: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        assert False, "Specified file does not exist: {}".format(file_name)
+    assert owner == ownership.owner, \
+        "{} has an incorrect owner: {} should be {}".format(
+            file_name, ownership.owner, owner)
+    assert group == ownership.group, \
+        "{} has an incorrect group: {} should be {}".format(
+            file_name, ownership.group, group)
+    print("Validate ownership of {}: PASS".format(file_name))
+
+
+def _validate_file_mode(mode, file_name):
+    """
+    Validate that a specified file has the specified permissions.
+
+    :param mode: The desired file mode
+    :type mode: str
+    :param file_name: Path to the file to verify
+    :type file_name: str
+    """
+    try:
+        ownership = _stat(file_name)
+    except subprocess.CalledProcessError as e:
+        print("Error reading file: {}".format(e))
+        assert False, "Specified file does not exist: {}".format(file_name)
+    assert mode == ownership.mode, \
+        "{} has an incorrect mode: {} should be {}".format(
+            file_name, ownership.mode, mode)
+    print("Validate mode of {}: PASS".format(file_name))
+
+
+@cached
+def _config_section(config, section):
+    """Read the configuration file and return a section."""
+    path = os.path.join(config.get('config_path'), config.get('config_file'))
+    conf = _config_ini(path)
+    return conf.get(section)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_ownership(config):
+    """Verify that configuration files are owned by the correct user/group."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        owner = options.get('owner', config.get('owner', 'root'))
+        group = options.get('group', config.get('group', 'root'))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_ownership(owner, group, file)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_ownership(owner, group, file_name)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide),
+       it_has_config('files'))
+def validate_file_permissions(config):
+    """Verify that permissions on configuration files are secure enough."""
+    files = config.get('files', {})
+    for file_name, options in files.items():
+        for key in options.keys():
+            if key not in ["owner", "group", "mode"]:
+                raise RuntimeError(
+                    "Invalid ownership configuration: {}".format(key))
+        mode = options.get('mode', config.get('permissions', '600'))
+        if '*' in file_name:
+            for file in glob.glob(file_name):
+                if file not in files.keys():
+                    if os.path.isfile(file):
+                        _validate_file_mode(mode, file)
+        else:
+            if os.path.isfile(file_name):
+                _validate_file_mode(mode, file_name)
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_keystone(audit_options):
+    """Validate that the service uses Keystone for authentication."""
+    section = _config_section(audit_options, 'DEFAULT')
+    assert section is not None, "Missing section 'DEFAULT'"
+    assert section.get('auth_strategy') == "keystone", \
+        "Application is not using Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_keystone(audit_options):
+    """Verify that TLS is used to communicate with Keystone."""
+    section = _config_section(audit_options, 'keystone_authtoken')
+    assert section is not None, "Missing section 'keystone_authtoken'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("auth_uri"), \
+        "TLS is not used for Keystone"
+
+
+@audit(is_audit_type(AuditType.OpenStackSecurityGuide))
+def validate_uses_tls_for_glance(audit_options):
+    """Verify that TLS is used to communicate with Glance."""
+    section = _config_section(audit_options, 'glance')
+    assert section is not None, "Missing section 'glance'"
+    assert not section.get('insecure') and \
+        "https://" in section.get("api_servers"), \
+        "TLS is not used for Glance"
diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py
index 78a339f6..fc634cc6 100644
--- a/charmhelpers/contrib/openstack/context.py
+++ b/charmhelpers/contrib/openstack/context.py
@@ -29,6 +29,7 @@ from charmhelpers.fetch import (
     filter_installed_packages,
 )
 from charmhelpers.core.hookenv import (
+    NoNetworkBinding,
     config,
     is_relation_made,
     local_unit,
@@ -868,7 +869,7 @@ class ApacheSSLContext(OSContextGenerator):
                     addr = network_get_primary_address(
                         ADDRESS_MAP[net_type]['binding']
                     )
-                except NotImplementedError:
+                except (NotImplementedError, NoNetworkBinding):
                     addr = fallback
 
             endpoint = resolve_address(net_type)
diff --git a/charmhelpers/contrib/openstack/ip.py b/charmhelpers/contrib/openstack/ip.py
index 73102af7..df83b91b 100644
--- a/charmhelpers/contrib/openstack/ip.py
+++ b/charmhelpers/contrib/openstack/ip.py
@@ -13,6 +13,7 @@
 # limitations under the License.
 
 from charmhelpers.core.hookenv import (
+    NoNetworkBinding,
     config,
     unit_get,
     service_name,
@@ -175,7 +176,7 @@ def resolve_address(endpoint_type=PUBLIC, override=True):
             #       configuration is not in use
             try:
                 resolved_address = network_get_primary_address(binding)
-            except NotImplementedError:
+            except (NotImplementedError, NoNetworkBinding):
                 resolved_address = fallback_addr
 
     if resolved_address is None:
diff --git a/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit b/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit
new file mode 100644
index 00000000..bed2216a
--- /dev/null
+++ b/charmhelpers/contrib/openstack/templates/section-oslo-messaging-rabbit
@@ -0,0 +1,10 @@
+[oslo_messaging_rabbit]
+{% if rabbitmq_ha_queues -%}
+rabbit_ha_queues = True
+{% endif -%}
+{% if rabbit_ssl_port -%}
+ssl = True
+{% endif -%}
+{% if rabbit_ssl_ca -%}
+ssl_ca_file = {{ rabbit_ssl_ca }}
+{% endif -%}
diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py
index 63c93044..22aa978b 100644
--- a/charmhelpers/contrib/storage/linux/ceph.py
+++ b/charmhelpers/contrib/storage/linux/ceph.py
@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
     service_stop,
     service_running,
     umount,
+    cmp_pkgrevno,
 )
 from charmhelpers.fetch import (
     apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
         """
         # read-only is easy, writeback is much harder
         mode = get_cache_mode(self.service, cache_pool)
-        version = ceph_version()
         if mode == 'readonly':
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@ class Pool(object):
         elif mode == 'writeback':
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
                                 'cache-mode', cache_pool, 'forward']
-            if version >= '10.1':
+            if cmp_pkgrevno('ceph', '10.1') >= 0:
                 # Jewel added a mandatory flag
                 pool_forward_cmd.append('--yes-i-really-mean-it')
 
@@ -196,7 +196,8 @@ class Pool(object):
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
 
-    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
+    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
+                device_class=None):
         """Return the number of placement groups to use when creating the pool.
 
         Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
             increased. NOTE: the default is primarily to handle the scenario
             where related charms requiring pools has not been upgraded to
             include an update to indicate their relative usage of the pools.
+        :param device_class: str. class of storage to use for basis of pgs
+            calculation; ceph supports nvme, ssd and hdd by default based
+            on presence of devices of each type in the deployment.
         :return: int.  The number of pgs to use.
         """
 
@@ -243,17 +247,20 @@ class Pool(object):
 
         # If the expected-osd-count is specified, then use the max between
         # the expected-osd-count and the actual osd_count
-        osd_list = get_osds(self.service)
+        osd_list = get_osds(self.service, device_class)
         expected = config('expected-osd-count') or 0
 
         if osd_list:
-            osd_count = max(expected, len(osd_list))
+            if device_class:
+                osd_count = len(osd_list)
+            else:
+                osd_count = max(expected, len(osd_list))
 
             # Log a message to provide some insight if the calculations claim
             # to be off because someone is setting the expected count and
             # there are more OSDs in reality. Try to make a proper guess
             # based upon the cluster itself.
-            if expected and osd_count != expected:
+            if not device_class and expected and osd_count != expected:
                 log("Found more OSDs than provided expected count. "
                     "Using the actual count instead", INFO)
         elif expected:
@@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name):
 def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
                            failure_domain='host',
                            data_chunks=2, coding_chunks=1,
-                           locality=None, durability_estimator=None):
+                           locality=None, durability_estimator=None,
+                           device_class=None):
     """
     Create a new erasure code profile if one does not already exist for it.  Updates
     the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param coding_chunks: int
     :param locality: int
     :param durability_estimator: int
+    :param device_class: six.string_types
     :return: None.  Can raise CalledProcessError
     """
-    version = ceph_version()
-
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")
 
+    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
     # failure_domain changed in luminous
-    if version and version >= '12.0.0':
+    if luminous_or_later:
         cmd.append('crush-failure-domain=' + failure_domain)
     else:
         cmd.append('ruleset-failure-domain=' + failure_domain)
 
+    # device class new in luminous
+    if luminous_or_later and device_class:
+        cmd.append('crush-device-class={}'.format(device_class))
+    else:
+        log('Skipping device class configuration (ceph < 12.0.0)',
+            level=DEBUG)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
@@ -744,20 +759,26 @@ def pool_exists(service, name):
     return name in out.split()
 
 
-def get_osds(service):
+def get_osds(service, device_class=None):
     """Return a list of all Ceph Object Storage Daemons currently in the
-    cluster.
+    cluster (optionally filtered by storage device class).
+
+    :param device_class: Class of storage device for OSDs
+    :type device_class: str
     """
-    version = ceph_version()
-    if version and version >= '0.56':
+    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    if luminous_or_later and device_class:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'crush', 'class',
+                            'ls-osd', device_class,
+                            '--format=json'])
+    else:
         out = check_output(['ceph', '--id', service,
                             'osd', 'ls',
                             '--format=json'])
-        if six.PY3:
-            out = out.decode('UTF-8')
-        return json.loads(out)
-
-    return None
+    if six.PY3:
+        out = out.decode('UTF-8')
+    return json.loads(out)
 
 
 def install():
@@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name):
 
     :raises: CalledProcessError if ceph call fails
     """
-    if ceph_version() >= '12.0.0':
+    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
@@ -1091,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
     return True
 
 
-def ceph_version():
-    """Retrieve the local version of ceph."""
-    if os.path.exists('/usr/bin/ceph'):
-        cmd = ['ceph', '-v']
-        output = check_output(cmd)
-        if six.PY3:
-            output = output.decode('UTF-8')
-        output = output.split()
-        if len(output) > 3:
-            return output[2]
-        else:
-            return None
-    else:
-        return None
-
-
 class CephBrokerRq(object):
     """Ceph broker request.
 
@@ -1147,7 +1152,8 @@ class CephBrokerRq(object):
             'object-prefix-permissions': object_prefix_permissions})
 
     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
-                           weight=None, group=None, namespace=None):
+                           weight=None, group=None, namespace=None,
+                           app_name=None):
         """Adds an operation to create a pool.
 
         @param pg_num setting:  optional setting. If not provided, this value
@@ -1155,6 +1161,11 @@ class CephBrokerRq(object):
         cluster at the time of creation. Note that, if provided, this value
         will be capped at the current available maximum.
         @param weight: the percentage of data the pool makes up
+        :param app_name: (Optional) Tag pool with application name.  Note that
+                         there are certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1173,7 @@ class CephBrokerRq(object):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace})
+                         'group-namespace': namespace, 'app-name': app_name})
 
     def set_ops(self, ops):
         """Set request ops to provided value.
diff --git a/templates/ocata/glance-api.conf b/templates/ocata/glance-api.conf
new file mode 100644
index 00000000..fbbb545f
--- /dev/null
+++ b/templates/ocata/glance-api.conf
@@ -0,0 +1,103 @@
+[DEFAULT]
+verbose = {{ verbose }}
+use_syslog = {{ use_syslog }}
+debug = {{ debug }}
+workers = {{ workers }}
+bind_host = {{ bind_host }}
+
+{% if ext -%}
+bind_port = {{ ext }}
+{% elif bind_port -%}
+bind_port = {{ bind_port }}
+{% else -%}
+bind_port = 9292
+{% endif -%}
+
+{% if transport_url %}
+transport_url = {{ transport_url }}
+{% endif %}
+
+log_file = /var/log/glance/api.log
+backlog = 4096
+
+registry_host = {{ registry_host }}
+registry_port = 9191
+registry_client_protocol = http
+
+{% if expose_image_locations -%}
+show_multiple_locations = {{ expose_image_locations }}
+show_image_direct_url = {{ expose_image_locations }}
+{% endif -%}
+
+{% if api_config_flags -%}
+{% for key, value in api_config_flags.items() -%}
+{{ key }} = {{ value }}
+{% endfor -%}
+{% endif -%}
+
+delayed_delete = False
+scrub_time = 43200
+scrubber_datadir = /var/lib/glance/scrubber
+image_cache_dir = /var/lib/glance/image-cache/
+db_enforce_mysql_charset = False
+
+{% if image_size_cap -%}
+image_size_cap = {{ image_size_cap }}
+{% endif -%}
+
+[glance_store]
+{%- if use_internal_endpoints %}
+catalog_info = {{ volume_catalog_info }}
+{%- endif %}
+
+filesystem_store_datadir = {{ filesystem_store_datadir }}
+
+stores = {{ known_stores }}
+{% if rbd_pool -%}
+default_store = rbd
+{% elif swift_store -%}
+default_store = swift
+{% elif cinder_store -%}
+default_store = cinder
+{% else -%}
+default_store = file
+{% endif -%}
+
+{% if swift_store -%}
+default_swift_reference = swift
+swift_store_config_file = /etc/glance/glance-swift.conf
+swift_store_create_container_on_put = true
+{% endif -%}
+
+{% if rbd_pool -%}
+rbd_store_ceph_conf = /etc/ceph/ceph.conf
+rbd_store_user = {{ rbd_user }}
+rbd_store_pool = {{ rbd_pool }}
+rbd_store_chunk_size = 8
+{% endif -%}
+
+[image_format]
+disk_formats = {{ disk_formats }}
+{% if container_formats -%}
+container_formats = {{ container_formats }}
+{% endif -%}
+
+{% include "section-keystone-authtoken-mitaka" %}
+
+{% if auth_host -%}
+[paste_deploy]
+flavor = keystone
+{% endif %}
+
+[barbican]
+auth_endpoint = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
+
+{% include "parts/section-database" %}
+
+{% include "section-oslo-messaging-rabbit" %}
+
+{% include "section-oslo-notifications" %}
+
+{% include "section-oslo-middleware" %}
+
+{% include "parts/section-storage" %}
diff --git a/tests/basic_deployment.py b/tests/basic_deployment.py
index 54114089..4816d08b 100644
--- a/tests/basic_deployment.py
+++ b/tests/basic_deployment.py
@@ -371,97 +371,6 @@ class GlanceBasicDeployment(OpenStackAmuletDeployment):
             message = u.relation_error('glance amqp', ret)
             amulet.raise_status(amulet.FAIL, msg=message)
 
-    def test_300_glance_api_default_config(self):
-        """Verify default section configs in glance-api.conf and
-           compare some of the parameters to relation data."""
-        u.log.debug('Checking glance api config file...')
-        unit = self.glance_sentry
-        rel_mq_gl = self.rabbitmq_sentry.relation('amqp', 'glance:amqp')
-        rel_my_gl = self.pxc_sentry.relation('shared-db', 'glance:shared-db')
-        if self._get_openstack_release() < self.bionic_stein:
-            dialect = 'mysql'
-        else:
-            dialect = 'mysql+pymysql'
-        db_uri = "{}://{}:{}@{}/{}".format(dialect,
-                                           'glance',
-                                           rel_my_gl['password'],
-                                           rel_my_gl['db_host'],
-                                           'glance')
-        conf = '/etc/glance/glance-api.conf'
-        expected = {
-            'DEFAULT': {
-                'debug': 'False',
-                'verbose': 'False',
-                'use_syslog': 'False',
-                'log_file': '/var/log/glance/api.log',
-                'bind_host': '0.0.0.0',
-                'bind_port': '9282',
-                'registry_host': '0.0.0.0',
-                'registry_port': '9191',
-                'registry_client_protocol': 'http',
-                'delayed_delete': 'False',
-                'scrub_time': '43200',
-                'notification_driver': 'rabbit',
-                'scrubber_datadir': '/var/lib/glance/scrubber',
-                'image_cache_dir': '/var/lib/glance/image-cache/',
-                'db_enforce_mysql_charset': 'False'
-            },
-        }
-
-        if self._get_openstack_release() >= self.trusty_kilo:
-            # Kilo or later
-            expected['oslo_messaging_rabbit'] = {
-                'rabbit_userid': 'glance',
-                'rabbit_virtual_host': 'openstack',
-                'rabbit_password': rel_mq_gl['password'],
-                'rabbit_host': rel_mq_gl['hostname']
-            }
-            expected['glance_store'] = {
-                'filesystem_store_datadir': '/var/lib/glance/images/',
-                'stores': 'glance.store.filesystem.'
-                          'Store,glance.store.http.Store',
-                'default_store': 'file'
-            }
-            expected['database'] = {
-                'idle_timeout': '3600',
-                'connection': db_uri
-            }
-
-            if self._get_openstack_release() >= self.trusty_mitaka:
-                del expected['DEFAULT']['notification_driver']
-                connection_uri = (
-                    "rabbit://glance:{}@{}:5672/"
-                    "openstack".format(rel_mq_gl['password'],
-                                       rel_mq_gl['hostname'])
-                )
-                expected['oslo_messaging_notifications'] = {
-                    'driver': 'messagingv2',
-                    'transport_url': connection_uri
-                }
-            else:
-                expected['DEFAULT']['notification_driver'] = 'messagingv2'
-
-        else:
-            # Juno or earlier
-            expected['DEFAULT'].update({
-                'rabbit_userid': 'glance',
-                'rabbit_virtual_host': 'openstack',
-                'rabbit_password': rel_mq_gl['password'],
-                'rabbit_host': rel_mq_gl['hostname'],
-                'filesystem_store_datadir': '/var/lib/glance/images/',
-                'default_store': 'file',
-            })
-            expected['database'] = {
-                'sql_idle_timeout': '3600',
-                'connection': db_uri
-            }
-
-        for section, pairs in expected.iteritems():
-            ret = u.validate_config_data(unit, conf, section, pairs)
-            if ret:
-                message = "glance api config error: {}".format(ret)
-                amulet.raise_status(amulet.FAIL, msg=message)
-
     def test_302_glance_registry_default_config(self):
         """Verify configs in glance-registry.conf"""
         u.log.debug('Checking glance registry config file...')