diff --git a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
index c664c9d0..b01e6cb8 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/deployment.py
@@ -79,9 +79,9 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         services.append(this_service)
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                       'ceph-osd', 'ceph-radosgw']
-        # Openstack subordinate charms do not expose an origin option as that
-        # is controlled by the principle
-        ignore = ['neutron-openvswitch']
+        # Most OpenStack subordinate charms do not expose an origin option
+        # as that is controlled by the principal charm.
+        ignore = ['cinder-ceph', 'hacluster', 'neutron-openvswitch']
 
         if self.openstack:
             for svc in services:
@@ -148,3 +148,36 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             return os_origin.split('%s-' % self.series)[1].split('/')[0]
         else:
             return releases[self.series]
+
+    def get_ceph_expected_pools(self, radosgw=False):
+        """Return a list of expected ceph pools in a ceph + cinder + glance
+        test scenario, based on OpenStack release and whether ceph radosgw
+        is flagged as present or not."""
+
+        if self._get_openstack_release() >= self.trusty_kilo:
+            # Kilo or later
+            pools = [
+                'rbd',
+                'cinder',
+                'glance'
+            ]
+        else:
+            # Juno or earlier
+            pools = [
+                'data',
+                'metadata',
+                'rbd',
+                'cinder',
+                'glance'
+            ]
+
+        if radosgw:
+            pools.extend([
+                '.rgw.root',
+                '.rgw.control',
+                '.rgw',
+                '.rgw.gc',
+                '.users.uid'
+            ])
+
+        return pools
diff --git a/hooks/charmhelpers/contrib/openstack/amulet/utils.py b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
index 576bf0b5..03f79277 100644
--- a/hooks/charmhelpers/contrib/openstack/amulet/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/amulet/utils.py
@@ -14,16 +14,20 @@
 # You should have received a copy of the GNU Lesser General Public License
 # along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
 
+import amulet
+import json
 import logging
 import os
 import six
 import time
 import urllib
 
+import cinderclient.v1.client as cinder_client
 import glanceclient.v1.client as glance_client
 import heatclient.v1.client as heat_client
 import keystoneclient.v2_0 as keystone_client
 import novaclient.v1_1.client as nova_client
+import swiftclient
 
 from charmhelpers.contrib.amulet.utils import (
     AmuletUtils
@@ -171,6 +175,16 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.debug('Checking if tenant exists ({})...'.format(tenant))
         return tenant in [t.name for t in keystone.tenants.list()]
 
+    def authenticate_cinder_admin(self, keystone_sentry, username,
+                                  password, tenant):
+        """Authenticates admin user with cinder."""
+        # NOTE(beisner): cinder python client doesn't accept tokens.
+        service_ip = \
+            keystone_sentry.relation('shared-db',
+                                     'mysql:shared-db')['private-address']
+        ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
+        return cinder_client.Client(username, password, tenant, ept)
+
     def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                     tenant):
         """Authenticates admin user with the keystone admin endpoint."""
@@ -212,9 +226,29 @@ class OpenStackAmuletUtils(AmuletUtils):
         return nova_client.Client(username=user, api_key=password,
                                   project_id=tenant, auth_url=ep)
 
+    def authenticate_swift_user(self, keystone, user, password, tenant):
+        """Authenticates a regular user with swift api."""
+        self.log.debug('Authenticating swift user ({})...'.format(user))
+        ep = keystone.service_catalog.url_for(service_type='identity',
+                                              endpoint_type='publicURL')
+        return swiftclient.Connection(authurl=ep,
+                                      user=user,
+                                      key=password,
+                                      tenant_name=tenant,
+                                      auth_version='2.0')
+
     def create_cirros_image(self, glance, image_name):
-        """Download the latest cirros image and upload it to glance."""
-        self.log.debug('Creating glance image ({})...'.format(image_name))
+        """Download the latest cirros image and upload it to glance,
+        validate and return a resource pointer.
+
+        :param glance: pointer to authenticated glance connection
+        :param image_name: display name for new image
+        :returns: glance image pointer
+        """
+        self.log.debug('Creating glance cirros image '
+                       '({})...'.format(image_name))
+
+        # Download cirros image
         http_proxy = os.getenv('AMULET_HTTP_PROXY')
         self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
         if http_proxy:
@@ -223,33 +257,51 @@ class OpenStackAmuletUtils(AmuletUtils):
         else:
             opener = urllib.FancyURLopener()
 
-        f = opener.open("http://download.cirros-cloud.net/version/released")
+        f = opener.open('http://download.cirros-cloud.net/version/released')
         version = f.read().strip()
-        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
+        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
         local_path = os.path.join('tests', cirros_img)
 
         if not os.path.exists(local_path):
-            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
+            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                                   version, cirros_img)
             opener.retrieve(cirros_url, local_path)
         f.close()
 
+        # Create glance image
         with open(local_path) as f:
             image = glance.images.create(name=image_name, is_public=True,
                                          disk_format='qcow2',
                                          container_format='bare', data=f)
-        count = 1
-        status = image.status
-        while status != 'active' and count < 10:
-            time.sleep(3)
-            image = glance.images.get(image.id)
-            status = image.status
-            self.log.debug('image status: {}'.format(status))
-            count += 1
 
-        if status != 'active':
-            self.log.error('image creation timed out')
-            return None
+        # Wait for image to reach active status
+        img_id = image.id
+        ret = self.resource_reaches_status(glance.images, img_id,
+                                           expected_stat='active',
+                                           msg='Image status wait')
+        if not ret:
+            msg = 'Glance image failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new image
+        self.log.debug('Validating image attributes...')
+        val_img_name = glance.images.get(img_id).name
+        val_img_stat = glance.images.get(img_id).status
+        val_img_pub = glance.images.get(img_id).is_public
+        val_img_cfmt = glance.images.get(img_id).container_format
+        val_img_dfmt = glance.images.get(img_id).disk_format
+        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
+                    'container fmt:{} disk fmt:{}'.format(
+                        val_img_name, val_img_pub, img_id,
+                        val_img_stat, val_img_cfmt, val_img_dfmt))
+
+        if val_img_name == image_name and val_img_stat == 'active' \
+                and val_img_pub is True and val_img_cfmt == 'bare' \
+                and val_img_dfmt == 'qcow2':
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Image validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
 
         return image
 
@@ -260,22 +312,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.warn('/!\\ DEPRECATION WARNING:  use '
                       'delete_resource instead of delete_image.')
         self.log.debug('Deleting glance image ({})...'.format(image))
-        num_before = len(list(glance.images.list()))
-        glance.images.delete(image)
-
-        count = 1
-        num_after = len(list(glance.images.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(glance.images.list()))
-            self.log.debug('number of images: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('image deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(glance.images, image, msg='glance image')
 
     def create_instance(self, nova, image_name, instance_name, flavor):
         """Create the specified instance."""
@@ -308,22 +345,8 @@ class OpenStackAmuletUtils(AmuletUtils):
         self.log.warn('/!\\ DEPRECATION WARNING:  use '
                       'delete_resource instead of delete_instance.')
         self.log.debug('Deleting instance ({})...'.format(instance))
-        num_before = len(list(nova.servers.list()))
-        nova.servers.delete(instance)
-
-        count = 1
-        num_after = len(list(nova.servers.list()))
-        while num_after != (num_before - 1) and count < 10:
-            time.sleep(3)
-            num_after = len(list(nova.servers.list()))
-            self.log.debug('number of instances: {}'.format(num_after))
-            count += 1
-
-        if num_after != (num_before - 1):
-            self.log.error('instance deletion timed out')
-            return False
-
-        return True
+        return self.delete_resource(nova.servers, instance,
+                                    msg='nova instance')
 
     def create_or_get_keypair(self, nova, keypair_name="testkey"):
         """Create a new keypair, or return pointer if it already exists."""
@@ -339,6 +362,88 @@ class OpenStackAmuletUtils(AmuletUtils):
         _keypair = nova.keypairs.create(name=keypair_name)
         return _keypair
 
+    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
+                             img_id=None, src_vol_id=None, snap_id=None):
+        """Create cinder volume, optionally from a glance image, OR
+        optionally as a clone of an existing volume, OR optionally
+        from a snapshot.  Wait for the new volume status to reach
+        the expected status, validate and return a resource pointer.
+
+        :param vol_name: cinder volume display name
+        :param vol_size: size in gigabytes
+        :param img_id: optional glance image id
+        :param src_vol_id: optional source volume id to clone
+        :param snap_id: optional snapshot id to use
+        :returns: cinder volume pointer
+        """
+        # Handle parameter input and avoid impossible combinations
+        if img_id and not src_vol_id and not snap_id:
+            # Create volume from image
+            self.log.debug('Creating cinder volume from glance image...')
+            bootable = 'true'
+        elif src_vol_id and not img_id and not snap_id:
+            # Clone an existing volume
+            self.log.debug('Cloning cinder volume...')
+            bootable = cinder.volumes.get(src_vol_id).bootable
+        elif snap_id and not src_vol_id and not img_id:
+            # Create volume from snapshot
+            self.log.debug('Creating cinder volume from snapshot...')
+            snap = cinder.volume_snapshots.find(id=snap_id)
+            vol_size = snap.size
+            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
+            bootable = cinder.volumes.get(snap_vol_id).bootable
+        elif not img_id and not src_vol_id and not snap_id:
+            # Create volume
+            self.log.debug('Creating cinder volume...')
+            bootable = 'false'
+        else:
+            # Impossible combination of parameters
+            msg = ('Invalid method use - name:{} size:{} img_id:{} '
+                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
+                                                     img_id, src_vol_id,
+                                                     snap_id))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Create new volume
+        try:
+            vol_new = cinder.volumes.create(display_name=vol_name,
+                                            imageRef=img_id,
+                                            size=vol_size,
+                                            source_volid=src_vol_id,
+                                            snapshot_id=snap_id)
+            vol_id = vol_new.id
+        except Exception as e:
+            msg = 'Failed to create volume: {}'.format(e)
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Wait for volume to reach available status
+        ret = self.resource_reaches_status(cinder.volumes, vol_id,
+                                           expected_stat="available",
+                                           msg="Volume status wait")
+        if not ret:
+            msg = 'Cinder volume failed to reach expected state.'
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Re-validate new volume
+        self.log.debug('Validating volume attributes...')
+        val_vol_name = cinder.volumes.get(vol_id).display_name
+        val_vol_boot = cinder.volumes.get(vol_id).bootable
+        val_vol_stat = cinder.volumes.get(vol_id).status
+        val_vol_size = cinder.volumes.get(vol_id).size
+        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
+                    '{} size:{}'.format(val_vol_name, vol_id,
+                                        val_vol_stat, val_vol_boot,
+                                        val_vol_size))
+
+        if val_vol_boot == bootable and val_vol_stat == 'available' \
+                and val_vol_name == vol_name and val_vol_size == vol_size:
+            self.log.debug(msg_attr)
+        else:
+            msg = ('Volume validation failed, {}'.format(msg_attr))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        return vol_new
+
     def delete_resource(self, resource, resource_id,
                         msg="resource", max_wait=120):
         """Delete one openstack resource, such as one instance, keypair,
@@ -350,6 +455,8 @@ class OpenStackAmuletUtils(AmuletUtils):
         :param max_wait: maximum wait time in seconds
         :returns: True if successful, otherwise False
         """
+        self.log.debug('Deleting OpenStack resource '
+                       '{} ({})'.format(resource_id, msg))
         num_before = len(list(resource.list()))
         resource.delete(resource_id)
 
@@ -411,3 +518,87 @@ class OpenStackAmuletUtils(AmuletUtils):
             self.log.debug('{} never reached expected status: '
                            '{}'.format(resource_id, expected_stat))
             return False
+
+    def get_ceph_osd_id_cmd(self, index):
+        """Produce a shell command that will return a ceph-osd id."""
+        return ("`initctl list | grep 'ceph-osd ' | "
+                "awk 'NR=={} {{ print $2 }}' | "
+                "grep -o '[0-9]*'`".format(index + 1))
+
+    def get_ceph_pools(self, sentry_unit):
+        """Return a dict of ceph pools from a single ceph unit, with
+        pool names as keys and pool ids as values."""
+        pools = {}
+        cmd = 'sudo ceph osd lspools'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+
+        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+        for pool in str(output).split(','):
+            pool_id_name = pool.split(' ')
+            if len(pool_id_name) == 2:
+                pool_id = pool_id_name[0]
+                pool_name = pool_id_name[1]
+                pools[pool_name] = int(pool_id)
+
+        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+                                                pools))
+        return pools
+
+    def get_ceph_df(self, sentry_unit):
+        """Return dict of ceph df json output, including ceph pool state.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :returns: Dict of ceph df output
+        """
+        cmd = 'sudo ceph df --format=json'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        return json.loads(output)
+
+    def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
+        """Take a sample of attributes of a ceph pool, returning ceph
+        pool name, object count and disk space used for the specified
+        pool ID number.
+
+        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
+        :param pool_id: Ceph pool ID
+        :returns: List of pool name, object count, kb disk space used
+        """
+        df = self.get_ceph_df(sentry_unit)
+        pool_name = df['pools'][pool_id]['name']
+        obj_count = df['pools'][pool_id]['stats']['objects']
+        kb_used = df['pools'][pool_id]['stats']['kb_used']
+        self.log.debug('Ceph {} pool (ID {}): {} objects, '
+                       '{} kb used'.format(pool_name, pool_id,
+                                           obj_count, kb_used))
+        return pool_name, obj_count, kb_used
+
+    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
+        """Validate ceph pool samples taken over time, such as pool
+        object counts or pool kb used, before adding, after adding, and
+        after deleting items which affect those pool attributes.  The
+        2nd element is expected to be greater than the 1st; 3rd is expected
+        to be less than the 2nd.
+
+        :param samples: List containing 3 data samples
+        :param sample_type: String for logging and usage context
+        :returns: None if successful, Failure message otherwise
+        """
+        original, created, deleted = range(3)
+        if samples[created] <= samples[original] or \
+                samples[deleted] >= samples[created]:
+            return ('Ceph {} samples ({}) '
+                    'unexpected.'.format(sample_type, samples))
+        else:
+            self.log.debug('Ceph {} samples (OK): '
+                           '{}'.format(sample_type, samples))
+            return None
diff --git a/hooks/charmhelpers/contrib/openstack/context.py b/hooks/charmhelpers/contrib/openstack/context.py
index ab400060..8f3f1b15 100644
--- a/hooks/charmhelpers/contrib/openstack/context.py
+++ b/hooks/charmhelpers/contrib/openstack/context.py
@@ -122,21 +122,24 @@ def config_flags_parser(config_flags):
          of specifying multiple key value pairs within the same string. For
          example, a string in the format of 'key1=value1, key2=value2' will
          return a dict of:
-         {'key1': 'value1',
-          'key2': 'value2'}.
+
+             {'key1': 'value1',
+              'key2': 'value2'}.
 
       2. A string in the above format, but supporting a comma-delimited list
          of values for the same key. For example, a string in the format of
          'key1=value1, key2=value3,value4,value5' will return a dict of:
-         {'key1', 'value1',
-          'key2', 'value2,value3,value4'}
+
+             {'key1', 'value1',
+              'key2', 'value2,value3,value4'}
 
       3. A string containing a colon character (:) prior to an equal
          character (=) will be treated as yaml and parsed as such. This can be
          used to specify more complex key value pairs. For example,
          a string in the format of 'key1: subkey1=value1, subkey2=value2' will
          return a dict of:
-         {'key1', 'subkey1=value1, subkey2=value2'}
+
+             {'key1', 'subkey1=value1, subkey2=value2'}
 
     The provided config_flags string may be a list of comma-separated values
     which themselves may be comma-separated list of values.
@@ -891,8 +894,6 @@ class NeutronContext(OSContextGenerator):
         return ctxt
 
     def __call__(self):
-        self._ensure_packages()
-
         if self.network_manager not in ['quantum', 'neutron']:
             return {}
 
diff --git a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
index 81a9719f..b99851cc 100644
--- a/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
+++ b/hooks/charmhelpers/contrib/openstack/templates/ceph.conf
@@ -5,11 +5,11 @@
 ###############################################################################
 [global]
 {% if auth -%}
- auth_supported = {{ auth }}
- keyring = /etc/ceph/$cluster.$name.keyring
- mon host = {{ mon_hosts }}
+auth_supported = {{ auth }}
+keyring = /etc/ceph/$cluster.$name.keyring
+mon host = {{ mon_hosts }}
 {% endif -%}
- log to syslog = {{ use_syslog }}
- err to syslog = {{ use_syslog }}
- clog to syslog = {{ use_syslog }}
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}
 
diff --git a/hooks/charmhelpers/contrib/openstack/utils.py b/hooks/charmhelpers/contrib/openstack/utils.py
index 28532c98..4dd000c3 100644
--- a/hooks/charmhelpers/contrib/openstack/utils.py
+++ b/hooks/charmhelpers/contrib/openstack/utils.py
@@ -522,6 +522,7 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
     Clone/install all specified OpenStack repositories.
 
     The expected format of projects_yaml is:
+
         repositories:
           - {name: keystone,
              repository: 'git://git.openstack.org/openstack/keystone.git',
@@ -529,11 +530,13 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
           - {name: requirements,
              repository: 'git://git.openstack.org/openstack/requirements.git',
              branch: 'stable/icehouse'}
+
         directory: /mnt/openstack-git
         http_proxy: squid-proxy-url
         https_proxy: squid-proxy-url
 
-        The directory, http_proxy, and https_proxy keys are optional.
+    The directory, http_proxy, and https_proxy keys are optional.
+
     """
     global requirements_dir
     parent_dir = '/mnt/openstack-git'
@@ -555,10 +558,11 @@ def git_clone_and_install(projects_yaml, core_project, depth=1):
 
     pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
 
-    # Upgrade setuptools from default virtualenv version. The default version
-    # in trusty breaks update.py in global requirements master branch.
-    pip_install('setuptools', upgrade=True, proxy=http_proxy,
-                venv=os.path.join(parent_dir, 'venv'))
+    # Upgrade setuptools and pip from default virtualenv versions. The default
+    # versions in trusty break master OpenStack branch deployments.
+    for p in ['pip', 'setuptools']:
+        pip_install(p, upgrade=True, proxy=http_proxy,
+                    venv=os.path.join(parent_dir, 'venv'))
 
     for p in projects['repositories']:
         repo = p['repository']
diff --git a/hooks/charmhelpers/contrib/storage/linux/ceph.py b/hooks/charmhelpers/contrib/storage/linux/ceph.py
index 31ea7f9e..00dbffb4 100644
--- a/hooks/charmhelpers/contrib/storage/linux/ceph.py
+++ b/hooks/charmhelpers/contrib/storage/linux/ceph.py
@@ -60,12 +60,12 @@ KEYRING = '/etc/ceph/ceph.client.{}.keyring'
 KEYFILE = '/etc/ceph/ceph.client.{}.key'
 
 CEPH_CONF = """[global]
- auth supported = {auth}
- keyring = {keyring}
- mon host = {mon_hosts}
- log to syslog = {use_syslog}
- err to syslog = {use_syslog}
- clog to syslog = {use_syslog}
+auth supported = {auth}
+keyring = {keyring}
+mon host = {mon_hosts}
+log to syslog = {use_syslog}
+err to syslog = {use_syslog}
+clog to syslog = {use_syslog}
 """
 
 
diff --git a/hooks/charmhelpers/core/hookenv.py b/hooks/charmhelpers/core/hookenv.py
index 0add16d4..dd8def9a 100644
--- a/hooks/charmhelpers/core/hookenv.py
+++ b/hooks/charmhelpers/core/hookenv.py
@@ -761,6 +761,7 @@ def atstart(callback, *args, **kwargs):
 
     This is useful for modules and classes to perform initialization
     and inject behavior. In particular:
+
         - Run common code before all of your hooks, such as logging
           the hook name or interesting relation data.
         - Defer object or module initialization that requires a hook
diff --git a/hooks/charmhelpers/core/host.py b/hooks/charmhelpers/core/host.py
index 901a4cfe..8ae8ef86 100644
--- a/hooks/charmhelpers/core/host.py
+++ b/hooks/charmhelpers/core/host.py
@@ -63,6 +63,36 @@ def service_reload(service_name, restart_on_failure=False):
     return service_result
 
 
+def service_pause(service_name, init_dir=None):
+    """Pause a system service.
+
+    Stop it, and prevent it from starting again at boot."""
+    if init_dir is None:
+        init_dir = "/etc/init"
+    stopped = service_stop(service_name)
+    # XXX: Support systemd too
+    override_path = os.path.join(
+        init_dir, '{}.conf.override'.format(service_name))
+    with open(override_path, 'w') as fh:
+        fh.write("manual\n")
+    return stopped
+
+
+def service_resume(service_name, init_dir=None):
+    """Resume a system service.
+
+    Re-enable starting at boot, then start the service."""
+    # XXX: Support systemd too
+    if init_dir is None:
+        init_dir = "/etc/init"
+    override_path = os.path.join(
+        init_dir, '{}.conf.override'.format(service_name))
+    if os.path.exists(override_path):
+        os.unlink(override_path)
+    started = service_start(service_name)
+    return started
+
+
 def service(action, service_name):
     """Control a system service"""
     cmd = ['service', service_name, action]
@@ -140,11 +170,7 @@ def add_group(group_name, system_group=False):
 
 def add_user_to_group(username, group):
     """Add a user to a group"""
-    cmd = [
-        'gpasswd', '-a',
-        username,
-        group
-    ]
+    cmd = ['gpasswd', '-a', username, group]
     log("Adding user {} to group {}".format(username, group))
     subprocess.check_call(cmd)
 
diff --git a/hooks/charmhelpers/core/services/helpers.py b/hooks/charmhelpers/core/services/helpers.py
index 3eb5fb44..8005c415 100644
--- a/hooks/charmhelpers/core/services/helpers.py
+++ b/hooks/charmhelpers/core/services/helpers.py
@@ -239,12 +239,12 @@ class TemplateCallback(ManagerCallback):
     action.
 
     :param str source: The template source file, relative to
-    `$CHARM_DIR/templates`
-
+        `$CHARM_DIR/templates`
     :param str target: The target to write the rendered template to
     :param str owner: The owner of the rendered file
     :param str group: The group of the rendered file
     :param int perms: The permissions of the rendered file
+
     """
     def __init__(self, source, target,
                  owner='root', group='root', perms=0o444):
diff --git a/hooks/charmhelpers/fetch/__init__.py b/hooks/charmhelpers/fetch/__init__.py
index 9a1a2515..0a3bb969 100644
--- a/hooks/charmhelpers/fetch/__init__.py
+++ b/hooks/charmhelpers/fetch/__init__.py
@@ -215,9 +215,9 @@ def apt_purge(packages, fatal=False):
     _run_apt_command(cmd, fatal)
 
 
-def apt_hold(packages, fatal=False):
-    """Hold one or more packages"""
-    cmd = ['apt-mark', 'hold']
+def apt_mark(packages, mark, fatal=False):
+    """Flag one or more packages using apt-mark"""
+    cmd = ['apt-mark', mark]
     if isinstance(packages, six.string_types):
         cmd.append(packages)
     else:
@@ -225,9 +225,17 @@ def apt_hold(packages, fatal=False):
     log("Holding {}".format(packages))
 
     if fatal:
-        subprocess.check_call(cmd)
+        subprocess.check_call(cmd, universal_newlines=True)
     else:
-        subprocess.call(cmd)
+        subprocess.call(cmd, universal_newlines=True)
+
+
+def apt_hold(packages, fatal=False):
+    return apt_mark(packages, 'hold', fatal=fatal)
+
+
+def apt_unhold(packages, fatal=False):
+    return apt_mark(packages, 'unhold', fatal=fatal)
 
 
 def add_source(source, key=None):
@@ -370,8 +378,9 @@ def install_remote(source, *args, **kwargs):
     for handler in handlers:
         try:
             installed_to = handler.install(source, *args, **kwargs)
-        except UnhandledSource:
-            pass
+        except UnhandledSource as e:
+            log('Install source attempt unsuccessful: {}'.format(e),
+                level='WARNING')
     if not installed_to:
         raise UnhandledSource("No handler found for source {}".format(source))
     return installed_to
diff --git a/hooks/charmhelpers/fetch/archiveurl.py b/hooks/charmhelpers/fetch/archiveurl.py
index 8dfce505..efd7f9f0 100644
--- a/hooks/charmhelpers/fetch/archiveurl.py
+++ b/hooks/charmhelpers/fetch/archiveurl.py
@@ -77,6 +77,8 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
     def can_handle(self, source):
         url_parts = self.parse_url(source)
         if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
+            # XXX: Why is this returning a boolean and a string? It's
+            # doomed to fail since "bool(can_handle('foo://'))"  will be True.
             return "Wrong source type"
         if get_archive_handler(self.base_url(source)):
             return True
@@ -155,7 +157,11 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
             else:
                 algorithms = hashlib.algorithms_available
             if key in algorithms:
-                check_hash(dld_file, value, key)
+                if len(value) != 1:
+                    raise TypeError(
+                        "Expected 1 hash value, not %d" % len(value))
+                expected = value[0]
+                check_hash(dld_file, expected, key)
         if checksum:
             check_hash(dld_file, checksum, hash_type)
         return extract(dld_file, dest)
diff --git a/hooks/charmhelpers/fetch/giturl.py b/hooks/charmhelpers/fetch/giturl.py
index ddc25b7e..f023b26d 100644
--- a/hooks/charmhelpers/fetch/giturl.py
+++ b/hooks/charmhelpers/fetch/giturl.py
@@ -67,7 +67,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
         try:
             self.clone(source, dest_dir, branch, depth)
         except GitCommandError as e:
-            raise UnhandledSource(e.message)
+            raise UnhandledSource(e)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir