Revert "Add policy.json to allow owner to force delete volumes"

This reverts commit 4ddea990d0.

Change-Id: I77f9351da8516e5af40fea57400101e6dd16b528
Author: Chris MacNaughton (icey)
Date:   2018-07-30 15:19:47 +00:00
Parent: 4ddea990d0
Commit: 79f9ff5c70

3 changed files with 1 addition and 163 deletions

@@ -145,7 +145,6 @@ class CinderCharmError(Exception):
 CINDER_CONF_DIR = "/etc/cinder"
 CINDER_CONF = '%s/cinder.conf' % CINDER_CONF_DIR
 CINDER_API_CONF = '%s/api-paste.ini' % CINDER_CONF_DIR
-CINDER_POLICY_JSON = '%s/policy.json' % CINDER_CONF_DIR
 CEPH_CONF = '/etc/ceph/ceph.conf'
 HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
@@ -216,10 +215,6 @@ BASE_RESOURCE_MAP = OrderedDict([
         'contexts': [context.IdentityServiceContext()],
         'services': ['cinder-api'],
     }),
-    (CINDER_POLICY_JSON, {
-        'contexts': [],
-        'services': ['cinder-api']
-    }),
     (ceph_config_file(), {
         'contexts': [context.CephContext()],
         'services': ['cinder-volume']
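
For context on the hunk above: each BASE_RESOURCE_MAP entry ties a rendered config file to the context generators that feed its template and the services restarted when it changes. A minimal sketch of how such a map is typically consumed, based on charmhelpers' OSConfigRenderer; the names register_configs and resource_map here are illustrative, not lifted from this commit:

# Illustrative sketch only, not part of this change.
from charmhelpers.contrib.openstack import templating

def register_configs(release):
    # One renderer per charm; every config file in the resource map is
    # registered with the contexts that supply its template variables.
    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release=release)
    for cfg_file, resource in resource_map().items():  # assumed helper
        configs.register(cfg_file, resource['contexts'])
    return configs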

@@ -1,3 +0,0 @@
-{
-    "volume_extension:volume_admin_actions:force_delete": "rule:admin_or_owner"
-}
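
The deleted policy.json held a single override: under oslo.policy, "rule:admin_or_owner" lets members of a volume's own project pass the force_delete check, whereas the upstream default restricts it to admins. A rough sketch of how such a file is evaluated, assuming stock oslo.policy (none of this code is from the charm):

# Rough sketch, assuming stock oslo.policy; not part of this charm.
from oslo_config import cfg
from oslo_policy import policy

enforcer = policy.Enforcer(cfg.CONF, policy_file='/etc/cinder/policy.json')
creds = {'user_id': 'u1', 'project_id': 'p1', 'roles': ['Member']}
target = {'project_id': 'p1'}  # the volume belongs to the caller's project
# True while the admin_or_owner override above is in place:
enforcer.enforce('volume_extension:volume_admin_actions:force_delete',
                 target, creds)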

@@ -26,8 +26,6 @@ from charmhelpers.contrib.openstack.amulet.utils import (
     # ERROR
 )
 
-import keystoneclient
-
 # Use DEBUG to turn on debug logging
 u = OpenStackAmuletUtils(DEBUG)
@@ -99,6 +97,7 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
         # endpoint in the catalog.
         relations.update({
             'nova-compute:image-service': 'glance:image-service',
+            'nova-compute:shared-db': 'percona-cluster:shared-db',
            'nova-compute:amqp': 'rabbitmq-server:amqp',
            'nova-cloud-controller:shared-db': 'percona-cluster:shared-db',
            'nova-cloud-controller:identity-service': 'keystone:'
@@ -156,147 +155,10 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
         else:
             api_version = 1
         self.cinder = u.authenticate_cinder_admin(self.keystone, api_version)
-        if self.is_mitaka_or_newer():
-            self.create_users_v3()
-            self.keystone_non_admin = u.authenticate_keystone(
-                self.keystone_sentry.info['public-address'],
-                user_domain_name=self.demo_domain,
-                username=self.demo_user_v3,
-                password='password',
-                api_version=self.keystone_api_version,
-                project_domain_name=self.demo_domain,
-                project_name=self.demo_project,
-            )
-        else:
-            self.create_users_v2()
-            self.keystone_non_admin = u.authenticate_keystone_user(
-                self.keystone, user=self.demo_user,
-                password='password', tenant=self.demo_tenant)
-        self.cinder_non_admin = u.authenticate_cinder_admin(
-            self.keystone_non_admin, api_version)
         # Authenticate admin with glance endpoint
         self.glance = u.authenticate_glance_admin(self.keystone)
-
-    def is_mitaka_or_newer(self):
-        # os_release = self._get_openstack_release_string()
-        os_release = self._get_openstack_release()
-        # if os_release >= 'mitaka':
-        if os_release >= self.xenial_mitaka:
-            return True
-        else:
-            u.log.info('Skipping test, {} < mitaka'.format(os_release))
-            return False
-
-    def create_users_v2(self):
-        # Create a demo tenant/role/user
-        self.demo_tenant = 'demoTenant'
-        self.demo_role = 'demoRole'
-        self.demo_user = 'demoUser'
-        self.keystone_api_version = 2
-        if not u.tenant_exists(self.keystone, self.demo_tenant):
-            tenant = self.keystone.tenants.create(
-                tenant_name=self.demo_tenant,
-                description='demo tenant',
-                enabled=True)
-            self.keystone.roles.create(name=self.demo_role)
-            self.keystone.users.create(name=self.demo_user,
-                                       password='password',
-                                       tenant_id=tenant.id,
-                                       email='demo@demo.com')
-
-    def create_users_v3(self):
-        # Create a demo tenant/role/user
-        self.demo_project = 'demoProject'
-        self.demo_user_v3 = 'demoUserV3'
-        self.demo_role = 'demoRoleV3'
-        self.demo_domain_admin = 'demoDomainAdminV3'
-        self.demo_domain = 'demoDomain'
-        self.keystone_api_version = 3
-        try:
-            domain = self.keystone.domains.find(name=self.demo_domain)
-        except keystoneclient.exceptions.NotFound:
-            domain = self.keystone.domains.create(
-                self.demo_domain,
-                description='Demo Domain',
-                enabled=True
-            )
-        try:
-            self.keystone.projects.find(name=self.demo_project)
-        except keystoneclient.exceptions.NotFound:
-            self.keystone.projects.create(
-                self.demo_project,
-                domain,
-                description='Demo Project',
-                enabled=True,
-            )
-        try:
-            self.keystone.roles.find(name=self.demo_role)
-        except keystoneclient.exceptions.NotFound:
-            self.keystone.roles.create(name=self.demo_role)
-        try:
-            self.keystone.roles.find(name='Member')
-        except keystoneclient.exceptions.NotFound:
-            self.keystone.roles.create(name='Member')
-        if not self.find_keystone_v3_user(self.keystone,
-                                          self.demo_user_v3,
-                                          self.demo_domain):
-            user = self.keystone.users.create(
-                self.demo_user_v3,
-                domain=domain.id,
-                project=self.demo_project,
-                password='password',
-                email='demov3@demo.com',
-                description='Demo',
-                enabled=True)
-            role = self.keystone.roles.find(name='Member')
-            u.log.debug("self.keystone.roles.grant('{}', user='{}', "
-                        "domain='{}')".format(role.id, user.id, domain.id))
-            self.keystone.roles.grant(
-                role.id,
-                user=user.id,
-                project=self.keystone.projects.find(name=self.demo_project).id)
-        try:
-            self.keystone.roles.find(name='Admin')
-        except keystoneclient.exceptions.NotFound:
-            self.keystone.roles.create(name='Admin')
-        if not self.find_keystone_v3_user(self.keystone,
-                                          self.demo_domain_admin,
-                                          self.demo_domain):
-            user = self.keystone.users.create(
-                self.demo_domain_admin,
-                domain=domain.id,
-                project=self.demo_project,
-                password='password',
-                email='demoadminv3@demo.com',
-                description='Demo Admin',
-                enabled=True)
-            role = self.keystone.roles.find(name='Admin')
-            u.log.debug("self.keystone.roles.grant('{}', user='{}', "
-                        "domain='{}')".format(role.id, user.id, domain.id))
-            self.keystone.roles.grant(
-                role.id,
-                user=user.id,
-                domain=domain.id)
-
-    def find_keystone_v3_user(self, client, username, domain):
-        """Find a user within a specified keystone v3 domain"""
-        domain_users = client.users.list(
-            domain=client.domains.find(name=domain).id
-        )
-        for user in domain_users:
-            if username.lower() == user.name.lower():
-                return user
-        return None
 
     def _extend_cinder_volume(self, vol_id, new_size=2):
         """Extend an existing cinder volume size.
@@ -929,22 +791,6 @@ class CinderBasicDeployment(OpenStackAmuletDeployment):
         u.log.debug('Deleting volume {}...'.format(vol.id))
         u.delete_resource(self.cinder.volumes, vol.id, msg="cinder volume")
-
-    def test_404_admin_force_delete_volume(self):
-        """Create a cinder volume and delete it."""
-        u.log.debug('Creating, checking and deleting cinder volume...')
-        vol_new = u.create_cinder_volume(self.cinder)
-        vol_new.force_delete()
-
-    def test_405_non_admin_force_delete_volume(self):
-        """Create a cinder volume and delete it."""
-        os_release = self._get_openstack_release()
-        if os_release < self.xenial_queens:
-            u.log.info('Skipping test, {} < queens'.format(os_release))
-            return
-        u.log.debug('Creating, checking and deleting cinder volume...')
-        vol_new = u.create_cinder_volume(self.cinder_non_admin)
-        vol_new.force_delete()
 
     def test_900_restart_on_config_change(self):
         """Verify that the specified services are restarted when the
         config is changed."""
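
With the override and its tests reverted, force-delete is again governed by cinder's default policy (admin-only upstream), so a call like the removed test_405 made would be expected to fail for a non-admin. A hedged sketch with python-cinderclient; non_admin_session is an assumed keystoneauth1 session, and the exact behavior depends on the deployed release's default policy:

# Hedged sketch; assumes a keystoneauth1 session for a non-admin user.
from cinderclient import client as cinder_client
from cinderclient import exceptions

cinder = cinder_client.Client('2', session=non_admin_session)  # assumed session
vol = cinder.volumes.create(size=1)
try:
    vol.force_delete()
except exceptions.Forbidden:
    # Expected after this revert: only admins may force-delete.
    vol.delete()  # a plain owner delete still works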