Support creating volume-backed amphora instances.
In some production deployments, volume-backed instances are used instead of
local disk so that data is protected and live migration is possible.

This patch adds:
- creation of a cinder volume for the amphora
- booting the amphora from the cinder volume
- config options for the cinder client
- unit tests for the cinder functionality

Story: 2001594
Co-authored-by: Vadim Ponomarev <velizarx@gmail.com>
Co-authored-by: Margarita Shakhova <shakhova.margarita@gmail.com>
Change-Id: I8181ed696b9ab556e7741c08839d79167aff8350
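For reference, a minimal sketch of the octavia.conf settings this patch introduces (section and option names are taken from the diff below; the values shown are only illustrative, not recommendations):

    [controller_worker]
    volume_driver = volume_cinder_driver

    [cinder]
    # Size of the amphora root volume in GB (patch default is 16)
    volume_size = 16
    # volume_type =
    # availability_zone =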
@@ -271,6 +271,7 @@ function octavia_configure {
     # Setting other required default options
     iniset $OCTAVIA_CONF controller_worker amphora_driver ${OCTAVIA_AMPHORA_DRIVER}
     iniset $OCTAVIA_CONF controller_worker compute_driver ${OCTAVIA_COMPUTE_DRIVER}
+    iniset $OCTAVIA_CONF controller_worker volume_driver ${OCTAVIA_VOLUME_DRIVER}
     iniset $OCTAVIA_CONF controller_worker network_driver ${OCTAVIA_NETWORK_DRIVER}
     iniset $OCTAVIA_CONF controller_worker amp_image_tag ${OCTAVIA_AMP_IMAGE_TAG}
@@ -20,6 +20,7 @@ OCTAVIA_RUN_DIR=${OCTAVIA_RUN_DIR:-"/var/run/octavia"}
 OCTAVIA_AMPHORA_DRIVER=${OCTAVIA_AMPHORA_DRIVER:-"amphora_haproxy_rest_driver"}
 OCTAVIA_NETWORK_DRIVER=${OCTAVIA_NETWORK_DRIVER:-"allowed_address_pairs_driver"}
 OCTAVIA_COMPUTE_DRIVER=${OCTAVIA_COMPUTE_DRIVER:-"compute_nova_driver"}
+OCTAVIA_VOLUME_DRIVER=${OCTAVIA_VOLUME_DRIVER:-"volume_noop_driver"}

 OCTAVIA_USERNAME=${OCTAVIA_ADMIN_USER:-"admin"}
 OCTAVIA_PASSWORD=${OCTAVIA_PASSWORD:-${ADMIN_PASSWORD}}
@@ -243,6 +243,10 @@
 # allowed_address_pairs_driver
 #
 # network_driver = network_noop_driver
+# Volume driver options are volume_noop_driver
+# volume_cinder_driver
+#
+# volume_driver = volume_noop_driver
 #
 # Distributor driver options are distributor_noop_driver
 # single_VIP_amphora
@@ -421,6 +425,44 @@
 # Nova supports: anti-affinity and soft-anti-affinity
 # anti_affinity_policy = anti-affinity

+[cinder]
+# The name of the cinder service in the keystone catalog
+# service_name =
+# Custom cinder endpoint if override is necessary
+# endpoint =
+
+# Region in Identity service catalog to use for communication with the
+# OpenStack services.
+# region_name =
+
+# Endpoint type in Identity service catalog to use for communication with
+# the OpenStack services.
+# endpoint_type = publicURL
+
+# Availability zone to use for creating Volume
+# availability_zone =
+
+# CA certificates file to verify cinder connections when TLS is enabled
+# insecure = False
+# ca_certificates_file =
+
+# Size of root volume in GB for Amphora Instance when using Cinder
+# In some storage backends, such as ScaleIO, the volume size must be a multiple of 8
+# volume_size = 16
+
+# Volume type to be used for Amphora Instance root disk
+# If not specified, default_volume_type from cinder.conf will be used
+# volume_type =
+
+# Interval time to wait until volume becomes available
+# volume_create_retry_interval = 5
+
+# Timeout to wait for volume creation success
+# volume_create_timeout = 300
+
+# Maximum number of retries to create volume
+# volume_create_max_retries = 5
+
 [glance]
 # The name of the glance service in the keystone catalog
 # service_name =
@@ -172,3 +172,4 @@ WebTest==2.0.29
 Werkzeug==0.14.1
 wrapt==1.10.11
 WSME==0.8.0
+python-cinderclient==3.3.0
@@ -10,6 +10,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from cinderclient import client as cinder_client
 from glanceclient import client as glance_client
 from neutronclient.neutron import client as neutron_client
 from novaclient import api_versions
@@ -26,6 +27,7 @@ CONF = cfg.CONF
 GLANCE_VERSION = '2'
 NEUTRON_VERSION = '2.0'
 NOVA_VERSION = '2.15'
+CINDER_VERSION = '3'


 class NovaAuth(object):
@@ -143,3 +145,43 @@ class GlanceAuth(object):
                 with excutils.save_and_reraise_exception():
                     LOG.exception("Error creating Glance client.")
         return cls.glance_client
+
+
+class CinderAuth(object):
+    cinder_client = None
+
+    @classmethod
+    def get_cinder_client(cls, region, service_name=None, endpoint=None,
+                          endpoint_type='publicURL', insecure=False,
+                          cacert=None):
+        """Create cinder client object.
+
+        :param region: The region of the service
+        :param service_name: The name of the cinder service in the catalog
+        :param endpoint: The endpoint of the service
+        :param endpoint_type: The endpoint type of the service
+        :param insecure: Turn off certificate validation
+        :param cacert: CA Cert file path
+        :return: a Cinder Client object
+        :raise Exception: if the client cannot be created
+        """
+        ksession = keystone.KeystoneSession()
+        if not cls.cinder_client:
+            kwargs = {'region_name': region,
+                      'session': ksession.get_session(),
+                      'interface': endpoint_type}
+            if service_name:
+                kwargs['service_name'] = service_name
+            if endpoint:
+                kwargs['endpoint'] = endpoint
+                if endpoint.startswith("https"):
+                    kwargs['insecure'] = insecure
+                    kwargs['cacert'] = cacert
+            try:
+                cls.cinder_client = cinder_client.Client(
+                    CINDER_VERSION, **kwargs
+                )
+            except Exception:
+                with excutils.save_and_reraise_exception():
+                    LOG.exception("Error creating Cinder client.")
+        return cls.cinder_client
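As an aside (not part of the patch), a quick usage sketch of the helper added above; the region value is only a placeholder and the call assumes a configured keystone session:

    from octavia.common import clients

    # Build (or reuse) the cached cinder client; 'RegionOne' is just an example.
    cinder = clients.CinderAuth.get_cinder_client(region='RegionOne')
    volume = cinder.volumes.get('<volume-id>')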
@@ -410,6 +410,10 @@ controller_worker_opts = [
     cfg.StrOpt('network_driver',
                default='network_noop_driver',
                help=_('Name of the network driver to use')),
+    cfg.StrOpt('volume_driver',
+               default=constants.VOLUME_NOOP_DRIVER,
+               choices=constants.SUPPORTED_VOLUME_DRIVERS,
+               help=_('Name of the volume driver to use')),
     cfg.StrOpt('distributor_driver',
                default='distributor_noop_driver',
                help=_('Name of the distributor driver to use')),
@@ -560,6 +564,38 @@ nova_opts = [
     cfg.StrOpt('availability_zone', default=None,
                help=_('Availability zone to use for creating Amphorae')),
 ]
+
+cinder_opts = [
+    cfg.StrOpt('service_name',
+               help=_('The name of the cinder service in the keystone '
+                      'catalog')),
+    cfg.StrOpt('endpoint', help=_('A new endpoint to override the endpoint '
+                                  'in the keystone catalog.')),
+    cfg.StrOpt('region_name',
+               help=_('Region in Identity service catalog to use for '
+                      'communication with the OpenStack services.')),
+    cfg.StrOpt('endpoint_type', default='publicURL',
+               help=_('Endpoint interface in identity service to use')),
+    cfg.StrOpt('ca_certificates_file',
+               help=_('CA certificates file path')),
+    cfg.StrOpt('availability_zone', default=None,
+               help=_('Availability zone to use for creating Volume')),
+    cfg.BoolOpt('insecure',
+                default=False,
+                help=_('Disable certificate validation on SSL connections')),
+    cfg.IntOpt('volume_size', default=16,
+               help=_('Size of volume for Amphora instance')),
+    cfg.StrOpt('volume_type', default=None,
+               help=_('Type of volume for Amphorae volume root disk')),
+    cfg.IntOpt('volume_create_retry_interval', default=5,
+               help=_('Interval time to wait until volume is created in '
+                      'available state')),
+    cfg.IntOpt('volume_create_timeout', default=300,
+               help=_('Timeout to wait for volume creation success')),
+    cfg.IntOpt('volume_create_max_retries', default=5,
+               help=_('Maximum number of retries to create volume'))
+]
+
 neutron_opts = [
     cfg.StrOpt('service_name',
                help=_('The name of the neutron service in the '
@@ -685,6 +721,7 @@ cfg.CONF.register_cli_opts(core_cli_opts)
 cfg.CONF.register_opts(certificate_opts, group='certificates')
 cfg.CONF.register_cli_opts(healthmanager_opts, group='health_manager')
 cfg.CONF.register_opts(nova_opts, group='nova')
+cfg.CONF.register_opts(cinder_opts, group='cinder')
 cfg.CONF.register_opts(glance_opts, group='glance')
 cfg.CONF.register_opts(neutron_opts, group='neutron')
 cfg.CONF.register_opts(quota_opts, group='quotas')
@@ -687,3 +687,13 @@ L4_PROTOCOL_MAP = {
     PROTOCOL_PROXY: PROTOCOL_TCP,
     PROTOCOL_UDP: PROTOCOL_UDP,
 }
+
+# Volume drivers
+VOLUME_NOOP_DRIVER = 'volume_noop_driver'
+SUPPORTED_VOLUME_DRIVERS = [VOLUME_NOOP_DRIVER,
+                            'volume_cinder_driver']
+
+# Cinder volume driver constants
+CINDER_STATUS_AVAILABLE = 'available'
+CINDER_STATUS_ERROR = 'error'
+CINDER_ACTION_CREATE_VOLUME = 'create volume'
@@ -383,3 +383,11 @@ class ObjectInUse(APIException):
 class ProviderFlavorMismatchError(APIException):
     msg = _("Flavor '%(flav)s' is not compatible with provider '%(prov)s'")
     code = 400
+
+
+class VolumeDeleteException(OctaviaException):
+    message = _('Failed to delete volume instance.')
+
+
+class VolumeGetException(OctaviaException):
+    message = _('Failed to retrieve volume instance.')
@@ -18,6 +18,7 @@ import string
 from novaclient import exceptions as nova_exceptions
 from oslo_config import cfg
 from oslo_log import log as logging
+from stevedore import driver as stevedore_driver

 from octavia.common import clients
 from octavia.common import constants
@@ -88,6 +89,11 @@ class VirtualMachineManager(compute_base.ComputeBase):
         self.manager = self._nova_client.servers
         self.server_groups = self._nova_client.server_groups
         self.flavor_manager = self._nova_client.flavors
+        self.volume_driver = stevedore_driver.DriverManager(
+            namespace='octavia.volume.drivers',
+            name=CONF.controller_worker.volume_driver,
+            invoke_on_load=True
+        ).driver

     def build(self, name="amphora_name", amphora_flavor=None,
               image_id=None, image_tag=None, image_owner=None,
@@ -122,6 +128,7 @@ class VirtualMachineManager(compute_base.ComputeBase):

         '''

+        volume_id = None
         try:
             network_ids = network_ids or []
             port_ids = port_ids or []
@@ -143,9 +150,25 @@ class VirtualMachineManager(compute_base.ComputeBase):
                 [r.choice(string.ascii_uppercase + string.digits)
                  for i in range(CONF.nova.random_amphora_name_length - 1)]
             ))
+        block_device_mapping = {}
+        if CONF.controller_worker.volume_driver != \
+                constants.VOLUME_NOOP_DRIVER:
+            # Create the boot volume
+            LOG.debug('Creating volume for amphora from image %s',
+                      image_id)
+            volume_id = self.volume_driver.create_volume_from_image(
+                image_id)
+            LOG.debug('Created boot volume %s for amphora', volume_id)
+            # When booting from a volume, the image ID is no longer needed
+            image_id = None
+            # Boot from volume with parameters: target device name = vda,
+            # device id = volume_id, device type and size unspecified,
+            # delete-on-terminate = true (volume will be deleted by Nova
+            # on instance termination)
+            block_device_mapping = {'vda': '%s:::true' % volume_id}
         amphora = self.manager.create(
             name=name, image=image_id, flavor=amphora_flavor,
+            block_device_mapping=block_device_mapping,
             key_name=key_name, security_groups=sec_groups,
             nics=nics,
             files=config_drive_files,
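For clarity, a small illustration (not part of the patch) of the block device mapping string built above; the nova format is '<volume_id>:<type>:<size>:<delete_on_terminate>', with type and size left unspecified here and delete-on-terminate set to true:

    # Hypothetical helper mirroring the mapping the patch constructs.
    def boot_volume_mapping(volume_id):
        # Device 'vda' boots from the given volume; Nova deletes the
        # volume when the instance is terminated.
        return {'vda': '%s:::true' % volume_id}

    assert boot_volume_mapping('vol-1') == {'vda': 'vol-1:::true'}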
@@ -157,6 +180,9 @@ class VirtualMachineManager(compute_base.ComputeBase):

             return amphora.id
         except Exception as e:
+            if CONF.controller_worker.volume_driver != \
+                    constants.VOLUME_NOOP_DRIVER:
+                self.volume_driver.delete_volume(volume_id)
             LOG.exception("Nova failed to build the instance due to: %s", e)
             raise exceptions.ComputeBuildException(fault=e)

@@ -216,6 +242,7 @@ class VirtualMachineManager(compute_base.ComputeBase):

         lb_network_ip = None
         availability_zone = None
+        image_id = None
         fault = None

         try:
@@ -242,12 +269,34 @@ class VirtualMachineManager(compute_base.ComputeBase):
                       'os-interfaces extension failed.')

         fault = getattr(nova_response, 'fault', None)
+        if CONF.controller_worker.volume_driver == \
+                constants.VOLUME_NOOP_DRIVER:
+            image_id = nova_response.image.get("id")
+        else:
+            try:
+                volumes = self._nova_client.volumes.get_server_volumes(
+                    nova_response.id)
+            except Exception:
+                LOG.debug('Extracting volumes through nova '
+                          'os-volumes extension failed.')
+                volumes = []
+            if not volumes:
+                LOG.warning('Boot volume not found for volume backed '
+                            'amphora instance %s ', nova_response.id)
+            else:
+                if len(volumes) > 1:
+                    LOG.warning('Found more than one (%s) volumes '
+                                'for amphora instance %s',
+                                len(volumes), nova_response.id)
+                volume_id = volumes[0].volumeId
+                image_id = self.volume_driver.get_image_from_volume(volume_id)
+
         response = models.Amphora(
             compute_id=nova_response.id,
             status=nova_response.status,
             lb_network_ip=lb_network_ip,
             cached_zone=availability_zone,
-            image_id=nova_response.image.get("id"),
+            image_id=image_id,
             compute_flavor=nova_response.flavor.get("id")
         )
         return response, fault
@@ -10,6 +10,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import cinderclient.v3
 import glanceclient.v2
 import mock
 import neutronclient.v2_0
@@ -135,3 +136,41 @@ class TestGlanceAuth(base.TestCase):
             region="test-region", service_name="glanceEndpoint1",
             endpoint="test-endpoint", endpoint_type='publicURL', insecure=True)
         self.assertIs(bc1, bc2)
+
+
+class TestCinderAuth(base.TestCase):
+
+    def setUp(self):
+        # Reset the session and client
+        clients.CinderAuth.cinder_client = None
+        keystone._SESSION = None
+
+        super(TestCinderAuth, self).setUp()
+
+    @mock.patch('keystoneauth1.session.Session', mock.Mock())
+    def test_get_cinder_client(self):
+        # There should be no existing client
+        self.assertIsNone(
+            clients.CinderAuth.cinder_client
+        )
+
+        # Mock out the keystone session and get the client
+        keystone._SESSION = mock.MagicMock()
+        bc1 = clients.CinderAuth.get_cinder_client(
+            region=None, endpoint_type='publicURL', insecure=True)
+
+        # Our returned client should also be the saved client
+        self.assertIsInstance(
+            clients.CinderAuth.cinder_client,
+            cinderclient.v3.client.Client
+        )
+        self.assertIs(
+            clients.CinderAuth.cinder_client,
+            bc1
+        )
+
+        # Getting the session again should return the same object
+        bc2 = clients.CinderAuth.get_cinder_client(
+            region="test-region", service_name="cinderEndpoint1",
+            endpoint="test-endpoint", endpoint_type='publicURL', insecure=True)
+        self.assertIs(bc1, bc2)
@@ -96,12 +96,13 @@ class TestNovaClient(base.TestCase):
         conf.config(group="controller_worker",
                     amp_boot_network_list=['1', '2'])
         self.conf = conf
+        self.fake_image_uuid = uuidutils.generate_uuid()

         self.amphora = models.Amphora(
             compute_id=uuidutils.generate_uuid(),
             status='ACTIVE',
             lb_network_ip='10.0.0.1',
-            image_id=uuidutils.generate_uuid(),
+            image_id=self.fake_image_uuid,
             compute_flavor=uuidutils.generate_uuid()
         )

@@ -148,6 +149,9 @@ class TestNovaClient(base.TestCase):
         self.server_group_mock.policy = self.server_group_policy
         self.server_group_mock.id = self.server_group_id

+        self.volume_mock = mock.MagicMock()
+        setattr(self.volume_mock, 'volumeId', '1')
+
         self.port_id = uuidutils.generate_uuid()
         self.compute_id = uuidutils.generate_uuid()
         self.network_id = uuidutils.generate_uuid()
@@ -177,7 +181,39 @@ class TestNovaClient(base.TestCase):
             userdata='Blah',
             config_drive=True,
             scheduler_hints=None,
-            availability_zone=None
+            availability_zone=None,
+            block_device_mapping={}
+        )
+
+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_build_with_cinder_volume(self, mock_driver):
+        self.conf.config(group="controller_worker",
+                         volume_driver='volume_cinder_driver')
+        self.manager.volume_driver = mock_driver
+        mock_driver.create_volume_from_image.return_value = 1
+        amphora_id = self.manager.build(amphora_flavor=1, image_id=1,
+                                        key_name=1,
+                                        sec_groups=1,
+                                        network_ids=[1],
+                                        port_ids=[2],
+                                        user_data='Blah',
+                                        config_drive_files='Files Blah')
+
+        self.assertEqual(self.amphora.compute_id, amphora_id)
+        mock_driver.create_volume_from_image.assert_called_with(1)
+        self.manager.manager.create.assert_called_with(
+            name="amphora_name",
+            nics=[{'net-id': 1}, {'port-id': 2}],
+            image=None,
+            flavor=1,
+            key_name=1,
+            security_groups=1,
+            files='Files Blah',
+            userdata='Blah',
+            config_drive=True,
+            scheduler_hints=None,
+            availability_zone=None,
+            block_device_mapping={'vda': '1:::true'}
         )

     def test_build_with_availability_zone(self):
@@ -205,7 +241,8 @@ class TestNovaClient(base.TestCase):
             userdata='Blah',
             config_drive=True,
             scheduler_hints=None,
-            availability_zone=FAKE_AZ
+            availability_zone=FAKE_AZ,
+            block_device_mapping={}
         )

     def test_build_with_random_amphora_name_length(self):
@@ -241,7 +278,8 @@ class TestNovaClient(base.TestCase):
             userdata='Blah',
             config_drive=True,
             scheduler_hints=None,
-            availability_zone=None
+            availability_zone=None,
+            block_device_mapping={}
         )

     def test_bad_build(self):
@@ -312,6 +350,22 @@ class TestNovaClient(base.TestCase):
         self.assertIsNone(amphora.lb_network_ip)
         self.nova_response.interface_list.called_with()

+    @mock.patch('stevedore.driver.DriverManager.driver')
+    def test_translate_amphora_use_cinder(self, mock_driver):
+        self.conf.config(group="controller_worker",
+                         volume_driver='volume_cinder_driver')
+        volumes_manager = self.manager._nova_client.volumes
+        volumes_manager.get_server_volumes.return_value = [self.volume_mock]
+        self.manager.volume_driver = mock_driver
+        mock_driver.get_image_from_volume.return_value = self.fake_image_uuid
+        amphora, fault = self.manager._translate_amphora(self.nova_response)
+        self.assertEqual(self.amphora, amphora)
+        self.assertEqual(self.nova_response.fault, fault)
+        self.nova_response.interface_list.called_with()
+        volumes_manager.get_server_volumes.assert_called_with(
+            self.nova_response.id)
+        mock_driver.get_image_from_volume.assert_called_with('1')
+
     def test_create_server_group(self):
         self.manager.server_groups.create.return_value = self.server_group_mock

octavia/tests/unit/volume/__init__.py (new, empty file)
octavia/tests/unit/volume/drivers/__init__.py (new, empty file)

octavia/tests/unit/volume/drivers/test_cinder_driver.py (new file, 99 lines)
@@ -0,0 +1,99 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from cinderclient import exceptions as cinder_exceptions
import mock
from oslo_config import cfg
from oslo_config import fixture as oslo_fixture
from oslo_utils import uuidutils

from octavia.common import exceptions
import octavia.tests.unit.base as base
import octavia.volume.drivers.cinder_driver as cinder_common


CONF = cfg.CONF


class TestCinderClient(base.TestCase):

    def setUp(self):
        fake_uuid1 = uuidutils.generate_uuid()
        fake_uuid2 = uuidutils.generate_uuid()
        fake_uuid3 = uuidutils.generate_uuid()

        conf = self.useFixture(oslo_fixture.Config(cfg.CONF))
        self.conf = conf

        self.manager = cinder_common.VolumeManager()
        self.manager.manager = mock.MagicMock()

        self.cinder_response = mock.Mock()
        self.cinder_response.id = fake_uuid1

        self.manager.manager.get.return_value.status = 'available'
        self.manager.manager.create.return_value = self.cinder_response
        self.image_id = fake_uuid2
        self.volume_id = fake_uuid3

        super(TestCinderClient, self).setUp()

    def test_create_volume_from_image(self):
        self.conf.config(group="controller_worker",
                         volume_driver='volume_cinder_driver')
        self.conf.config(group="cinder", volume_create_retry_interval=0)
        self.manager.create_volume_from_image(self.image_id)
        self.manager.manager.create.assert_called_with(
            size=16,
            volume_type=None,
            availability_zone=None,
            imageRef=self.image_id)

    def test_create_volume_from_image_error(self):
        self.conf.config(group="controller_worker",
                         volume_driver='volume_cinder_driver')
        self.conf.config(group="cinder", volume_create_retry_interval=0)
        self.manager.manager.get.return_value.status = 'error'
        self.assertRaises(cinder_exceptions.ResourceInErrorState,
                          self.manager.create_volume_from_image,
                          self.image_id)

    def test_build_cinder_volume_timeout(self):
        self.conf.config(group="controller_worker",
                         volume_driver='volume_cinder_driver')
        self.conf.config(group="cinder", volume_create_timeout=0)
        self.conf.config(group="cinder", volume_create_retry_interval=0)
        self.manager.manager.get.return_value.status = 'build'
        self.manager.create_volume_from_image.retry.sleep = mock.Mock()
        self.assertRaises(cinder_exceptions.TimeoutException,
                          self.manager.create_volume_from_image,
                          self.image_id)

    def test_get_image_from_volume(self):
        self.conf.config(group="controller_worker",
                         volume_driver='volume_cinder_driver')
        self.conf.config(group="cinder",
                         volume_create_retry_interval=0)
        self.manager.get_image_from_volume(self.volume_id)
        self.manager.manager.get.assert_called_with(
            self.volume_id)

    def test_get_image_from_volume_error(self):
        self.conf.config(group="controller_worker",
                         volume_driver='volume_cinder_driver')
        self.conf.config(group="cinder",
                         volume_create_retry_interval=0)
        self.manager.manager.get.side_effect = [
            exceptions.VolumeGetException('test_exception')]
        self.assertRaises(exceptions.VolumeGetException,
                          self.manager.get_image_from_volume,
                          self.volume_id)
octavia/tests/unit/volume/drivers/test_volume_noop_driver.py (new file, 46 lines)
@@ -0,0 +1,46 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_config import cfg
from oslo_utils import uuidutils

import octavia.tests.unit.base as base
from octavia.volume.drivers.noop_driver import driver


CONF = cfg.CONF


class TestNoopVolumeDriver(base.TestCase):
    FAKE_UUID_1 = uuidutils.generate_uuid()
    FAKE_UUID_2 = uuidutils.generate_uuid()

    def setUp(self):
        super(TestNoopVolumeDriver, self).setUp()
        self.driver = driver.NoopVolumeDriver()

        self.image_id = self.FAKE_UUID_1
        self.volume_id = self.FAKE_UUID_2

    def test_create_volume_from_image(self):
        self.driver.create_volume_from_image(self.image_id)
        self.assertEqual((self.image_id, 'create_volume_from_image'),
                         self.driver.driver.volumeconfig[(
                             self.image_id
                         )])

    def test_get_image_from_volume(self):
        self.driver.get_image_from_volume(self.volume_id)
        self.assertEqual((self.volume_id, 'get_image_from_volume'),
                         self.driver.driver.volumeconfig[(
                             self.volume_id
                         )])
octavia/volume/__init__.py (new, empty file)
octavia/volume/drivers/__init__.py (new, empty file)

octavia/volume/drivers/cinder_driver.py (new file, 123 lines)
@@ -0,0 +1,123 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import time

from cinderclient import exceptions as cinder_exceptions
from oslo_config import cfg
from oslo_log import log as logging
from tenacity import retry
from tenacity import stop_after_attempt

from octavia.common import clients
from octavia.common import constants
from octavia.common import exceptions
from octavia.volume import volume_base

LOG = logging.getLogger(__name__)

CONF = cfg.CONF


class VolumeManager(volume_base.VolumeBase):
    '''Volume implementation of virtual machines via cinder.'''

    def __init__(self):
        super(VolumeManager, self).__init__()
        # Must initialize cinder api
        self._cinder_client = clients.CinderAuth.get_cinder_client(
            service_name=CONF.cinder.service_name,
            endpoint=CONF.cinder.endpoint,
            region=CONF.cinder.region_name,
            endpoint_type=CONF.cinder.endpoint_type,
            insecure=CONF.cinder.insecure,
            cacert=CONF.cinder.ca_certificates_file
        )
        self.manager = self._cinder_client.volumes

    @retry(reraise=True,
           stop=stop_after_attempt(CONF.cinder.volume_create_max_retries))
    def create_volume_from_image(self, image_id):
        """Create cinder volume

        :param image_id: ID of amphora image

        :return volume id
        """
        volume = self.manager.create(
            size=CONF.cinder.volume_size,
            volume_type=CONF.cinder.volume_type,
            availability_zone=CONF.cinder.availability_zone,
            imageRef=image_id)
        resource_status = self.manager.get(volume.id).status

        status = constants.CINDER_STATUS_AVAILABLE
        start = int(time.time())

        while resource_status != status:
            time.sleep(CONF.cinder.volume_create_retry_interval)
            instance_volume = self.manager.get(volume.id)
            resource_status = instance_volume.status
            if resource_status == constants.CINDER_STATUS_ERROR:
                LOG.error('Error creating %s', instance_volume.id)
                instance_volume.delete()
                raise cinder_exceptions.ResourceInErrorState(
                    obj=volume, fault_msg='Cannot create volume')
            if int(time.time()) - start >= CONF.cinder.volume_create_timeout:
                LOG.error('Timed out waiting to create cinder volume %s',
                          instance_volume.id)
                instance_volume.delete()
                raise cinder_exceptions.TimeoutException(
                    obj=volume, action=constants.CINDER_ACTION_CREATE_VOLUME)
        return volume.id

    def delete_volume(self, volume_id):
        """Delete cinder volume

        :param volume_id: ID of amphora boot volume
        """
        LOG.debug('Deleting cinder volume %s', volume_id)
        try:
            instance_volume = self.manager.get(volume_id)
            try:
                instance_volume.delete()
                LOG.debug("Deleted volume %s", volume_id)
            except Exception:
                LOG.exception("Error deleting cinder volume %s",
                              volume_id)
                raise exceptions.VolumeDeleteException()
        except cinder_exceptions.NotFound:
            LOG.warning("Volume %s not found: assuming already deleted",
                        volume_id)

    def get_image_from_volume(self, volume_id):
        """Get glance image from volume

        :param volume_id: ID of amphora boot volume

        :return image id
        """
        image_id = None
        LOG.debug('Get glance image for volume %s', volume_id)
        try:
            instance_volume = self.manager.get(volume_id)
        except cinder_exceptions.NotFound:
            LOG.exception("Volume %s not found", volume_id)
            raise exceptions.VolumeGetException()
        if hasattr(instance_volume, 'volume_image_metadata'):
            image_id = instance_volume.volume_image_metadata.get("image_id")
        else:
            LOG.error("Volume %s has no image metadata", volume_id)
            image_id = None
        return image_id
octavia/volume/drivers/noop_driver/__init__.py (new, empty file)

octavia/volume/drivers/noop_driver/driver.py (new file, 60 lines)
@@ -0,0 +1,60 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from oslo_utils import uuidutils

from octavia.volume import volume_base as driver_base

LOG = logging.getLogger(__name__)


class NoopManager(object):
    def __init__(self):
        super(NoopManager, self).__init__()
        self.volumeconfig = {}

    def create_volume_from_image(self, image_id):
        LOG.debug("Volume %s no-op, image id %s",
                  self.__class__.__name__, image_id)
        self.volumeconfig[image_id] = (image_id, 'create_volume_from_image')
        volume_id = uuidutils.generate_uuid()
        return volume_id

    def delete_volume(self, volume_id):
        LOG.debug("Volume %s no-op, volume id %s",
                  self.__class__.__name__, volume_id)
        self.volumeconfig[volume_id] = (volume_id, 'delete')

    def get_image_from_volume(self, volume_id):
        LOG.debug("Volume %s no-op, volume id %s",
                  self.__class__.__name__, volume_id)
        self.volumeconfig[volume_id] = (volume_id, 'get_image_from_volume')
        image_id = uuidutils.generate_uuid()
        return image_id


class NoopVolumeDriver(driver_base.VolumeBase):
    def __init__(self):
        super(NoopVolumeDriver, self).__init__()
        self.driver = NoopManager()

    def create_volume_from_image(self, image_id):
        volume_id = self.driver.create_volume_from_image(image_id)
        return volume_id

    def delete_volume(self, volume_id):
        self.driver.delete_volume(volume_id)

    def get_image_from_volume(self, volume_id):
        image_id = self.driver.get_image_from_volume(volume_id)
        return image_id
octavia/volume/volume_base.py (new file, 46 lines)
@@ -0,0 +1,46 @@
# Copyright 2011-2019 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc

import six


@six.add_metaclass(abc.ABCMeta)
class VolumeBase(object):

    @abc.abstractmethod
    def create_volume_from_image(self, image_id):
        """Create volume for instance

        :param image_id: ID of amphora image

        :return volume id
        """

    @abc.abstractmethod
    def delete_volume(self, volume_id):
        """Delete volume

        :param volume_id: ID of amphora volume
        """

    @abc.abstractmethod
    def get_image_from_volume(self, volume_id):
        """Get glance image from volume

        :param volume_id: ID of amphora volume

        :return image id
        """
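As an aside (not part of the patch), a minimal sketch of what an out-of-tree implementation of this interface could look like; the class and return values below are made up, and such a driver would be wired in through the octavia.volume.drivers entry-point namespace added in setup.cfg further down:

    # Hypothetical third-party driver; names and IDs are illustrative only.
    from octavia.volume import volume_base

    class MyVolumeDriver(volume_base.VolumeBase):
        def create_volume_from_image(self, image_id):
            # Ask the backend for a bootable volume built from image_id.
            return 'example-volume-id'

        def delete_volume(self, volume_id):
            # Remove the backend volume; no return value expected.
            pass

        def get_image_from_volume(self, volume_id):
            # Look up the source image recorded on the volume.
            return 'example-image-id'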
@@ -0,0 +1,14 @@
---
features:
  - |
    Allow creation of volume-backed amphorae.
    Many production deployments use volume-backed instances for the extra
    flexibility they provide. Octavia will create a volume and attach it to
    the amphora.

    New settings:
    * `volume_driver`: Whether to use a volume driver (cinder) to create volume-backed amphorae.
    * `volume_size`: Size of the root volume for the amphora instance when using Cinder.
    * `volume_type`: Type of volume for the amphora root disk.
    * `volume_create_retry_interval`: Interval to wait until the volume becomes available.
    * `volume_create_timeout`: Timeout to wait for volume creation to succeed.
    * `volume_create_max_retries`: Maximum number of retries to create the volume.
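To make the relationship between these settings concrete, an illustrative [cinder] snippet (the values shown are the patch defaults; per the cinder driver above, the volume status is polled every volume_create_retry_interval seconds, creation fails after volume_create_timeout seconds, and the whole attempt is retried up to volume_create_max_retries times):

    [cinder]
    volume_create_retry_interval = 5
    volume_create_timeout = 300
    volume_create_max_retries = 5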
@@ -34,6 +34,7 @@ PyMySQL>=0.7.6 # MIT License
 python-barbicanclient>=4.5.2 # Apache-2.0
 python-glanceclient>=2.8.0 # Apache-2.0
 python-novaclient>=9.1.0 # Apache-2.0
+python-cinderclient>=3.3.0 # Apache-2.0
 pyOpenSSL>=17.1.0 # Apache-2.0
 WSME>=0.8.0 # MIT
 Jinja2>=2.10 # BSD License (3 clause)
@@ -79,6 +79,9 @@ octavia.network.drivers =
     network_noop_driver = octavia.network.drivers.noop_driver.driver:NoopNetworkDriver
     allowed_address_pairs_driver = octavia.network.drivers.neutron.allowed_address_pairs:AllowedAddressPairsDriver
     containers_driver = octavia.network.drivers.neutron.containers:ContainersDriver
+octavia.volume.drivers =
+    volume_noop_driver = octavia.volume.drivers.noop_driver.driver:NoopVolumeDriver
+    volume_cinder_driver = octavia.volume.drivers.cinder_driver:VolumeManager
 octavia.distributor.drivers =
     distributor_noop_driver = octavia.distributor.drivers.noop_driver.driver:NoopDistributorDriver
     single_VIP_amphora = octavia.distributor.drivers.single_VIP_amphora.driver:SingleVIPAmpDistributorDriver