fix UT for cinderclient

- bump version from 2 to 3 for cinderclient
- update aws ec2 interface for create_volume and create_network_interface:
  add client_token param
- fix describe_network_interface: something was changed in neutron ports output
- set metadata port for OVN conf file also

Change-Id: Ie3e5a5930d5a8159050ecc0900239935558dddd7
This commit is contained in:
parent 2a5a97344d
commit f9e75281b4
@@ -21,4 +21,4 @@ dist
 cover/
 .idea
 ec2api/tests/unit/test_cert.pem
+.DS_Store
@@ -128,7 +128,8 @@ for Neutron
 [DEFAULT]
 nova_metadata_port = 8789

-to /etc/neutron/metadata_agent.ini
+to /etc/neutron/metadata_agent.ini for legacy neutron or
+to neutron_ovn_metadata_agent.ini for OVN

 then restart neutron-metadata service.
@@ -223,8 +223,9 @@ function configure_ec2api {
     # metadata configuring
     iniset $EC2API_CONF_FILE DEFAULT metadata_workers "$API_WORKERS"
     if [[ ,${ENABLED_SERVICES} =~ ,"q-" ]]; then
-        # with neutron
+        # with neutron (legacy and OVN)
         iniset $Q_META_CONF_FILE DEFAULT nova_metadata_port 8789
+        iniset $OVN_META_CONF DEFAULT nova_metadata_port 8789
     else
         # with nova-network
         iniset $NOVA_CONF DEFAULT metadata_port 8789
@@ -9,7 +9,8 @@ To configure OpenStack for EC2 API metadata service for Neutron add:
 [DEFAULT]
 nova_metadata_port = 8789

-to ``/etc/neutron/metadata_agent.ini``
+to ``/etc/neutron/metadata_agent.ini`` for legacy neutron or
+to ``neutron_ovn_metadata_agent.ini`` for OVN

 then restart neutron-metadata service.
@@ -819,10 +819,10 @@ class CloudController(object):

     @module_and_param_types(volume, 'str', 'int',
                             'snap_id', 'str', 'int',
-                            'bool', 'str')
+                            'bool', 'str', 'str')
     def create_volume(self, context, availability_zone=None, size=None,
                       snapshot_id=None, volume_type=None, iops=None,
-                      encrypted=None, kms_key_id=None):
+                      encrypted=None, kms_key_id=None, client_token=None):
         """Creates an EBS volume.

         Args:
@@ -846,6 +846,8 @@ class CloudController(object):
             kms_key_id (str): The full ARN of AWS KMS master key to use when
                 creating the encrypted volume.
                 Not used now.
+            client_token (str): Unique, case-sensitive identifier that you
+                provide to ensure the idempotency of the request.

         Returns:
             Information about the volume.
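Note: a minimal usage sketch of the idempotency this parameter adds, assuming
a boto3 client pointed at an ec2-api endpoint (the endpoint URL, region, and
credentials below are placeholders):

import uuid

import boto3  # any EC2-compatible SDK would do; boto3 is only an example

ec2 = boto3.client('ec2',
                   endpoint_url='http://controller:8788/',  # placeholder
                   region_name='RegionOne',                 # placeholder
                   aws_access_key_id='<access key>',
                   aws_secret_access_key='<secret key>')

token = str(uuid.uuid4())
first = ec2.create_volume(AvailabilityZone='nova', Size=1, ClientToken=token)
# A retry with the same token returns the volume created above instead of
# creating a duplicate; that is what the new client_token lookup implements.
retry = ec2.create_volume(AvailabilityZone='nova', Size=1, ClientToken=token)
assert first['VolumeId'] == retry['VolumeId']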
@@ -1691,13 +1693,15 @@ class VpcCloudController(CloudController):
                             'dummy',
                             'int',
                             'str',
-                            'sg_ids')
+                            'sg_ids',
+                            'str')
     def create_network_interface(self, context, subnet_id,
                                  private_ip_address=None,
                                  private_ip_addresses=None,
                                  secondary_private_ip_address_count=None,
                                  description=None,
-                                 security_group_id=None):
+                                 security_group_id=None,
+                                 client_token=None):
         """Creates a network interface in the specified subnet.

         Args:
@@ -1724,6 +1728,8 @@ class VpcCloudController(CloudController):
             description (str): A description for the network interface.
             security_group_id (list of str): The list of security group IDs
                 for the network interface.
+            client_token (str): Unique, case-sensitive identifier that you
+                provide to ensure the idempotency of the request.

         Returns:
             The network interface that was created.
@@ -156,13 +156,13 @@ def create_image(context, instance_id, name=None, description=None,
             image['os_id'] = os_image_id
             db_api.update_item(context, image)
         except Exception:
-            LOG.exception('Failed to complete image %s creation', image.id)
+            LOG.exception('Failed to complete image %s creation', image['id'])
             try:
                 image['state'] = 'failed'
                 db_api.update_item(context, image)
             except Exception:
                 LOG.warning("Couldn't set 'failed' state for db image %s",
-                            image.id, exc_info=True)
+                            image['id'], exc_info=True)

         try:
             os_instance.start()
@@ -47,7 +47,22 @@ def create_network_interface(context, subnet_id,
                              private_ip_addresses=None,
                              secondary_private_ip_address_count=None,
                              description=None,
-                             security_group_id=None):
+                             security_group_id=None,
+                             client_token=None):
+
+    if client_token:
+        result = describe_network_interfaces(context,
+            filter=[{'name': 'client-token',
+                     'value': [client_token]}])
+        if result['networkInterfaceSet']:
+            if len(result['networkInterfaceSet']) > 1:
+                LOG.error('describe_network_interfaces returns %s '
+                          'network_interfaces, but 1 is expected.',
+                          len(result['networkInterfaceSet']))
+                LOG.error('Requested client token: %s', client_token)
+                LOG.error('Result: %s', result)
+            return result['networkInterfaceSet'][0]
+
     subnet = ec2utils.get_db_item(context, subnet_id)
     if subnet is None:
         raise exception.InvalidSubnetIDNotFound(id=subnet_id)
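Reviewer note: the same look-up-before-create pattern is inlined again in
volume.py below. A hypothetical shared helper could factor it out; the names
get_by_client_token, describe_func, and result_key are illustrative only, not
part of this change:

import logging

LOG = logging.getLogger(__name__)


def get_by_client_token(context, client_token, describe_func, result_key):
    """Return the previously created item for client_token, if any."""
    if not client_token:
        return None
    result = describe_func(
        context, filter=[{'name': 'client-token', 'value': [client_token]}])
    items = result[result_key]
    if not items:
        return None
    if len(items) > 1:
        # A token should match at most one item; log the anomaly and fall
        # through to the first match, mirroring the inlined code above.
        LOG.error('%s items found for client token %s, but 1 is expected.',
                  len(items), client_token)
    return items[0]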
@@ -206,6 +221,7 @@ class NetworkInterfaceDescriber(common.TaggableItemsDescriber):
         'attachment.attach.time': ('attachment', 'attachTime'),
         'attachment.delete-on-termination': ('attachment',
                                              'deleteOnTermination'),
+        'client-token': 'clientToken',
         'description': 'description',
         'group-id': ['groupSet', 'groupId'],
         'group-name': ['groupSet', 'groupName'],
@@ -188,14 +188,15 @@ def _format_subnet(context, subnet, os_subnet, os_network, os_ports):
     # NOTE(Alex) First and last IP addresses are system ones.
     ip_count = pow(2, 32 - cidr_range) - 2
     # TODO(Alex): Probably performance-killer. Will have to optimize.
-    dhcp_port_accounted = False
+    service_ports = ['network:dhcp', 'network:distributed']
+    service_port_accounted = False
     for port in os_ports:
         for fixed_ip in port.get('fixed_ips', []):
             if fixed_ip['subnet_id'] == os_subnet['id']:
                 ip_count -= 1
-                if port['device_owner'] == 'network:dhcp':
-                    dhcp_port_accounted = True
-    if not dhcp_port_accounted:
+                if port['device_owner'] in service_ports:
+                    service_port_accounted = True
+    if not service_port_accounted:
         ip_count -= 1
     return {
         'subnetId': subnet['id'],
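To make the new accounting concrete: a /24 yields pow(2, 8) - 2 = 254
assignable addresses, every fixed IP on the subnet subtracts one, and one
extra address stays reserved unless a DHCP or OVN distributed port was
already counted. A self-contained walk-through with made-up ports:

# Worked example of the available-IP computation above, for a /24 subnet
# with two ports on the subnet, one of which is an OVN distributed port.
cidr_range = 24
os_ports = [
    {'device_owner': 'network:distributed',      # OVN metadata port
     'fixed_ips': [{'subnet_id': 'subnet-1'}]},
    {'device_owner': 'compute:nova',
     'fixed_ips': [{'subnet_id': 'subnet-1'}]},
    {'device_owner': 'compute:nova',
     'fixed_ips': [{'subnet_id': 'subnet-2'}]},   # other subnet: ignored
]

ip_count = pow(2, 32 - cidr_range) - 2            # 254 after network/broadcast
service_ports = ['network:dhcp', 'network:distributed']
service_port_accounted = False
for port in os_ports:
    for fixed_ip in port.get('fixed_ips', []):
        if fixed_ip['subnet_id'] == 'subnet-1':
            ip_count -= 1
            if port['device_owner'] in service_ports:
                service_port_accounted = True
if not service_port_accounted:
    ip_count -= 1                                 # reserve the service IP
print(ip_count)  # 252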
@@ -37,7 +37,21 @@ Validator = common.Validator

 def create_volume(context, availability_zone=None, size=None,
                   snapshot_id=None, volume_type=None, iops=None,
-                  encrypted=None, kms_key_id=None):
+                  encrypted=None, kms_key_id=None, client_token=None):
+
+    if client_token:
+        result = describe_volumes(context,
+            filter=[{'name': 'client-token',
+                     'value': [client_token]}])
+        if result['volumeSet']:
+            if len(result['volumeSet']) > 1:
+                LOG.error('describe_volumes returns %s '
+                          'volumes, but 1 is expected.',
+                          len(result['volumeSet']))
+                LOG.error('Requested client token: %s', client_token)
+                LOG.error('Result: %s', result)
+            return result['volumeSet'][0]
+
     if snapshot_id is not None:
         snapshot = ec2utils.get_db_item(context, snapshot_id)
         os_snapshot_id = snapshot['os_id']
@@ -121,6 +135,7 @@ class VolumeDescriber(common.TaggableItemsDescriber):
     SORT_KEY = 'volumeId'
     FILTER_MAP = {
         'availability-zone': 'availabilityZone',
+        'client-token': 'clientToken',
         'create-time': 'createTime',
         'encrypted': 'encrypted',
         'size': 'size',
@@ -115,6 +115,7 @@ def get_metadata_item(context, path_tokens, os_instance_id, remote_ip,

     metadata = _build_metadata(context, ec2_instance, ec2_reservation,
                                os_instance_id, remote_ip)
+    LOG.debug('get_metadata_item: result %s', str(metadata))
     cache = {'metadata': metadata,
              'owner_id': ec2_reservation['ownerId']}

@@ -128,13 +129,18 @@ def get_metadata_item(context, path_tokens, os_instance_id, remote_ip,

 def _get_ec2_instance_and_reservation(context, os_instance_id):
     instance_id = ec2utils.os_id_to_ec2_id(context, 'i', os_instance_id)
+    LOG.debug('_get_ec2_instance_and_reservation(%s)', os_instance_id)
     try:
         ec2_reservations = instance_api.describe_instances(
             context, [instance_id])
+        LOG.debug('_get_ec2_instance_and_reservation: result by id %s',
+                  str(ec2_reservations))
     except exception.InvalidInstanceIDNotFound:
         ec2_reservations = instance_api.describe_instances(
             context, filter=[{'name': 'instance-id',
                               'value': [instance_id]}])
+        LOG.debug('_get_ec2_instance_and_reservation: result by name %s',
+                  str(ec2_reservations))
     if (len(ec2_reservations['reservationSet']) != 1 or
             len(ec2_reservations['reservationSet'][0]['instancesSet']) != 1):
         LOG.error('Failed to get metadata for instance id: %s',
@@ -58,7 +58,7 @@ def create_context(is_os_admin=False):
                if is_os_admin else
                mock.sentinel.session)
     session.get_endpoint = mock.Mock(name="get_endpoint")
-    session.get_endpoint.return_value = 'v2'
+    session.get_endpoint.return_value = 'v3'
     return ec2api.context.RequestContext(fakes.ID_OS_USER, fakes.ID_OS_PROJECT,
                                          is_os_admin=is_os_admin,
                                          session=session)
@@ -121,7 +121,7 @@ class MockOSMixin(object):

     def mock_cinder(self):
         cinder_patcher = mock.patch('cinderclient.client.Client')
-        cinder = mock.create_autospec(cinderclient.Client('2'))
+        cinder = mock.create_autospec(cinderclient.Client('3'))
         cinder_patcher.start().return_value = cinder
         self.addCleanup(cinder_patcher.stop)
         return cinder
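Note on the test change: mock.create_autospec builds the mock from a real
client object, so the template version decides which attributes and call
signatures the mocked cinder accepts. A minimal sketch, assuming
python-cinderclient with v3 support is installed:

from unittest import mock

from cinderclient import client as cinderclient

# The mock mirrors the v3 client's API surface: existing managers work,
# while anything absent from the real object raises AttributeError.
cinder = mock.create_autospec(cinderclient.Client('3'))
cinder.volumes.create(size=1)   # OK: 'volumes' exists on the v3 client
# cinder.bogus_manager         # would raise AttributeError under autospec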
@@ -116,7 +116,7 @@ class ClientsTestCase(base.BaseTestCase):
         context = mock.NonCallableMock(session=mock.sentinel.session)
         res = clients.cinder(context)
         self.assertEqual(cinder.return_value, res)
-        cinder.assert_called_with('2', service_type='volumev3',
+        cinder.assert_called_with('3', service_type='volumev3',
                                   session=mock.sentinel.session)

     @mock.patch('keystoneclient.client.Client')
@@ -124,5 +124,5 @@ class ClientsTestCase(base.BaseTestCase):
         context = mock.NonCallableMock(session=mock.sentinel.session)
         res = clients.keystone(context)
         self.assertEqual(keystone.return_value, res)
-        keystone.assert_called_with(auth_url='v2',
+        keystone.assert_called_with(auth_url='v3',
                                     session=mock.sentinel.session)
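For context, the updated assertion implies a clients.cinder factory of
roughly this shape (a sketch of what the test expects, not the actual
ec2api/clients.py):

from cinderclient import client as cinderclient


def cinder(context):
    # Request the v3 volume API ('3') against the volumev3 service type,
    # reusing the keystone session carried by the request context.
    return cinderclient.Client('3', service_type='volumev3',
                               session=context.session)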