Merge "Add `additive_only` parameter to Batch Member call"

Zuul 2019-09-11 20:43:47 +00:00 committed by Gerrit Code Review
commit 510525a92b
7 changed files with 127 additions and 22 deletions

View File

@@ -76,6 +76,12 @@ path-provider:
 ###############################################################################
 # Query fields
 ###############################################################################
+additive-only:
+  description: |
+    If ``true`` no members will be deleted during the batch operation.
+  in: query
+  required: false
+  type: boolean
 cascade-delete:
   description: |
     If ``true`` will delete all child objects of the load balancer.

View File

@@ -367,11 +367,18 @@ For example, assume a pool currently has two members. These members have the
 following address/port combinations: '192.0.2.15:80' and '192.0.2.16:80'.
 Now assume a PUT request is made that includes members with address/port
 combinations: '192.0.2.16:80' and '192.0.2.17:80'.
 The member '192.0.2.15:80' will be deleted, because it was not in the request.
 The member '192.0.2.16:80' will be updated to match the request data for that
 member, because it was matched.
 The member '192.0.2.17:80' will be created, because no such member existed.
 
+The optional parameter ``additive_only`` when defined as ``true`` will skip
+deletions for members missing from the provided list. If this were set in the
+above example, the member '192.0.2.15:80' would have remained in the pool.
+
 If the request is valid, the service returns the ``Accepted (202)``
 response code. To confirm the updates, check that the member provisioning
 statuses are ``ACTIVE`` for new or updated members, and that any unspecified
@@ -397,6 +404,7 @@ Request
 .. rest_parameters:: ../parameters.yaml
 
+   - additive_only: additive-only
    - admin_state_up: admin_state_up-default-optional
    - address: address
    - backup: backup-optional
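
To illustrate the behavior documented above, here is a rough client-side sketch (the endpoint URL, pool ID, and token are placeholders; only the ``additive_only`` query parameter and the ``members`` body key come from this change):

    import requests

    # Hypothetical values -- substitute a real Octavia endpoint, pool ID and token.
    OCTAVIA_ENDPOINT = 'http://203.0.113.10:9876'
    POOL_ID = '5a9a7aee-4d9a-4aaa-9f91-8f19b7a1a6a0'
    TOKEN = 'gAAAAA-example-token'

    # Batch member update: members missing from this list would normally be
    # deleted; with additive_only=true they are left in the pool untouched.
    url = ('{endpoint}/v2/lbaas/pools/{pool_id}/members'
           '?additive_only=true').format(endpoint=OCTAVIA_ENDPOINT,
                                         pool_id=POOL_ID)
    body = {
        'members': [
            {'address': '192.0.2.16', 'protocol_port': 80},
            {'address': '192.0.2.17', 'protocol_port': 80},
        ]
    }
    resp = requests.put(url, json=body, headers={'X-Auth-Token': TOKEN})
    print(resp.status_code)  # 202 Accepted is expected on success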

View File

@@ -79,6 +79,9 @@ class RootController(rest.RestController):
         self._add_a_version(versions, 'v2.9', 'v2', 'SUPPORTED',
                             '2019-03-04T00:00:00Z', host_url)
         # Healthmonitor host header
-        self._add_a_version(versions, 'v2.10', 'v2', 'CURRENT',
+        self._add_a_version(versions, 'v2.10', 'v2', 'SUPPORTED',
                             '2019-03-05T00:00:00Z', host_url)
+        # Additive batch member update
+        self._add_a_version(versions, 'v2.11', 'v2', 'CURRENT',
+                            '2019-06-24T00:00:00Z', host_url)
         return {'versions': versions}
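
The new v2.11 entry only advertises the feature; a client that wants to be defensive could check the discovered version list before relying on ``additive_only``. A hypothetical sketch (the endpoint URL is a placeholder):

    import requests

    OCTAVIA_ENDPOINT = 'http://203.0.113.10:9876'  # placeholder endpoint

    # The root resource returns {'versions': [{'id': 'v2.0', ...}, ...]}
    resp = requests.get(OCTAVIA_ENDPOINT + '/')
    version_ids = {v['id'] for v in resp.json()['versions']}
    supports_additive_only = 'v2.11' in version_ids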

View File

@@ -16,6 +16,7 @@
 from oslo_db import exception as odb_exceptions
 from oslo_log import log as logging
 from oslo_utils import excutils
+from oslo_utils import strutils
 import pecan
 from wsme import types as wtypes
 from wsmeext import pecan as wsme_pecan
@@ -322,9 +323,10 @@ class MembersController(MemberController):
     @wsme_pecan.wsexpose(None, wtypes.text,
                          body=member_types.MembersRootPUT, status_code=202)
-    def put(self, members_):
+    def put(self, additive_only=False, members_=None):
         """Updates all members."""
         members = members_.members
+        additive_only = strutils.bool_from_string(additive_only)
 
         context = pecan.request.context.get('octavia_context')
         db_pool = self._get_db_pool(context.session, self.pool_id)
@@ -336,7 +338,9 @@
         # Check POST+PUT+DELETE since this operation is all of 'CUD'
         self._auth_validate_action(context, project_id, constants.RBAC_POST)
         self._auth_validate_action(context, project_id, constants.RBAC_PUT)
-        self._auth_validate_action(context, project_id, constants.RBAC_DELETE)
+        if not additive_only:
+            self._auth_validate_action(context, project_id,
+                                       constants.RBAC_DELETE)
 
         # Validate member subnets
         for member in members:
@@ -351,13 +355,6 @@
         with db_api.get_lock_session() as lock_session:
             self._test_lb_and_listener_and_pool_statuses(lock_session)
 
-            member_count_diff = len(members) - len(old_members)
-            if member_count_diff > 0 and self.repositories.check_quota_met(
-                    context.session, lock_session, data_models.Member,
-                    db_pool.project_id, count=member_count_diff):
-                raise exceptions.QuotaException(
-                    resource=data_models.Member._name())
-
             old_member_uniques = {
                 (m.ip_address, m.protocol_port): m.id for m in old_members}
             new_member_uniques = [
@@ -380,6 +377,16 @@
                 if (m.ip_address, m.protocol_port) not in new_member_uniques:
                     deleted_members.append(m)
 
+            if additive_only:
+                member_count_diff = len(new_members)
+            else:
+                member_count_diff = len(new_members) - len(deleted_members)
+            if member_count_diff > 0 and self.repositories.check_quota_met(
+                    context.session, lock_session, data_models.Member,
+                    db_pool.project_id, count=member_count_diff):
+                raise exceptions.QuotaException(
+                    resource=data_models.Member._name())
+
             provider_members = []
             # Create new members
             for m in new_members:
@@ -392,6 +399,7 @@
             # Update old members
             for m in updated_members:
                 m.provisioning_status = constants.PENDING_UPDATE
+                m.project_id = db_pool.project_id
                 db_member_dict = m.to_dict(render_unsets=False)
                 db_member_dict.pop('id')
                 self.repositories.member.update(
@@ -402,9 +410,19 @@
                     driver_utils.db_member_to_provider_member(m))
 
             # Delete old members
             for m in deleted_members:
-                self.repositories.member.update(
-                    lock_session, m.id,
-                    provisioning_status=constants.PENDING_DELETE)
+                if additive_only:
+                    # Members are appended to the dict and their status remains
+                    # unchanged, because they are logically "untouched".
+                    db_member_dict = m.to_dict(render_unsets=False)
+                    db_member_dict.pop('id')
+                    m.pool_id = self.pool_id
+                    provider_members.append(
+                        driver_utils.db_member_to_provider_member(m))
+                else:
+                    # Members are changed to PENDING_DELETE and not passed.
+                    self.repositories.member.update(
+                        lock_session, m.id,
+                        provisioning_status=constants.PENDING_DELETE)
 
             # Dispatch to the driver
             LOG.info("Sending Pool %s batch member update to provider %s",

View File

@@ -43,7 +43,7 @@ class TestRootController(base_db_test.OctaviaDBTestBase):
     def test_api_versions(self):
         versions = self._get_versions_with_config()
         version_ids = tuple(v.get('id') for v in versions)
-        self.assertEqual(11, len(version_ids))
+        self.assertEqual(12, len(version_ids))
         self.assertIn('v2.0', version_ids)
         self.assertIn('v2.1', version_ids)
         self.assertIn('v2.2', version_ids)
@@ -55,6 +55,7 @@ class TestRootController(base_db_test.OctaviaDBTestBase):
         self.assertIn('v2.8', version_ids)
         self.assertIn('v2.9', version_ids)
         self.assertIn('v2.10', version_ids)
+        self.assertIn('v2.11', version_ids)
 
         # Each version should have a 'self' 'href' to the API version URL
         # [{u'rel': u'self', u'href': u'http://localhost/v2'}]

View File

@@ -620,7 +620,6 @@ class TestMember(base.BaseAPITest):
             ('192.0.2.6', 80, 'PENDING_CREATE'),
         ]
 
-        member_ids = {}
         provider_creates = []
         provider_updates = []
         for rm in returned_members:
@@ -628,7 +627,6 @@
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
-            member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
 
             provider_dict = driver_utils.member_dict_to_provider_dict(rm)
             # Adjust for API response
@@ -672,7 +670,6 @@
             ('192.0.2.6', 80, 'PENDING_CREATE', ['test_tag2']),
         ]
 
-        member_ids = {}
         provider_members = []
         for rm in returned_members:
             self.assertIn(
@@ -680,7 +677,6 @@
                 rm['protocol_port'],
                 rm['provisioning_status'],
                 rm['tags']), expected_members)
-            member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
 
             provider_dict = driver_utils.member_dict_to_provider_dict(rm)
             # Adjust for API response
@@ -723,6 +719,77 @@ class TestMember(base.BaseAPITest):
         err_msg = ("169.254.169.254 is not a valid option for member address")
         self.assertEqual(err_msg, response.get('faultstring'))
 
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_additive_only_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
+        member1 = {'address': '192.0.2.1', 'protocol_port': 80}
+        member2 = {'address': '192.0.2.2', 'protocol_port': 80}
+        member3 = {'address': '192.0.2.3', 'protocol_port': 80}
+        member4 = {'address': '192.0.2.4', 'protocol_port': 80}
+        member5 = {'address': '192.0.2.5', 'protocol_port': 80}
+        member6 = {'address': '192.0.2.6', 'protocol_port': 80}
+        members = [member1, member2, member3, member4]
+        for m in members:
+            self.create_member(pool_id=self.pool_id, **m)
+            self.set_lb_status(self.lb_id)
+
+        # We are only concerned about the batch update, so clear out the
+        # create members calls above.
+        mock_provider.reset_mock()
+
+        req_dict = [member1, member2, member5, member6]
+        body = {self.root_tag_list: req_dict}
+        path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
+        path = "{}?additive_only=True".format(path)
+        self.put(path, body, status=202)
+        returned_members = self.get(
+            self.MEMBERS_PATH.format(pool_id=self.pool_id)
+        ).json.get(self.root_tag_list)
+
+        # Members 1+2 should be updated, 3+4 left alone, and 5+6 created
+        expected_members = [
+            ('192.0.2.1', 80, 'PENDING_UPDATE'),
+            ('192.0.2.2', 80, 'PENDING_UPDATE'),
+            ('192.0.2.3', 80, 'ACTIVE'),
+            ('192.0.2.4', 80, 'ACTIVE'),
+            ('192.0.2.5', 80, 'PENDING_CREATE'),
+            ('192.0.2.6', 80, 'PENDING_CREATE'),
+        ]
+
+        provider_creates = []
+        provider_updates = []
+        provider_ignored = []
+        for rm in returned_members:
+            self.assertIn(
+                (rm['address'],
+                 rm['protocol_port'],
+                 rm['provisioning_status']), expected_members)
+
+            provider_dict = driver_utils.member_dict_to_provider_dict(rm)
+            # Adjust for API response
+            provider_dict['pool_id'] = self.pool_id
+            if rm['provisioning_status'] == 'PENDING_UPDATE':
+                del provider_dict['name']
+                del provider_dict['subnet_id']
+                provider_updates.append(driver_dm.Member(**provider_dict))
+            elif rm['provisioning_status'] == 'PENDING_CREATE':
+                provider_dict['name'] = None
+                provider_creates.append(driver_dm.Member(**provider_dict))
+            elif rm['provisioning_status'] == 'ACTIVE':
+                provider_dict['name'] = None
+                provider_ignored.append(driver_dm.Member(**provider_dict))
+        # Order matters here
+        provider_creates += provider_updates
+        provider_creates += provider_ignored
+        mock_provider.assert_called_once_with(u'noop_driver',
+                                              mock_driver.member_batch_update,
+                                              provider_creates)
+
     @mock.patch('octavia.api.drivers.driver_factory.get_driver')
     @mock.patch('octavia.api.drivers.utils.call_provider')
     def test_update_batch_members(self, mock_provider, mock_get_driver):
@@ -756,14 +823,12 @@
             ('192.0.2.2', 80, 'PENDING_UPDATE'),
         ]
 
-        member_ids = {}
         provider_members = []
         for rm in returned_members:
             self.assertIn(
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
-            member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
 
             provider_dict = driver_utils.member_dict_to_provider_dict(rm)
             # Adjust for API response
@@ -807,14 +872,12 @@
             ('192.0.2.4', 80, 'PENDING_DELETE'),
         ]
 
-        member_ids = {}
         provider_members = []
         for rm in returned_members:
             self.assertIn(
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
-            member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
 
         mock_provider.assert_called_once_with(u'noop_driver',
                                               mock_driver.member_batch_update,

View File

@@ -0,0 +1,6 @@
+---
+features:
+  - |
+    The batch member update resource can now be used additively by passing the
+    query parameter ``additive_only=True``. Existing members can be updated and
+    new members will be created, but missing members will not be deleted.