Merge "Implement provider drivers - Members"
commit 1a807242ba
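This change replaces the old handler-based member flow with the provider driver interface: the v2 member controller converts members to the driver data model and dispatches create/update/delete/batch-update calls through driver_utils.call_provider, and the amphora driver implements the member entry points over its RPC client. As orientation only, a provider driver's member surface looks roughly like the sketch below (the class name is a placeholder; only the four method names and their arguments are taken from the diff):

    # Minimal sketch of the member entry points a provider driver exposes.
    # Illustrative only; it does not subclass the real Octavia base class.
    class SketchProviderDriver(object):

        def member_create(self, member):
            # 'member' is a provider data model (driver_dm.Member)
            raise NotImplementedError()

        def member_update(self, member):
            raise NotImplementedError()

        def member_delete(self, member_id):
            raise NotImplementedError()

        def member_batch_update(self, members):
            # 'members' is the full desired member list for one pool
            raise NotImplementedError()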
@@ -22,6 +22,8 @@ from octavia.api.drivers import utils as driver_utils
 from octavia.common import constants as consts
 from octavia.common import data_models
 from octavia.common import utils
+from octavia.db import api as db_apis
+from octavia.db import repositories
 from octavia.network import base as network_base
 
 CONF = cfg.CONF
@@ -38,6 +40,7 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
             namespace=consts.RPC_NAMESPACE_CONTROLLER_AGENT,
             topic=topic, version="1.0", fanout=False)
         self.client = messaging.RPCClient(self.transport, target=self.target)
+        self.repositories = repositories.Repositories()
 
     # Load Balancer
     def create_vip_port(self, loadbalancer_id, project_id, vip_dictionary):
@@ -128,10 +131,52 @@ class AmphoraProviderDriver(driver_base.ProviderDriver):
         self.client.cast({}, 'delete_member', **payload)
 
     def member_update(self, member):
-        pass
+        member_dict = member.to_dict()
+        if 'admin_state_up' in member_dict:
+            member_dict['enabled'] = member_dict.pop('admin_state_up')
+        member_id = member_dict.pop('member_id')
+
+        payload = {consts.MEMBER_ID: member_id,
+                   consts.MEMBER_UPDATES: member_dict}
+        self.client.cast({}, 'update_member', **payload)
 
     def member_batch_update(self, members):
-        pass
+        # Get a list of existing members
+        pool_id = members[0].pool_id
+        db_pool = self.repositories.pool.get(db_apis.get_session(), id=pool_id)
+        old_members = db_pool.members
+
+        old_member_uniques = {
+            (m.ip_address, m.protocol_port): m.id for m in old_members}
+        new_member_uniques = [
+            (m.address, m.protocol_port) for m in members]
+
+        # Find members that are brand new or updated
+        new_members = []
+        updated_members = []
+        for m in members:
+            if (m.address, m.protocol_port) not in old_member_uniques:
+                new_members.append(m)
+            else:
+                m.id = old_member_uniques[(m.address, m.protocol_port)]
+                member_dict = m.to_dict(render_unsets=False)
+                member_dict['id'] = member_dict.pop('member_id')
+                if 'address' in member_dict:
+                    member_dict['ip_address'] = member_dict.pop('address')
+                if 'admin_state_up' in member_dict:
+                    member_dict['enabled'] = member_dict.pop('admin_state_up')
+                updated_members.append(member_dict)
+
+        # Find members that are deleted
+        deleted_members = []
+        for m in old_members:
+            if (m.ip_address, m.protocol_port) not in new_member_uniques:
+                deleted_members.append(m)
+
+        payload = {'old_member_ids': [m.id for m in deleted_members],
+                   'new_member_ids': [m.member_id for m in new_members],
+                   'updated_members': updated_members}
+        self.client.cast({}, 'batch_update_members', **payload)
 
     # Health Monitor
     def health_monitor_create(self, healthmonitor):
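The member_batch_update implementation above is a reconciliation pass: existing members are keyed by (ip_address, protocol_port), anything in the request that is not already present is new, anything that matches is an update, and any existing member missing from the request is deleted. The same bookkeeping, pulled out as a standalone sketch (function and argument names are illustrative, not Octavia API):

    def split_batch(old_members, requested_members):
        # old_members: DB rows with .id, .ip_address, .protocol_port
        # requested_members: driver/API objects with .address, .protocol_port
        old_by_key = {(m.ip_address, m.protocol_port): m for m in old_members}
        requested_keys = {(m.address, m.protocol_port)
                          for m in requested_members}

        new = [m for m in requested_members
               if (m.address, m.protocol_port) not in old_by_key]
        updated = [m for m in requested_members
                   if (m.address, m.protocol_port) in old_by_key]
        deleted = [m for m in old_members
                   if (m.ip_address, m.protocol_port) not in requested_keys]
        return new, updated, deleted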
@@ -84,6 +84,8 @@ def _base_to_provider_dict(current_dict, include_project_id=False):
         new_dict['admin_state_up'] = new_dict.pop('enabled')
     if 'project_id' in new_dict and not include_project_id:
         del new_dict['project_id']
+    if 'tenant_id' in new_dict:
+        del new_dict['tenant_id']
     return new_dict
 
 
@@ -262,11 +264,15 @@ def pool_dict_to_provider_dict(pool_dict):
 def db_members_to_provider_members(db_members):
     provider_members = []
     for member in db_members:
-        new_member_dict = member_dict_to_provider_dict(member.to_dict())
-        provider_members.append(driver_dm.Member.from_dict(new_member_dict))
+        provider_members.append(db_member_to_provider_member(member))
     return provider_members
 
 
+def db_member_to_provider_member(db_member):
+    new_member_dict = member_dict_to_provider_dict(db_member.to_dict())
+    return driver_dm.Member.from_dict(new_member_dict)
+
+
 def member_dict_to_provider_dict(member_dict):
     new_member_dict = _base_to_provider_dict(member_dict)
     new_member_dict['member_id'] = new_member_dict.pop('id')
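The conversion helpers above differ from the DB representation mostly by key names: 'id' becomes 'member_id', 'enabled' becomes 'admin_state_up', and 'project_id'/'tenant_id' are dropped unless explicitly requested. A rough before/after, based only on the renames visible in this diff (field values are made up):

    db_style = {'id': 'member-uuid', 'enabled': True,
                'ip_address': '192.0.2.10', 'protocol_port': 80}
    # member_dict_to_provider_dict(db_style) yields approximately:
    provider_style = {'member_id': 'member-uuid', 'admin_state_up': True,
                      'ip_address': '192.0.2.10', 'protocol_port': 80}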
@@ -21,6 +21,9 @@ import pecan
 from wsme import types as wtypes
 from wsmeext import pecan as wsme_pecan
 
+from octavia.api.drivers import data_models as driver_dm
+from octavia.api.drivers import driver_factory
+from octavia.api.drivers import utils as driver_utils
 from octavia.api.v2.controllers import base
 from octavia.api.v2.types import member as member_types
 from octavia.common import constants
@@ -40,7 +43,6 @@ class MemberController(base.BaseController):
     def __init__(self, pool_id):
         super(MemberController, self).__init__()
         self.pool_id = pool_id
-        self.handler = self.handler.member
 
     @wsme_pecan.wsexpose(member_types.MemberRootResponse, wtypes.text,
                          [wtypes.text], ignore_extra_args=True)
@@ -143,24 +145,6 @@ class MemberController(base.BaseController):
             # do not give any information as to what constraint failed
             raise exceptions.InvalidOption(value='', option='')
 
-    def _send_member_to_handler(self, session, db_member):
-        try:
-            LOG.info("Sending Creation of Member %s to handler", db_member.id)
-            self.handler.create(db_member)
-        except Exception:
-            with excutils.save_and_reraise_exception(
-                    reraise=False), db_api.get_lock_session() as lock_session:
-                self._reset_lb_listener_pool_statuses(
-                    lock_session, member=db_member)
-                # Member now goes to ERROR
-                self.repositories.member.update(
-                    lock_session, db_member.id,
-                    provisioning_status=constants.ERROR)
-        db_member = self._get_db_member(session, db_member.id)
-        result = self._convert_db_to_type(db_member,
-                                          member_types.MemberResponse)
-        return member_types.MemberRootResponse(member=result)
-
     @wsme_pecan.wsexpose(member_types.MemberRootResponse,
                          body=member_types.MemberRootPOST, status_code=201)
     def post(self, member_):
@@ -172,12 +156,15 @@ class MemberController(base.BaseController):
            raise exceptions.NotFound(resource='Subnet',
                                      id=member.subnet_id)
         pool = self.repositories.pool.get(context.session, id=self.pool_id)
-        member.project_id = self._get_lb_project_id(context.session,
-                                                    pool.load_balancer_id)
+        member.project_id, provider = self._get_lb_project_id_provider(
+            context.session, pool.load_balancer_id)
 
         self._auth_validate_action(context, member.project_id,
                                    constants.RBAC_POST)
 
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
         lock_session = db_api.get_session(autocommit=False)
         try:
             if self.repositories.check_quota_met(
@@ -193,12 +180,26 @@ class MemberController(base.BaseController):
             self._test_lb_and_listener_and_pool_statuses(lock_session)
 
             db_member = self._validate_create_member(lock_session, member_dict)
+
+            # Prepare the data for the driver data model
+            provider_member = (
+                driver_utils.db_member_to_provider_member(db_member))
+
+            # Dispatch to the driver
+            LOG.info("Sending create Member %s to provider %s",
+                     db_member.id, driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.member_create, provider_member)
+
             lock_session.commit()
         except Exception:
             with excutils.save_and_reraise_exception():
                 lock_session.rollback()
 
-        return self._send_member_to_handler(context.session, db_member)
+        db_member = self._get_db_member(context.session, db_member.id)
+        result = self._convert_db_to_type(db_member,
+                                          member_types.MemberResponse)
+        return member_types.MemberRootResponse(member=result)
 
     def _graph_create(self, lock_session, member_dict):
         pool = self.repositories.pool.get(lock_session, id=self.pool_id)
@@ -218,27 +219,41 @@ class MemberController(base.BaseController):
         db_member = self._get_db_member(context.session, id,
                                         show_deleted=False)
 
-        self._auth_validate_action(context, db_member.project_id,
-                                   constants.RBAC_PUT)
+        pool = self.repositories.pool.get(context.session,
+                                          id=db_member.pool_id)
+        project_id, provider = self._get_lb_project_id_provider(
+            context.session, pool.load_balancer_id)
 
-        self._test_lb_and_listener_and_pool_statuses(context.session,
-                                                     member=db_member)
-        self.repositories.member.update(
-            context.session, db_member.id,
-            provisioning_status=constants.PENDING_UPDATE)
+        self._auth_validate_action(context, project_id, constants.RBAC_PUT)
 
-        try:
-            LOG.info("Sending Update of Member %s to handler", id)
-            self.handler.update(db_member, member)
-        except Exception:
-            with excutils.save_and_reraise_exception(
-                    reraise=False), db_api.get_lock_session() as lock_session:
-                self._reset_lb_listener_pool_statuses(
-                    lock_session, member=db_member)
-                # Member now goes to ERROR
-                self.repositories.member.update(
-                    lock_session, db_member.id,
-                    provisioning_status=constants.ERROR)
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with db_api.get_lock_session() as lock_session:
+            self._test_lb_and_listener_and_pool_statuses(lock_session,
+                                                         member=db_member)
+
+            # Prepare the data for the driver data model
+            member_dict = member.to_dict(render_unsets=False)
+            member_dict['id'] = id
+            provider_member_dict = (
+                driver_utils.member_dict_to_provider_dict(member_dict))
+
+            # Dispatch to the driver
+            LOG.info("Sending update Member %s to provider %s", id,
+                     driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.member_update,
+                driver_dm.Member.from_dict(provider_member_dict))
+
+            # Update the database to reflect what the driver just accepted
+            member.provisioning_status = constants.PENDING_UPDATE
+            db_member_dict = member.to_dict(render_unsets=False)
+            self.repositories.member.update(lock_session, id, **db_member_dict)
+
+        # Force SQL alchemy to query the DB, otherwise we get inconsistent
+        # results
+        context.session.expire_all()
         db_member = self._get_db_member(context.session, id)
         result = self._convert_db_to_type(db_member,
                                           member_types.MemberResponse)
@@ -251,27 +266,26 @@ class MemberController(base.BaseController):
         db_member = self._get_db_member(context.session, id,
                                         show_deleted=False)
 
-        self._auth_validate_action(context, db_member.project_id,
-                                   constants.RBAC_DELETE)
+        pool = self.repositories.pool.get(context.session,
+                                          id=db_member.pool_id)
+        project_id, provider = self._get_lb_project_id_provider(
+            context.session, pool.load_balancer_id)
 
-        self._test_lb_and_listener_and_pool_statuses(context.session,
-                                                     member=db_member)
-        self.repositories.member.update(
-            context.session, db_member.id,
-            provisioning_status=constants.PENDING_DELETE)
+        self._auth_validate_action(context, project_id, constants.RBAC_DELETE)
 
-        try:
-            LOG.info("Sending Deletion of Member %s to handler", db_member.id)
-            self.handler.delete(db_member)
-        except Exception:
-            with excutils.save_and_reraise_exception(
-                    reraise=False), db_api.get_lock_session() as lock_session:
-                self._reset_lb_listener_pool_statuses(
-                    lock_session, member=db_member)
-                # Member now goes to ERROR
-                self.repositories.member.update(
-                    lock_session, db_member.id,
-                    provisioning_status=constants.ERROR)
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
+        with db_api.get_lock_session() as lock_session:
+            self._test_lb_and_listener_and_pool_statuses(lock_session,
+                                                         member=db_member)
+            self.repositories.member.update(
+                lock_session, db_member.id,
+                provisioning_status=constants.PENDING_DELETE)
+
+        LOG.info("Sending delete Member %s to provider %s", id,
+                 driver.name)
+        driver_utils.call_provider(driver.name, driver.member_delete, id)
 
 
 class MembersController(MemberController):
@@ -289,13 +303,13 @@ class MembersController(MemberController):
         db_pool = self._get_db_pool(context.session, self.pool_id)
         old_members = db_pool.members
 
+        project_id, provider = self._get_lb_project_id_provider(
+            context.session, db_pool.load_balancer_id)
+
         # Check POST+PUT+DELETE since this operation is all of 'CUD'
-        self._auth_validate_action(context, db_pool.project_id,
-                                   constants.RBAC_POST)
-        self._auth_validate_action(context, db_pool.project_id,
-                                   constants.RBAC_PUT)
-        self._auth_validate_action(context, db_pool.project_id,
-                                   constants.RBAC_DELETE)
+        self._auth_validate_action(context, project_id, constants.RBAC_POST)
+        self._auth_validate_action(context, project_id, constants.RBAC_PUT)
+        self._auth_validate_action(context, project_id, constants.RBAC_DELETE)
 
         # Validate member subnets
         for member in members:
@@ -304,6 +318,9 @@ class MembersController(MemberController):
                raise exceptions.NotFound(resource='Subnet',
                                          id=member.subnet_id)
 
+        # Load the driver early as it also provides validation
+        driver = driver_factory.get_driver(provider)
+
         with db_api.get_lock_session() as lock_session:
             self._test_lb_and_listener_and_pool_statuses(lock_session)
 
@@ -334,25 +351,30 @@ class MembersController(MemberController):
                 if (m.ip_address, m.protocol_port) not in new_member_uniques:
                     deleted_members.append(m)
 
+            provider_members = []
             # Create new members
-            new_members_created = []
             for m in new_members:
                 m = m.to_dict(render_unsets=False)
                 m['project_id'] = db_pool.project_id
-                new_members_created.append(self._graph_create(lock_session, m))
+                created_member = self._graph_create(lock_session, m)
+                provider_member = driver_utils.db_member_to_provider_member(
+                    created_member)
+                provider_members.append(provider_member)
             # Update old members
             for m in updated_members:
                 self.repositories.member.update(
                     lock_session, m.id,
                     provisioning_status=constants.PENDING_UPDATE)
+                provider_members.append(
+                    driver_utils.db_member_to_provider_member(m))
             # Delete old members
            for m in deleted_members:
                 self.repositories.member.update(
                     lock_session, m.id,
                     provisioning_status=constants.PENDING_DELETE)
 
-            LOG.info("Sending Full Member Update to handler")
-            new_member_ids = [m.id for m in new_members_created]
-            old_member_ids = [m.id for m in deleted_members]
-            self.handler.batch_update(
-                old_member_ids, new_member_ids, updated_members)
+            # Dispatch to the driver
+            LOG.info("Sending Pool %s batch member update to provider %s",
+                     db_pool.id, driver.name)
+            driver_utils.call_provider(
+                driver.name, driver.member_batch_update, provider_members)
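MembersController.put (the batch update above) treats the request body as the complete desired membership for the pool: members present in the body are created or updated, members absent from it are deleted, and the whole set is handed to the driver in one member_batch_update call. A hypothetical request body in that shape (the exact URL prefix depends on the deployment's v2 API root):

    # PUT <v2-api-root>/lbaas/pools/<pool_id>/members
    desired_members = {
        'members': [
            {'address': '192.0.2.1', 'protocol_port': 80},
            {'address': '192.0.2.5', 'protocol_port': 80},
        ]
    }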
@@ -228,6 +228,7 @@ L7RULE_ID = 'l7rule_id'
 LOAD_BALANCER_UPDATES = 'load_balancer_updates'
 LISTENER_UPDATES = 'listener_updates'
 POOL_UPDATES = 'pool_updates'
+MEMBER_UPDATES = 'member_updates'
 
 CERT_ROTATE_AMPHORA_FLOW = 'octavia-cert-rotate-amphora-flow'
 CREATE_AMPHORA_FLOW = 'octavia-create-amphora-flow'
@@ -17,9 +17,12 @@ from oslo_config import cfg
 from oslo_config import fixture as oslo_fixture
 from oslo_utils import uuidutils
 
+from octavia.api.drivers import data_models as driver_dm
+from octavia.api.drivers import utils as driver_utils
 from octavia.common import constants
 import octavia.common.context
 from octavia.common import data_models
+from octavia.common import exceptions
 from octavia.network import base as network_base
 from octavia.tests.functional.api.v2 import base
 
@@ -443,21 +446,22 @@ class TestMember(base.BaseAPITest):
         member = {'name': 'test1'}
         self.post(self.members_path, self._build_body(member), status=400)
 
-    def test_create_with_bad_handler(self):
-        self.handler_mock().member.create.side_effect = Exception()
-        api_member = self.create_member(
-            self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag)
-        self.assert_correct_status(
-            lb_id=self.lb_id, listener_id=self.listener_id,
-            pool_id=self.pool_with_listener_id,
-            member_id=api_member.get('id'),
-            lb_prov_status=constants.ACTIVE,
-            listener_prov_status=constants.ACTIVE,
-            pool_prov_status=constants.ACTIVE,
-            member_prov_status=constants.ERROR,
-            member_op_status=constants.NO_MONITOR)
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_create_with_bad_provider(self, mock_provider):
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        response = self.create_member(self.pool_id, '192.0.2.1', 80,
+                                      status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.get('faultstring'))
 
-    def test_full_batch_members(self):
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_full_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
         member1 = {'address': '192.0.2.1', 'protocol_port': 80}
         member2 = {'address': '192.0.2.2', 'protocol_port': 80}
         member3 = {'address': '192.0.2.3', 'protocol_port': 80}
@@ -469,6 +473,10 @@ class TestMember(base.BaseAPITest):
             self.create_member(pool_id=self.pool_id, **m)
             self.set_lb_status(self.lb_id)
 
+        # We are only concerned about the batch update, so clear out the
+        # create members calls above.
+        mock_provider.reset_mock()
+
         req_dict = [member1, member2, member5, member6]
         body = {self.root_tag_list: req_dict}
         path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
@@ -487,28 +495,39 @@ class TestMember(base.BaseAPITest):
         ]
 
         member_ids = {}
+        provider_creates = []
+        provider_updates = []
         for rm in returned_members:
             self.assertIn(
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
             member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
-        handler_args = self.handler_mock().member.batch_update.call_args[0]
-        self.assertEqual(
-            [member_ids[('192.0.2.3', 80)], member_ids[('192.0.2.4', 80)]],
-            handler_args[0])
-        self.assertEqual(
-            [member_ids[('192.0.2.5', 80)], member_ids[('192.0.2.6', 80)]],
-            handler_args[1])
-        self.assertEqual(2, len(handler_args[2]))
-        updated_members = [
-            (handler_args[2][0].address, handler_args[2][0].protocol_port),
-            (handler_args[2][1].address, handler_args[2][1].protocol_port)
-        ]
-        self.assertEqual([('192.0.2.1', 80), ('192.0.2.2', 80)],
-                         updated_members)
+
+            provider_dict = driver_utils.member_dict_to_provider_dict(rm)
+            # Adjust for API response
+            if rm['provisioning_status'] == 'PENDING_UPDATE':
+                del provider_dict['name']
+                del provider_dict['subnet_id']
+                provider_updates.append(driver_dm.Member(**provider_dict))
+            elif rm['provisioning_status'] == 'PENDING_CREATE':
+                provider_dict['pool_id'] = self.pool_id
+                provider_dict['name'] = None
+                provider_creates.append(driver_dm.Member(**provider_dict))
+        # Order matters here
+        provider_creates += provider_updates
+
+        mock_provider.assert_called_once_with(u'noop_driver',
+                                              mock_driver.member_batch_update,
+                                              provider_creates)
 
-    def test_create_batch_members(self):
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_create_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
         member5 = {'address': '192.0.2.5', 'protocol_port': 80}
         member6 = {'address': '192.0.2.6', 'protocol_port': 80}
 
@@ -526,18 +545,23 @@ class TestMember(base.BaseAPITest):
         ]
 
         member_ids = {}
+        provider_members = []
         for rm in returned_members:
             self.assertIn(
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
             member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
-        handler_args = self.handler_mock().member.batch_update.call_args[0]
-        self.assertEqual(0, len(handler_args[0]))
-        self.assertEqual(
-            [member_ids[('192.0.2.5', 80)], member_ids[('192.0.2.6', 80)]],
-            handler_args[1])
-        self.assertEqual(0, len(handler_args[2]))
+
+            provider_dict = driver_utils.member_dict_to_provider_dict(rm)
+            # Adjust for API response
+            provider_dict['pool_id'] = self.pool_id
+            provider_dict['name'] = None
+            provider_members.append(driver_dm.Member(**provider_dict))
+
+        mock_provider.assert_called_once_with(u'noop_driver',
+                                              mock_driver.member_batch_update,
+                                              provider_members)
 
     def test_create_batch_members_with_bad_subnet(self):
         subnet_id = uuidutils.generate_uuid()
@@ -557,7 +581,13 @@ class TestMember(base.BaseAPITest):
         err_msg = 'Subnet ' + subnet_id + ' not found.'
         self.assertEqual(response.get('faultstring'), err_msg)
 
-    def test_update_batch_members(self):
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_update_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
         member1 = {'address': '192.0.2.1', 'protocol_port': 80}
         member2 = {'address': '192.0.2.2', 'protocol_port': 80}
         members = [member1, member2]
@@ -565,6 +595,10 @@ class TestMember(base.BaseAPITest):
             self.create_member(pool_id=self.pool_id, **m)
             self.set_lb_status(self.lb_id)
 
+        # We are only concerned about the batch update, so clear out the
+        # create members calls above.
+        mock_provider.reset_mock()
+
         req_dict = [member1, member2]
         body = {self.root_tag_list: req_dict}
         path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
@@ -579,24 +613,31 @@ class TestMember(base.BaseAPITest):
         ]
 
         member_ids = {}
+        provider_members = []
         for rm in returned_members:
             self.assertIn(
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
             member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
-        handler_args = self.handler_mock().member.batch_update.call_args[0]
-        self.assertEqual(0, len(handler_args[0]))
-        self.assertEqual(0, len(handler_args[1]))
-        self.assertEqual(2, len(handler_args[2]))
-        updated_members = [
-            (handler_args[2][0].address, handler_args[2][0].protocol_port),
-            (handler_args[2][1].address, handler_args[2][1].protocol_port)
-        ]
-        self.assertEqual([('192.0.2.1', 80), ('192.0.2.2', 80)],
-                         updated_members)
+
+            provider_dict = driver_utils.member_dict_to_provider_dict(rm)
+            # Adjust for API response
+            del provider_dict['name']
+            del provider_dict['subnet_id']
+            provider_members.append(driver_dm.Member(**provider_dict))
+
+        mock_provider.assert_called_once_with(u'noop_driver',
+                                              mock_driver.member_batch_update,
+                                              provider_members)
 
-    def test_delete_batch_members(self):
+    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_delete_batch_members(self, mock_provider, mock_get_driver):
+        mock_driver = mock.MagicMock()
+        mock_driver.name = 'noop_driver'
+        mock_get_driver.return_value = mock_driver
+
         member3 = {'address': '192.0.2.3', 'protocol_port': 80}
         member4 = {'address': '192.0.2.4', 'protocol_port': 80}
         members = [member3, member4]
@@ -604,6 +645,10 @@ class TestMember(base.BaseAPITest):
             self.create_member(pool_id=self.pool_id, **m)
             self.set_lb_status(self.lb_id)
 
+        # We are only concerned about the batch update, so clear out the
+        # create members calls above.
+        mock_provider.reset_mock()
+
         req_dict = []
         body = {self.root_tag_list: req_dict}
         path = self.MEMBERS_PATH.format(pool_id=self.pool_id)
@@ -618,18 +663,17 @@ class TestMember(base.BaseAPITest):
         ]
 
         member_ids = {}
+        provider_members = []
         for rm in returned_members:
             self.assertIn(
                 (rm['address'],
                  rm['protocol_port'],
                  rm['provisioning_status']), expected_members)
             member_ids[(rm['address'], rm['protocol_port'])] = rm['id']
-        handler_args = self.handler_mock().member.batch_update.call_args[0]
-        self.assertEqual(
-            [member_ids[('192.0.2.3', 80)], member_ids[('192.0.2.4', 80)]],
-            handler_args[0])
-        self.assertEqual(0, len(handler_args[1]))
-        self.assertEqual(0, len(handler_args[2]))
+
+        mock_provider.assert_called_once_with(u'noop_driver',
+                                              mock_driver.member_batch_update,
+                                              provider_members)
 
     def test_create_with_attached_listener(self):
         api_member = self.create_member(
@@ -755,7 +799,7 @@ class TestMember(base.BaseAPITest):
             pool_prov_status=constants.PENDING_UPDATE,
             member_prov_status=constants.PENDING_UPDATE)
         self.set_lb_status(self.lb_id)
-        self.assertEqual(old_name, response.get('name'))
+        self.assertEqual(new_name, response.get('name'))
         self.assertEqual(api_member.get('created_at'),
                          response.get('created_at'))
         self.assert_correct_status(
@@ -807,7 +851,7 @@ class TestMember(base.BaseAPITest):
             pool_prov_status=constants.PENDING_UPDATE,
             member_prov_status=constants.PENDING_UPDATE)
         self.set_lb_status(self.lb_id)
-        self.assertEqual(old_name, response.get('name'))
+        self.assertEqual(new_name, response.get('name'))
         self.assertEqual(api_member.get('created_at'),
                          response.get('created_at'))
         self.assert_correct_status(
@@ -863,7 +907,7 @@ class TestMember(base.BaseAPITest):
             pool_prov_status=constants.PENDING_UPDATE,
             member_prov_status=constants.PENDING_UPDATE)
         self.set_lb_status(self.lb_id)
-        self.assertEqual(old_name, response.get('name'))
+        self.assertEqual(new_name, response.get('name'))
         self.assertEqual(api_member.get('created_at'),
                          response.get('created_at'))
         self.assert_correct_status(
@@ -877,19 +921,20 @@ class TestMember(base.BaseAPITest):
         self.put(self.member_path.format(member_id=api_member.get('id')),
                  self._build_body(new_member), status=400)
 
-    def test_update_with_bad_handler(self):
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_update_with_bad_provider(self, mock_provider):
         api_member = self.create_member(
             self.pool_with_listener_id, '192.0.2.1', 80,
             name="member1").get(self.root_tag)
         self.set_lb_status(self.lb_id)
         new_member = {'name': "member2"}
-        self.handler_mock().member.update.side_effect = Exception()
-        self.put(self.member_path_listener.format(
-            member_id=api_member.get('id')), self._build_body(new_member))
-        self.assert_correct_status(
-            lb_id=self.lb_id, listener_id=self.listener_id,
-            pool_id=self.pool_with_listener_id, member_id=api_member.get('id'),
-            member_prov_status=constants.ERROR)
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
+        response = self.put(self.member_path_listener.format(
+            member_id=api_member.get('id')), self._build_body(new_member),
+            status=500)
+        self.assertIn('Provider \'bad_driver\' reports error: broken',
+                      response.json.get('faultstring'))
 
     def test_delete(self):
         api_member = self.create_member(
@@ -998,7 +1043,8 @@ class TestMember(base.BaseAPITest):
         self.delete(self.member_path.format(
             member_id=uuidutils.generate_uuid()), status=404)
 
-    def test_delete_with_bad_handler(self):
+    @mock.patch('octavia.api.drivers.utils.call_provider')
+    def test_delete_with_bad_provider(self, mock_provider):
         api_member = self.create_member(
             self.pool_with_listener_id, '192.0.2.1', 80).get(self.root_tag)
         self.set_lb_status(self.lb_id)
@@ -1009,16 +1055,11 @@ class TestMember(base.BaseAPITest):
         self.assertIsNone(api_member.pop('updated_at'))
         self.assertIsNotNone(member.pop('updated_at'))
         self.assertEqual(api_member, member)
-        self.handler_mock().member.delete.side_effect = Exception()
+
+        mock_provider.side_effect = exceptions.ProviderDriverError(
+            prov='bad_driver', user_msg='broken')
         self.delete(self.member_path_listener.format(
-            member_id=api_member.get('id')))
-        self.assert_correct_status(
-            lb_id=self.lb_id, listener_id=self.listener_id,
-            pool_id=self.pool_with_listener_id, member_id=member.get('id'),
-            lb_prov_status=constants.ACTIVE,
-            listener_prov_status=constants.ACTIVE,
-            pool_prov_status=constants.ACTIVE,
-            member_prov_status=constants.ERROR)
+            member_id=api_member.get('id')), status=500)
 
     def test_create_when_lb_pending_update(self):
         self.create_member(self.pool_id, address="192.0.2.2",
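All of the updated tests stub the provider layer the same way: driver_factory.get_driver is patched to return a MagicMock whose name is 'noop_driver', and driver_utils.call_provider is patched so dispatches are recorded (or made to fail) instead of reaching a real driver. Condensed into one hypothetical method on TestMember (it assumes the fixtures and imports that class already has):

    @mock.patch('octavia.api.drivers.driver_factory.get_driver')
    @mock.patch('octavia.api.drivers.utils.call_provider')
    def test_member_create_dispatches_to_provider(self, mock_provider,
                                                  mock_get_driver):
        mock_driver = mock.MagicMock()
        mock_driver.name = 'noop_driver'
        mock_get_driver.return_value = mock_driver

        self.create_member(self.pool_id, '192.0.2.1', 80)

        # Exactly one provider dispatch should have happened for the create.
        self.assertEqual(1, mock_provider.call_count)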