From 1a935b91188efb3b2280615adf4f08876a8c2828 Mon Sep 17 00:00:00 2001 From: Sindhu Devale Date: Fri, 2 Dec 2016 05:26:34 +0000 Subject: [PATCH] Introduce Octavia v2 API for pools Make API path for pools independent of Loadbalancers and Listeners. - /v2.0/lbaas/pools/ - /v2.0/pools/ GET all - /pools/ GET one - /pools/ POST - /pools/ {} PUT - /pools/ {} DELETE - /pools/ Co-Authored-By: Shashank Kumar Shankar Co-Authored-By: Adam Harwell Partially-Implements: #1616641 Change-Id: I7679cc7b3f559db774a9d036580177cf1aa7e693 --- octavia/api/v2/controllers/__init__.py | 15 +- octavia/api/v2/controllers/base.py | 39 + octavia/api/v2/controllers/pool.py | 278 +++++++ octavia/api/v2/types/pool.py | 160 ++++ octavia/db/api.py | 15 + octavia/db/prepare.py | 7 +- octavia/tests/functional/api/v2/base.py | 41 +- .../tests/functional/api/v2/test_listener.py | 106 ++- .../functional/api/v2/test_load_balancer.py | 6 +- octavia/tests/functional/api/v2/test_pool.py | 751 ++++++++++++++++++ octavia/tests/unit/api/v2/types/test_pools.py | 156 ++++ 11 files changed, 1492 insertions(+), 82 deletions(-) create mode 100644 octavia/api/v2/controllers/pool.py create mode 100644 octavia/api/v2/types/pool.py create mode 100644 octavia/tests/functional/api/v2/test_pool.py create mode 100644 octavia/tests/unit/api/v2/types/test_pools.py diff --git a/octavia/api/v2/controllers/__init__.py b/octavia/api/v2/controllers/__init__.py index e02efcf90a..64e30fc549 100644 --- a/octavia/api/v2/controllers/__init__.py +++ b/octavia/api/v2/controllers/__init__.py @@ -18,27 +18,18 @@ from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base from octavia.api.v2.controllers import listener from octavia.api.v2.controllers import load_balancer +from octavia.api.v2.controllers import pool class BaseV2Controller(base.BaseController): loadbalancers = load_balancer.LoadBalancersController() listeners = listener.ListenersController() + pools = pool.PoolsController() @wsme_pecan.wsexpose(wtypes.text) def get(self): return "v2.0" -class LBaaSController(BaseV2Controller): - """Expose /lbaas/ endpoint for the v2.0 controller. - - Provides backwards compatibility with LBaaSV2 - - To be removed once LBaasV2 has been removed. - - """ - pass - - class V2Controller(BaseV2Controller): - lbaas = LBaaSController() + lbaas = BaseV2Controller() diff --git a/octavia/api/v2/controllers/base.py b/octavia/api/v2/controllers/base.py index 43f31086d8..cb1ad55eb6 100644 --- a/octavia/api/v2/controllers/base.py +++ b/octavia/api/v2/controllers/base.py @@ -102,3 +102,42 @@ class BaseController(rest.RestController): lb = self._get_db_obj(session, self.repositories.load_balancer, data_models.LoadBalancer, id) return lb.project_id + + def _get_default_quotas(self, project_id): + """Gets the project's default quotas.""" + quotas = data_models.Quotas( + project_id=project_id, + load_balancer=CONF.quotas.default_load_balancer_quota, + listener=CONF.quotas.default_listener_quota, + pool=CONF.quotas.default_pool_quota, + health_monitor=CONF.quotas.default_health_monitor_quota, + member=CONF.quotas.default_member_quota) + return quotas + + def _get_db_quotas(self, session, project_id): + """Gets the project's quotas from the database, or responds with the + + default quotas. + """ + # At this point project_id should not ever be None or Unset + db_quotas = self.repositories.quotas.get( + session, project_id=project_id) + if not db_quotas: + LOG.debug("No custom quotas for project %s. 
Returning " + "defaults...", project_id) + db_quotas = self._get_default_quotas(project_id=project_id) + else: + # Fill in any that are using the configured defaults + if db_quotas.load_balancer is None: + db_quotas.load_balancer = (CONF.quotas. + default_load_balancer_quota) + if db_quotas.listener is None: + db_quotas.listener = CONF.quotas.default_listener_quota + if db_quotas.pool is None: + db_quotas.pool = CONF.quotas.default_pool_quota + if db_quotas.health_monitor is None: + db_quotas.health_monitor = (CONF.quotas. + default_health_monitor_quota) + if db_quotas.member is None: + db_quotas.member = CONF.quotas.default_member_quota + return db_quotas diff --git a/octavia/api/v2/controllers/pool.py b/octavia/api/v2/controllers/pool.py new file mode 100644 index 0000000000..052929f6c4 --- /dev/null +++ b/octavia/api/v2/controllers/pool.py @@ -0,0 +1,278 @@ +# Copyright 2014 Rackspace +# Copyright 2016 Blue Box, an IBM Company +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import logging + +from oslo_db import exception as odb_exceptions +from oslo_utils import excutils +import pecan +from wsme import types as wtypes +from wsmeext import pecan as wsme_pecan + + +from octavia.api.v1.controllers import health_monitor +from octavia.api.v1.controllers import member +from octavia.api.v2.controllers import base +from octavia.api.v2.types import pool as pool_types +from octavia.common import constants +from octavia.common import data_models +from octavia.common import exceptions +from octavia.db import api as db_api +from octavia.db import prepare as db_prepare +from octavia.i18n import _LI + + +LOG = logging.getLogger(__name__) + + +class PoolsController(base.BaseController): + + def __init__(self): + super(PoolsController, self).__init__() + self.handler = self.handler.pool + + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text) + def get(self, id): + """Gets a pool's details.""" + context = pecan.request.context.get('octavia_context') + db_pool = self._get_db_pool(context.session, id) + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + return pool_types.PoolRootResponse(pool=result) + + @wsme_pecan.wsexpose(pool_types.PoolsRootResponse, wtypes.text) + def get_all(self): + """Lists all pools.""" + context = pecan.request.context.get('octavia_context') + pools = self.repositories.pool.get_all(context.session) + result = self._convert_db_to_type(pools, [pool_types.PoolResponse]) + return pool_types.PoolsRootResponse(pools=result) + + def _get_affected_listener_ids(self, pool): + """Gets a list of all listeners this request potentially affects.""" + listener_ids = [l.id for l in pool.listeners] + return listener_ids + + def _test_lb_and_listener_statuses(self, session, lb_id, listener_ids): + """Verify load balancer is in a mutable state.""" + # We need to verify that any listeners referencing this pool are also + # mutable + if not self.repositories.test_and_set_lb_and_listeners_prov_status( + session, lb_id, + constants.PENDING_UPDATE, constants.PENDING_UPDATE, 
+ listener_ids=listener_ids): + LOG.info(_LI("Pool cannot be created or modified because the Load " + "Balancer is in an immutable state")) + raise exceptions.ImmutableObject(resource=_('Load Balancer'), + id=lb_id) + + def _reset_lb_and_listener_statuses(self, session, lb_id, listener_ids): + # Setting LB + listeners back to active because this should be a + # recoverable error + self.repositories.load_balancer.update( + session, lb_id, + provisioning_status=constants.ACTIVE) + for listener in listener_ids: + self.repositories.listener.update( + session, listener, + provisioning_status=constants.ACTIVE) + + def _validate_create_pool(self, lock_session, pool_dict, listener_id=None): + """Validate creating pool on load balancer. + + Update database for load balancer and (optional) listener based on + provisioning status. + """ + try: + return self.repositories.create_pool_on_load_balancer( + lock_session, pool_dict, + listener_id=listener_id) + except odb_exceptions.DBDuplicateEntry as de: + if ['id'] == de.columns: + raise exceptions.IDAlreadyExists() + except odb_exceptions.DBError: + # TODO(blogan): will have to do separate validation protocol + # before creation or update since the exception messages + # do not give any information as to what constraint failed + raise exceptions.InvalidOption(value='', option='') + + def _send_pool_to_handler(self, session, db_pool, listener_id): + try: + LOG.info(_LI("Sending Creation of Pool %s to handler"), + db_pool.id) + self.handler.create(db_pool) + except Exception: + with (excutils.save_and_reraise_exception(reraise=False) and + db_api.get_lock_session()): + self._reset_lb_and_listener_statuses( + session, lb_id=db_pool.load_balancer_id, + listener_ids=[listener_id] if listener_id else []) + # Pool now goes to ERROR + self.repositories.pool.update( + session, db_pool.id, + provisioning_status=constants.ERROR) + db_pool = self._get_db_pool(session, db_pool.id) + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + return pool_types.PoolRootResponse(pool=result) + + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, + body=pool_types.PoolRootPOST, status_code=201) + def post(self, pool_): + """Creates a pool on a load balancer or listener. + + Note that this can optionally take a listener_id with which the pool + should be associated as the listener's default_pool. If specified, + the pool creation will fail if the listener specified already has + a default_pool. 
+ """ + # For some API requests the listener_id will be passed in the + # pool_dict: + pool = pool_.pool + context = pecan.request.context.get('octavia_context') + + if pool.loadbalancer_id: + pool.project_id = self._get_lb_project_id(context.session, + pool.loadbalancer_id) + elif pool.listener_id: + listener = self.repositories.listener.get( + context.session, id=pool.listener_id) + pool.project_id = listener.project_id + pool.loadbalancer_id = listener.load_balancer_id + else: + msg = _("Must provide at least one of: " + "loadbalancer_id, listener_id") + raise exceptions.ValidationException(details=msg) + + lock_session = db_api.get_session(autocommit=False) + if self.repositories.check_quota_met( + context.session, + lock_session, + data_models.Pool, + pool.project_id): + lock_session.rollback() + raise exceptions.QuotaException + + listener_repo = self.repositories.listener + pool_dict = db_prepare.create_pool( + pool.to_dict(render_unsets=True)) + + listener_id = pool_dict.pop('listener_id', None) + if listener_id: + if listener_repo.has_default_pool(lock_session, + listener_id): + raise exceptions.DuplicatePoolEntry() + + try: + self._test_lb_and_listener_statuses( + lock_session, lb_id=pool_dict['load_balancer_id'], + listener_ids=[listener_id] if listener_id else []) + + db_pool = self._validate_create_pool( + lock_session, pool_dict, listener_id) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + lock_session.rollback() + + return self._send_pool_to_handler(context.session, db_pool, + listener_id=listener_id) + + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text, + body=pool_types.PoolRootPut, status_code=200) + def put(self, id, pool_): + """Updates a pool on a load balancer.""" + pool = pool_.pool + context = pecan.request.context.get('octavia_context') + db_pool = self._get_db_pool(context.session, id) + self._test_lb_and_listener_statuses( + context.session, lb_id=db_pool.load_balancer_id, + listener_ids=self._get_affected_listener_ids(db_pool)) + self.repositories.pool.update( + context.session, db_pool.id, + provisioning_status=constants.PENDING_UPDATE) + try: + LOG.info(_LI("Sending Update of Pool %s to handler"), id) + self.handler.update(db_pool, pool) + except Exception: + with (excutils.save_and_reraise_exception(reraise=False) and + db_api.get_lock_session()): + self._reset_lb_and_listener_statuses( + context.session, lb_id=db_pool.load_balancer_id, + listener_ids=self._get_affected_listener_ids(db_pool)) + # Pool now goes to ERROR + self.repositories.pool.update( + context.session, db_pool.id, + provisioning_status=constants.ERROR) + db_pool = self._get_db_pool(context.session, id) + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + return pool_types.PoolRootResponse(pool=result) + + @wsme_pecan.wsexpose(None, wtypes.text, status_code=204) + def delete(self, id): + """Deletes a pool from a load balancer.""" + context = pecan.request.context.get('octavia_context') + db_pool = self._get_db_pool(context.session, id) + if len(db_pool.l7policies) > 0: + raise exceptions.PoolInUseByL7Policy( + id=db_pool.id, l7policy_id=db_pool.l7policies[0].id) + self._test_lb_and_listener_statuses( + context.session, lb_id=db_pool.load_balancer_id, + listener_ids=self._get_affected_listener_ids(db_pool)) + self.repositories.pool.update( + context.session, db_pool.id, + provisioning_status=constants.PENDING_DELETE) + + try: + LOG.info(_LI("Sending Deletion of Pool %s to handler"), + db_pool.id) + 
self.handler.delete(db_pool) + except Exception: + with (excutils.save_and_reraise_exception(reraise=False) and + db_api.get_lock_session()): + self._reset_lb_and_listener_statuses( + context.session, lb_id=db_pool.load_balancer_id, + listener_ids=self._get_affected_listener_ids(db_pool)) + # Pool now goes to ERROR + self.repositories.pool.update( + context.session, db_pool.id, + provisioning_status=constants.ERROR) + db_pool = self.repositories.pool.get(context.session, id=db_pool.id) + result = self._convert_db_to_type(db_pool, pool_types.PoolResponse) + return pool_types.PoolRootResponse(pool=result) + + @pecan.expose() + def _lookup(self, pool_id, *remainder): + """Overridden pecan _lookup method for custom routing. + + Verifies that the pool passed in the url exists, and if so decides + which controller, if any, should control be passed. + """ + context = pecan.request.context.get('octavia_context') + if pool_id and len(remainder) and (remainder[0] == 'members' or + remainder[0] == 'healthmonitor'): + controller = remainder[0] + remainder = remainder[1:] + db_pool = self.repositories.pool.get(context.session, id=pool_id) + if not db_pool: + LOG.info(_LI("Pool %s not found."), pool_id) + raise exceptions.NotFound(resource=data_models.Pool._name(), + id=pool_id) + if controller == 'members': + return member.MembersController( + load_balancer_id=db_pool.load_balancer_id, + pool_id=db_pool.id), remainder + elif controller == 'healthmonitor': + return health_monitor.HealthMonitorController( + load_balancer_id=db_pool.load_balancer_id, + pool_id=db_pool.id), remainder diff --git a/octavia/api/v2/types/pool.py b/octavia/api/v2/types/pool.py new file mode 100644 index 0000000000..73f4ff9b7d --- /dev/null +++ b/octavia/api/v2/types/pool.py @@ -0,0 +1,160 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
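+
+# The classes below define the WSME request and response types for the v2
+# pools API; request and response bodies are wrapped in a root tag ("pool"
+# or "pools") by the *Root* types declared at the end of this module.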
+ +from wsme import types as wtypes + +from octavia.api.common import types +from octavia.api.v1.types import health_monitor +from octavia.api.v1.types import member +from octavia.common import constants + + +class SessionPersistenceResponse(types.BaseType): + """Defines which attributes are to be shown on any response.""" + type = wtypes.wsattr(wtypes.text) + cookie_name = wtypes.wsattr(wtypes.text) + + +class SessionPersistencePOST(types.BaseType): + """Defines mandatory and optional attributes of a POST request.""" + type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES), + mandatory=True) + cookie_name = wtypes.wsattr(wtypes.text) + + +class SessionPersistencePUT(types.BaseType): + """Defines attributes that are acceptable of a PUT request.""" + type = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_SP_TYPES)) + cookie_name = wtypes.wsattr(wtypes.text) + + +class BasePoolType(types.BaseType): + _type_to_model_map = {'admin_state_up': 'enabled'} + + +class MinimalLoadBalancer(types.BaseType): + id = wtypes.wsattr(wtypes.UuidType()) + + +class MinimalListener(types.BaseType): + id = wtypes.wsattr(wtypes.UuidType()) + + +class PoolResponse(BasePoolType): + """Defines which attributes are to be shown on any response.""" + id = wtypes.wsattr(wtypes.UuidType()) + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + provisioning_status = wtypes.wsattr(wtypes.StringType()) + operating_status = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + protocol = wtypes.wsattr(wtypes.text) + lb_algorithm = wtypes.wsattr(wtypes.text) + session_persistence = wtypes.wsattr(SessionPersistenceResponse) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType()) + # TODO(johnsom) Remove after deprecation (R series) + tenant_id = wtypes.wsattr(wtypes.StringType()) + loadbalancers = wtypes.wsattr([MinimalLoadBalancer]) + listeners = wtypes.wsattr([MinimalListener]) + created_at = wtypes.wsattr(wtypes.datetime.datetime) + updated_at = wtypes.wsattr(wtypes.datetime.datetime) + health_monitor = wtypes.wsattr(health_monitor.HealthMonitorResponse) + health_monitor_id = wtypes.wsattr(wtypes.UuidType()) + members = wtypes.wsattr([member.MemberResponse]) + + @classmethod + def from_data_model(cls, data_model, children=False): + pool = super(PoolResponse, cls).from_data_model( + data_model, children=children) + pool.tenant_id = pool.project_id + if data_model.session_persistence: + pool.session_persistence = ( + SessionPersistenceResponse.from_data_model( + data_model.session_persistence)) + if data_model.load_balancer: + pool.loadbalancers = ( + [MinimalLoadBalancer.from_data_model( + data_model.load_balancer)]) + if data_model.listeners: + pool.listeners = ( + [MinimalListener.from_data_model(i) + for i in data_model.listeners]) + else: + pool.listeners = [] + if not children: + # NOTE(blogan): do not show members or health_monitor if the + # request does not want to see children + del pool.members + del pool.health_monitor + return pool + pool.members = [ + member.MemberResponse.from_data_model(member_dm, children=children) + for member_dm in data_model.members + ] + if data_model.health_monitor: + pool.health_monitor = ( + health_monitor.HealthMonitorResponse.from_data_model( + data_model.health_monitor, children=children)) + pool.health_monitor_id = pool.health_monitor.id + if not pool.health_monitor: + del pool.health_monitor + return pool + + +class PoolRootResponse(types.BaseType): + 
pool = wtypes.wsattr(PoolResponse) + + +class PoolsRootResponse(types.BaseType): + pools = wtypes.wsattr([PoolResponse]) + + +class PoolPOST(BasePoolType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + listener_id = wtypes.wsattr(wtypes.UuidType()) + loadbalancer_id = wtypes.wsattr(wtypes.UuidType()) + protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS), + mandatory=True) + lb_algorithm = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS), + mandatory=True) + session_persistence = wtypes.wsattr(SessionPersistencePOST) + # TODO(johnsom) Remove after deprecation (R series) + project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + # TODO(johnsom) Remove after deprecation (R series) + tenant_id = wtypes.wsattr(wtypes.StringType(max_length=36)) + health_monitor = wtypes.wsattr(health_monitor.HealthMonitorPOST) + members = wtypes.wsattr([member.MemberPOST]) + + +class PoolRootPOST(types.BaseType): + pool = wtypes.wsattr(PoolPOST) + + +class PoolPUT(BasePoolType): + """Defines attributes that are acceptable of a PUT request.""" + name = wtypes.wsattr(wtypes.StringType()) + description = wtypes.wsattr(wtypes.StringType()) + admin_state_up = wtypes.wsattr(bool) + lb_algorithm = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS)) + session_persistence = wtypes.wsattr(SessionPersistencePUT) + + +class PoolRootPut(types.BaseType): + pool = wtypes.wsattr(PoolPUT) diff --git a/octavia/db/api.py b/octavia/db/api.py index b4f99ec988..b95535b08f 100644 --- a/octavia/db/api.py +++ b/octavia/db/api.py @@ -12,8 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. 
+import contextlib + from oslo_config import cfg from oslo_db.sqlalchemy import session as db_session +from oslo_utils import excutils _FACADE = None @@ -35,3 +38,15 @@ def get_session(expire_on_commit=True, autocommit=True): facade = _create_facade_lazily() return facade.get_session(expire_on_commit=expire_on_commit, autocommit=autocommit) + + +@contextlib.contextmanager +def get_lock_session(): + """Context manager for using a locking (not auto-commit) session.""" + lock_session = get_session(autocommit=False) + try: + yield + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + lock_session.rollback() diff --git a/octavia/db/prepare.py b/octavia/db/prepare.py index f0b20a4164..a3ee9cccfb 100644 --- a/octavia/db/prepare.py +++ b/octavia/db/prepare.py @@ -70,7 +70,7 @@ def create_load_balancer(lb_dict): def create_listener(listener_dict, lb_id): if not listener_dict.get('id'): listener_dict['id'] = uuidutils.generate_uuid() - if 'loadbalancer_id' in listener_dict.keys(): + if 'loadbalancer_id' in listener_dict: listener_dict['load_balancer_id'] = listener_dict.pop( 'loadbalancer_id') else: @@ -136,7 +136,9 @@ def create_l7rule(l7rule_dict, l7policy_id): def create_pool(pool_dict, lb_id=None): if not pool_dict.get('id'): pool_dict['id'] = uuidutils.generate_uuid() - if lb_id: + if 'loadbalancer_id' in pool_dict: + pool_dict['load_balancer_id'] = pool_dict.pop('loadbalancer_id') + else: pool_dict['load_balancer_id'] = lb_id if pool_dict.get('session_persistence'): pool_dict['session_persistence']['pool_id'] = pool_dict.get('id') @@ -146,6 +148,7 @@ def create_pool(pool_dict, lb_id=None): prepped_members = [] for member_dict in pool_dict.get('members'): prepped_members.append(create_member(member_dict, pool_dict['id'])) + pool_dict['provisioning_status'] = constants.PENDING_CREATE pool_dict['operating_status'] = constants.OFFLINE return pool_dict diff --git a/octavia/tests/functional/api/v2/base.py b/octavia/tests/functional/api/v2/base.py index bf8ef35ce4..f05c316c92 100644 --- a/octavia/tests/functional/api/v2/base.py +++ b/octavia/tests/functional/api/v2/base.py @@ -74,8 +74,16 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): patcher = mock.patch('octavia.api.handlers.controller_simulator.' 
'handler.SimulatedControllerHandler') self.handler_mock = patcher.start() - self.addCleanup(self.handler_mock.stop) + self.check_quota_met_true_mock = mock.patch( + 'octavia.db.repositories.Repositories.check_quota_met', + return_value=True) self.app = self._make_app() + # For no apparent reason, the controller code for v2 uses a static + # handler mock (the one generated on the initial run) so we need to + # retrieve it so we use the "correct" mock instead of the one above + self.handler_mock_bug_workaround = getattr( + self.app.app.application.application.application.root, + 'v2.0').handler self.project_id = uuidutils.generate_uuid() def reset_pecan(): @@ -91,6 +99,9 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): def _get_full_path(self, path): return ''.join([self.BASE_PATH, path]) + def _build_body(self, json): + return {self.root_tag: json} + def delete(self, path, headers=None, status=204, expect_errors=False): headers = headers or {} full_path = self._get_full_path(path) @@ -176,7 +187,7 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): def create_pool_with_listener(self, lb_id, listener_id, protocol, lb_algorithm, **optionals): - req_dict = {'load_balancer_id': lb_id, 'listener_id': listener_id, + req_dict = {'loadbalancer_id': lb_id, 'listener_id': listener_id, 'protocol': protocol, 'lb_algorithm': lb_algorithm} req_dict.update(optionals) body = {'pool': req_dict} @@ -185,7 +196,7 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): return response.json def create_pool(self, lb_id, protocol, lb_algorithm, **optionals): - req_dict = {'load_balancer_id': lb_id, 'protocol': protocol, + req_dict = {'loadbalancer_id': lb_id, 'protocol': protocol, 'lb_algorithm': lb_algorithm} req_dict.update(optionals) body = {'pool': req_dict} @@ -267,15 +278,18 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): lb_listeners = self.listener_repo.get_all(db_api.get_session(), load_balancer_id=lb_id) for listener in lb_listeners: - for pool in listener.pools: - self.pool_repo.update(db_api.get_session(), pool.id, - operating_status=op_status) - for member in pool.members: - self.member_repo.update(db_api.get_session(), member.id, - operating_status=op_status) self.listener_repo.update(db_api.get_session(), listener.id, provisioning_status=prov_status, operating_status=op_status) + lb_pools = self.pool_repo.get_all(db_api.get_session(), + load_balancer_id=lb_id) + for pool in lb_pools: + self.pool_repo.update(db_api.get_session(), pool.id, + provisioning_status=prov_status, + operating_status=op_status) + for member in pool.members: + self.member_repo.update(db_api.get_session(), member.id, + operating_status=op_status) def set_lb_status(self, lb_id, status=constants.ACTIVE): if status == constants.DELETED: @@ -326,3 +340,12 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): api_listener.get('provisioning_status')) self.assertEqual(operating_status, api_listener.get('operating_status')) + + def assert_correct_pool_status(self, provisioning_status, + operating_status, pool_id): + api_pool = self.get(self.POOL_PATH.format( + pool_id=pool_id)).json.get('pool') + self.assertEqual(provisioning_status, + api_pool.get('provisioning_status')) + self.assertEqual(operating_status, + api_pool.get('operating_status')) diff --git a/octavia/tests/functional/api/v2/test_listener.py b/octavia/tests/functional/api/v2/test_listener.py index 3ab4454e23..0b4d2b7ff7 100644 --- a/octavia/tests/functional/api/v2/test_listener.py +++ b/octavia/tests/functional/api/v2/test_listener.py @@ -21,8 +21,6 @@ from 
octavia.common import constants import octavia.common.context from octavia.tests.functional.api.v2 import base -import testtools - class TestListener(base.BaseAPITest): @@ -35,13 +33,10 @@ class TestListener(base.BaseAPITest): self.set_lb_status(self.lb['loadbalancer']['id']) self.listeners_path = self.LISTENERS_PATH self.listener_path = self.LISTENERS_PATH + '/{listener_id}' - # self.pool = self.create_pool( - # self.lb['loadbalancer']['id'], constants.PROTOCOL_HTTP, - # constants.LB_ALGORITHM_ROUND_ROBIN, 'pool') - # self.set_lb_status(self.lb['loadbalancer']['id']) - - def _build_body(self, json): - return {self.root_tag: json} + self.pool = self.create_pool( + self.lb['loadbalancer']['id'], constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) + self.set_lb_status(self.lb['loadbalancer']['id']) def test_get_all_admin(self): project_id = uuidutils.generate_uuid() @@ -211,18 +206,19 @@ class TestListener(base.BaseAPITest): self.lb['loadbalancer']['id'], status=409) - @testtools.skip('Skip until complete v2 merge') def test_create_with_default_pool_id(self): lb_listener = {'name': 'listener1', - 'default_pool_id': self.pool.get('id'), + 'default_pool_id': self.pool['pool']['id'], 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, - 'protocol_port': 80} - response = self.post(self.listeners_path, lb_listener) - api_listener = response.json + 'protocol_port': 80, + 'loadbalancer_id': self.lb['loadbalancer']['id']} + body = self._build_body(lb_listener) + response = self.post(self.listeners_path, body) + api_listener = response.json['listener'] self.assertEqual(api_listener.get('default_pool_id'), - self.pool.get('id')) + self.pool['pool']['id']) def test_create_with_bad_default_pool_id(self): lb_listener = {'name': 'listener1', @@ -235,26 +231,27 @@ class TestListener(base.BaseAPITest): body = self._build_body(lb_listener) self.post(self.listeners_path, body, status=404) - @testtools.skip('Skip until complete v2 merge') def test_create_with_shared_default_pool_id(self): lb_listener1 = {'name': 'listener1', - 'default_pool_id': self.pool.get('id'), + 'default_pool_id': self.pool['pool']['id'], 'description': 'desc1', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, - 'protocol_port': 80} + 'protocol_port': 80, + 'loadbalancer_id': self.lb['loadbalancer']['id']} lb_listener2 = {'name': 'listener2', - 'default_pool_id': self.pool.get('id'), + 'default_pool_id': self.pool['pool']['id'], 'description': 'desc2', 'admin_state_up': False, 'protocol': constants.PROTOCOL_HTTP, - 'protocol_port': 81} + 'protocol_port': 81, + 'loadbalancer_id': self.lb['loadbalancer']['id']} body1 = self._build_body(lb_listener1) body2 = self._build_body(lb_listener2) - listener1 = self.post(self.listeners_path, body1).json - self.set_lb_status(self.lb.get('id'), constants.ACTIVE) - listener2 = self.post(self.listeners_path, body2).json - self.assertEqual(listener1['default_pool_id'], self.pool.get('id')) + listener1 = self.post(self.listeners_path, body1).json['listener'] + self.set_lb_status(self.lb['loadbalancer']['id'], constants.ACTIVE) + listener2 = self.post(self.listeners_path, body2).json['listener'] + self.assertEqual(listener1['default_pool_id'], self.pool['pool']['id']) self.assertEqual(listener1['default_pool_id'], listener2['default_pool_id']) @@ -290,59 +287,60 @@ class TestListener(base.BaseAPITest): self.assert_final_listener_statuses(self.lb['loadbalancer']['id'], listener_api['id']) - @testtools.skip('Skip until complete v2 merge') def 
test_update(self): tls_uuid = uuidutils.generate_uuid() - listener = self.create_listener(self.lb['loadbalancer']['id'], - constants.PROTOCOL_TCP, 80, - name='listener1', description='desc1', - enabled=False, connection_limit=10, - default_tls_container_ref=tls_uuid, - default_pool_id=None) + listener = self.create_listener( + constants.PROTOCOL_TCP, 80, self.lb['loadbalancer']['id'], + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_tls_container_ref=tls_uuid, + default_pool_id=None) self.set_lb_status(self.lb['loadbalancer']['id']) new_listener = {'name': 'listener2', 'admin_state_up': True, - 'default_pool_id': self.pool.get('id')} - listener_path = self.LISTENER_PATH.format(listener_id=listener['id']) - api_listener = self.put(listener_path, new_listener).json + 'default_pool_id': self.pool['pool']['id']} + body = self._build_body(new_listener) + listener_path = self.LISTENER_PATH.format( + listener_id=listener['listener']['id']) + api_listener = self.put(listener_path, body).json update_expect = {'name': 'listener2', 'admin_state_up': True, - 'default_pool_id': self.pool.get('id'), + 'default_pool_id': self.pool['pool']['id'], 'provisioning_status': constants.PENDING_UPDATE, 'operating_status': constants.ONLINE} listener.update(update_expect) - self.assertEqual(listener.pop('created_at'), - api_listener.pop('created_at')) - self.assertNotEqual(listener.pop('updated_at'), - api_listener.pop('updated_at')) + self.assertEqual(listener['listener']['created_at'], + api_listener['listener']['created_at']) + self.assertNotEqual(listener['listener']['updated_at'], + api_listener['listener']['updated_at']) self.assertNotEqual(listener, api_listener) - self.assert_correct_lb_status(self.lb.get('id'), + self.assert_correct_lb_status(self.lb['loadbalancer']['id'], constants.PENDING_UPDATE, constants.ONLINE) - self.assert_final_listener_statuses(self.lb.get('id'), - api_listener.get('id')) + self.assert_final_listener_statuses(self.lb['loadbalancer']['id'], + api_listener['listener']['id']) def test_update_bad_listener_id(self): self.put(self.listener_path.format(listener_id='SEAN-CONNERY'), body={}, status=404) - @testtools.skip('Skip until complete v2 merge') def test_update_with_bad_default_pool_id(self): bad_pool_uuid = uuidutils.generate_uuid() - listener = self.create_listener(self.lb.get('id'), - constants.PROTOCOL_TCP, 80, - name='listener1', description='desc1', - enabled=False, connection_limit=10, - default_pool_id=self.pool.get('id')) - self.set_lb_status(self.lb.get('id')) + listener = self.create_listener( + constants.PROTOCOL_TCP, 80, self.lb['loadbalancer']['id'], + name='listener1', description='desc1', + admin_state_up=False, connection_limit=10, + default_pool_id=self.pool['pool']['id']) + self.set_lb_status(self.lb['loadbalancer']['id']) new_listener = {'name': 'listener2', 'admin_state_up': True, 'default_pool_id': bad_pool_uuid} + body = self._build_body(new_listener) listener_path = self.LISTENER_PATH.format( - lb_id=self.lb.get('id'), listener_id=listener.get('id')) - self.put(listener_path, new_listener, status=404) - self.assert_correct_lb_status(self.lb.get('id'), + listener_id=listener['listener']['id']) + self.put(listener_path, body, status=404) + self.assert_correct_lb_status(self.lb['loadbalancer']['id'], constants.ACTIVE, constants.ONLINE) - self.assert_final_listener_statuses(self.lb.get('id'), - listener.get('id')) + self.assert_final_listener_statuses(self.lb['loadbalancer']['id'], + listener['listener']['id']) def 
test_create_listeners_same_port(self): listener1 = self.create_listener(constants.PROTOCOL_TCP, 80, diff --git a/octavia/tests/functional/api/v2/test_load_balancer.py b/octavia/tests/functional/api/v2/test_load_balancer.py index d6a7eb4af0..d74def0de1 100644 --- a/octavia/tests/functional/api/v2/test_load_balancer.py +++ b/octavia/tests/functional/api/v2/test_load_balancer.py @@ -52,9 +52,6 @@ class TestLoadBalancer(base.BaseAPITest): self.assertEqual(value, req.get(key)) self.assert_final_lb_statuses(resp.get('id')) - def _build_body(self, json): - return {self.root_tag: json} - def test_empty_list(self): response = self.get(self.LBS_PATH) api_list = response.json.get(self.root_tag_list) @@ -415,8 +412,7 @@ class TestLoadBalancer(base.BaseAPITest): lb_json = self._build_body({'name': 'Steve'}) lb_dict = lb.get(self.root_tag) lb = self.set_lb_status(lb_dict.get('id')) - self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json, - status=200) + self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')), lb_json) self.delete(self.LB_PATH.format(lb_id=lb_dict.get('id')), status=409) def test_delete_with_error_status(self): diff --git a/octavia/tests/functional/api/v2/test_pool.py b/octavia/tests/functional/api/v2/test_pool.py new file mode 100644 index 0000000000..35b9c04ca9 --- /dev/null +++ b/octavia/tests/functional/api/v2/test_pool.py @@ -0,0 +1,751 @@ +# Copyright 2014 Rackspace +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
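+
+# Functional tests for the v2 pools API. These drive the pecan app through
+# the new /v2.0/pools and /v2.0/lbaas/pools paths and assert the resulting
+# load balancer, listener and pool provisioning/operating statuses.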
+ +from oslo_utils import uuidutils + +from octavia.common import constants +from octavia.tests.functional.api.v2 import base + +import testtools + + +class TestPool(base.BaseAPITest): + + root_tag = 'pool' + root_tag_list = 'pools' + root_tag_links = 'pools_links' + + def setUp(self): + super(TestPool, self).setUp() + + self.lb = self.create_load_balancer( + uuidutils.generate_uuid()).get('loadbalancer') + self.lb_id = self.lb['id'] + + self.set_lb_status(self.lb_id) + + self.listener = self.create_listener( + constants.PROTOCOL_HTTP, 80, + self.lb_id).get('listener') + self.listener_id = self.listener['id'] + + self.set_lb_status(self.lb_id) + + def test_get(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + pool['provisioning_status'] = constants.ACTIVE + pool['operating_status'] = constants.ONLINE + pool.pop('updated_at') + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format(pool_id=pool.get('id'))).json + response_body = response.get('pool') + response_body.pop('updated_at') + self.assertEqual(pool, response_body) + + def test_bad_get(self): + self.get(self.POOL_PATH.format(pool_id=uuidutils.generate_uuid()), + status=404) + + def test_get_all(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOLS_PATH).json + response_body = response.get('pools') + self.assertIsInstance(response_body, list) + self.assertEqual(1, len(response_body)) + self.assertEqual( + pool.get('id'), response_body[0].get('id')) + + def test_get_all_with_listener(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOLS_PATH).json + response_body = response.get('pools') + self.assertIsInstance(response_body, list) + self.assertEqual(1, len(response_body)) + self.assertEqual(pool.get('id'), response_body[0].get('id')) + + def test_empty_get_all(self): + response = self.get(self.POOLS_PATH).json + response_body = response.get('pools') + self.assertIsInstance(response_body, list) + self.assertEqual(0, len(response_body)) + + def test_create(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.PROTOCOL_HTTP, pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + pool.get('lb_algorithm')) + self.assertIsNotNone(pool.get('created_at')) + self.assertIsNone(pool.get('updated_at')) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_create_sans_listener(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN) 
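+        # Only a loadbalancer_id is supplied here, so the controller derives
+        # the project_id from the load balancer and no listener gains a
+        # default_pool.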
+ pool = api_pool.get('pool') + self.assertEqual(constants.PROTOCOL_HTTP, pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + pool.get('lb_algorithm')) + # Make sure listener status is unchanged, but LB status is changed. + # LB should still be locked even with pool and subordinate object + # updates. + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + + def test_create_sans_loadbalancer_id(self): + api_pool = self.create_pool( + None, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.assertEqual(constants.PROTOCOL_HTTP, pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + pool.get('lb_algorithm')) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + + def test_create_with_listener_id_in_pool_dict(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.set_lb_status(self.lb_id) + self.assertEqual(constants.PROTOCOL_HTTP, pool.get('protocol')) + self.assertEqual(constants.LB_ALGORITHM_ROUND_ROBIN, + pool.get('lb_algorithm')) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_create_with_project_id(self): + pid = self.lb.get('project_id') + optionals = { + 'listener_id': self.listener_id, + 'project_id': pid} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.assertEqual(pid, pool.get('tenant_id')) + + def test_bad_create(self): + lb_pool = {'name': 'test1'} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_create_with_listener_with_default_pool_id_set(self): + self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + self.set_lb_status(self.lb_id, constants.ACTIVE) + path = self.POOLS_PATH + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + self.post(path, self._build_body(lb_pool), status=409) + + def test_create_bad_protocol(self): + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'protocol': 'STUPID_PROTOCOL', + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + + def test_create_with_bad_handler(self): + self.handler_mock_bug_workaround.pool.create.side_effect = Exception() + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + 
constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + # This mock doesn't recycle properly so we have to do cleanup manually + self.handler_mock_bug_workaround.pool.create.side_effect = None + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + self.assert_correct_pool_status( + constants.ERROR, + constants.OFFLINE, + api_pool['pool']['id'] + ) + + def test_create_over_quota(self): + self.check_quota_met_true_mock.start() + self.addCleanup(self.check_quota_met_true_mock.stop) + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=403) + + def test_update(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + new_pool = {'name': 'new_name'} + self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool)) + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.assert_correct_pool_status( + constants.PENDING_UPDATE, + constants.ONLINE, + pool.get('id')) + self.set_lb_status(self.lb_id) + response = self.get(self.POOL_PATH.format(pool_id=pool.get('id'))).json + response_body = response.get('pool') + self.assertNotEqual('new_name', response_body.get('name')) + self.assertIsNotNone(response_body.get('created_at')) + self.assertIsNotNone(response_body.get('updated_at')) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_bad_update(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(self.lb_id) + new_pool = {'enabled': 'one'} + self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool), status=400) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_update_with_bad_handler(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + new_pool = {'name': 'new_name'} + self.handler_mock_bug_workaround.pool.update.side_effect = Exception() + self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool)) + # This mock doesn't recycle properly so we have to do cleanup manually + self.handler_mock_bug_workaround.pool.update.side_effect = None + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + self.assert_correct_pool_status( + constants.ERROR, + constants.ONLINE, + pool.get('id') + ) + + def test_delete(self): + api_pool = self.create_pool( + self.lb_id, + 
constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + pool['provisioning_status'] = constants.ACTIVE + pool['operating_status'] = constants.ONLINE + pool.pop('updated_at') + + response = self.get(self.POOL_PATH.format( + pool_id=pool.get('id'))).json + pool_body = response.get('pool') + + pool_body.pop('updated_at') + self.assertEqual(pool, pool_body) + + self.delete(self.POOL_PATH.format(pool_id=pool.get('id'))) + + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.assert_correct_pool_status( + constants.PENDING_DELETE, + constants.ONLINE, + pool.get('id')) + + def test_bad_delete(self): + self.delete(self.POOL_PATH.format( + pool_id=uuidutils.generate_uuid()), status=404) + + @testtools.skip('Skip until complete v2 merge.') + def test_delete_with_l7policy(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + self.set_lb_status(lb_id=self.lb_id) + pool = api_pool.get('pool') + self.create_l7policy( + self.listener_id, + constants.L7POLICY_ACTION_REDIRECT_TO_POOL, + redirect_pool_id=api_pool.get('id')) + self.set_lb_status(lb_id=self.lb_id) + self.delete(self.POOL_PATH.format( + pool_id=pool.get('id')), status=409) + + def test_delete_with_bad_handler(self): + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + # Set status to ACTIVE/ONLINE because set_lb_status did it in the db + pool['provisioning_status'] = constants.ACTIVE + pool['operating_status'] = constants.ONLINE + response = self.get(self.POOL_PATH.format( + pool_id=pool.get('id'))).json + pool_body = response.get('pool') + + self.assertIsNone(pool.pop('updated_at')) + self.assertIsNotNone(pool_body.pop('updated_at')) + self.assertEqual(pool, pool_body) + self.handler_mock_bug_workaround.pool.delete.side_effect = Exception() + self.delete(self.POOL_PATH.format(pool_id=pool.get('id'))) + # This mock doesn't recycle properly so we have to do cleanup manually + self.handler_mock_bug_workaround.pool.delete.side_effect = None + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + self.assert_correct_pool_status( + constants.ERROR, + constants.ONLINE, + pool.get('id') + ) + + def test_create_with_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.set_lb_status(self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=pool.get('id'))).json + pool_body = response.get('pool') + sess_p = 
pool_body.get('session_persistence') + self.assertIsNotNone(sess_p) + self.assertEqual(constants.SESSION_PERSISTENCE_HTTP_COOKIE, + sess_p.get('type')) + self.assertEqual('test_cookie_name', sess_p.get('cookie_name')) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_create_with_bad_session_persistence(self): + sp = {"type": "persistence_type", + "cookie_name": "test_cookie_name"} + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'session_persistence': sp} + self.post(self.POOLS_PATH, self._build_body(lb_pool), status=400) + + def test_add_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + listener_id=self.listener_id) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + new_pool = {'session_persistence': sp} + self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool)) + response = self.get(self.POOL_PATH.format(pool_id=pool.get('id'))).json + pool_body = response.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.assertNotEqual(sp, pool_body.get('session_persistence')) + + def test_update_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=pool.get('id'))).json + pool_body = response.get('pool') + sess_p = pool_body.get('session_persistence') + sess_p['cookie_name'] = None + sess_p['type'] = constants.SESSION_PERSISTENCE_SOURCE_IP + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool)) + response = self.get(self.POOL_PATH.format(pool_id=pool.get('id'))).json + pool_body = response.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.assertNotEqual(sess_p, pool_body.get('session_persistence')) + self.set_lb_status(self.lb_id) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_update_preserve_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "name": "name", "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + new_pool = {'name': 'update_name'} + 
self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool)) + response = self.get(self.POOL_PATH.format(pool_id=pool.get('id'))).json + pool_body = response.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + response = self.get(self.POOL_PATH.format( + pool_id=pool_body.get('id'))).json + pool_body = response.get('pool') + self.assertEqual(sp, pool_body.get('session_persistence')) + + @testtools.skip('This test should pass with a validation layer') + def test_update_bad_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + response = self.get(self.POOL_PATH.format( + pool_id=api_pool.get('id'))).json + pool_body = response.get('pools') + sess_p = pool_body.get('session_persistence') + sess_p['type'] = 'persistence_type' + new_pool = {'session_persistence': sess_p} + self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + self._build_body(new_pool), status=400) + + def test_delete_with_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + self.delete(self.POOL_PATH.format(pool_id=pool.get('id'))) + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.set_lb_status(self.lb_id) + self.assert_correct_lb_status( + self.lb_id, + constants.ACTIVE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.ACTIVE, + constants.ONLINE, + self.listener_id) + + def test_delete_session_persistence(self): + sp = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE, + "cookie_name": "test_cookie_name"} + optionals = {"listener_id": self.listener_id, + "session_persistence": sp} + api_pool = self.create_pool( + self.lb_id, + constants.PROTOCOL_HTTP, + constants.LB_ALGORITHM_ROUND_ROBIN, + **optionals) + pool = api_pool.get('pool') + self.set_lb_status(lb_id=self.lb_id) + new_sp = {"pool": {"session_persistence": None}} + response = self.put(self.POOL_PATH.format(pool_id=pool.get('id')), + new_sp).json + pool_body = response.get('pool') + self.assert_correct_lb_status( + self.lb_id, + constants.PENDING_UPDATE, + constants.ONLINE) + self.assert_correct_listener_status( + constants.PENDING_UPDATE, + constants.ONLINE, + self.listener_id) + self.assertIsNotNone(pool_body.get('session_persistence')) + + def test_create_when_lb_pending_update(self): + self.put(self.LB_PATH.format(lb_id=self.lb_id), + {'loadbalancer': {'name': 'test_name_change'}}) + lb_pool = { + 'loadbalancer_id': self.lb_id, + 'listener_id': self.listener_id, + 'protocol': constants.PROTOCOL_HTTP, + 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, + 'project_id': self.project_id} + self.post(self.POOLS_PATH, 
+        self.post(self.POOLS_PATH,
+                  self._build_body(lb_pool), status=409)
+
+    def test_update_when_lb_pending_update(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id)
+        pool = api_pool.get('pool')
+        self.set_lb_status(self.lb_id)
+        self.put(self.LB_PATH.format(lb_id=self.lb_id),
+                 {'loadbalancer': {'name': 'test_name_change'}})
+        new_pool = {'admin_state_up': False}
+        self.put(self.POOL_PATH.format(pool_id=pool.get('id')),
+                 self._build_body(new_pool), status=409)
+
+    def test_delete_when_lb_pending_update(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id)
+        pool = api_pool.get('pool')
+        self.set_lb_status(self.lb_id)
+        self.put(self.LB_PATH.format(lb_id=self.lb_id),
+                 {"loadbalancer": {'name': 'test_name_change'}})
+        self.delete(self.POOL_PATH.format(pool_id=pool.get('id')), status=409)
+
+    def test_create_when_lb_pending_delete(self):
+        self.delete(self.LB_PATH.format(lb_id=self.lb_id))
+        new_pool = {
+            'loadbalancer_id': self.lb_id,
+            'listener_id': self.listener_id,
+            'protocol': constants.PROTOCOL_HTTP,
+            'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN,
+            'project_id': self.project_id}
+        self.post(self.POOLS_PATH, self._build_body(new_pool), status=409)
+
+    def test_update_when_lb_pending_delete(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id)
+        pool = api_pool.get('pool')
+        self.set_lb_status(self.lb_id)
+        self.delete(self.LB_PATH.format(lb_id=self.lb_id))
+        new_pool = {'admin_state_up': False}
+        self.put(self.POOL_PATH.format(pool_id=pool.get('id')),
+                 self._build_body(new_pool), status=409)
+
+    def test_delete_when_lb_pending_delete(self):
+        api_pool = self.create_pool(
+            self.lb_id,
+            constants.PROTOCOL_HTTP,
+            constants.LB_ALGORITHM_ROUND_ROBIN,
+            listener_id=self.listener_id)
+        pool = api_pool.get('pool')
+        self.set_lb_status(self.lb_id)
+        self.delete(self.LB_PATH.format(lb_id=self.lb_id))
+        self.delete(self.POOL_PATH.format(pool_id=pool.get('id')), status=409)
diff --git a/octavia/tests/unit/api/v2/types/test_pools.py b/octavia/tests/unit/api/v2/types/test_pools.py
new file mode 100644
index 0000000000..066e958d17
--- /dev/null
+++ b/octavia/tests/unit/api/v2/types/test_pools.py
@@ -0,0 +1,156 @@
+# Copyright 2014 Rackspace
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_utils import uuidutils
+from wsme import exc
+from wsme.rest import json as wsme_json
+from wsme import types as wsme_types
+
+from octavia.api.v2.types import pool as pool_type
+from octavia.common import constants
+from octavia.tests.unit.api.common import base
+
+
+class TestSessionPersistence(object):
+
+    _type = None
+
+    def test_session_persistence(self):
+        body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE}
+        sp = wsme_json.fromjson(self._type, body)
+        self.assertIsNotNone(sp.type)
+
+    def test_invalid_type(self):
+        body = {"type": "source_ip"}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_cookie_name(self):
+        body = {"type": constants.SESSION_PERSISTENCE_HTTP_COOKIE,
+                "cookie_name": 10}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+
+class TestPoolPOST(base.BaseTypesTest):
+
+    _type = pool_type.PoolPOST
+
+    def test_pool(self):
+        body = {
+            "loadbalancer_id": uuidutils.generate_uuid(),
+            "protocol": constants.PROTOCOL_HTTP,
+            "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
+        pool = wsme_json.fromjson(self._type, body)
+        self.assertTrue(pool.admin_state_up)
+
+    def test_load_balancer_mandatory(self):
+        body = {"loadbalancer_id": uuidutils.generate_uuid()}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_protocol_mandatory(self):
+        body = {"lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_lb_algorithm_mandatory(self):
+        body = {"protocol": constants.PROTOCOL_HTTP}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_name(self):
+        body = {"name": 10,
+                "loadbalancer_id": uuidutils.generate_uuid(),
+                "protocol": constants.PROTOCOL_HTTP,
+                "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_description(self):
+        body = {"description": 10,
+                "loadbalancer_id": uuidutils.generate_uuid(),
+                "protocol": constants.PROTOCOL_HTTP,
+                "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_load_balancer_id(self):
+        body = {"loadbalancer_id": 10,
+                "protocol": constants.PROTOCOL_HTTP,
+                "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_protocol(self):
+        body = {"loadbalancer_id": uuidutils.generate_uuid(),
+                "protocol": "http",
+                "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_lb_algorithm(self):
+        body = {"loadbalancer_id": uuidutils.generate_uuid(),
+                "protocol": constants.PROTOCOL_HTTP,
+                "lb_algorithm": "source_ip"}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_non_uuid_project_id(self):
+        body = {"loadbalancer_id": uuidutils.generate_uuid(),
+                "protocol": constants.PROTOCOL_HTTP,
+                "lb_algorithm": constants.LB_ALGORITHM_ROUND_ROBIN,
+                "project_id": "non-uuid"}
+        pool = wsme_json.fromjson(self._type, body)
+        self.assertEqual(pool.project_id, body['project_id'])
+
+
+class TestPoolPUT(base.BaseTypesTest):
+
+    _type = pool_type.PoolPUT
+
+    def test_pool(self):
+        body = {"name": "test_name"}
+        pool = wsme_json.fromjson(self._type, body)
+        self.assertEqual(wsme_types.Unset, pool.admin_state_up)
+
+    def test_invalid_name(self):
+        body = {"name": 10}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_description(self):
+        body = {"description": 10}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+    def test_invalid_lb_algorithm(self):
+        body = {"lb_algorithm": "source_ip"}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+
+class TestSessionPersistencePOST(base.BaseTypesTest, TestSessionPersistence):
+
+    _type = pool_type.SessionPersistencePOST
+
+    def test_type_mandatory(self):
+        body = {"cookie_name": "test_name"}
+        self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
+                          body)
+
+
+class TestSessionPersistencePUT(base.BaseTypesTest, TestSessionPersistence):
+
+    _type = pool_type.SessionPersistencePUT