From fb0da76c27735a965340e02cebff8be46271f579 Mon Sep 17 00:00:00 2001 From: Adam Harwell Date: Sat, 22 Apr 2017 11:12:11 +0900 Subject: [PATCH] Add support for single-create for APIv2 Still need to fix the entry-points for each individual type, but that wasn't even in the original spec. Not sure if we even want that. I think this may not do things EXACTLY how the old one did it, we'll need to look into whether it matters, as we never published docs for it and I don't think it ever actually worked properly in neutron-lbaas. Also closing a few bugs that are only peripherally related, because we (possibly me) forgot to tag them on the individual CRs, but I'm considering them closed as of this patch. See below for my reasoning on each individual bug, and feel free to post counter-arguments. For #1673546 (single-call create): This is the obvious one! For #1673499 (lb return pool object): Rolled into this patch as a matter of course, abandoned the original fix as it is no longer relevant. For #1544214 (root tags): All existing resources now have root tags. Any new ones will also need root tags, but I would consider this bug closed. For #1596636 (tenant facing API): Every object is now creatable via the v2 API, so I would consider this to be complete. Quotas and some additional work is being finished, but it's not necessary for this IMO. For #1665446 (hm id): This was resolved in the HM patch, I just forgot to close it, and including it here will ensure it is release-tracked. For #1685789 (listener quota): Just shoving it in here as I do the single-create quotas. For #1685827 (hm quota): Same as listener quota. 
Closes-Bug: #1673546 Closes-Bug: #1673499 Closes-Bug: #1544214 Closes-Bug: #1596636 Closes-Bug: #1665446 Closes-Bug: #1685789 Closes-Bug: #1685827 Depends-On: I3d86482a2999197a60a81d42afc5ef7a6e71e313 Change-Id: I4ff03593e1cfd8dca00a13c0550d6cf95b93d746 --- .../loadbalancer-full-create-request.json | 8 +- .../loadbalancer-full-create-response.json | 226 ++++----- api-ref/source/v2/loadbalancer.inc | 8 +- octavia/api/common/types.py | 12 + octavia/api/v2/controllers/health_monitor.py | 17 +- octavia/api/v2/controllers/l7policy.py | 17 + octavia/api/v2/controllers/l7rule.py | 18 +- octavia/api/v2/controllers/listener.py | 83 +++- octavia/api/v2/controllers/load_balancer.py | 132 +++++- octavia/api/v2/controllers/member.py | 8 + octavia/api/v2/controllers/pool.py | 40 +- octavia/api/v2/types/health_monitor.py | 48 +- octavia/api/v2/types/l7policy.py | 41 +- octavia/api/v2/types/l7rule.py | 26 +- octavia/api/v2/types/listener.py | 59 ++- octavia/api/v2/types/load_balancer.py | 45 +- octavia/api/v2/types/member.py | 19 + octavia/api/v2/types/pool.py | 71 +-- octavia/common/exceptions.py | 5 + .../worker/flows/load_balancer_flows.py | 16 +- .../controller/worker/tasks/database_tasks.py | 80 +++- octavia/db/prepare.py | 3 + octavia/db/repositories.py | 32 +- octavia/tests/functional/api/v2/base.py | 12 +- .../functional/api/v2/test_health_monitor.py | 11 + .../tests/functional/api/v2/test_l7policy.py | 4 +- .../tests/functional/api/v2/test_l7rule.py | 8 - .../tests/functional/api/v2/test_listener.py | 10 + .../functional/api/v2/test_load_balancer.py | 447 +++++++++++------- .../tests/functional/api/v2/test_member.py | 4 +- octavia/tests/functional/api/v2/test_pool.py | 4 +- .../worker/tasks/test_database_tasks.py | 136 +++++- 32 files changed, 1186 insertions(+), 464 deletions(-) diff --git a/api-ref/source/v2/examples/loadbalancer-full-create-request.json b/api-ref/source/v2/examples/loadbalancer-full-create-request.json index 39b57f3c59..279ae7e188 100644 --- 
a/api-ref/source/v2/examples/loadbalancer-full-create-request.json +++ b/api-ref/source/v2/examples/loadbalancer-full-create-request.json @@ -24,11 +24,11 @@ }, "members": [ { - "ip_address": "'192.0.2.16'", + "address": "192.0.2.16", "protocol_port": 80 }, { - "ip_address": "'192.0.2.19'", + "address": "192.0.2.19", "protocol_port": 80 } ] @@ -69,11 +69,11 @@ }, "members": [ { - "ip_address": "'192.0.2.16'", + "address": "192.0.2.51", "protocol_port": 80 }, { - "ip_address": "'192.0.2.19'", + "address": "192.0.2.52", "protocol_port": 80 } ] diff --git a/api-ref/source/v2/examples/loadbalancer-full-create-response.json b/api-ref/source/v2/examples/loadbalancer-full-create-response.json index 41cbe343a7..8a1f300adf 100644 --- a/api-ref/source/v2/examples/loadbalancer-full-create-response.json +++ b/api-ref/source/v2/examples/loadbalancer-full-create-response.json @@ -15,54 +15,7 @@ "default_tls_container_ref": null, "admin_state_up": true, "default_pool": { - "lb_algorithm": "ROUND_ROBIN", - "protocol": "HTTP", - "description": "", - "admin_state_up": true, - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "session_persistence": null, - "healthmonitor": { - "name": "", - "admin_state_up": true, - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "delay": 3, - "expected_codes": "200,201,202", - "max_retries": 2, - "http_method": "GET", - "timeout": 1, - "max_retries_down": 3, - "url_path": "/index.html", - "type": "HTTP", - "id": "a8a2aa3f-d099-4752-8265-e6472f8147f9" - }, - "members": [ - { - "name": "", - "weight": 1, - "admin_state_up": true, - "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "address": "192.0.2.16", - "protocol_port": 80, - "id": "7d19ad6c-d549-453e-a5cd-05382c6be96a" - }, - { - "name": "", - "weight": 1, - "admin_state_up": true, - 
"subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "address": "192.0.2.19", - "protocol_port": 80, - "id": "a167402b-caa6-41d5-b4d4-bde7f2cbfa5e" - } - ], - "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb", - "name": "rr_pool" + "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb" }, "tenant_id": "e3cd678b11784734bc366148aa37580e", "project_id": "e3cd678b11784734bc366148aa37580e", @@ -80,54 +33,7 @@ "default_tls_container_ref": null, "admin_state_up": true, "default_pool": { - "lb_algorithm": "ROUND_ROBIN", - "protocol": "HTTPS", - "description": "", - "admin_state_up": true, - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "session_persistence": null, - "healthmonitor": { - "name": "", - "admin_state_up": true, - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "delay": 3, - "expected_codes": "200,201,202", - "max_retries": 2, - "http_method": "GET", - "timeout": 1, - "max_retries_down": 3, - "url_path": "/index.html", - "type": "HTTPS", - "id": "d5bb7712-26b7-4809-8c14-3b407c0cb00d" - }, - "members": [ - { - "name": "", - "weight": 1, - "admin_state_up": true, - "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "address": "192.0.2.16", - "protocol_port": 80, - "id": "f83832d5-1f22-45fa-866a-4abea36e0886" - }, - { - "name": "", - "weight": 1, - "admin_state_up": true, - "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "address": "192.0.2.19", - "protocol_port": 80, - "id": "f83832d5-1f22-45fa-866a-4abea36e0886" - } - ], "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136", - "name": "https_pool" }, "tenant_id": "e3cd678b11784734bc366148aa37580e", 
"project_id": "e3cd678b11784734bc366148aa37580e", @@ -175,54 +81,104 @@ "provider": "octavia", "pools": [ { - "lb_algorithm": "ROUND_ROBIN", - "protocol": "HTTPS", - "description": "", + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTP", + "description": "", + "admin_state_up": true, + "tenant_id": "e3cd678b11784734bc366148aa37580e", + "project_id": "e3cd678b11784734bc366148aa37580e", + "session_persistence": null, + "healthmonitor": { + "name": "", "admin_state_up": true, "tenant_id": "e3cd678b11784734bc366148aa37580e", "project_id": "e3cd678b11784734bc366148aa37580e", - "session_persistence": null, - "healthmonitor": { + "delay": 3, + "expected_codes": "200,201,202", + "max_retries": 2, + "http_method": "GET", + "timeout": 1, + "max_retries_down": 3, + "url_path": "/index.html", + "type": "HTTP", + "id": "a8a2aa3f-d099-4752-8265-e6472f8147f9" + }, + "members": [ + { "name": "", + "weight": 1, "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", "tenant_id": "e3cd678b11784734bc366148aa37580e", "project_id": "e3cd678b11784734bc366148aa37580e", - "delay": 3, - "expected_codes": "200,201,202", - "max_retries": 2, - "http_method": "GET", - "timeout": 1, - "max_retries_down": 3, - "url_path": "/index.html", - "type": "HTTPS", - "id": "d5bb7712-26b7-4809-8c14-3b407c0cb00d" + "address": "192.0.2.16", + "protocol_port": 80, + "id": "7d19ad6c-d549-453e-a5cd-05382c6be96a" }, - "members": [ - { - "name": "", - "weight": 1, - "admin_state_up": true, - "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "address": "192.0.2.16", - "protocol_port": 80, - "id": "f83832d5-1f22-45fa-866a-4abea36e0886" - }, - { - "name": "", - "weight": 1, - "admin_state_up": true, - "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", - "tenant_id": "e3cd678b11784734bc366148aa37580e", - "project_id": "e3cd678b11784734bc366148aa37580e", - "address": "192.0.2.19", - 
"protocol_port": 80, - "id": "f83832d5-1f22-45fa-866a-4abea36e0886" - } - ], - "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136", - "name": "https_pool" + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "tenant_id": "e3cd678b11784734bc366148aa37580e", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.19", + "protocol_port": 80, + "id": "a167402b-caa6-41d5-b4d4-bde7f2cbfa5e" + } + ], + "id": "c8cec227-410a-4a5b-af13-ecf38c2b0abb", + "name": "rr_pool" + }, + { + "lb_algorithm": "ROUND_ROBIN", + "protocol": "HTTPS", + "description": "", + "admin_state_up": true, + "tenant_id": "e3cd678b11784734bc366148aa37580e", + "project_id": "e3cd678b11784734bc366148aa37580e", + "session_persistence": null, + "healthmonitor": { + "name": "", + "admin_state_up": true, + "tenant_id": "e3cd678b11784734bc366148aa37580e", + "project_id": "e3cd678b11784734bc366148aa37580e", + "delay": 3, + "expected_codes": "200,201,202", + "max_retries": 2, + "http_method": "GET", + "timeout": 1, + "max_retries_down": 3, + "url_path": "/index.html", + "type": "HTTPS", + "id": "d5bb7712-26b7-4809-8c14-3b407c0cb00d" + }, + "members": [ + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "tenant_id": "e3cd678b11784734bc366148aa37580e", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.51", + "protocol_port": 80, + "id": "f83832d5-1f22-45fa-866a-4abea36e0886" + }, + { + "name": "", + "weight": 1, + "admin_state_up": true, + "subnet_id": "bbb35f84-35cc-4b2f-84c2-a6a29bba68aa", + "tenant_id": "e3cd678b11784734bc366148aa37580e", + "project_id": "e3cd678b11784734bc366148aa37580e", + "address": "192.0.2.52", + "protocol_port": 80, + "id": "f83832d5-1f22-45fa-866a-4abea36e0886" + } + ], + "id": "b0577aff-c1f9-40c6-9a3b-7b1d2a669136", + "name": "https_pool" } ], "created_at": "2017-02-28T00:41:44", diff --git 
a/api-ref/source/v2/loadbalancer.inc b/api-ref/source/v2/loadbalancer.inc index d2091057db..3256440c98 100644 --- a/api-ref/source/v2/loadbalancer.inc +++ b/api-ref/source/v2/loadbalancer.inc @@ -203,8 +203,12 @@ Creating a Fully Populated Load Balancer ---------------------------------------- You can configure all documented features of the load balancer at -creation time by specifying the additional elements or attributes -in the request. +creation time by specifying the additional elements or attributes in the +request. + +Note: all pools must have names, and must only be fully defined once. To +reference a pool from multiple objects, supply the pool name only for all +subsequent references. Request Example --------------- diff --git a/octavia/api/common/types.py b/octavia/api/common/types.py index df8ff8b4e6..6f389f5938 100644 --- a/octavia/api/common/types.py +++ b/octavia/api/common/types.py @@ -61,6 +61,10 @@ class URLType(wtypes.UserType): class BaseType(wtypes.Base): + @classmethod + def _full_response(cls): + return False + @classmethod def from_data_model(cls, data_model, children=False): """Converts data_model to Octavia WSME type. 
@@ -135,3 +139,11 @@ class BaseType(wtypes.Base): attr_name = renamed ret_dict[attr_name] = value return ret_dict + + +class IdOnlyType(BaseType): + id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + + +class NameOnlyType(BaseType): + name = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) diff --git a/octavia/api/v2/controllers/health_monitor.py b/octavia/api/v2/controllers/health_monitor.py index 2de9794bfe..684ab92983 100644 --- a/octavia/api/v2/controllers/health_monitor.py +++ b/octavia/api/v2/controllers/health_monitor.py @@ -64,9 +64,7 @@ class HealthMonitorController(base.BaseController): @wsme_pecan.wsexpose(hm_types.HealthMonitorsRootResponse, wtypes.text) def get_all(self, project_id=None): - """Gets a single health monitor's details.""" - # NOTE(blogan): since a pool can only have one health monitor - # we are using the get_all method to only get the single health monitor + """Gets all health monitors.""" context = pecan.request.context.get('octavia_context') if context.is_admin or CONF.auth_strategy == constants.NOAUTH: if project_id: @@ -161,6 +159,13 @@ class HealthMonitorController(base.BaseController): health_monitor.project_id = pool.project_id lock_session = db_api.get_session(autocommit=False) + if self.repositories.check_quota_met( + context.session, + lock_session, + data_models.HealthMonitor, + health_monitor.project_id): + lock_session.rollback() + raise exceptions.QuotaException hm_dict = db_prepare.create_health_monitor( health_monitor.to_dict(render_unsets=True)) @@ -175,6 +180,12 @@ class HealthMonitorController(base.BaseController): return self._send_hm_to_handler(context.session, db_hm) + def _graph_create(self, lock_session, hm_dict): + hm_dict = db_prepare.create_health_monitor(hm_dict) + db_hm = self._validate_create_hm(lock_session, hm_dict) + + return db_hm + @wsme_pecan.wsexpose(hm_types.HealthMonitorRootResponse, wtypes.text, body=hm_types.HealthMonitorRootPUT, status_code=200) def put(self, id, 
health_monitor_): diff --git a/octavia/api/v2/controllers/l7policy.py b/octavia/api/v2/controllers/l7policy.py index a449453b94..c801248759 100644 --- a/octavia/api/v2/controllers/l7policy.py +++ b/octavia/api/v2/controllers/l7policy.py @@ -172,6 +172,23 @@ class L7PolicyController(base.BaseController): return self._send_l7policy_to_handler(context.session, db_l7policy, lb_id=load_balancer_id) + def _graph_create(self, lock_session, policy_dict): + load_balancer_id = policy_dict.pop('load_balancer_id', None) + listener_id = policy_dict['listener_id'] + policy_dict = db_prepare.create_l7policy( + policy_dict, load_balancer_id, listener_id) + rules = policy_dict.pop('l7rules', []) or [] + db_policy = self._validate_create_l7policy(lock_session, policy_dict) + + new_rules = [] + for r in rules: + r['project_id'] = db_policy.project_id + new_rules.append( + l7rule.L7RuleController(db_policy.id)._graph_create( + lock_session, r)) + + return db_policy + @wsme_pecan.wsexpose(l7policy_types.L7PolicyRootResponse, wtypes.text, body=l7policy_types.L7PolicyRootPUT, status_code=200) diff --git a/octavia/api/v2/controllers/l7rule.py b/octavia/api/v2/controllers/l7rule.py index 327439ed7d..ed5e3987fe 100644 --- a/octavia/api/v2/controllers/l7rule.py +++ b/octavia/api/v2/controllers/l7rule.py @@ -140,14 +140,6 @@ class L7RuleController(base.BaseController): self._check_l7policy_max_rules(context.session) lock_session = db_api.get_session(autocommit=False) - if self.repositories.check_quota_met( - context.session, - lock_session, - data_models.L7Rule, - l7rule.project_id): - lock_session.rollback() - raise exceptions.QuotaException - l7rule_dict = db_prepare.create_l7rule( l7rule.to_dict(render_unsets=True), self.l7policy_id) try: @@ -161,6 +153,16 @@ class L7RuleController(base.BaseController): return self._send_l7rule_to_handler(context.session, db_l7rule) + def _graph_create(self, lock_session, rule_dict): + try: + validate.l7rule_data(l7rule_types.L7RulePOST(**rule_dict)) + 
except Exception as e: + raise exceptions.L7RuleValidation(error=e) + rule_dict = db_prepare.create_l7rule(rule_dict, self.l7policy_id) + db_rule = self._validate_create_l7rule(lock_session, rule_dict) + + return db_rule + @wsme_pecan.wsexpose(l7rule_types.L7RuleRootResponse, wtypes.text, body=l7rule_types.L7RuleRootPUT, status_code=200) diff --git a/octavia/api/v2/controllers/listener.py b/octavia/api/v2/controllers/listener.py index 5d6ba9ab71..5bcc72a8ff 100644 --- a/octavia/api/v2/controllers/listener.py +++ b/octavia/api/v2/controllers/listener.py @@ -23,6 +23,7 @@ from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import l7policy from octavia.api.v2.types import listener as listener_types from octavia.common import constants from octavia.common import data_models @@ -115,12 +116,11 @@ class ListenersController(base.BaseController): session, lb_id, provisioning_status=constants.ACTIVE) - def _validate_listener(self, session, lb_id, listener_dict): + def _validate_create_listener(self, lock_session, lb_id, listener_dict): """Validate listener for wrong protocol or duplicate listeners Update the load balancer db when provisioning status changes. 
""" - lb_repo = self.repositories.load_balancer if (listener_dict and listener_dict.get('insert_headers') and list(set(listener_dict['insert_headers'].keys()) - @@ -132,21 +132,17 @@ class ListenersController(base.BaseController): try: sni_containers = listener_dict.pop('sni_containers', []) db_listener = self.repositories.listener.create( - session, **listener_dict) + lock_session, **listener_dict) if sni_containers: for container in sni_containers: sni_dict = {'listener_id': db_listener.id, 'tls_container_id': container.get( 'tls_container_id')} - self.repositories.sni.create(session, **sni_dict) - db_listener = self.repositories.listener.get(session, - id=db_listener.id) + self.repositories.sni.create(lock_session, **sni_dict) + db_listener = self.repositories.listener.get( + lock_session, id=db_listener.id) return db_listener except odb_exceptions.DBDuplicateEntry as de: - # Setting LB back to active because this is just a validation - # failure - lb_repo.update(session, lb_id, - provisioning_status=constants.ACTIVE) column_list = ['load_balancer_id', 'protocol_port'] constraint_list = ['uq_listener_load_balancer_id_protocol_port'] if ['id'] == de.columns: @@ -156,10 +152,6 @@ class ListenersController(base.BaseController): raise exceptions.DuplicateListenerEntry( port=listener_dict.get('protocol_port')) except odb_exceptions.DBError: - # Setting LB back to active because this is just a validation - # failure - lb_repo.update(session, lb_id, - provisioning_status=constants.ACTIVE) raise exceptions.InvalidOption(value=listener_dict.get('protocol'), option='protocol') @@ -189,22 +181,69 @@ class ListenersController(base.BaseController): listener = listener_.listener context = pecan.request.context.get('octavia_context') + load_balancer_id = listener.loadbalancer_id + listener.project_id = self._get_lb_project_id( + context.session, load_balancer_id) + + lock_session = db_api.get_session(autocommit=False) + if self.repositories.check_quota_met( + context.session, 
+ lock_session, + data_models.Listener, + listener.project_id): + lock_session.rollback() + raise exceptions.QuotaException + listener_dict = db_prepare.create_listener( listener.to_dict(render_unsets=True), None) - load_balancer_id = listener_dict['load_balancer_id'] - listener_dict['project_id'] = self._get_lb_project_id( - context.session, load_balancer_id) if listener_dict['default_pool_id']: self._validate_pool(context.session, load_balancer_id, listener_dict['default_pool_id']) - self._test_lb_and_listener_statuses(context.session, load_balancer_id) - # This is the extra validation layer for wrong protocol or duplicate - # listeners on the same load balancer. - db_listener = self._validate_listener( - context.session, load_balancer_id, listener_dict) + + try: + self._test_lb_and_listener_statuses( + lock_session, lb_id=load_balancer_id) + + db_listener = self._validate_create_listener( + lock_session, load_balancer_id, listener_dict) + lock_session.commit() + except Exception: + with excutils.save_and_reraise_exception(): + lock_session.rollback() + return self._send_listener_to_handler(context.session, db_listener) + def _graph_create(self, lock_session, listener_dict, + l7policies=None, pool_name_ids=None): + load_balancer_id = listener_dict['load_balancer_id'] + listener_dict = db_prepare.create_listener( + listener_dict, load_balancer_id) + l7policies = listener_dict.pop('l7policies', l7policies) + if listener_dict.get('default_pool_id'): + self._validate_pool(lock_session, load_balancer_id, + listener_dict['default_pool_id']) + db_listener = self._validate_create_listener( + lock_session, load_balancer_id, listener_dict) + + # Now create l7policies + new_l7ps = [] + for l7p in l7policies: + l7p['project_id'] = db_listener.project_id + l7p['load_balancer_id'] = load_balancer_id + l7p['listener_id'] = db_listener.id + redirect_pool = l7p.pop('redirect_pool', None) + if redirect_pool: + pool_name = redirect_pool['name'] + pool_id = 
pool_name_ids.get(pool_name) + if not pool_id: + raise exceptions.SingleCreateDetailsMissing( + type='Pool', name=pool_name) + l7p['redirect_pool_id'] = pool_id + new_l7ps.append(l7policy.L7PolicyController()._graph_create( + lock_session, l7p)) + return db_listener, new_l7ps + @wsme_pecan.wsexpose(listener_types.ListenerRootResponse, wtypes.text, body=listener_types.ListenerRootPUT, status_code=200) def put(self, id, listener_): diff --git a/octavia/api/v2/controllers/load_balancer.py b/octavia/api/v2/controllers/load_balancer.py index ce830466cb..d5e337a41f 100644 --- a/octavia/api/v2/controllers/load_balancer.py +++ b/octavia/api/v2/controllers/load_balancer.py @@ -22,6 +22,8 @@ from wsme import types as wtypes from wsmeext import pecan as wsme_pecan from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import listener +from octavia.api.v2.controllers import pool from octavia.api.v2.types import load_balancer as lb_types from octavia.common import constants from octavia.common import data_models @@ -101,7 +103,7 @@ class LoadBalancersController(base.BaseController): "Supplied network does not contain a subnet." 
)) - @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, + @wsme_pecan.wsexpose(lb_types.LoadBalancerFullRootResponse, body=lb_types.LoadBalancerRootPOST, status_code=201) def post(self, load_balancer): """Creates a load balancer.""" @@ -147,16 +149,24 @@ class LoadBalancersController(base.BaseController): lock_session.rollback() raise exceptions.QuotaException - # TODO(blogan): lb graph, look at v1 code - + db_lb, db_pools, db_lists = None, None, None try: lb_dict = db_prepare.create_load_balancer(load_balancer.to_dict( - render_unsets=True + render_unsets=False )) vip_dict = lb_dict.pop('vip', {}) + # NoneType can be weird here, have to force type a second time + listeners = lb_dict.pop('listeners', []) or [] + pools = lb_dict.pop('pools', []) or [] + db_lb = self.repositories.create_load_balancer_and_vip( lock_session, lb_dict, vip_dict) + + if listeners or pools: + db_pools, db_lists = self._graph_create( + context.session, lock_session, db_lb, listeners, pools) + lock_session.commit() except odb_exceptions.DBDuplicateEntry: lock_session.rollback() @@ -175,8 +185,118 @@ class LoadBalancersController(base.BaseController): self.repositories.load_balancer.update( context.session, db_lb.id, provisioning_status=constants.ERROR) - result = self._convert_db_to_type(db_lb, lb_types.LoadBalancerResponse) - return lb_types.LoadBalancerRootResponse(loadbalancer=result) + + db_lb = self._get_db_lb(context.session, db_lb.id) + + result = self._convert_db_to_type( + db_lb, lb_types.LoadBalancerFullResponse) + return lb_types.LoadBalancerFullRootResponse(loadbalancer=result) + + def _graph_create(self, session, lock_session, db_lb, listeners, pools): + # Track which pools must have a full specification + pools_required = set() + # Look through listeners and find any extra pools, and move them to the + # top level so they are created first. 
+ for l in listeners: + default_pool = l.get('default_pool') + pool_name = ( + default_pool.get('name') if default_pool else None) + # All pools need to have a name so they can be referenced + if default_pool and not pool_name: + raise exceptions.ValidationException( + detail='Pools must be named when creating a fully ' + 'populated loadbalancer.') + # If a pool has more than a name, assume it's a full specification + # (but use >2 because it will also have "enabled" as default) + if default_pool and len(default_pool) > 2: + pools.append(default_pool) + l['default_pool'] = {'name': pool_name} + # Otherwise, it's a reference and we record it and move on + elif default_pool: + pools_required.add(pool_name) + # We also need to check policy redirects + for policy in l.get('l7policies'): + redirect_pool = policy.get('redirect_pool') + pool_name = ( + redirect_pool.get('name') if redirect_pool else None) + # All pools need to have a name so they can be referenced + if default_pool and not pool_name: + raise exceptions.ValidationException( + detail='Pools must be named when creating a fully ' + 'populated loadbalancer.') + # If a pool has more than a name, assume it's a full spec + # (but use >2 because it will also have "enabled" as default) + if redirect_pool and len(redirect_pool) > 2: + pool_name = redirect_pool['name'] + policy['redirect_pool'] = {'name': pool_name} + pools.append(redirect_pool) + # Otherwise, it's a reference and we record it and move on + elif default_pool: + pools_required.add(pool_name) + + # Make sure all pool names are unique. 
+ pool_names = [p.get('name') for p in pools] + if len(set(pool_names)) != len(pool_names): + raise exceptions.ValidationException( + detail="Pool names must be unique when creating a fully " + "populated loadbalancer.") + # Make sure every reference is present in our spec list + for pool_ref in pools_required: + if pool_ref not in pool_names: + raise exceptions.ValidationException( + detail="Pool '{name}' was referenced but no full " + "definition was found.".format(name=pool_ref)) + + # Check quotas for pools. + if pools and self.repositories.check_quota_met( + session, lock_session, data_models.Pool, db_lb.project_id, + count=len(pools)): + raise exceptions.QuotaException + + # Now create all of the pools ahead of the listeners. + new_pools = [] + pool_name_ids = {} + for p in pools: + # Check that pools have mandatory attributes, since we have to + # bypass the normal validation layer to allow for name-only + for attr in ('protocol', 'lb_algorithm'): + if attr not in p: + raise exceptions.ValidationException( + detail="Pool definition for '{name}' missing required " + "attribute: {attr}".format(name=p['name'], + attr=attr)) + p['load_balancer_id'] = db_lb.id + p['project_id'] = db_lb.project_id + new_pool, new_hm, new_members = ( + pool.PoolsController()._graph_create( + session, lock_session, p)) + new_pools.append(new_pool) + pool_name_ids[new_pool.name] = new_pool.id + + # Now check quotas for listeners + if listeners and self.repositories.check_quota_met( + session, lock_session, data_models.Listener, db_lb.project_id, + count=len(listeners)): + raise exceptions.QuotaException + + # Now create all of the listeners + new_lists = [] + for l in listeners: + default_pool = l.pop('default_pool', None) + # If there's a default pool, replace it with the ID + if default_pool: + pool_name = default_pool['name'] + pool_id = pool_name_ids.get(pool_name) + if not pool_id: + raise exceptions.SingleCreateDetailsMissing( + type='Pool', name=pool_name) + 
l['default_pool_id'] = pool_id + l['load_balancer_id'] = db_lb.id + l['project_id'] = db_lb.project_id + new_lists.append(listener.ListenersController()._graph_create( + lock_session, l, pool_name_ids=pool_name_ids)) + + return new_pools, new_lists @wsme_pecan.wsexpose(lb_types.LoadBalancerRootResponse, wtypes.text, status_code=200, diff --git a/octavia/api/v2/controllers/member.py b/octavia/api/v2/controllers/member.py index 2432e47c7a..8dd6b856f6 100644 --- a/octavia/api/v2/controllers/member.py +++ b/octavia/api/v2/controllers/member.py @@ -174,6 +174,14 @@ class MembersController(base.BaseController): return self._send_member_to_handler(context.session, db_member) + def _graph_create(self, lock_session, member_dict): + pool = self.repositories.pool.get(lock_session, id=self.pool_id) + member_dict = db_prepare.create_member( + member_dict, self.pool_id, bool(pool.health_monitor)) + db_member = self._validate_create_member(lock_session, member_dict) + + return db_member + @wsme_pecan.wsexpose(member_types.MemberRootResponse, wtypes.text, body=member_types.MemberRootPUT, status_code=200) diff --git a/octavia/api/v2/controllers/pool.py b/octavia/api/v2/controllers/pool.py index 6e5f3e8082..4e1e0bd84f 100644 --- a/octavia/api/v2/controllers/pool.py +++ b/octavia/api/v2/controllers/pool.py @@ -22,8 +22,8 @@ import pecan from wsme import types as wtypes from wsmeext import pecan as wsme_pecan -from octavia.api.v1.controllers import health_monitor from octavia.api.v2.controllers import base +from octavia.api.v2.controllers import health_monitor from octavia.api.v2.controllers import member from octavia.api.v2.types import pool as pool_types from octavia.common import constants @@ -195,6 +195,44 @@ class PoolsController(base.BaseController): return self._send_pool_to_handler(context.session, db_pool, listener_id=listener_id) + def _graph_create(self, session, lock_session, pool_dict): + load_balancer_id = pool_dict['load_balancer_id'] + pool_dict = 
db_prepare.create_pool( + pool_dict, load_balancer_id) + members = pool_dict.pop('members', []) or [] + hm = pool_dict.pop('health_monitor', None) + db_pool = self._validate_create_pool( + lock_session, pool_dict) + + # Check quotas for healthmonitors + if hm and self.repositories.check_quota_met( + session, lock_session, data_models.HealthMonitor, + db_pool.project_id): + raise exceptions.QuotaException + + # Now possibly create a healthmonitor + new_hm = None + if hm: + hm['pool_id'] = db_pool.id + hm['project_id'] = db_pool.project_id + new_hm = health_monitor.HealthMonitorController()._graph_create( + lock_session, hm) + + # Now check quotas for members + if members and self.repositories.check_quota_met( + session, lock_session, data_models.Member, + db_pool.project_id, count=len(members)): + raise exceptions.QuotaException + + # Now create members + new_members = [] + for m in members: + m['project_id'] = db_pool.project_id + new_members.append( + member.MembersController(db_pool.id)._graph_create( + lock_session, m)) + return db_pool, new_hm, new_members + @wsme_pecan.wsexpose(pool_types.PoolRootResponse, wtypes.text, body=pool_types.PoolRootPut, status_code=200) def put(self, id, pool_): diff --git a/octavia/api/v2/types/health_monitor.py b/octavia/api/v2/types/health_monitor.py index ff44c6a4ff..be23243db6 100644 --- a/octavia/api/v2/types/health_monitor.py +++ b/octavia/api/v2/types/health_monitor.py @@ -24,10 +24,6 @@ class BaseHealthMonitorType(types.BaseType): 'max_retries_down': 'fall_threshold'} -class MinimalPool(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - class HealthMonitorResponse(BaseHealthMonitorType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) @@ -42,7 +38,7 @@ class HealthMonitorResponse(BaseHealthMonitorType): expected_codes = wtypes.wsattr(wtypes.text) admin_state_up = wtypes.wsattr(bool) project_id = wtypes.wsattr(wtypes.StringType()) - pools = 
wtypes.wsattr([MinimalPool]) + pools = wtypes.wsattr([types.IdOnlyType]) provisioning_status = wtypes.wsattr(wtypes.StringType()) operating_status = wtypes.wsattr(wtypes.StringType()) created_at = wtypes.wsattr(wtypes.datetime.datetime) @@ -52,12 +48,21 @@ class HealthMonitorResponse(BaseHealthMonitorType): def from_data_model(cls, data_model, children=False): healthmonitor = super(HealthMonitorResponse, cls).from_data_model( data_model, children=children) - healthmonitor.pools = [ - MinimalPool.from_data_model(data_model.pool) - ] + + if cls._full_response(): + del healthmonitor.pools + else: + healthmonitor.pools = [ + types.IdOnlyType.from_data_model(data_model.pool)] return healthmonitor +class HealthMonitorFullResponse(HealthMonitorResponse): + @classmethod + def _full_response(cls): + return True + + class HealthMonitorRootResponse(types.BaseType): healthmonitor = wtypes.wsattr(HealthMonitorResponse) @@ -121,3 +126,30 @@ class HealthMonitorPUT(BaseHealthMonitorType): class HealthMonitorRootPUT(types.BaseType): healthmonitor = wtypes.wsattr(HealthMonitorPUT) + + +class HealthMonitorSingleCreate(BaseHealthMonitorType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_TYPES), + mandatory=True) + delay = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) + timeout = wtypes.wsattr(wtypes.IntegerType(minimum=0), mandatory=True) + max_retries_down = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES), default=3) + max_retries = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_HM_RETRIES, + maximum=constants.MAX_HM_RETRIES), + mandatory=True) + http_method = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_HEALTH_MONITOR_HTTP_METHODS), + default=constants.HEALTH_MONITOR_HTTP_DEFAULT_METHOD) + url_path = wtypes.wsattr( + 
types.URLType(require_scheme=False), + default=constants.HEALTH_MONITOR_DEFAULT_URL_PATH) + expected_codes = wtypes.wsattr( + wtypes.StringType(pattern=r'^(\d{3}(\s*,\s*\d{3})*)$|^(\d{3}-\d{3})$'), + default=constants.HEALTH_MONITOR_DEFAULT_EXPECTED_CODES) + admin_state_up = wtypes.wsattr(bool, default=True) diff --git a/octavia/api/v2/types/l7policy.py b/octavia/api/v2/types/l7policy.py index 90fed4b954..3c62b86c65 100644 --- a/octavia/api/v2/types/l7policy.py +++ b/octavia/api/v2/types/l7policy.py @@ -15,6 +15,8 @@ from wsme import types as wtypes from octavia.api.common import types +from octavia.api.v2.types import l7rule +from octavia.api.v2.types import pool from octavia.common import constants @@ -22,10 +24,6 @@ class BaseL7PolicyType(types.BaseType): _type_to_model_map = {'admin_state_up': 'enabled'} -class MinimalL7Rule(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - class L7PolicyResponse(BaseL7PolicyType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) @@ -40,7 +38,7 @@ class L7PolicyResponse(BaseL7PolicyType): redirect_pool_id = wtypes.wsattr(wtypes.UuidType()) redirect_url = wtypes.wsattr(wtypes.StringType()) position = wtypes.wsattr(wtypes.IntegerType()) - rules = wtypes.wsattr([MinimalL7Rule]) + rules = wtypes.wsattr([types.IdOnlyType]) created_at = wtypes.wsattr(wtypes.datetime.datetime) updated_at = wtypes.wsattr(wtypes.datetime.datetime) @@ -52,11 +50,24 @@ class L7PolicyResponse(BaseL7PolicyType): policy.name = "" if not policy.description: policy.description = "" + + if cls._full_response(): + rule_model = l7rule.L7RuleFullResponse + else: + rule_model = types.IdOnlyType policy.rules = [ - MinimalL7Rule.from_data_model(i) for i in data_model.l7rules] + rule_model.from_data_model(i) for i in data_model.l7rules] return policy +class L7PolicyFullResponse(L7PolicyResponse): + @classmethod + def _full_response(cls): + return True + + rules = wtypes.wsattr([l7rule.L7RuleFullResponse]) 
+ + class L7PolicyRootResponse(types.BaseType): l7policy = wtypes.wsattr(L7PolicyResponse) @@ -82,6 +93,7 @@ class L7PolicyPOST(BaseL7PolicyType): maximum=constants.MAX_POLICY_POSITION), default=constants.MAX_POLICY_POSITION) listener_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) + rules = wtypes.wsattr([l7rule.L7RuleSingleCreate]) class L7PolicyRootPOST(types.BaseType): @@ -104,3 +116,20 @@ class L7PolicyPUT(BaseL7PolicyType): class L7PolicyRootPUT(types.BaseType): l7policy = wtypes.wsattr(L7PolicyPUT) + + +class L7PolicySingleCreate(BaseL7PolicyType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + action = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7POLICY_ACTIONS), + mandatory=True) + redirect_pool = wtypes.wsattr(pool.PoolSingleCreate) + redirect_url = wtypes.wsattr(types.URLType()) + position = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_POLICY_POSITION, + maximum=constants.MAX_POLICY_POSITION), + default=constants.MAX_POLICY_POSITION) + rules = wtypes.wsattr([l7rule.L7RuleSingleCreate]) diff --git a/octavia/api/v2/types/l7rule.py b/octavia/api/v2/types/l7rule.py index 583364ff01..50ed49cb73 100644 --- a/octavia/api/v2/types/l7rule.py +++ b/octavia/api/v2/types/l7rule.py @@ -44,6 +44,12 @@ class L7RuleResponse(BaseL7Type): return rule +class L7RuleFullResponse(L7RuleResponse): + @classmethod + def _full_response(cls): + return True + + class L7RuleRootResponse(types.BaseType): rule = wtypes.wsattr(L7RuleResponse) @@ -55,12 +61,10 @@ class L7RulesRootResponse(types.BaseType): class L7RulePOST(BaseL7Type): """Defines mandatory and optional attributes of a POST request.""" type = wtypes.wsattr( - wtypes.Enum(str, - *constants.SUPPORTED_L7RULE_TYPES), + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES), mandatory=True) compare_type 
= wtypes.wsattr( - wtypes.Enum(str, - *constants.SUPPORTED_L7RULE_COMPARE_TYPES), + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES), mandatory=True) key = wtypes.wsattr(wtypes.StringType(max_length=255)) value = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) @@ -90,3 +94,17 @@ class L7RulePUT(BaseL7Type): class L7RuleRootPUT(types.BaseType): rule = wtypes.wsattr(L7RulePUT) + + +class L7RuleSingleCreate(BaseL7Type): + """Defines mandatory and optional attributes of a POST request.""" + type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_TYPES), + mandatory=True) + compare_type = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_L7RULE_COMPARE_TYPES), + mandatory=True) + key = wtypes.wsattr(wtypes.StringType(max_length=255)) + value = wtypes.wsattr(wtypes.StringType(max_length=255), mandatory=True) + invert = wtypes.wsattr(bool, default=False) + admin_state_up = wtypes.wsattr(bool, default=True) diff --git a/octavia/api/v2/types/listener.py b/octavia/api/v2/types/listener.py index fa234b266b..6e6e5dcfab 100644 --- a/octavia/api/v2/types/listener.py +++ b/octavia/api/v2/types/listener.py @@ -25,14 +25,6 @@ class BaseListenerType(types.BaseType): 'default_tls_container_ref': 'tls_certificate_id'} -class MinimalLoadBalancer(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - -class MinimalL7Policy(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - class ListenerResponse(BaseListenerType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) @@ -48,11 +40,11 @@ class ListenerResponse(BaseListenerType): sni_container_refs = [wtypes.StringType()] project_id = wtypes.wsattr(wtypes.StringType()) default_pool_id = wtypes.wsattr(wtypes.UuidType()) - l7policies = wtypes.wsattr([MinimalL7Policy]) + l7policies = wtypes.wsattr([types.IdOnlyType]) insert_headers = wtypes.wsattr(wtypes.DictType(str, str)) created_at = wtypes.wsattr(wtypes.datetime.datetime) 
updated_at = wtypes.wsattr(wtypes.datetime.datetime) - loadbalancers = wtypes.wsattr([MinimalLoadBalancer]) + loadbalancers = wtypes.wsattr([types.IdOnlyType]) @classmethod def from_data_model(cls, data_model, children=False): @@ -61,10 +53,17 @@ class ListenerResponse(BaseListenerType): listener.sni_container_refs = [ sni_c.tls_container_id for sni_c in data_model.sni_containers] - listener.loadbalancers = [ - MinimalLoadBalancer.from_data_model(data_model.load_balancer)] + + if cls._full_response(): + del listener.loadbalancers + l7policy_type = l7policy.L7PolicyFullResponse + else: + listener.loadbalancers = [ + types.IdOnlyType.from_data_model(data_model.load_balancer)] + l7policy_type = types.IdOnlyType + listener.l7policies = [ - MinimalL7Policy.from_data_model(i) for i in data_model.l7policies] + l7policy_type.from_data_model(i) for i in data_model.l7policies] if not listener.description: listener.description = "" @@ -74,6 +73,14 @@ class ListenerResponse(BaseListenerType): return listener +class ListenerFullResponse(ListenerResponse): + @classmethod + def _full_response(cls): + return True + + l7policies = wtypes.wsattr([l7policy.L7PolicyFullResponse]) + + class ListenerRootResponse(types.BaseType): listener = wtypes.wsattr(ListenerResponse) @@ -100,8 +107,8 @@ class ListenerPOST(BaseListenerType): # TODO(johnsom) Remove after deprecation (R series) project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) default_pool_id = wtypes.wsattr(wtypes.UuidType()) - default_pool = wtypes.wsattr(pool.PoolPOST) - l7policies = wtypes.wsattr([l7policy.L7PolicyPOST], default=[]) + default_pool = wtypes.wsattr(pool.PoolSingleCreate) + l7policies = wtypes.wsattr([l7policy.L7PolicySingleCreate], default=[]) insert_headers = wtypes.wsattr( wtypes.DictType(str, wtypes.StringType(max_length=255))) loadbalancer_id = wtypes.wsattr(wtypes.UuidType(), mandatory=True) @@ -128,3 +135,25 @@ class ListenerPUT(BaseListenerType): class ListenerRootPUT(types.BaseType): listener = 
wtypes.wsattr(ListenerPUT) + + +class ListenerSingleCreate(BaseListenerType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS), + mandatory=True) + protocol_port = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_PORT_NUMBER, + maximum=constants.MAX_PORT_NUMBER), mandatory=True) + connection_limit = wtypes.wsattr( + wtypes.IntegerType(minimum=constants.MIN_CONNECTION_LIMIT), default=-1) + default_tls_container_ref = wtypes.wsattr( + wtypes.StringType(max_length=255)) + sni_container_refs = [wtypes.StringType(max_length=255)] + default_pool_id = wtypes.wsattr(wtypes.UuidType()) + default_pool = wtypes.wsattr(pool.PoolSingleCreate) + l7policies = wtypes.wsattr([l7policy.L7PolicySingleCreate], default=[]) + insert_headers = wtypes.wsattr( + wtypes.DictType(str, wtypes.StringType(max_length=255))) diff --git a/octavia/api/v2/types/load_balancer.py b/octavia/api/v2/types/load_balancer.py index fa0fa15238..f0400aa2fe 100644 --- a/octavia/api/v2/types/load_balancer.py +++ b/octavia/api/v2/types/load_balancer.py @@ -16,6 +16,7 @@ from wsme import types as wtypes from octavia.api.common import types from octavia.api.v2.types import listener +from octavia.api.v2.types import pool class BaseLoadBalancerType(types.BaseType): @@ -26,10 +27,6 @@ class BaseLoadBalancerType(types.BaseType): 'admin_state_up': 'enabled'} -class MinimalListener(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - class LoadBalancerResponse(BaseLoadBalancerType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) @@ -45,8 +42,10 @@ class LoadBalancerResponse(BaseLoadBalancerType): vip_port_id = wtypes.wsattr(wtypes.UuidType()) vip_subnet_id = 
wtypes.wsattr(wtypes.UuidType()) vip_network_id = wtypes.wsattr(wtypes.UuidType()) - listeners = wtypes.wsattr([MinimalListener]) - # TODO(ankur-gupta-f): add pools once that has been merged + listeners = wtypes.wsattr([types.IdOnlyType]) + pools = wtypes.wsattr([types.IdOnlyType]) + provider = wtypes.wsattr(wtypes.StringType()) + flavor = wtypes.wsattr(wtypes.StringType()) @classmethod def from_data_model(cls, data_model, children=False): @@ -57,20 +56,47 @@ class LoadBalancerResponse(BaseLoadBalancerType): result.vip_port_id = data_model.vip.port_id result.vip_address = data_model.vip.ip_address result.vip_network_id = data_model.vip.network_id + + if cls._full_response(): + listener_model = listener.ListenerFullResponse + pool_model = pool.PoolFullResponse + else: + listener_model = types.IdOnlyType + pool_model = types.IdOnlyType result.listeners = [ - MinimalListener.from_data_model(i) for i in data_model.listeners] + listener_model.from_data_model(i) for i in data_model.listeners] + result.pools = [ + pool_model.from_data_model(i) for i in data_model.pools] + if not result.description: result.description = "" if not result.name: result.name = "" + if not result.flavor: + result.flavor = "" + if not result.provider: + result.provider = "octavia" return result +class LoadBalancerFullResponse(LoadBalancerResponse): + @classmethod + def _full_response(cls): + return True + + listeners = wtypes.wsattr([listener.ListenerFullResponse]) + pools = wtypes.wsattr([pool.PoolFullResponse]) + + class LoadBalancerRootResponse(types.BaseType): loadbalancer = wtypes.wsattr(LoadBalancerResponse) +class LoadBalancerFullRootResponse(LoadBalancerRootResponse): + loadbalancer = wtypes.wsattr(LoadBalancerFullResponse) + + class LoadBalancersRootResponse(types.BaseType): loadbalancers = wtypes.wsattr([LoadBalancerResponse]) @@ -86,7 +112,10 @@ class LoadBalancerPOST(BaseLoadBalancerType): vip_subnet_id = wtypes.wsattr(wtypes.UuidType()) vip_network_id = 
wtypes.wsattr(wtypes.UuidType()) project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) - listeners = wtypes.wsattr([listener.ListenerPOST], default=[]) + listeners = wtypes.wsattr([listener.ListenerSingleCreate], default=[]) + pools = wtypes.wsattr([pool.PoolSingleCreate], default=[]) + provider = wtypes.wsattr(wtypes.StringType(max_length=255)) + flavor = wtypes.wsattr(wtypes.StringType(max_length=255)) class LoadBalancerRootPOST(types.BaseType): diff --git a/octavia/api/v2/types/member.py b/octavia/api/v2/types/member.py index cf298ad1c0..e4189b98bb 100644 --- a/octavia/api/v2/types/member.py +++ b/octavia/api/v2/types/member.py @@ -48,6 +48,12 @@ class MemberResponse(BaseMemberType): return member +class MemberFullResponse(MemberResponse): + @classmethod + def _full_response(cls): + return True + + class MemberRootResponse(types.BaseType): member = wtypes.wsattr(MemberResponse) @@ -85,3 +91,16 @@ class MemberPUT(BaseMemberType): class MemberRootPUT(types.BaseType): member = wtypes.wsattr(MemberPUT) + + +class MemberSingleCreate(BaseMemberType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + address = wtypes.wsattr(types.IPAddressType(), mandatory=True) + protocol_port = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_PORT_NUMBER, maximum=constants.MAX_PORT_NUMBER), + mandatory=True) + weight = wtypes.wsattr(wtypes.IntegerType( + minimum=constants.MIN_WEIGHT, maximum=constants.MAX_WEIGHT), default=1) + subnet_id = wtypes.wsattr(wtypes.UuidType()) diff --git a/octavia/api/v2/types/pool.py b/octavia/api/v2/types/pool.py index 066ed30fe5..fdeaa104c9 100644 --- a/octavia/api/v2/types/pool.py +++ b/octavia/api/v2/types/pool.py @@ -43,22 +43,6 @@ class BasePoolType(types.BaseType): _type_to_model_map = {'admin_state_up': 'enabled'} -class MinimalLoadBalancer(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - 
-class MinimalListener(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - -class MinimalMember(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - -class MinimalHealthmonitor(types.BaseType): - id = wtypes.wsattr(wtypes.UuidType()) - - class PoolResponse(BasePoolType): """Defines which attributes are to be shown on any response.""" id = wtypes.wsattr(wtypes.UuidType()) @@ -71,12 +55,12 @@ class PoolResponse(BasePoolType): lb_algorithm = wtypes.wsattr(wtypes.text) session_persistence = wtypes.wsattr(SessionPersistenceResponse) project_id = wtypes.wsattr(wtypes.StringType()) - loadbalancers = wtypes.wsattr([MinimalLoadBalancer]) - listeners = wtypes.wsattr([MinimalListener]) + loadbalancers = wtypes.wsattr([types.IdOnlyType]) + listeners = wtypes.wsattr([types.IdOnlyType]) created_at = wtypes.wsattr(wtypes.datetime.datetime) updated_at = wtypes.wsattr(wtypes.datetime.datetime) health_monitor_id = wtypes.wsattr(wtypes.UuidType()) - members = wtypes.wsattr([MinimalMember]) + members = wtypes.wsattr([types.IdOnlyType]) @classmethod def from_data_model(cls, data_model, children=False): @@ -86,19 +70,39 @@ class PoolResponse(BasePoolType): pool.session_persistence = ( SessionPersistenceResponse.from_data_model( data_model.session_persistence)) - if data_model.load_balancer: - pool.loadbalancers = [ - MinimalLoadBalancer.from_data_model(data_model.load_balancer)] + + if cls._full_response(): + del pool.loadbalancers + member_model = member.MemberFullResponse + if pool.health_monitor: + pool.health_monitor = ( + health_monitor.HealthMonitorFullResponse + .from_data_model(data_model.health_monitor)) else: - pool.loadbalancers = [] + if data_model.load_balancer: + pool.loadbalancers = [ + types.IdOnlyType.from_data_model(data_model.load_balancer)] + else: + pool.loadbalancers = [] + member_model = types.IdOnlyType + pool.listeners = [ - MinimalListener.from_data_model(i) for i in data_model.listeners] + types.IdOnlyType.from_data_model(i) for i in 
data_model.listeners] pool.members = [ - MinimalMember.from_data_model(i) for i in data_model.members] + member_model.from_data_model(i) for i in data_model.members] return pool +class PoolFullResponse(PoolResponse): + @classmethod + def _full_response(cls): + return True + + members = wtypes.wsattr([member.MemberFullResponse]) + health_monitor = wtypes.wsattr(health_monitor.HealthMonitorFullResponse) + + class PoolRootResponse(types.BaseType): pool = wtypes.wsattr(PoolResponse) @@ -122,8 +126,8 @@ class PoolPOST(BasePoolType): session_persistence = wtypes.wsattr(SessionPersistencePOST) # TODO(johnsom) Remove after deprecation (R series) project_id = wtypes.wsattr(wtypes.StringType(max_length=36)) - health_monitor = wtypes.wsattr(health_monitor.HealthMonitorPOST) - members = wtypes.wsattr([member.MemberPOST]) + health_monitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate) + members = wtypes.wsattr([member.MemberSingleCreate]) class PoolRootPOST(types.BaseType): @@ -142,3 +146,16 @@ class PoolPUT(BasePoolType): class PoolRootPut(types.BaseType): pool = wtypes.wsattr(PoolPUT) + + +class PoolSingleCreate(BasePoolType): + """Defines mandatory and optional attributes of a POST request.""" + name = wtypes.wsattr(wtypes.StringType(max_length=255)) + description = wtypes.wsattr(wtypes.StringType(max_length=255)) + admin_state_up = wtypes.wsattr(bool, default=True) + protocol = wtypes.wsattr(wtypes.Enum(str, *constants.SUPPORTED_PROTOCOLS)) + lb_algorithm = wtypes.wsattr( + wtypes.Enum(str, *constants.SUPPORTED_LB_ALGORITHMS)) + session_persistence = wtypes.wsattr(SessionPersistencePOST) + health_monitor = wtypes.wsattr(health_monitor.HealthMonitorSingleCreate) + members = wtypes.wsattr([member.MemberSingleCreate]) diff --git a/octavia/common/exceptions.py b/octavia/common/exceptions.py index 61cf0bb742..fc8fbfe274 100644 --- a/octavia/common/exceptions.py +++ b/octavia/common/exceptions.py @@ -85,6 +85,11 @@ class L7RuleValidation(APIException): code = 400 
+class SingleCreateDetailsMissing(APIException): + msg = _("Missing details for %(type)s object: %(name)s") + code = 400 + + class InvalidHMACException(OctaviaException): message = _("HMAC hashes didn't match") diff --git a/octavia/controller/worker/flows/load_balancer_flows.py b/octavia/controller/worker/flows/load_balancer_flows.py index 86f81b501f..a27a82dccf 100644 --- a/octavia/controller/worker/flows/load_balancer_flows.py +++ b/octavia/controller/worker/flows/load_balancer_flows.py @@ -67,7 +67,8 @@ class LoadBalancerFlows(object): post_amp_prefix = constants.POST_LB_AMP_ASSOCIATION_SUBFLOW lb_create_flow.add( - self.get_post_lb_amp_association_flow(post_amp_prefix, topology)) + self.get_post_lb_amp_association_flow( + post_amp_prefix, topology, mark_active=(not listeners))) if listeners: lb_create_flow.add(*self._create_listeners_flow()) @@ -144,12 +145,14 @@ class LoadBalancerFlows(object): ) flows.append( database_tasks.MarkLBActiveInDB( - mark_listeners=True, requires=constants.LOADBALANCER + mark_subobjects=True, + requires=constants.LOADBALANCER ) ) return flows - def get_post_lb_amp_association_flow(self, prefix, topology): + def get_post_lb_amp_association_flow(self, prefix, topology, + mark_active=True): """Reload the loadbalancer and create networking subflows for created/allocated amphorae. 
@@ -179,9 +182,10 @@ class LoadBalancerFlows(object): post_create_LB_flow.add(database_tasks.UpdateLoadbalancerInDB( requires=[constants.LOADBALANCER, constants.UPDATE_DICT])) - post_create_LB_flow.add(database_tasks.MarkLBActiveInDB( - name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB, - requires=constants.LOADBALANCER)) + if mark_active: + post_create_LB_flow.add(database_tasks.MarkLBActiveInDB( + name=sf_name + '-' + constants.MARK_LB_ACTIVE_INDB, + requires=constants.LOADBALANCER)) return post_create_LB_flow def _get_delete_listeners_flow(self, lb): diff --git a/octavia/controller/worker/tasks/database_tasks.py b/octavia/controller/worker/tasks/database_tasks.py index 0e4f74406e..6a1258ae96 100644 --- a/octavia/controller/worker/tasks/database_tasks.py +++ b/octavia/controller/worker/tasks/database_tasks.py @@ -893,27 +893,25 @@ class MarkLBActiveInDB(BaseDatabaseTask): Since sqlalchemy will likely retry by itself always revert if it fails """ - def __init__(self, mark_listeners=False, **kwargs): + def __init__(self, mark_subobjects=False, **kwargs): super(MarkLBActiveInDB, self).__init__(**kwargs) - self.mark_listeners = mark_listeners + self.mark_subobjects = mark_subobjects def execute(self, loadbalancer): """Mark the load balancer as active in DB. - This also marks ACTIVE all listeners of the load balancer if - self.mark_listeners is True. + This also marks ACTIVE all sub-objects of the load balancer if + self.mark_subobjects is True. 
:param loadbalancer: Load balancer object to be updated :returns: None """ - if self.mark_listeners: + if self.mark_subobjects: LOG.debug("Marking all listeners of loadbalancer %s ACTIVE", loadbalancer.id) for listener in loadbalancer.listeners: - self.listener_repo.update(db_apis.get_session(), - listener.id, - provisioning_status=constants.ACTIVE) + self._mark_listener_status(listener, constants.ACTIVE) LOG.info("Mark ACTIVE in DB for load balancer id: %s", loadbalancer.id) @@ -921,24 +919,78 @@ class MarkLBActiveInDB(BaseDatabaseTask): loadbalancer.id, provisioning_status=constants.ACTIVE) + def _mark_listener_status(self, listener, status): + self.listener_repo.update(db_apis.get_session(), + listener.id, + provisioning_status=status) + LOG.debug("Marking all l7policies of listener %s %s", + listener.id, status) + for l7policy in listener.l7policies: + self._mark_l7policy_status(l7policy, status) + + if listener.default_pool: + LOG.debug("Marking default pool of listener %s %s", + listener.id, status) + self._mark_pool_status(listener.default_pool, status) + + def _mark_l7policy_status(self, l7policy, status): + self.l7policy_repo.update( + db_apis.get_session(), l7policy.id, + provisioning_status=status) + + LOG.debug("Marking all l7rules of l7policy %s %s", + l7policy.id, status) + for l7rule in l7policy.l7rules: + self._mark_l7rule_status(l7rule, status) + + if l7policy.redirect_pool: + LOG.debug("Marking redirect pool of l7policy %s %s", + l7policy.id, status) + self._mark_pool_status(l7policy.redirect_pool, status) + + def _mark_l7rule_status(self, l7rule, status): + self.l7rule_repo.update( + db_apis.get_session(), l7rule.id, + provisioning_status=status) + + def _mark_pool_status(self, pool, status): + self.pool_repo.update( + db_apis.get_session(), pool.id, + provisioning_status=status) + if pool.health_monitor: + LOG.debug("Marking health monitor of pool %s %s", pool.id, status) + self._mark_hm_status(pool.health_monitor, status) + + LOG.debug("Marking 
all members of pool %s %s", pool.id, status) + for member in pool.members: + self._mark_member_status(member, status) + + def _mark_hm_status(self, hm, status): + self.health_mon_repo.update( + db_apis.get_session(), hm.id, + provisioning_status=status) + + def _mark_member_status(self, member, status): + self.member_repo.update( + db_apis.get_session(), member.id, + provisioning_status=status) + def revert(self, loadbalancer, *args, **kwargs): """Mark the load balancer as broken and ready to be cleaned up. - This also puts all listeners of the load balancer to ERROR state if - self.mark_listeners is True + This also puts all sub-objects of the load balancer to ERROR state if + self.mark_subobjects is True :param loadbalancer: Load balancer object that failed to update :returns: None """ - if self.mark_listeners: + if self.mark_subobjects: LOG.debug("Marking all listeners of loadbalancer %s ERROR", loadbalancer.id) for listener in loadbalancer.listeners: try: - self.listener_repo.update( - db_apis.get_session(), listener.id, - provisioning_status=constants.ERROR) + self._mark_listener_status(listener, constants.ERROR) except Exception: LOG.warning("Error updating listener %s provisioning " "status", listener.id) diff --git a/octavia/db/prepare.py b/octavia/db/prepare.py index 80619a4ece..8444e2b953 100644 --- a/octavia/db/prepare.py +++ b/octavia/db/prepare.py @@ -113,6 +113,9 @@ def create_l7policy(l7policy_dict, lb_id, listener_id): prepped_pool = create_pool(pool_dict, lb_id) l7policy_dict['redirect_pool'] = prepped_pool l7policy_dict['redirect_pool_id'] = prepped_pool['id'] + rules = l7policy_dict.pop('rules', None) + if rules: + l7policy_dict['l7rules'] = rules if l7policy_dict.get('l7rules'): if (len(l7policy_dict.get('l7rules')) > constants.MAX_L7RULES_PER_L7POLICY): diff --git a/octavia/db/repositories.py b/octavia/db/repositories.py index b99d0ff239..95ee8c648d 100644 --- a/octavia/db/repositories.py +++ b/octavia/db/repositories.py @@ -262,7 +262,8 @@ 
class Repositories(object): provisioning_status=lb_prov_status) return success - def check_quota_met(self, session, lock_session, _class, project_id): + def check_quota_met(self, session, lock_session, _class, project_id, + count=1): """Checks and updates object quotas. This method makes sure the project has available quota @@ -273,6 +274,7 @@ class Repositories(object): :param lock_session: Locking database session (autocommit=False) :param _class: Data model object requesting quota :param project_id: Project ID requesting quota + :param count: Number of objects we're going to create (default=1) :returns: True if quota is met, False if quota was available """ LOG.debug('Checking quota for project: {proj} object: {obj}'.format( @@ -312,16 +314,16 @@ class Repositories(object): lb_count = session.query(models.LoadBalancer).filter( models.LoadBalancer.project_id == project_id, models.LoadBalancer.provisioning_status != - consts.DELETED).count() + 1 + consts.DELETED).count() + count else: - lb_count = quotas.in_use_load_balancer + 1 + lb_count = quotas.in_use_load_balancer + count # Decide if the quota is met if lb_count <= lb_quota or lb_quota == consts.QUOTA_UNLIMITED: quotas.in_use_load_balancer = lb_count return False else: return True - if _class == data_models.Listener: + elif _class == data_models.Listener: # Decide which quota to use if quotas.listener is None: listener_quota = CONF.quotas.default_listener_quota @@ -333,9 +335,9 @@ class Repositories(object): listener_count = session.query(models.Listener).filter( models.Listener.project_id == project_id, models.Listener.provisioning_status != - consts.DELETED).count() + 1 + consts.DELETED).count() + count else: - listener_count = quotas.in_use_listener + 1 + listener_count = quotas.in_use_listener + count # Decide if the quota is met if (listener_count <= listener_quota or listener_quota == consts.QUOTA_UNLIMITED): @@ -343,7 +345,7 @@ class Repositories(object): return False else: return True - if _class == 
data_models.Pool: + elif _class == data_models.Pool: # Decide which quota to use if quotas.pool is None: pool_quota = CONF.quotas.default_pool_quota @@ -355,9 +357,9 @@ class Repositories(object): pool_count = session.query(models.Pool).filter( models.Pool.project_id == project_id, models.Pool.provisioning_status != - consts.DELETED).count() + 1 + consts.DELETED).count() + count else: - pool_count = quotas.in_use_pool + 1 + pool_count = quotas.in_use_pool + count # Decide if the quota is met if (pool_count <= pool_quota or pool_quota == consts.QUOTA_UNLIMITED): @@ -365,7 +367,7 @@ class Repositories(object): return False else: return True - if _class == data_models.HealthMonitor: + elif _class == data_models.HealthMonitor: # Decide which quota to use if quotas.health_monitor is None: hm_quota = CONF.quotas.default_health_monitor_quota @@ -377,9 +379,9 @@ class Repositories(object): hm_count = session.query(models.HealthMonitor).filter( models.HealthMonitor.project_id == project_id, models.HealthMonitor.provisioning_status != - consts.DELETED).count() + 1 + consts.DELETED).count() + count else: - hm_count = quotas.in_use_health_monitor + 1 + hm_count = quotas.in_use_health_monitor + count # Decide if the quota is met if (hm_count <= hm_quota or hm_quota == consts.QUOTA_UNLIMITED): @@ -387,7 +389,7 @@ class Repositories(object): return False else: return True - if _class == data_models.Member: + elif _class == data_models.Member: # Decide which quota to use if quotas.member is None: member_quota = CONF.quotas.default_member_quota @@ -399,9 +401,9 @@ class Repositories(object): member_count = session.query(models.Member).filter( models.Member.project_id == project_id, models.Member.provisioning_status != - consts.DELETED).count() + 1 + consts.DELETED).count() + count else: - member_count = quotas.in_use_member + 1 + member_count = quotas.in_use_member + count # Decide if the quota is met if (member_count <= member_quota or member_quota == consts.QUOTA_UNLIMITED): diff 
--git a/octavia/tests/functional/api/v2/base.py b/octavia/tests/functional/api/v2/base.py index 7c38413d0e..636af1ddbe 100644 --- a/octavia/tests/functional/api/v2/base.py +++ b/octavia/tests/functional/api/v2/base.py @@ -82,9 +82,6 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): patcher = mock.patch('octavia.api.handlers.controller_simulator.' 'handler.SimulatedControllerHandler') self.handler_mock = patcher.start() - self.check_quota_met_true_mock = mock.patch( - 'octavia.db.repositories.Repositories.check_quota_met', - return_value=True) self.app = self._make_app() self.project_id = uuidutils.generate_uuid() @@ -94,6 +91,15 @@ class BaseAPITest(base_db_test.OctaviaDBTestBase): self.addCleanup(reset_pecan) + def start_quota_mock(self, object_type): + def mock_quota(session, lock_session, _class, project_id, count=1): + return _class == object_type + check_quota_met_true_mock = mock.patch( + 'octavia.db.repositories.Repositories.check_quota_met', + side_effect=mock_quota) + check_quota_met_true_mock.start() + self.addCleanup(check_quota_met_true_mock.stop) + def _make_app(self): return pecan.testing.load_test_app({'app': pconfig.app, 'wsme': pconfig.wsme}) diff --git a/octavia/tests/functional/api/v2/test_health_monitor.py b/octavia/tests/functional/api/v2/test_health_monitor.py index 61bc266a27..8790be777c 100644 --- a/octavia/tests/functional/api/v2/test_health_monitor.py +++ b/octavia/tests/functional/api/v2/test_health_monitor.py @@ -15,6 +15,7 @@ from oslo_utils import uuidutils from octavia.common import constants +from octavia.common import data_models from octavia.tests.functional.api.v2 import base @@ -191,6 +192,16 @@ class TestHealthMonitor(base.BaseAPITest): self.pool_id, constants.HEALTH_MONITOR_HTTP, 1, 1, 1, 1, status=409) + def test_create_over_quota(self): + self.start_quota_mock(data_models.HealthMonitor) + hm = {'pool_id': self.pool_id, + 'type': constants.HEALTH_MONITOR_HTTP, + 'delay': 1, + 'timeout': 1, + 'max_retries_down': 1, + 
'max_retries': 1} + self.post(self.HMS_PATH, self._build_body(hm), status=403) + def test_update(self): api_hm = self.create_health_monitor( self.pool_with_listener_id, diff --git a/octavia/tests/functional/api/v2/test_l7policy.py b/octavia/tests/functional/api/v2/test_l7policy.py index 47b8e2e93f..5015f7413a 100644 --- a/octavia/tests/functional/api/v2/test_l7policy.py +++ b/octavia/tests/functional/api/v2/test_l7policy.py @@ -18,6 +18,7 @@ from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context +from octavia.common import data_models from octavia.tests.functional.api.v2 import base @@ -298,8 +299,7 @@ class TestL7Policy(base.BaseAPITest): l7policy_op_status=constants.OFFLINE) def test_create_over_quota(self): - self.check_quota_met_true_mock.start() - self.addCleanup(self.check_quota_met_true_mock.stop) + self.start_quota_mock(data_models.L7Policy) l7policy = {'listener_id': self.listener_id, 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://a.com'} diff --git a/octavia/tests/functional/api/v2/test_l7rule.py b/octavia/tests/functional/api/v2/test_l7rule.py index 3ec0d5e374..b34e1b98a5 100644 --- a/octavia/tests/functional/api/v2/test_l7rule.py +++ b/octavia/tests/functional/api/v2/test_l7rule.py @@ -260,14 +260,6 @@ class TestL7Rule(base.BaseAPITest): l7rule_prov_status=constants.ERROR, l7rule_op_status=constants.OFFLINE) - def test_create_over_quota(self): - self.check_quota_met_true_mock.start() - self.addCleanup(self.check_quota_met_true_mock.stop) - body = {'type': constants.L7RULE_TYPE_PATH, - 'compare_type': constants.L7RULE_COMPARE_TYPE_STARTS_WITH, - 'value': '/api'} - self.post(self.l7rules_path, self._build_body(body), status=403) - def test_update(self): api_l7rule = self.create_l7rule( self.l7policy_id, constants.L7RULE_TYPE_PATH, diff --git a/octavia/tests/functional/api/v2/test_listener.py b/octavia/tests/functional/api/v2/test_listener.py index f3034e40c1..315fd62079 100644 --- 
a/octavia/tests/functional/api/v2/test_listener.py +++ b/octavia/tests/functional/api/v2/test_listener.py @@ -19,6 +19,7 @@ from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context +from octavia.common import data_models from octavia.tests.functional.api.v2 import base @@ -284,6 +285,15 @@ class TestListener(base.BaseAPITest): self.assert_final_lb_statuses(self.lb_id) self.assert_final_listener_statuses(self.lb_id, listener_api['id']) + def test_create_over_quota(self): + self.start_quota_mock(data_models.Listener) + lb_listener = {'name': 'listener1', + 'protocol': constants.PROTOCOL_HTTP, + 'protocol_port': 80, + 'loadbalancer_id': self.lb_id} + body = self._build_body(lb_listener) + self.post(self.LISTENERS_PATH, body, status=403) + def test_create_with_bad_handler(self): self.handler_mock().listener.create.side_effect = Exception() api_listener = self.create_listener( diff --git a/octavia/tests/functional/api/v2/test_load_balancer.py b/octavia/tests/functional/api/v2/test_load_balancer.py index d8b0b2b517..43f4d2565b 100644 --- a/octavia/tests/functional/api/v2/test_load_balancer.py +++ b/octavia/tests/functional/api/v2/test_load_balancer.py @@ -16,10 +16,10 @@ import copy import mock from oslo_utils import uuidutils -import testtools from octavia.common import constants import octavia.common.context +from octavia.common import data_models from octavia.network import base as network_base from octavia.network import data_models as network_models from octavia.tests.functional.api.v2 import base @@ -361,6 +361,14 @@ class TestLoadBalancer(base.BaseAPITest): path = self.LB_PATH.format(lb_id='SEAN-CONNERY') self.get(path, status=404) + def test_create_over_quota(self): + self.start_quota_mock(data_models.LoadBalancer) + lb_json = {'name': 'test1', + 'vip_subnet_id': uuidutils.generate_uuid(), + 'project_id': self.project_id} + body = self._build_body(lb_json) + self.post(self.LBS_PATH, body, status=403) + def 
test_update(self): project_id = uuidutils.generate_uuid() lb = self.create_load_balancer(uuidutils.generate_uuid(), @@ -568,18 +576,29 @@ class TestLoadBalancerGraph(base.BaseAPITest): observed_graph_copy = copy.deepcopy(observed_graph) del observed_graph_copy['created_at'] del observed_graph_copy['updated_at'] - obs_lb_id = observed_graph_copy.pop('id') + obs_lb_id = observed_graph_copy.pop('id') self.assertTrue(uuidutils.is_uuid_like(obs_lb_id)) + expected_listeners = expected_graph.pop('listeners', []) observed_listeners = observed_graph_copy.pop('listeners', []) + expected_pools = expected_graph.pop('pools', []) + observed_pools = observed_graph_copy.pop('pools', []) self.assertEqual(expected_graph, observed_graph_copy) + + self.assertEqual(len(expected_pools), len(observed_pools)) + + self.assertEqual(len(expected_listeners), len(observed_listeners)) for observed_listener in observed_listeners: del observed_listener['created_at'] del observed_listener['updated_at'] self.assertTrue(uuidutils.is_uuid_like( observed_listener.pop('id'))) + if observed_listener.get('default_pool_id'): + self.assertTrue(uuidutils.is_uuid_like( + observed_listener.pop('default_pool_id'))) + default_pool = observed_listener.get('default_pool') if default_pool: observed_listener.pop('default_pool_id') @@ -601,50 +620,50 @@ class TestLoadBalancerGraph(base.BaseAPITest): o_l7policies = observed_listener.get('l7policies') if o_l7policies: for o_l7policy in o_l7policies: - if o_l7policy.get('redirect_pool'): - r_pool = o_l7policy.get('redirect_pool') - self.assertTrue(r_pool.get('id')) - r_pool.pop('id') - r_pool.pop('created_at') - r_pool.pop('updated_at') - self.assertTrue(o_l7policy.get('redirect_pool_id')) - o_l7policy.pop('redirect_pool_id') - if r_pool.get('members'): - for r_member in r_pool.get('members'): - self.assertTrue(r_member.get('id')) - r_member.pop('id') - r_member.pop('created_at') - r_member.pop('updated_at') - self.assertTrue(o_l7policy.get('id')) - 
o_l7policy.pop('id') - l7rules = o_l7policy.get('l7rules') + o_l7policy.pop('created_at') + o_l7policy.pop('updated_at') + if o_l7policy.get('redirect_pool_id'): + r_pool_id = o_l7policy.pop('redirect_pool_id') + self.assertTrue(uuidutils.is_uuid_like(r_pool_id)) + o_l7policy_id = o_l7policy.pop('id') + self.assertTrue(uuidutils.is_uuid_like(o_l7policy_id)) + o_l7policy_l_id = o_l7policy.pop('listener_id') + self.assertTrue(uuidutils.is_uuid_like(o_l7policy_l_id)) + l7rules = o_l7policy.get('rules') or [] for l7rule in l7rules: - self.assertTrue(l7rule.get('id')) - l7rule.pop('id') + l7rule.pop('created_at') + l7rule.pop('updated_at') + self.assertTrue(l7rule.pop('id')) self.assertIn(observed_listener, expected_listeners) - def _get_lb_bodies(self, create_listeners, expected_listeners): + def _get_lb_bodies(self, create_listeners, expected_listeners, + create_pools=None): create_lb = { 'name': 'lb1', 'project_id': self._project_id, - 'vip_subnet_id': None, - 'listeners': create_listeners + 'vip_subnet_id': uuidutils.generate_uuid(), + 'listeners': create_listeners, + 'pools': create_pools or [] } expected_lb = { - 'description': None, - 'enabled': True, + 'description': '', + 'admin_state_up': True, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, - 'vip_subnet_id': None, 'vip_address': None, + 'vip_network_id': None, + 'vip_port_id': None, + 'flavor': '', + 'provider': 'octavia' } expected_lb.update(create_lb) expected_lb['listeners'] = expected_listeners + expected_lb['pools'] = create_pools or [] return create_lb, expected_lb def _get_listener_bodies(self, name='listener1', protocol_port=80, - create_default_pool=None, - expected_default_pool=None, + create_default_pool_name=None, + create_default_pool_id=None, create_l7policies=None, expected_l7policies=None, create_sni_containers=None, @@ -652,36 +671,39 @@ class TestLoadBalancerGraph(base.BaseAPITest): create_listener = { 'name': name, 'protocol_port': protocol_port, - 
'protocol': constants.PROTOCOL_HTTP, - 'project_id': self._project_id + 'protocol': constants.PROTOCOL_HTTP } expected_listener = { - 'description': None, - 'tls_certificate_id': None, - 'sni_containers': [], - 'connection_limit': None, - 'enabled': True, + 'description': '', + 'default_tls_container_ref': None, + 'sni_container_refs': [], + 'connection_limit': -1, + 'admin_state_up': True, 'provisioning_status': constants.PENDING_CREATE, 'operating_status': constants.OFFLINE, - 'insert_headers': {} + 'insert_headers': {}, + 'project_id': self._project_id } if create_sni_containers: - create_listener['sni_containers'] = create_sni_containers + create_listener['sni_container_refs'] = create_sni_containers expected_listener.update(create_listener) - if create_default_pool: - pool = create_default_pool + if create_default_pool_name: + pool = {'name': create_default_pool_name} create_listener['default_pool'] = pool - if pool.get('id'): - create_listener['default_pool_id'] = pool['id'] + elif create_default_pool_id: + create_listener['default_pool_id'] = create_default_pool_id + expected_listener['default_pool_id'] = create_default_pool_id + else: + expected_listener['default_pool_id'] = None if create_l7policies: l7policies = create_l7policies create_listener['l7policies'] = l7policies - if expected_default_pool: - expected_listener['default_pool'] = expected_default_pool if expected_sni_containers: - expected_listener['sni_containers'] = expected_sni_containers + expected_listener['sni_container_refs'] = expected_sni_containers if expected_l7policies: expected_listener['l7policies'] = expected_l7policies + else: + expected_listener['l7policies'] = [] return create_listener, expected_listener def _get_pool_bodies(self, name='pool1', create_members=None, @@ -692,7 +714,6 @@ class TestLoadBalancerGraph(base.BaseAPITest): 'name': name, 'protocol': protocol, 'lb_algorithm': constants.LB_ALGORITHM_ROUND_ROBIN, - 'project_id': self._project_id } if session_persistence: 
create_pool['session_persistence'] = { @@ -707,7 +728,9 @@ class TestLoadBalancerGraph(base.BaseAPITest): 'session_persistence': None, 'members': [], 'enabled': True, - 'operating_status': constants.OFFLINE + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE, + 'project_id': self._project_id } expected_pool.update(create_pool) if expected_members: @@ -718,15 +741,15 @@ class TestLoadBalancerGraph(base.BaseAPITest): def _get_member_bodies(self, protocol_port=80): create_member = { - 'ip_address': '10.0.0.1', - 'protocol_port': protocol_port, - 'project_id': self._project_id + 'address': '10.0.0.1', + 'protocol_port': protocol_port } expected_member = { 'weight': 1, 'enabled': True, 'subnet_id': None, - 'operating_status': constants.OFFLINE + 'operating_status': constants.OFFLINE, + 'project_id': self._project_id } expected_member.update(create_member) return create_member, expected_member @@ -736,15 +759,17 @@ class TestLoadBalancerGraph(base.BaseAPITest): 'type': constants.HEALTH_MONITOR_PING, 'delay': 1, 'timeout': 1, - 'fall_threshold': 1, - 'rise_threshold': 1, - 'project_id': self._project_id + 'max_retries_down': 1, + 'max_retries': 1 } expected_hm = { 'http_method': 'GET', 'url_path': '/', 'expected_codes': '200', - 'enabled': True + 'admin_state_up': True, + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE } expected_hm.update(create_hm) return create_hm, expected_hm @@ -758,41 +783,44 @@ class TestLoadBalancerGraph(base.BaseAPITest): expected_sni_containers.sort() return create_sni_containers, expected_sni_containers - def _get_l7policies_bodies(self, create_pool=None, expected_pool=None, + def _get_l7policies_bodies(self, + create_pool_name=None, create_pool_id=None, create_l7rules=None, expected_l7rules=None): create_l7policies = [] - if create_pool: + if create_pool_name: create_l7policy = { 'action': 
constants.L7POLICY_ACTION_REDIRECT_TO_POOL, - 'redirect_pool': create_pool, + 'redirect_pool': {'name': create_pool_name}, 'position': 1, - 'enabled': False + 'admin_state_up': False } else: create_l7policy = { 'action': constants.L7POLICY_ACTION_REDIRECT_TO_URL, 'redirect_url': 'http://127.0.0.1/', 'position': 1, - 'enabled': False + 'admin_state_up': False } create_l7policies.append(create_l7policy) expected_l7policy = { - 'name': None, - 'description': None, + 'name': '', + 'description': '', 'redirect_url': None, - 'l7rules': [] + 'rules': [], + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE } expected_l7policy.update(create_l7policy) + expected_l7policy.pop('redirect_pool', None) expected_l7policies = [] - if expected_pool: - if create_pool.get('id'): - expected_l7policy['redirect_pool_id'] = create_pool.get('id') - expected_l7policy['redirect_pool'] = expected_pool + if not create_pool_name: + expected_l7policy['redirect_pool_id'] = create_pool_id expected_l7policies.append(expected_l7policy) if expected_l7rules: - expected_l7policies[0]['l7rules'] = expected_l7rules + expected_l7policies[0]['rules'] = expected_l7rules if create_l7rules: - create_l7policies[0]['l7rules'] = create_l7rules + create_l7policies[0]['rules'] = create_l7rules return create_l7policies, expected_l7policies def _get_l7rules_bodies(self, value="localhost"): @@ -800,15 +828,18 @@ class TestLoadBalancerGraph(base.BaseAPITest): 'type': constants.L7RULE_TYPE_HOST_NAME, 'compare_type': constants.L7RULE_COMPARE_TYPE_EQUAL_TO, 'value': value, - 'invert': False + 'invert': False, + 'admin_state_up': True }] expected_l7rules = [{ - 'key': None + 'key': None, + 'project_id': self._project_id, + 'provisioning_status': constants.PENDING_CREATE, + 'operating_status': constants.OFFLINE }] expected_l7rules[0].update(create_l7rules[0]) return create_l7rules, expected_l7rules - @testtools.skip('Skip until complete v2 merge') def 
test_with_one_listener(self): create_listener, expected_listener = self._get_listener_bodies() create_lb, expected_lb = self._get_lb_bodies([create_listener], @@ -818,151 +849,139 @@ class TestLoadBalancerGraph(base.BaseAPITest): api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_many_listeners(self): create_listener1, expected_listener1 = self._get_listener_bodies() create_listener2, expected_listener2 = self._get_listener_bodies( - name='listener2', protocol_port=81 - ) + name='listener2', protocol_port=81) create_lb, expected_lb = self._get_lb_bodies( [create_listener1, create_listener2], [expected_listener1, expected_listener2]) - response = self.post(self.LBS_PATH, create_lb) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_one_listener_one_pool(self): create_pool, expected_pool = self._get_pool_bodies() create_listener, expected_listener = self._get_listener_bodies( - create_default_pool=create_pool, - expected_default_pool=expected_pool - ) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_many_listeners_one_pool(self): create_pool1, expected_pool1 = self._get_pool_bodies() create_pool2, expected_pool2 = self._get_pool_bodies(name='pool2') create_listener1, 
expected_listener1 = self._get_listener_bodies( - create_default_pool=create_pool1, - expected_default_pool=expected_pool1 - ) + create_default_pool_name=create_pool1['name']) create_listener2, expected_listener2 = self._get_listener_bodies( - create_default_pool=create_pool2, - expected_default_pool=expected_pool2, - name='listener2', protocol_port=81 - ) + create_default_pool_name=create_pool2['name'], + name='listener2', protocol_port=81) create_lb, expected_lb = self._get_lb_bodies( - [create_listener1, create_listener2], - [expected_listener1, expected_listener2]) - response = self.post(self.LBS_PATH, create_lb) + create_listeners=[create_listener1, create_listener2], + expected_listeners=[expected_listener1, expected_listener2], + create_pools=[create_pool1, create_pool2]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_one_listener_one_member(self): create_member, expected_member = self._get_member_bodies() create_pool, expected_pool = self._get_pool_bodies( create_members=[create_member], expected_members=[expected_member]) create_listener, expected_listener = self._get_listener_bodies( - create_default_pool=create_pool, - expected_default_pool=expected_pool) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_one_listener_one_hm(self): create_hm, expected_hm 
= self._get_hm_bodies() create_pool, expected_pool = self._get_pool_bodies( create_hm=create_hm, expected_hm=expected_hm) create_listener, expected_listener = self._get_listener_bodies( - create_default_pool=create_pool, - expected_default_pool=expected_pool) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_one_listener_sni_containers(self): create_sni_containers, expected_sni_containers = ( self._get_sni_container_bodies()) create_listener, expected_listener = self._get_listener_bodies( create_sni_containers=create_sni_containers, expected_sni_containers=expected_sni_containers) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_l7policy_redirect_pool_no_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7policies, expected_l7policies = self._get_l7policies_bodies( - create_pool=create_pool, expected_pool=expected_pool) + create_pool_name=create_pool['name']) create_listener, expected_listener = self._get_listener_bodies( 
create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_l7policy_redirect_pool_one_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7rules, expected_l7rules = self._get_l7rules_bodies() create_l7policies, expected_l7policies = self._get_l7policies_bodies( - create_pool=create_pool, expected_pool=expected_pool, - create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) + create_pool_name=create_pool['name'], + create_l7rules=create_l7rules, + expected_l7rules=expected_l7rules) create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') - def test_with_l7policy_redirect_pool_bad_rule(self): - create_pool, expected_pool = self._get_pool_bodies(create_members=[], - expected_members=[]) - create_l7rules, expected_l7rules = self._get_l7rules_bodies( - value="local host") - 
create_l7policies, expected_l7policies = self._get_l7policies_bodies( - create_pool=create_pool, expected_pool=expected_pool, - create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) - create_listener, expected_listener = self._get_listener_bodies( - create_l7policies=create_l7policies, - expected_l7policies=expected_l7policies) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - self.post(self.LBS_PATH, create_lb) - - @testtools.skip('Skip until complete v2 merge') def test_with_l7policies_one_redirect_pool_one_rule(self): create_pool, expected_pool = self._get_pool_bodies(create_members=[], expected_members=[]) create_l7rules, expected_l7rules = self._get_l7rules_bodies() create_l7policies, expected_l7policies = self._get_l7policies_bodies( - create_pool=create_pool, expected_pool=expected_pool, - create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) + create_pool_name=create_pool['name'], + create_l7rules=create_l7rules, + expected_l7rules=expected_l7rules) c_l7policies_url, e_l7policies_url = self._get_l7policies_bodies() for policy in c_l7policies_url: policy['position'] = 2 @@ -973,20 +992,22 @@ class TestLoadBalancerGraph(base.BaseAPITest): create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_with_l7policies_redirect_pools_no_rules(self): create_pool, expected_pool = self._get_pool_bodies() 
create_l7policies, expected_l7policies = self._get_l7policies_bodies( - create_pool=create_pool, expected_pool=expected_pool) - r_create_pool, r_expected_pool = self._get_pool_bodies() + create_pool_name=create_pool['name']) + r_create_pool, r_expected_pool = self._get_pool_bodies(name='pool2') c_l7policies_url, e_l7policies_url = self._get_l7policies_bodies( - create_pool=r_create_pool, expected_pool=r_expected_pool) + create_pool_name=r_create_pool['name']) for policy in c_l7policies_url: policy['position'] = 2 create_l7policies.append(policy) @@ -996,14 +1017,37 @@ class TestLoadBalancerGraph(base.BaseAPITest): create_listener, expected_listener = self._get_listener_bodies( create_l7policies=create_l7policies, expected_l7policies=expected_l7policies) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool, r_create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') - def test_with_one_of_everything(self): + def test_with_l7policy_redirect_pool_bad_rule(self): + create_pool, expected_pool = self._get_pool_bodies(create_members=[], + expected_members=[]) + create_l7rules, expected_l7rules = self._get_l7rules_bodies( + value="local host") + create_l7policies, expected_l7policies = self._get_l7policies_bodies( + create_pool_name=create_pool['name'], + create_l7rules=create_l7rules, + expected_l7rules=expected_l7rules) + create_listener, expected_listener = self._get_listener_bodies( + create_l7policies=create_l7policies, + expected_l7policies=expected_l7policies) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + 
expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn('L7Rule: Invalid characters', + response.json.get('faultstring')) + + def _test_with_one_of_everything_helper(self): create_member, expected_member = self._get_member_bodies() create_hm, expected_hm = self._get_hm_bodies() create_pool, expected_pool = self._get_pool_bodies( @@ -1021,27 +1065,106 @@ class TestLoadBalancerGraph(base.BaseAPITest): create_members=[r_create_member], expected_members=[r_expected_member]) create_l7policies, expected_l7policies = self._get_l7policies_bodies( - create_pool=r_create_pool, expected_pool=r_expected_pool, - create_l7rules=create_l7rules, expected_l7rules=expected_l7rules) + create_pool_name=r_create_pool['name'], + create_l7rules=create_l7rules, + expected_l7rules=expected_l7rules) create_listener, expected_listener = self._get_listener_bodies( - create_default_pool=create_pool, - expected_default_pool=expected_pool, + create_default_pool_name=create_pool['name'], create_l7policies=create_l7policies, expected_l7policies=expected_l7policies, create_sni_containers=create_sni_containers, expected_sni_containers=expected_sni_containers) - create_lb, expected_lb = self._get_lb_bodies([create_listener], - [expected_listener]) - response = self.post(self.LBS_PATH, create_lb) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + return body, expected_lb + + def test_with_one_of_everything(self): + body, expected_lb = self._test_with_one_of_everything_helper() + response = self.post(self.LBS_PATH, body) api_lb = response.json.get(self.root_tag) self._assert_graphs_equal(expected_lb, api_lb) - @testtools.skip('Skip until complete v2 merge') def test_db_create_failure(self): create_listener, expected_listener = 
self._get_listener_bodies() create_lb, _ = self._get_lb_bodies([create_listener], [expected_listener]) + body = self._build_body(create_lb) with mock.patch('octavia.db.repositories.Repositories.' - 'create_load_balancer_tree') as repo_mock: + 'create_load_balancer_and_vip') as repo_mock: repo_mock.side_effect = Exception('I am a DB Error') - self.post(self.LBS_PATH, create_lb, status=500) + self.post(self.LBS_PATH, body, status=500) + + def test_pool_names_not_unique(self): + create_pool1, expected_pool1 = self._get_pool_bodies() + create_pool2, expected_pool2 = self._get_pool_bodies() + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool1['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool1, create_pool2]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("Pool names must be unique", + response.json.get('faultstring')) + + def test_pool_names_must_have_specs(self): + create_pool, expected_pool = self._get_pool_bodies() + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name="my_nonexistent_pool") + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("referenced but no full definition", + response.json.get('faultstring')) + + def test_pool_mandatory_attributes(self): + create_pool, expected_pool = self._get_pool_bodies() + create_pool.pop('protocol') + create_listener, expected_listener = self._get_listener_bodies( + create_default_pool_name=create_pool['name']) + create_lb, expected_lb = self._get_lb_bodies( + create_listeners=[create_listener], + expected_listeners=[expected_listener], + 
create_pools=[create_pool]) + body = self._build_body(create_lb) + response = self.post(self.LBS_PATH, body, status=400) + self.assertIn("missing required attribute: protocol", + response.json.get('faultstring')) + + def test_create_over_quota_lb(self): + body, _ = self._test_with_one_of_everything_helper() + self.start_quota_mock(data_models.LoadBalancer) + self.post(self.LBS_PATH, body, status=403) + + def test_create_over_quota_pools(self): + body, _ = self._test_with_one_of_everything_helper() + self.start_quota_mock(data_models.Pool) + self.post(self.LBS_PATH, body, status=403) + + def test_create_over_quota_listeners(self): + body, _ = self._test_with_one_of_everything_helper() + self.start_quota_mock(data_models.Listener) + self.post(self.LBS_PATH, body, status=403) + + def test_create_over_quota_members(self): + body, _ = self._test_with_one_of_everything_helper() + self.start_quota_mock(data_models.Member) + self.post(self.LBS_PATH, body, status=403) + + def test_create_over_quota_hms(self): + body, _ = self._test_with_one_of_everything_helper() + self.start_quota_mock(data_models.HealthMonitor) + self.post(self.LBS_PATH, body, status=403) + + def test_create_over_quota_sanity_check(self): + # This one should create, as we don't check quotas on L7Policies + body, _ = self._test_with_one_of_everything_helper() + self.start_quota_mock(data_models.L7Policy) + self.post(self.LBS_PATH, body) diff --git a/octavia/tests/functional/api/v2/test_member.py b/octavia/tests/functional/api/v2/test_member.py index a41a040c1b..f1fd32b2b1 100644 --- a/octavia/tests/functional/api/v2/test_member.py +++ b/octavia/tests/functional/api/v2/test_member.py @@ -16,6 +16,7 @@ import mock from oslo_utils import uuidutils from octavia.common import constants +from octavia.common import data_models from octavia.network import base as network_base from octavia.tests.functional.api.v2 import base @@ -232,8 +233,7 @@ class TestMember(base.BaseAPITest): resp.json.get('faultstring')) def 
test_create_over_quota(self): - self.check_quota_met_true_mock.start() - self.addCleanup(self.check_quota_met_true_mock.stop) + self.start_quota_mock(data_models.Member) member = {'address': '10.0.0.3', 'protocol_port': 81} self.post(self.members_path, self._build_body(member), status=403) diff --git a/octavia/tests/functional/api/v2/test_pool.py b/octavia/tests/functional/api/v2/test_pool.py index e965013809..d1c9eb416e 100644 --- a/octavia/tests/functional/api/v2/test_pool.py +++ b/octavia/tests/functional/api/v2/test_pool.py @@ -18,6 +18,7 @@ from oslo_utils import uuidutils from octavia.common import constants import octavia.common.context +from octavia.common import data_models from octavia.tests.functional.api.v2 import base import testtools @@ -368,8 +369,7 @@ class TestPool(base.BaseAPITest): pool_op_status=constants.OFFLINE) def test_create_over_quota(self): - self.check_quota_met_true_mock.start() - self.addCleanup(self.check_quota_met_true_mock.stop) + self.start_quota_mock(data_models.Pool) lb_pool = { 'loadbalancer_id': self.lb_id, 'protocol': constants.PROTOCOL_HTTP, diff --git a/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py b/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py index 352c98532d..dfa483d525 100644 --- a/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py +++ b/octavia/tests/unit/controller/worker/tasks/test_database_tasks.py @@ -1104,7 +1104,7 @@ class TestDatabaseTasks(base.TestCase): listeners = [data_models.Listener(id='listener1'), data_models.Listener(id='listener2')] lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners) - mark_lb_active = database_tasks.MarkLBActiveInDB(mark_listeners=True) + mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True) mark_lb_active.execute(lb) repo.LoadBalancerRepository.update.assert_called_once_with( @@ -1133,6 +1133,140 @@ class TestDatabaseTasks(base.TestCase): mock.call('TEST', listeners[1].id, 
provisioning_status=constants.ERROR)]) + @mock.patch('octavia.db.repositories.PoolRepository.update') + @mock.patch('octavia.db.repositories.MemberRepository.update') + @mock.patch('octavia.db.repositories.HealthMonitorRepository.update') + @mock.patch('octavia.db.repositories.L7PolicyRepository.update') + @mock.patch('octavia.db.repositories.L7RuleRepository.update') + def test_mark_LB_active_in_db_full_graph(self, + mock_l7r_repo_update, + mock_l7p_repo_update, + mock_hm_repo_update, + mock_member_repo_update, + mock_pool_repo_update, + mock_generate_uuid, + mock_LOG, + mock_get_session, + mock_loadbalancer_repo_update, + mock_listener_repo_update, + mock_amphora_repo_update, + mock_amphora_repo_delete): + unused_pool = data_models.Pool(id='unused_pool') + members1 = [data_models.Member(id='member1'), + data_models.Member(id='member2')] + health_monitor = data_models.HealthMonitor(id='hm1') + default_pool = data_models.Pool(id='default_pool', + members=members1, + health_monitor=health_monitor) + listener1 = data_models.Listener(id='listener1', + default_pool=default_pool) + members2 = [data_models.Member(id='member3'), + data_models.Member(id='member4')] + redirect_pool = data_models.Pool(id='redirect_pool', + members=members2) + l7rules = [data_models.L7Rule(id='rule1')] + redirect_policy = data_models.L7Policy(id='redirect_policy', + redirect_pool=redirect_pool, + l7rules=l7rules) + l7policies = [redirect_policy] + listener2 = data_models.Listener(id='listener2', + l7policies=l7policies) + listener2.l7policies = l7policies + listeners = [listener1, listener2] + pools = [default_pool, redirect_pool, unused_pool] + + lb = data_models.LoadBalancer(id=LB_ID, listeners=listeners, + pools=pools) + mark_lb_active = database_tasks.MarkLBActiveInDB(mark_subobjects=True) + mark_lb_active.execute(lb) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + lb.id, + provisioning_status=constants.ACTIVE) + self.assertEqual(2, 
repo.ListenerRepository.update.call_count) + repo.ListenerRepository.update.has_calls( + [mock.call('TEST', listeners[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', listeners[1].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(2, repo.PoolRepository.update.call_count) + repo.PoolRepository.update.has_calls( + [mock.call('TEST', default_pool.id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', redirect_pool.id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(4, repo.MemberRepository.update.call_count) + repo.MemberRepository.update.has_calls( + [mock.call('TEST', members1[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', members1[1].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', members2[0].id, + provisioning_status=constants.ACTIVE), + mock.call('TEST', members2[1].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.HealthMonitorRepository.update.call_count) + repo.HealthMonitorRepository.update.has_calls( + [mock.call('TEST', health_monitor.id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.L7PolicyRepository.update.call_count) + repo.L7PolicyRepository.update.has_calls( + [mock.call('TEST', l7policies[0].id, + provisioning_status=constants.ACTIVE)]) + self.assertEqual(1, repo.L7RuleRepository.update.call_count) + repo.L7RuleRepository.update.has_calls( + [mock.call('TEST', l7rules[0].id, + provisioning_status=constants.ACTIVE)]) + + mock_loadbalancer_repo_update.reset_mock() + mock_listener_repo_update.reset_mock() + mock_pool_repo_update.reset_mock() + mock_member_repo_update.reset_mock() + mock_hm_repo_update.reset_mock() + mock_l7p_repo_update.reset_mock() + mock_l7r_repo_update.reset_mock() + mark_lb_active.revert(lb) + + repo.LoadBalancerRepository.update.assert_called_once_with( + 'TEST', + id=lb.id, + provisioning_status=constants.ERROR) + self.assertEqual(2, repo.ListenerRepository.update.call_count) + 
repo.ListenerRepository.update.has_calls( + [mock.call('TEST', listeners[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', listeners[1].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(2, repo.PoolRepository.update.call_count) + repo.PoolRepository.update.has_calls( + [mock.call('TEST', default_pool.id, + provisioning_status=constants.ERROR), + mock.call('TEST', redirect_pool.id, + provisioning_status=constants.ERROR)]) + self.assertEqual(4, repo.MemberRepository.update.call_count) + repo.MemberRepository.update.has_calls( + [mock.call('TEST', members1[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', members1[1].id, + provisioning_status=constants.ERROR), + mock.call('TEST', members2[0].id, + provisioning_status=constants.ERROR), + mock.call('TEST', members2[1].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.HealthMonitorRepository.update.call_count) + repo.HealthMonitorRepository.update.has_calls( + [mock.call('TEST', health_monitor.id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.L7PolicyRepository.update.call_count) + repo.L7PolicyRepository.update.has_calls( + [mock.call('TEST', l7policies[0].id, + provisioning_status=constants.ERROR)]) + self.assertEqual(1, repo.L7RuleRepository.update.call_count) + repo.L7RuleRepository.update.has_calls( + [mock.call('TEST', l7rules[0].id, + provisioning_status=constants.ERROR)]) + def test_mark_LB_deleted_in_db(self, mock_generate_uuid, mock_LOG,