Extend API to accept qos_policy_id

This patch extends the Octavia v2 API to accept a qos_policy_id from
Neutron. Users can pass it as 'vip_qos_policy_id' in the request body
when creating or updating load balancers, and the VRRP ports will then
carry the QoS policy.

This patch modifies the load balancer POST/PUT request and response
bodies. It also extends the 'vip' table with a new column named
'qos_policy_id' to store the QoS policy ID from Neutron.
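
For illustration only (not part of the patch): a minimal Python sketch of
a create request carrying the new attribute, reusing the placeholder
endpoint, token, and IDs from the examples below.

    import json
    import urllib.request

    token = "<token>"  # placeholder credential
    body = {
        "loadbalancer": {
            "name": "best_load_balancer",
            "project_id": "e3cd678b11784734bc366148aa37580e",
            "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a",
            # The new attribute introduced by this patch:
            "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3",
        }
    }
    request = urllib.request.Request(
        "http://198.51.100.10:9876/v2.0/lbaas/loadbalancers",
        data=json.dumps(body).encode("utf-8"),
        headers={"Content-Type": "application/json", "X-Auth-Token": token},
        method="POST",
    )
    print(urllib.request.urlopen(request).read().decode("utf-8"))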

Co-Authored-By: Reedip <reedip.banerjee@nectechnologies.in>

Change-Id: I43aba9d2ae816b1498d16da077936d6bdb62e30a
Author: ZhaoBo, 2017-04-05 13:59:57 +08:00
Parent: 4b48af9186
Commit: 51f6f7258b
41 changed files with 681 additions and 23 deletions

@@ -1059,6 +1059,18 @@ vip_port_id-optional:
in: body
required: false
type: string
vip_qos_policy_id:
description: |
The ID of the QoS Policy which will apply to the Virtual IP (VIP).
in: body
required: true
type: string
vip_qos_policy_id-optional:
description: |
The ID of the QoS Policy which will apply to the Virtual IP (VIP).
in: body
required: false
type: string
vip_subnet_id:
description: |
The ID of the subnet for the Virtual IP (VIP).

@@ -1 +1 @@
curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"loadbalancer": {"description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "flavor": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "provider": "octavia", "name": "best_load_balancer"}}' http://198.51.100.10:9876/v2.0/lbaas/loadbalancers
curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"loadbalancer": {"description": "My favorite load balancer", "admin_state_up": true, "project_id": "e3cd678b11784734bc366148aa37580e", "flavor": "a7ae5d5a-d855-4f9a-b187-af66b53f4d04", "vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a", "vip_address": "203.0.113.50", "provider": "octavia", "name": "best_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"}}' http://198.51.100.10:9876/v2.0/lbaas/loadbalancers

@@ -6,6 +6,7 @@
"vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a",
"vip_address": "203.0.113.50",
"provider": "octavia",
"name": "best_load_balancer"
"name": "best_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -14,6 +14,7 @@
"updated_at": "2017-02-28T00:43:30",
"id": "607226db-27ef-4d41-ae89-f2a800e9c2db",
"operating_status": "OFFLINE",
"name": "best_load_balancer"
"name": "best_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -82,6 +82,7 @@
"vip_subnet_id": "d4af86e1-0051-488c-b7a0-527f97490c9a",
"vip_address": "203.0.113.50",
"provider": "octavia",
"name": "best_load_balancer"
"name": "best_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -172,6 +172,7 @@
"updated_at": "2017-02-28T00:43:30",
"id": "607226db-27ef-4d41-ae89-f2a800e9c2db",
"operating_status": "ONLINE",
"name": "best_load_balancer"
"name": "best_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -14,6 +14,7 @@
"updated_at": "2017-02-28T00:43:30",
"id": "8a562351-f0fb-424c-a0af-513461424ea5",
"operating_status": "ONLINE",
"name": "best_load_balancer"
"name": "best_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -1 +1 @@
curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"loadbalancer": {"description": "Temporarily disabled load balancer", "admin_state_up": false, "name": "disabled_load_balancer"}}' http://198.51.100.10:9876/v2.0/lbaas/loadbalancers/8b6fc468-07d5-4d8b-a0b9-695060e72c31
curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"loadbalancer": {"description": "Temporarily disabled load balancer", "admin_state_up": false, "name": "disabled_load_balancer", "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"}}' http://198.51.100.10:9876/v2.0/lbaas/loadbalancers/8b6fc468-07d5-4d8b-a0b9-695060e72c31

@@ -2,6 +2,7 @@
"loadbalancer": {
"description": "Temporarily disabled load balancer",
"admin_state_up": false,
"name": "disabled_load_balancer"
"name": "disabled_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -14,6 +14,7 @@
"updated_at": "2017-02-28T00:43:30",
"id": "8b6fc468-07d5-4d8b-a0b9-695060e72c31",
"operating_status": "ONLINE",
"name": "disabled_load_balancer"
"name": "disabled_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
}

@@ -25,7 +25,8 @@
"updated_at": "2017-02-28T00:43:30",
"id": "607226db-27ef-4d41-ae89-f2a800e9c2db",
"operating_status": "ONLINE",
"name": "best_load_balancer"
"name": "best_load_balancer",
"vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3"
}
]
}

@@ -63,6 +63,7 @@ Response Parameters
- vip_network_id: vip_network_id
- vip_port_id: vip_port_id
- vip_subnet_id: vip_subnet_id
- vip_qos_policy_id: vip_qos_policy_id
Response Example
----------------
@@ -106,6 +107,10 @@ using a pre-configured octavia flavor. Flavors are created by the operator
to allow custom load balancer configurations, such as allocating more
memory for the load balancer.
An optional ``vip_qos_policy_id`` attribute from Neutron can be used to
apply a QoS policy to a load balancer VIP; passing a ``null`` value
removes the QoS policy again.
You can also specify the ``provider`` attribute when you create a
load balancer. The ``provider`` attribute specifies which backend should
be used to create the load balancer. This could be the default provider
@@ -167,6 +172,7 @@ Request
- vip_network_id: vip_network_id-optional
- vip_port_id: vip_port_id-optional
- vip_subnet_id: vip_subnet_id-optional
- vip_qos_policy_id: vip_qos_policy_id-optional
Request Example
----------------
@@ -203,6 +209,7 @@ Response Parameters
- vip_network_id: vip_network_id
- vip_port_id: vip_port_id
- vip_subnet_id: vip_subnet_id
- vip_qos_policy_id: vip_qos_policy_id
Response Example
----------------
@@ -294,6 +301,7 @@ Response Parameters
- vip_network_id: vip_network_id
- vip_port_id: vip_port_id
- vip_subnet_id: vip_subnet_id
- vip_qos_policy_id: vip_qos_policy_id
Response Example
----------------
@@ -340,6 +348,7 @@ Request
- loadbalancer: loadbalancer
- loadbalancer_id: path-loadbalancer-id
- name: name-optional
- vip_qos_policy_id: vip_qos_policy_id-optional
Request Example
---------------
@@ -376,6 +385,7 @@ Response Parameters
- vip_network_id: vip_network_id
- vip_port_id: vip_port_id
- vip_subnet_id: vip_subnet_id
- vip_qos_policy_id: vip_qos_policy_id
Response Example
----------------
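
As noted above, passing a JSON null removes the QoS policy again. A
hedged Python sketch of such a PUT (endpoint, token, and load balancer ID
are the placeholders used elsewhere in this patch):

    import json
    import urllib.request

    # Python None serializes to JSON null, which clears the policy.
    body = {"loadbalancer": {"vip_qos_policy_id": None}}
    request = urllib.request.Request(
        "http://198.51.100.10:9876/v2.0/lbaas/loadbalancers/"
        "8b6fc468-07d5-4d8b-a0b9-695060e72c31",
        data=json.dumps(body).encode("utf-8"),
        headers={"Content-Type": "application/json",
                 "X-Auth-Token": "<token>"},
        method="PUT",
    )
    urllib.request.urlopen(request)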

@@ -132,6 +132,15 @@ class LoadBalancersController(base.BaseController):
port = validate.port_exists(port_id=load_balancer.vip_port_id)
load_balancer.vip_network_id = port.network_id
# Validate whether the requested VIP port already has a QoS policy
# applied and, if so, store that policy on the load balancer object.
# The default behavior is that a 'vip_qos_policy_id' specified in the
# request overrides the QoS policy already applied to the VIP port.
port_qos_policy_id = port.qos_policy_id
if (port_qos_policy_id and
isinstance(load_balancer.vip_qos_policy_id, wtypes.UnsetType)):
load_balancer.vip_qos_policy_id = port_qos_policy_id
# Identify the subnet for this port
if load_balancer.vip_subnet_id:
validate.subnet_exists(subnet_id=load_balancer.vip_subnet_id)
@@ -193,7 +202,9 @@ class LoadBalancersController(base.BaseController):
subnet = validate.subnet_exists(
subnet_id=load_balancer.vip_subnet_id)
load_balancer.vip_network_id = subnet.network_id
if load_balancer.vip_qos_policy_id:
validate.qos_policy_exists(
qos_policy_id=load_balancer.vip_qos_policy_id)
validate.network_allowed_by_config(load_balancer.vip_network_id)
def _create_vip_port_if_not_exist(self, load_balancer_db):
@@ -418,6 +429,11 @@ class LoadBalancersController(base.BaseController):
self._auth_validate_action(context, db_lb.project_id,
constants.RBAC_PUT)
if (load_balancer.vip_qos_policy_id and
not isinstance(load_balancer.vip_qos_policy_id,
wtypes.UnsetType) and
db_lb.vip.qos_policy_id != load_balancer.vip_qos_policy_id):
validate.qos_policy_exists(load_balancer.vip_qos_policy_id)
self._test_lb_status(context.session, id)
try:
LOG.info("Sending updated Load Balancer %s to the handler", id)
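
Editor's note: the precedence rule above can be restated as a small
standalone sketch (illustrative, not Octavia code); wtypes.Unset marks an
attribute that was absent from the request body.

    from wsme import types as wtypes

    def effective_qos_policy_id(request_value, port_qos_policy_id):
        # An explicit value in the request wins; the policy already on
        # the VIP port is inherited only when the request leaves the
        # attribute unset.
        if (isinstance(request_value, wtypes.UnsetType) and
                port_qos_policy_id):
            return port_qos_policy_id
        return request_value

    assert effective_qos_policy_id(wtypes.Unset, 'port-qos') == 'port-qos'
    assert effective_qos_policy_id('req-qos', 'port-qos') == 'req-qos'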

@@ -25,6 +25,7 @@ class BaseLoadBalancerType(types.BaseType):
'vip_subnet_id': 'vip.subnet_id',
'vip_port_id': 'vip.port_id',
'vip_network_id': 'vip.network_id',
'vip_qos_policy_id': 'vip.qos_policy_id',
'admin_state_up': 'enabled'}
_child_map = {'vip': {
'ip_address': 'vip_address',
@@ -52,6 +53,7 @@ class LoadBalancerResponse(BaseLoadBalancerType):
pools = wtypes.wsattr([types.IdOnlyType])
provider = wtypes.wsattr(wtypes.StringType())
flavor = wtypes.wsattr(wtypes.StringType())
vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
@classmethod
def from_data_model(cls, data_model, children=False):
@@ -62,7 +64,7 @@ class LoadBalancerResponse(BaseLoadBalancerType):
result.vip_port_id = data_model.vip.port_id
result.vip_address = data_model.vip.ip_address
result.vip_network_id = data_model.vip.network_id
result.vip_qos_policy_id = data_model.vip.qos_policy_id
if cls._full_response():
listener_model = listener.ListenerFullResponse
pool_model = pool.PoolFullResponse
@@ -114,6 +116,7 @@ class LoadBalancerPOST(BaseLoadBalancerType):
vip_port_id = wtypes.wsattr(wtypes.UuidType())
vip_subnet_id = wtypes.wsattr(wtypes.UuidType())
vip_network_id = wtypes.wsattr(wtypes.UuidType())
vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
project_id = wtypes.wsattr(wtypes.StringType(max_length=36))
listeners = wtypes.wsattr([listener.ListenerSingleCreate], default=[])
pools = wtypes.wsattr([pool.PoolSingleCreate], default=[])
@@ -134,6 +137,7 @@ class LoadBalancerPUT(BaseLoadBalancerType):
name = wtypes.wsattr(wtypes.StringType(max_length=255))
description = wtypes.wsattr(wtypes.StringType(max_length=255))
vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
admin_state_up = wtypes.wsattr(bool)

@@ -450,13 +450,14 @@ class Vip(BaseDataModel):
def __init__(self, load_balancer_id=None, ip_address=None,
subnet_id=None, network_id=None, port_id=None,
load_balancer=None):
load_balancer=None, qos_policy_id=None):
self.load_balancer_id = load_balancer_id
self.ip_address = ip_address
self.subnet_id = subnet_id
self.network_id = network_id
self.port_id = port_id
self.load_balancer = load_balancer
self.qos_policy_id = qos_policy_id
class SNI(BaseDataModel):

@@ -244,6 +244,16 @@ def subnet_exists(subnet_id):
return subnet
def qos_policy_exists(qos_policy_id):
network_driver = utils.get_network_driver()
try:
qos_policy = network_driver.get_qos_policy(qos_policy_id)
except Exception:
raise exceptions.InvalidSubresource(resource='qos_policy',
id=qos_policy_id)
return qos_policy
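
Hedged usage note: the validator proxies to the configured network driver
and converts any failure into InvalidSubresource, which the API layer
returns as a 400. Assuming a deployment with a working driver:

    from octavia.common import validate

    # Raises exceptions.InvalidSubresource for an unknown ID.
    qos_policy = validate.qos_policy_exists(
        qos_policy_id='ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3')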
def network_exists_optionally_contains_subnet(network_id, subnet_id=None):
"""Raises an exception when a network does not exist.

@@ -311,6 +311,9 @@ class LoadBalancerFlows(object):
new_LB_net_subflow.add(network_tasks.PlugVIP(
requires=constants.LOADBALANCER,
provides=constants.AMPS_DATA))
new_LB_net_subflow.add(network_tasks.ApplyQos(
requires=(constants.LOADBALANCER, constants.AMPS_DATA,
constants.UPDATE_DICT)))
new_LB_net_subflow.add(database_tasks.UpdateAmphoraVIPData(
requires=constants.AMPS_DATA))
new_LB_net_subflow.add(database_tasks.ReloadLoadBalancer(
@@ -339,6 +342,8 @@
rebind={constants.OBJECT:
constants.LOADBALANCER},
requires=[constants.UPDATE_DICT]))
update_LB_flow.add(network_tasks.ApplyQos(
requires=(constants.LOADBALANCER, constants.UPDATE_DICT)))
update_LB_flow.add(amphora_driver_tasks.ListenersUpdate(
requires=[constants.LOADBALANCER, constants.LISTENERS]))
update_LB_flow.add(database_tasks.UpdateLoadbalancerInDB(
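
A minimal taskflow sketch (illustrative, not the real Octavia flow) of
why ApplyQos is added after PlugVIP on creation: it consumes the amphorae
data that PlugVIP provides.

    from taskflow import engines
    from taskflow import task
    from taskflow.patterns import linear_flow

    class PlugVIPSketch(task.Task):
        default_provides = 'amps_data'

        def execute(self):
            return ['amphora-1']  # stand-in for the plugged amphorae

    class ApplyQosSketch(task.Task):
        def execute(self, amps_data):
            print('applying QoS on the VRRP ports of', amps_data)

    flow = linear_flow.Flow('create-lb-sketch')
    flow.add(PlugVIPSketch(), ApplyQosSketch())
    engines.run(flow)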

@@ -233,3 +233,16 @@ class TaskUtils(object):
LOG.error("Failed to update pool %(pool)s "
"provisioning status to ERROR due to: "
"%(except)s", {'pool': pool_id, 'except': e})
def get_current_loadbalancer_from_db(self, loadbalancer_id):
"""Gets a Loadbalancer from db.
:param: loadbalancer_id: Load balancer ID which to get from db
"""
try:
return self.loadbalancer_repo.get(db_apis.get_session(),
id=loadbalancer_id)
except Exception as e:
LOG.error("Failed to get loadbalancer &(loadbalancer)s "
"due to: %(except)s",
{'loadbalancer': loadbalancer_id, 'except': e})

@@ -45,6 +45,7 @@ class BaseDatabaseTask(task.Task):
self.health_mon_repo = repo.HealthMonitorRepository()
self.listener_repo = repo.ListenerRepository()
self.loadbalancer_repo = repo.LoadBalancerRepository()
self.vip_repo = repo.VipRepository()
self.member_repo = repo.MemberRepository()
self.pool_repo = repo.PoolRepository()
self.amp_health_repo = repo.AmphoraHealthRepository()
@@ -1272,6 +1273,11 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):
"""
LOG.debug("Update DB for loadbalancer id: %s ", loadbalancer.id)
if update_dict.get('vip'):
vip_dict = update_dict.pop('vip')
self.vip_repo.update(db_apis.get_session(),
loadbalancer.vip.load_balancer_id,
**vip_dict)
self.loadbalancer_repo.update(db_apis.get_session(), loadbalancer.id,
**update_dict)
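
In plain-dict terms (an editor's illustration of the split above), the
nested 'vip' keys are popped out so that each repository receives only
its own columns:

    update_dict = {'name': 'lb1', 'vip': {'qos_policy_id': 'policy-uuid'}}
    vip_dict = update_dict.pop('vip')
    # vip_dict goes to VipRepository.update(), keyed by load_balancer_id;
    # the remainder goes to LoadBalancerRepository.update().
    assert vip_dict == {'qos_policy_id': 'policy-uuid'}
    assert update_dict == {'name': 'lb1'}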

@@ -13,6 +13,7 @@
# under the License.
#
from octavia.common import data_models
from taskflow import task
@@ -28,5 +29,19 @@ class UpdateAttributes(task.Task):
"""Task to update an object for changes."""
def execute(self, object, update_dict):
"""Update an object and its associated resources in nested way.
object.update(update_dict)
Such as LoadBalancer object, will nested update the Vip object if there
is any new field in PUT request.
:param object: The object will be updated.
:param update_dict: The PUT request body in dictionary type.
:returns: None
"""
for key, value in update_dict.items():
if (hasattr(object, key) and
isinstance(getattr(object, key),
data_models.BaseDataModel) and
isinstance(value, dict)):
getattr(object, key).update(value)
else:
setattr(object, key, value)
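
A hedged usage sketch of the nested update (the values are illustrative):

    from octavia.common import data_models
    from octavia.controller.worker.tasks import model_tasks

    lb = data_models.LoadBalancer(
        name='old', vip=data_models.Vip(qos_policy_id=None))
    model_tasks.UpdateAttributes().execute(
        lb, {'name': 'new', 'vip': {'qos_policy_id': 'policy-uuid'}})
    # 'vip' maps to a nested data model, so its fields update in place.
    assert lb.name == 'new'
    assert lb.vip.qos_policy_id == 'policy-uuid'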

@@ -22,6 +22,7 @@ from taskflow.types import failure
from octavia.common import constants
from octavia.common import utils
from octavia.controller.worker import task_utils
from octavia.network import base
from octavia.network import data_models as n_data_models
@@ -35,6 +36,7 @@ class BaseNetworkTask(task.Task):
def __init__(self, **kwargs):
super(BaseNetworkTask, self).__init__(**kwargs)
self._network_driver = None
self.task_utils = task_utils.TaskUtils()
@property
def network_driver(self):
@@ -452,3 +454,50 @@ class WaitForPortDetach(BaseNetworkTask):
LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.',
{'amp_id': amphora.id})
self.network_driver.wait_for_port_detach(amphora)
class ApplyQos(BaseNetworkTask):
"""Apply Quality of Services to the VIP"""
def _apply_qos_on_vrrp_ports(self, loadbalancer, amps_data, qos_policy_id,
is_revert=False, request_qos_id=None):
"""Call network driver to apply QoS Policy on the vrrp ports."""
if not amps_data:
amps_data = loadbalancer.amphorae
vrrp_port_ids = [amp.vrrp_port_id for amp in amps_data]
for port_id in vrrp_port_ids:
try:
self.network_driver.apply_qos_on_port(qos_policy_id, port_id)
except Exception:
if not is_revert:
raise
else:
LOG.warning('Failed to undo qos policy %(qos_id)s '
'on vrrp port: %(port)s from '
'amphorae: %(amp)s',
{'qos_id': request_qos_id,
'port': vrrp_port_ids,
'amp': [amp.id for amp in amps_data]})
def execute(self, loadbalancer, amps_data=None, update_dict=None):
"""Apply qos policy on the vrrp ports which are related with vip."""
qos_policy_id = loadbalancer.vip.qos_policy_id
if not qos_policy_id and (
update_dict and (
'vip' not in update_dict or
'qos_policy_id' not in update_dict['vip'])):
return
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, qos_policy_id)
def revert(self, result, loadbalancer, amps_data=None, update_dict=None,
*args, **kwargs):
"""Handle a failure to apply QoS to VIP"""
request_qos_id = loadbalancer.vip.qos_policy_id
orig_lb = self.task_utils.get_current_loadbalancer_from_db(
loadbalancer.id)
orig_qos_id = orig_lb.vip.qos_policy_id
if request_qos_id != orig_qos_id:
self._apply_qos_on_vrrp_ports(loadbalancer, amps_data, orig_qos_id,
is_revert=True,
request_qos_id=request_qos_id)
return
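
The guard at the top of execute() is dense; as an editor's paraphrase
(not Octavia code), QoS is applied when the VIP already carries a policy,
when there is no update context (creation), or when the PUT body
explicitly touched vip.qos_policy_id (including setting it to null):

    def should_apply_qos(qos_policy_id, update_dict):
        if qos_policy_id:
            return True
        vip_updates = (update_dict or {}).get('vip') or {}
        return not update_dict or 'qos_policy_id' in vip_updates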

@@ -0,0 +1,37 @@
# Copyright 2017 Huawei
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add QoS Policy ID column to VIP table
Revision ID: 0aee2b450512
Revises: bf171d0d91c3
Create Date: 2017-02-07 20:47:52.405865
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '0aee2b450512'
down_revision = 'bf171d0d91c3'
def upgrade():
op.add_column('vip',
sa.Column('qos_policy_id',
sa.String(length=36),
nullable=True, server_default=None))
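
For reference, the hypothetical inverse would look as follows; this patch
does not ship one, and OpenStack migrations are generally forward-only.

    def downgrade():
        # Hypothetical only; not part of this patch.
        op.drop_column('vip', 'qos_policy_id')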

@@ -381,6 +381,7 @@ class Vip(base_models.BASE):
load_balancer = orm.relationship("LoadBalancer", uselist=False,
backref=orm.backref("vip", uselist=False,
cascade="delete"))
qos_policy_id = sa.Column(sa.String(36), nullable=True)
class Listener(base_models.BASE, base_models.IdMixin,

@@ -79,6 +79,10 @@ class TimeoutException(NetworkException):
pass
class QosPolicyNotFound(NetworkException):
pass
@six.add_metaclass(abc.ABCMeta)
class AbstractNetworkDriver(object):
"""This class defines the methods for a fully functional network driver.

@@ -76,7 +76,7 @@ class Port(data_models.BaseDataModel):
def __init__(self, id=None, name=None, device_id=None, device_owner=None,
mac_address=None, network_id=None, status=None,
project_id=None, admin_state_up=None, fixed_ips=None,
network=None):
network=None, qos_policy_id=None):
self.id = id
self.name = name
self.device_id = device_id
@@ -88,6 +88,7 @@
self.admin_state_up = admin_state_up
self.fixed_ips = fixed_ips or []
self.network = network
self.qos_policy_id = qos_policy_id
def get_subnet_id(self, fixed_ip_address):
for fixed_ip in self.fixed_ips:
@@ -122,3 +123,8 @@ class HostRoute(data_models.BaseDataModel):
def __init__(self, nexthop=None, destination=None):
self.nexthop = nexthop
self.destination = destination
class QosPolicy(data_models.BaseDataModel):
def __init__(self, id):
self.id = id

@@ -135,6 +135,18 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
}
self.neutron_client.create_security_group_rule(rule)
def apply_qos_on_port(self, qos_id, port_id):
body = {
'port':
{'qos_policy_id': qos_id}
}
try:
self.neutron_client.update_port(port_id, body)
except neutron_client_exceptions.PortNotFoundClient as e:
raise base.PortNotFound(e.message)
except Exception as e:
raise base.NetworkException(str(e))
def get_plugged_networks(self, compute_id):
# List neutron ports associated with the Amphora
try:
@@ -223,3 +235,6 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
return self._get_resources_by_filters(
'port', unique_item=True,
network_id=network_id, device_id=device_id)
def get_qos_policy(self, qos_policy_id):
return self._get_resource('qos_policy', qos_policy_id)
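
A hedged sketch of what apply_qos_on_port amounts to on the Neutron side;
'neutron' is assumed to be a configured neutronclient Client instance.

    def apply_qos(neutron, port_id, qos_id):
        # qos_id=None serializes to JSON null and detaches the policy.
        neutron.update_port(port_id, {'port': {'qos_policy_id': qos_id}})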

@@ -47,7 +47,8 @@ def convert_port_dict_to_model(port_dict):
status=port.get('status'),
project_id=port.get('tenant_id'),
admin_state_up=port.get('admin_state_up'),
fixed_ips=fixed_ips
fixed_ips=fixed_ips,
qos_policy_id=port.get('qos_policy_id')
)
@@ -71,3 +72,8 @@ def convert_fixed_ip_dict_to_model(fixed_ip_dict):
fixed_ip = fixed_ip_dict.get('fixed_ip', fixed_ip_dict)
return network_models.FixedIP(subnet_id=fixed_ip.get('subnet_id'),
ip_address=fixed_ip.get('ip_address'))
def convert_qos_policy_dict_to_model(qos_policy_dict):
qos_policy = qos_policy_dict.get('policy', qos_policy_dict)
return network_models.QosPolicy(id=qos_policy.get('id'))
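
Hedged example of the converter (module path as in this patch); it
tolerates both the wrapped and the bare Neutron dict:

    from octavia.network.drivers.neutron import utils

    policy = utils.convert_qos_policy_dict_to_model(
        {'policy': {'id': 'qos-uuid'}})
    assert policy.id == 'qos-uuid'
    assert utils.convert_qos_policy_dict_to_model(
        {'id': 'qos-uuid'}).id == 'qos-uuid'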

@@ -190,6 +190,19 @@ class NoopManager(object):
LOG.debug("failover %s no-op, wait_for_port_detach, amphora id %s",
self.__class__.__name__, amphora.id)
def get_qos_policy(self, qos_policy_id):
LOG.debug("Qos Policy %s no-op, get_qos_policy qos_policy_id %s",
self.__class__.__name__, qos_policy_id)
self.networkconfigconfig[qos_policy_id] = (qos_policy_id,
'get_qos_policy')
return qos_policy_id
def apply_qos_on_port(self, qos_id, port_id):
LOG.debug("Network %s no-op, apply_qos_on_port qos_id %s, port_id "
"%s", self.__class__.__name__, qos_id, port_id)
self.networkconfigconfig[(qos_id, port_id)] = (
qos_id, port_id, 'apply_qos_on_port')
class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
def __init__(self):
@@ -230,6 +243,9 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
def get_port(self, port_id):
return self.driver.get_port(port_id)
def get_qos_policy(self, qos_policy_id):
return self.driver.get_qos_policy(qos_policy_id)
def get_network_by_name(self, network_name):
return self.driver.get_network_by_name(network_name)
@@ -253,3 +269,6 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
def wait_for_port_detach(self, amphora):
self.driver.wait_for_port_detach(amphora)
def apply_qos_on_port(self, qos_id, port_id):
self.driver.apply_qos_on_port(qos_id, port_id)

@@ -75,6 +75,9 @@ MOCK_NEUTRON_PORT = {'port': {'network_id': MOCK_NETWORK_ID,
'mac_address': MOCK_MAC_ADDR,
'fixed_ips': [{'ip_address': MOCK_IP_ADDRESS,
'subnet_id': MOCK_SUBNET_ID}]}}
MOCK_NEUTRON_QOS_POLICY_ID = 'mock-qos-id'
MOCK_QOS_POLICY_ID1 = 'qos1-id'
MOCK_QOS_POLICY_ID2 = 'qos2-id'
MOCK_NEUTRON_PORT2 = {'port': {'network_id': MOCK_NETWORK_ID2,
'device_id': MOCK_DEVICE_ID2,

@@ -461,6 +461,129 @@ class TestLoadBalancer(base.BaseAPITest):
# the octavia error message
self.assertIn("neutron_msg", response.json.get("faultstring"))
def test_create_with_qos(self):
subnet = network_models.Subnet(id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid())
qos_policy_id = uuidutils.generate_uuid()
# Test with specific vip_qos_policy_id
lb_json = {'vip_subnet_id': subnet.id,
'project_id': self.project_id,
'vip_qos_policy_id': qos_policy_id}
body = self._build_body(lb_json)
with mock.patch("octavia.network.drivers.noop_driver.driver"
".NoopManager.get_subnet") as mock_get_subnet:
with mock.patch("octavia.common.validate."
"qos_policy_exists") as mock_get_qos:
mock_get_subnet.return_value = subnet
mock_get_qos.return_value = qos_policy_id
response = self.post(self.LBS_PATH, body)
api_lb = response.json.get(self.root_tag)
self._assert_request_matches_response(lb_json, api_lb)
self.assertEqual(subnet.id, api_lb.get('vip_subnet_id'))
self.assertEqual(qos_policy_id, api_lb.get('vip_qos_policy_id'))
def test_create_with_qos_vip_port(self):
# Test with vip_port_id which applied qos_policy
subnet = network_models.Subnet(id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid())
port_qos_policy_id = uuidutils.generate_uuid()
ip_address = '192.168.50.50'
network = network_models.Network(id=uuidutils.generate_uuid(),
subnets=[subnet])
fixed_ip = network_models.FixedIP(subnet_id=subnet.id,
ip_address=ip_address)
port = network_models.Port(id=uuidutils.generate_uuid(),
fixed_ips=[fixed_ip],
network_id=network.id,
qos_policy_id=port_qos_policy_id)
lb_json = {'vip_port_id': port.id,
'project_id': self.project_id}
body = self._build_body(lb_json)
with mock.patch(
"octavia.network.drivers.noop_driver.driver."
"NoopManager.get_network") as m_get_network, mock.patch(
"octavia.network.drivers.noop_driver.driver.NoopManager"
".get_port") as mock_get_port, mock.patch(
"octavia.network.drivers.noop_driver.driver.NoopManager"
".allocate_vip") as mock_allocate_vip, mock.patch(
"octavia.common.validate."
"qos_policy_exists") as m_get_qos:
m_get_qos.return_value = port_qos_policy_id
mock_allocate_vip.return_value = data_models.Vip(
ip_address=ip_address, subnet_id=subnet.id,
network_id=network.id, port_id=port.id)
m_get_network.return_value = network
mock_get_port.return_value = port
response = self.post(self.LBS_PATH, body)
api_lb = response.json.get(self.root_tag)
self._assert_request_matches_response(lb_json, api_lb)
self.assertEqual(port.id, api_lb.get('vip_port_id'))
self.assertEqual(subnet.id, api_lb.get('vip_subnet_id'))
self.assertEqual(network.id, api_lb.get('vip_network_id'))
self.assertEqual(port_qos_policy_id, api_lb.get(
'vip_qos_policy_id'))
def test_create_with_qos_vip_port_and_vip_qos(self):
subnet = network_models.Subnet(id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid())
port_qos_policy_id = uuidutils.generate_uuid()
new_qos_policy_id = uuidutils.generate_uuid()
ip_address = '192.168.50.50'
network = network_models.Network(id=uuidutils.generate_uuid(),
subnets=[subnet])
fixed_ip = network_models.FixedIP(subnet_id=subnet.id,
ip_address=ip_address)
port = network_models.Port(id=uuidutils.generate_uuid(),
fixed_ips=[fixed_ip],
network_id=network.id,
qos_policy_id=port_qos_policy_id)
lb_json = {'vip_port_id': port.id,
'project_id': self.project_id,
'vip_qos_policy_id': new_qos_policy_id}
body = self._build_body(lb_json)
with mock.patch(
"octavia.network.drivers.noop_driver.driver."
"NoopManager.get_network") as m_get_network, mock.patch(
"octavia.network.drivers.noop_driver.driver.NoopManager"
".get_port") as mock_get_port, mock.patch(
"octavia.network.drivers.noop_driver.driver.NoopManager"
".allocate_vip") as mock_allocate_vip, mock.patch(
"octavia.common.validate."
"qos_policy_exists") as m_get_qos:
m_get_qos.return_value = mock.ANY
mock_allocate_vip.return_value = data_models.Vip(
ip_address=ip_address, subnet_id=subnet.id,
network_id=network.id, port_id=port.id)
m_get_network.return_value = network
mock_get_port.return_value = port
response = self.post(self.LBS_PATH, body)
api_lb = response.json.get(self.root_tag)
self._assert_request_matches_response(lb_json, api_lb)
self.assertEqual(port.id, api_lb.get('vip_port_id'))
self.assertEqual(subnet.id, api_lb.get('vip_subnet_id'))
self.assertEqual(network.id, api_lb.get('vip_network_id'))
self.assertEqual(new_qos_policy_id, api_lb.get(
'vip_qos_policy_id'))
def test_create_with_non_exist_qos_policy_id(self):
subnet = network_models.Subnet(id=uuidutils.generate_uuid(),
network_id=uuidutils.generate_uuid())
qos_policy_id = uuidutils.generate_uuid()
lb_json = {'vip_subnet_id': subnet.id,
'project_id': self.project_id,
'vip_qos_policy_id': qos_policy_id}
body = self._build_body(lb_json)
with mock.patch("octavia.network.drivers.noop_driver.driver"
".NoopManager.get_subnet") as mock_get_subnet:
with mock.patch("octavia.network.drivers.noop_driver."
"driver.NoopManager."
"get_qos_policy") as mock_get_qos:
mock_get_subnet.return_value = subnet
mock_get_qos.side_effect = Exception()
response = self.post(self.LBS_PATH, body, status=400)
err_msg = "qos_policy %s not found." % qos_policy_id
self.assertEqual(err_msg, response.json.get('faultstring'))
def test_create_with_long_name(self):
lb_json = {'name': 'n' * 256,
'vip_subnet_id': uuidutils.generate_uuid(),
@@ -1113,6 +1236,32 @@ class TestLoadBalancer(base.BaseAPITest):
self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')),
lb_json, status=400)
def test_update_with_qos(self):
project_id = uuidutils.generate_uuid()
lb = self.create_load_balancer(
uuidutils.generate_uuid(), name='lb1',
project_id=project_id,
vip_qos_policy_id=uuidutils.generate_uuid())
lb_dict = lb.get(self.root_tag)
self.set_lb_status(lb_dict.get('id'))
lb_json = self._build_body(
{'vip_qos_policy_id': uuidutils.generate_uuid()})
self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')),
lb_json, status=200)
def test_update_with_bad_qos(self):
project_id = uuidutils.generate_uuid()
vip_qos_policy_id = uuidutils.generate_uuid()
lb = self.create_load_balancer(uuidutils.generate_uuid(),
name='lb1',
project_id=project_id,
vip_qos_policy_id=vip_qos_policy_id)
lb_dict = lb.get(self.root_tag)
lb_json = self._build_body({'vip_qos_policy_id': 'BAD'})
self.set_lb_status(lb_dict.get('id'))
self.put(self.LB_PATH.format(lb_id=lb_dict.get('id')),
lb_json, status=400)
def test_update_bad_lb_id(self):
path = self.LB_PATH.format(lb_id='SEAN-CONNERY')
self.put(path, body={}, status=404)
@@ -1726,6 +1875,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
# for this test without interfering with a ton of stuff, and it is
# expected that this would be overwritten anyway, so 'ANY' is fine?
'vip_network_id': mock.ANY,
'vip_qos_policy_id': None,
'flavor': '',
'provider': 'octavia'
}

@@ -133,7 +133,8 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
vip = {'ip_address': '10.0.0.1',
'port_id': uuidutils.generate_uuid(),
'subnet_id': uuidutils.generate_uuid(),
'network_id': uuidutils.generate_uuid()}
'network_id': uuidutils.generate_uuid(),
'qos_policy_id': None}
lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip)
lb_dm_dict = lb_dm.to_dict()
del lb_dm_dict['vip']

@@ -56,6 +56,11 @@ class TestLoadBalancer(object):
self.assertRaises(ValueError, wsme_json.fromjson, self._type,
body)
def test_invalid_qos_policy_id(self):
body = {"vip_qos_policy_id": "invalid_uuid"}
self.assertRaises(exc.InvalidInput, wsme_json.fromjson, self._type,
body)
class TestLoadBalancerPOST(base.BaseTypesTest, TestLoadBalancer):
@@ -70,7 +75,8 @@ class TestLoadBalancerPOST(base.BaseTypesTest, TestLoadBalancer):
def test_vip(self):
body = {"vip_subnet_id": uuidutils.generate_uuid(),
"vip_port_id": uuidutils.generate_uuid()}
"vip_port_id": uuidutils.generate_uuid(),
"vip_qos_policy_id": uuidutils.generate_uuid()}
wsme_json.fromjson(self._type, body)
def test_invalid_ip_address(self):

@@ -347,3 +347,18 @@ class TestValidations(base.TestCase):
self.assertRaises(
exceptions.ValidationException,
validate.network_allowed_by_config, net_id3)
def test_qos_policy_exists(self):
qos_policy_id = uuidutils.generate_uuid()
qos_policy = network_models.QosPolicy(id=qos_policy_id)
with mock.patch(
'octavia.common.utils.get_network_driver') as net_mock:
net_mock.return_value.get_qos_policy.return_value = qos_policy
self.assertEqual(
validate.qos_policy_exists(qos_policy_id),
qos_policy)
net_mock.return_value.get_qos_policy.side_effect = Exception
self.assertRaises(exceptions.InvalidSubresource,
validate.qos_policy_exists,
qos_policy_id)

@@ -132,11 +132,12 @@ class TestLoadBalancerFlows(base.TestCase):
self.assertIn(constants.AMPS_DATA, lb_flow.provides)
self.assertIn(constants.LOADBALANCER, lb_flow.provides)
self.assertIn(constants.UPDATE_DICT, lb_flow.requires)
self.assertIn(constants.LOADBALANCER, lb_flow.requires)
self.assertIn(constants.LOADBALANCER_ID, lb_flow.requires)
self.assertEqual(4, len(lb_flow.provides))
self.assertEqual(2, len(lb_flow.requires))
self.assertEqual(3, len(lb_flow.requires))
def test_get_update_load_balancer_flow(self, mock_get_net_driver):

@@ -1427,6 +1427,34 @@ class TestDatabaseTasks(base.TestCase):
id=LB_ID,
provisioning_status=constants.ERROR)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.update')
@mock.patch('octavia.db.repositories.VipRepository.update')
def test_update_vip_in_db_during_update_loadbalancer(self,
mock_vip_update,
mock_listner_update,
mock_generate_uuid,
mock_LOG,
mock_get_session,
mock_lb_update,
mock_listener_update,
mock_amphora_update,
mock_amphora_delete):
self.loadbalancer_mock.vip.load_balancer_id = LB_ID
update_load_balancer = database_tasks.UpdateLoadbalancerInDB()
update_load_balancer.execute(self.loadbalancer_mock,
{'name': 'test',
'description': 'test2',
'vip': {'qos_policy_id': 'fool'}})
repo.LoadBalancerRepository.update.assert_called_once_with(
'TEST',
LB_ID,
name='test', description='test2')
repo.VipRepository.update.assert_called_once_with('TEST', LB_ID,
qos_policy_id='fool')
@mock.patch('octavia.db.repositories.ListenerRepository.update')
def test_update_listener_in_db(self,
mock_listner_repo_update,

@@ -15,6 +15,7 @@
import mock
from octavia.common import data_models as o_data_models
from octavia.controller.worker.tasks import model_tasks
import octavia.tests.unit.base as base
@@ -41,4 +42,14 @@ class TestObjectUpdateTasks(base.TestCase):
update_attr.execute(self.listener_mock,
{'name': 'TEST2'})
self.listener_mock.update.assert_called_once_with({'name': 'TEST2'})
self.assertEqual('TEST2', getattr(self.listener_mock, 'name'))
@mock.patch('octavia.common.data_models.Vip.update')
def test_update_vip_during_update_loadbalancer(self, mock_vip):
vip_object = o_data_models.Vip()
lb_object = o_data_models.LoadBalancer(vip=vip_object)
update_attr = model_tasks.UpdateAttributes()
update_attr.execute(lb_object, {'vip': {'fool1': 'bar1'},
'description': 'bar2'})
mock_vip.assert_called_once_with({'fool1': 'bar1'})

@@ -23,6 +23,7 @@ from octavia.common import data_models as o_data_models
from octavia.controller.worker.tasks import network_tasks
from octavia.network import base as net_base
from octavia.network import data_models
from octavia.tests.common import constants as t_constants
import octavia.tests.unit.base as base
@@ -32,14 +33,27 @@ PORT_ID = uuidutils.generate_uuid()
SUBNET_ID = uuidutils.generate_uuid()
NETWORK_ID = uuidutils.generate_uuid()
IP_ADDRESS = "172.24.41.1"
VIP = o_data_models.Vip(port_id=PORT_ID, subnet_id=SUBNET_ID,
ip_address=IP_ADDRESS)
VIP = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID,
subnet_id=t_constants.MOCK_SUBNET_ID,
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
VIP2 = o_data_models.Vip(port_id=t_constants.MOCK_PORT_ID2,
subnet_id=t_constants.MOCK_SUBNET_ID2,
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID2)
LB = o_data_models.LoadBalancer(vip=VIP)
LB2 = o_data_models.LoadBalancer(vip=VIP2)
FIRST_IP = {"ip_address": IP_ADDRESS, "subnet_id": SUBNET_ID}
FIXED_IPS = [FIRST_IP]
INTERFACE = data_models.Interface(id=uuidutils.generate_uuid(),
compute_id=COMPUTE_ID, fixed_ips=FIXED_IPS,
port_id=PORT_ID)
AMPS_DATA = [o_data_models.Amphora(id=t_constants.MOCK_AMP_ID1,
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID1,
vrrp_ip=t_constants.MOCK_VRRP_IP1),
o_data_models.Amphora(id=t_constants.MOCK_AMP_ID2,
vrrp_port_id=t_constants.MOCK_VRRP_PORT_ID2,
vrrp_ip=t_constants.MOCK_VRRP_IP2)
]
UPDATE_DICT = {constants.LOADBALANCER_TOPOLOGY: None}
class TestException(Exception):
@@ -255,7 +269,7 @@ class TestNetworkTasks(base.TestCase):
net_task = network_tasks.GetMemberPorts()
net_task.execute(LB, self.amphora_mock)
mock_driver.get_port.assert_called_once_with(PORT_ID)
mock_driver.get_port.assert_called_once_with(t_constants.MOCK_PORT_ID)
mock_driver.get_plugged_networks.assert_called_once_with(COMPUTE_ID)
mock_driver.reset_mock()
@@ -382,6 +396,119 @@
net.revert(["vip"], LB)
mock_driver.unplug_vip.assert_called_once_with(LB, LB.vip)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'get_current_loadbalancer_from_db')
def test_apply_qos_on_creation(self, mock_get_lb_db, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.ApplyQos()
mock_get_lb_db.return_value = LB
# execute
UPDATE_DICT[
constants.LOADBALANCER_TOPOLOGY] = constants.TOPOLOGY_SINGLE
update_dict = UPDATE_DICT
net.execute(LB, [AMPS_DATA[0]], update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
VIP.qos_policy_id, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
standby_topology = constants.TOPOLOGY_ACTIVE_STANDBY
mock_driver.reset_mock()
update_dict[
constants.LOADBALANCER_TOPOLOGY] = standby_topology
net.execute(LB, AMPS_DATA, update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
# revert
mock_driver.reset_mock()
update_dict = UPDATE_DICT
net.revert(None, LB, [AMPS_DATA[0]], update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict[
constants.LOADBALANCER_TOPOLOGY] = standby_topology
net.revert(None, LB, AMPS_DATA, update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
@mock.patch('octavia.controller.worker.task_utils.TaskUtils.'
'get_current_loadbalancer_from_db')
def test_apply_qos_on_update(self, mock_get_lb_db, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver
net = network_tasks.ApplyQos()
null_qos_vip = o_data_models.Vip(qos_policy_id=None)
null_qos_lb = o_data_models.LoadBalancer(
vip=null_qos_vip, topology=constants.TOPOLOGY_SINGLE,
amphorae=[AMPS_DATA[0]])
tmp_vip_object = o_data_models.Vip(
qos_policy_id=t_constants.MOCK_QOS_POLICY_ID1)
tmp_lb = o_data_models.LoadBalancer(
vip=tmp_vip_object, topology=constants.TOPOLOGY_SINGLE,
amphorae=[AMPS_DATA[0]])
# execute
update_dict = {'description': 'fool'}
net.execute(tmp_lb, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
t_constants.MOCK_QOS_POLICY_ID1, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'vip': {'qos_policy_id': None}}
net.execute(null_qos_lb, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
None, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'name': '123'}
net.execute(null_qos_lb, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'description': 'fool'}
tmp_lb.amphorae = AMPS_DATA
tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
net.execute(tmp_lb, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID1, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
# revert
mock_driver.reset_mock()
tmp_lb.amphorae = [AMPS_DATA[0]]
tmp_lb.topology = constants.TOPOLOGY_SINGLE
update_dict = {'description': 'fool'}
mock_get_lb_db.return_value = tmp_lb
net.revert(None, tmp_lb, update_dict=update_dict)
self.assertEqual(0, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'vip': {'qos_policy_id': None}}
ori_lb_db = LB2
ori_lb_db.amphorae = [AMPS_DATA[0]]
mock_get_lb_db.return_value = ori_lb_db
net.revert(None, null_qos_lb, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_once_with(
t_constants.MOCK_QOS_POLICY_ID2, AMPS_DATA[0].vrrp_port_id)
self.assertEqual(1, mock_driver.apply_qos_on_port.call_count)
mock_driver.reset_mock()
update_dict = {'vip': {
'qos_policy_id': t_constants.MOCK_QOS_POLICY_ID2}}
tmp_lb.amphorae = AMPS_DATA
tmp_lb.topology = constants.TOPOLOGY_ACTIVE_STANDBY
ori_lb_db = LB2
ori_lb_db.amphorae = [AMPS_DATA[0]]
mock_get_lb_db.return_value = ori_lb_db
net.revert(None, tmp_lb, update_dict=update_dict)
mock_driver.apply_qos_on_port.assert_called_with(
t_constants.MOCK_QOS_POLICY_ID2, mock.ANY)
self.assertEqual(2, mock_driver.apply_qos_on_port.call_count)
def test_unplug_vip(self, mock_get_net_driver):
mock_driver = mock.MagicMock()
mock_get_net_driver.return_value = mock_driver

@@ -262,3 +262,22 @@ class TestTaskUtils(base.TestCase):
self.task_utils.mark_pool_prov_status_error(self.POOL_ID)
self.assertFalse(mock_pool_repo_update.called)
@mock.patch('octavia.db.api.get_session', return_value=TEST_SESSION)
@mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
def test_get_current_loadbalancer_from_db(self, mock_lb_repo_get,
mock_get_session):
# Happy path
self.task_utils.get_current_loadbalancer_from_db(self.LOADBALANCER_ID)
mock_lb_repo_get.assert_called_once_with(
TEST_SESSION,
id=self.LOADBALANCER_ID)
# Exception path
mock_lb_repo_get.reset_mock()
mock_get_session.side_effect = Exception('fail')
self.task_utils.get_current_loadbalancer_from_db(self.POOL_ID)
self.assertFalse(mock_lb_repo_get.called)

@@ -410,3 +410,39 @@ class TestBaseNeutronNetworkDriver(base.TestCase):
port1.fixed_ips[0].ip_address)
self.assertEqual(t_constants.MOCK_IP_ADDRESS2