Add support for SR-IOV ports in Octavia

Change-Id: I16622add64076370dad85620043f71077bc9acbb
parent ffc9d83197
commit 75c1bdd104
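In summary, this change: adds an `sriov_vip` option to the amphora flavor schema; teaches the load balancer create and failover flows to build the VIP base port as an SR-IOV VF (`vnic_type` of `direct`) and hot-plug it into the amphora; exposes a new `vip_vnic_type` field on load balancer API responses under new API version v2.28; adds a `vnic_type` column to the `vip` table plus matching data-model and network-driver plumbing; maps nova PCI claim and port binding failures to a new `ComputeNoResourcesException`; and adds an admin guide, `doc/source/admin/sr-iov.rst`, covering host aggregate, compute flavor, and Octavia flavor setup.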
@@ -1793,6 +1793,14 @@ vip_subnet_id-optional:
   in: body
   required: false
   type: uuid
+vip_vnic_type:
+  description: |
+    The VIP vNIC type used for the load balancer. One of ``normal`` or
+    ``direct``.
+  in: body
+  required: true
+  type: string
+  min_version: 2.28
 vrrp-id:
   description: |
     The vrrp group's ID for the amphora.
@@ -21,6 +21,7 @@
         "name": "best_load_balancer",
         "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3",
         "availability_zone": "my_az",
-        "tags": ["test_tag"]
+        "tags": ["test_tag"],
+        "vip_vnic_type": "normal"
     }
 }
@@ -177,6 +177,7 @@
         "name": "best_load_balancer",
         "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3",
         "availability_zone": "my_az",
-        "tags": ["test_tag"]
+        "tags": ["test_tag"],
+        "vip_vnic_type": "normal"
     }
 }
@@ -18,6 +18,7 @@
         "name": "best_load_balancer",
         "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3",
         "availability_zone": "my_az",
-        "tags": []
+        "tags": [],
+        "vip_vnic_type": "normal"
     }
 }
@@ -17,6 +17,7 @@
         "operating_status": "ONLINE",
         "name": "disabled_load_balancer",
         "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3",
-        "tags": ["updated_tag"]
+        "tags": ["updated_tag"],
+        "vip_vnic_type": "normal"
     }
 }
@@ -29,7 +29,8 @@
             "name": "best_load_balancer",
             "vip_qos_policy_id": "ec4f78ca-8da8-4e99-8a1a-e3b94595a7a3",
             "availability_zone": "my_az",
-            "tags": []
+            "tags": [],
+            "vip_vnic_type": "normal"
         }
     ]
 }
@@ -67,6 +67,7 @@ Response Parameters
    - vip_port_id: vip_port_id
    - vip_qos_policy_id: vip_qos_policy_id
    - vip_subnet_id: vip_subnet_id
+   - vip_vnic_type: vip_vnic_type
 
 Response Example
 ----------------
@@ -225,6 +226,7 @@ Response Parameters
    - vip_port_id: vip_port_id
    - vip_qos_policy_id: vip_qos_policy_id
    - vip_subnet_id: vip_subnet_id
+   - vip_vnic_type: vip_vnic_type
 
 Response Example
 ----------------
@@ -320,6 +322,7 @@ Response Parameters
    - vip_port_id: vip_port_id
    - vip_qos_policy_id: vip_qos_policy_id
    - vip_subnet_id: vip_subnet_id
+   - vip_vnic_type: vip_vnic_type
 
 Response Example
 ----------------
@@ -407,6 +410,7 @@ Response Parameters
    - vip_port_id: vip_port_id
    - vip_qos_policy_id: vip_qos_policy_id
    - vip_subnet_id: vip_subnet_id
+   - vip_vnic_type: vip_vnic_type
 
 Response Example
 ----------------
@@ -37,6 +37,7 @@ Optional Installation and Configuration Guides
    flavors.rst
    apache-httpd.rst
    failover-circuit-breaker.rst
+   sr-iov.rst
 
 Maintenance and Operations
 --------------------------
doc/source/admin/sr-iov.rst (new file, 89 lines)
@@ -0,0 +1,89 @@
+..
+      Copyright 2023 Red Hat, Inc. All rights reserved.
+
+      Licensed under the Apache License, Version 2.0 (the "License"); you may
+      not use this file except in compliance with the License. You may obtain
+      a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+      WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+      License for the specific language governing permissions and limitations
+      under the License.
+
+===============================
+Using SR-IOV Ports with Octavia
+===============================
+
+Single Root I/O Virtualization (SR-IOV) can significantly reduce the latency
+through an Octavia Amphora based load balancer while maximizing bandwidth and
+request rates. With Octavia Amphora load balancers, you can attach SR-IOV
+Virtual Functions (VF) as the VIP port and/or backend member ports.
+
+Enabling SR-IOV on Your Compute Hosts
+-------------------------------------
+
+To allow Octavia load balancers to use SR-IOV, you must configure nova and
+neutron to make SR-IOV available on at least one compute host. Please follow
+the `Networking Guide <https://docs.openstack.org/neutron/latest/admin/config-sriov.html>`_ to set up your compute hosts for SR-IOV.
+
+Configuring Host Aggregates, Compute and Octavia Flavors
+--------------------------------------------------------
+
+Octavia hot-plugs the network ports into the Amphora as the load balancer is
+being provisioned. This means we need to use host aggregates and compute flavor
+properties to make sure the Amphora are created on SR-IOV enabled compute hosts
+with the correct networks.
+
+Host Aggregates
+~~~~~~~~~~~~~~~
+
+This configuration can be as simple or complex as you need it to be. A simple
+approach would be to add one property for the SR-IOV host aggregate, such as:
+
+.. code-block:: bash
+
+   $ openstack aggregate create sriov_aggregate
+   $ openstack aggregate add host sriov_aggregate sriov-host.example.org
+   $ openstack aggregate set --property sriov-nic=true sriov_aggregate
+
+A more advanced configuration may list out the specific networks that are
+available via the SR-IOV VFs:
+
+.. code-block:: bash
+
+   $ openstack aggregate create sriov_aggregate
+   $ openstack aggregate add host sriov_aggregate sriov-host.example.org
+   $ openstack aggregate set --property public-sriov=true --property members-sriov=true sriov_aggregate
+
+Compute Flavors
+~~~~~~~~~~~~~~~
+
+Next we need to create a compute flavor that includes the required properties
+to match the host aggregate. Here is an example for a basic Octavia Amphora
+compute flavor using the advanced host aggregate discussed in the previous
+section:
+
+.. code-block:: bash
+
+   $ openstack flavor create --id amphora-sriov-flavor --ram 1024 --disk 3 --vcpus 1 --private sriov.amphora --property hw_rng:allowed=True --property public-sriov=true --property members-sriov=true
+
+.. note::
+   This flavor is marked "private" so it must be created inside the Octavia
+   service account project.
+
+Octavia Flavors
+~~~~~~~~~~~~~~~
+
+Now that we have the compute service set up to properly place our Amphora
+instances on hosts with SR-IOV NICs on the right networks, we can create an
+Octavia flavor that will use the compute flavor.
+
+.. code-block:: bash
+
+   $ openstack loadbalancer flavorprofile create --name amphora-sriov-profile --provider amphora --flavor-data '{"compute_flavor": "amphora-sriov-flavor", "sriov_vip": true}'
+   $ openstack loadbalancer flavor create --name SRIOV-public-members --flavorprofile amphora-sriov-profile --description "A load balancer that uses SR-IOV for the 'public' network and 'members' network." --enable
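As a follow-on to the guide above, a hedged sketch of exercising the new flavor from a client using openstacksdk; the proxy methods and the `vip_vnic_type` attribute are assumptions about SDK support rather than anything defined in this change, and the cloud name and subnet ID are placeholders:

```python
# Hypothetical client-side usage of the SR-IOV Octavia flavor created above.
import openstack

conn = openstack.connect(cloud="mycloud")
flavor = conn.load_balancer.find_flavor("SRIOV-public-members")
lb = conn.load_balancer.create_load_balancer(
    name="sriov_lb",
    vip_subnet_id="<public-subnet-id>",  # placeholder
    flavor_id=flavor.id,
)
# With the sriov_vip flavor option set, a v2.28 response should report
# lb.vip_vnic_type == "direct" (attribute name assumed from this change).
```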
@@ -47,6 +47,11 @@ SUPPORTED_FLAVOR_SCHEMA = {
         consts.AMP_IMAGE_TAG: {
             "type": "string",
             "description": "The amphora image tag."
-        }
+        },
+        consts.SRIOV_VIP: {
+            "type": "boolean",
+            "description": "When true, the VIP port will be created using an "
+                           "SR-IOV VF port."
+        },
     }
 }
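The flavor schema above is a jsonschema-style document, so the new option can be exercised standalone; a minimal sketch using the `jsonschema` package, with the fragment trimmed from the hunk and the `additionalProperties` setting assumed for illustration:

```python
# Standalone sketch of validating the new flavor option.
import jsonschema

schema_fragment = {
    "type": "object",
    "additionalProperties": False,  # illustrative, not from this change
    "properties": {
        "sriov_vip": {
            "type": "boolean",
            "description": "When true, the VIP port will be created using "
                           "an SR-IOV VF port.",
        },
    },
}

jsonschema.validate({"sriov_vip": True}, schema_fragment)  # passes
try:
    jsonschema.validate({"sriov_vip": "yes"}, schema_fragment)
except jsonschema.ValidationError:
    print("non-boolean sriov_vip is rejected")
```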
@@ -146,6 +146,9 @@ class RootController(object):
         self._add_a_version(versions, 'v2.26', 'v2', 'SUPPORTED',
                             '2022-08-29T00:00:00Z', host_url)
         # HTTP Strict Transport Security (HSTS)
-        self._add_a_version(versions, 'v2.27', 'v2', 'CURRENT',
+        self._add_a_version(versions, 'v2.27', 'v2', 'SUPPORTED',
                             '2023-05-05T00:00:00Z', host_url)
+        # Add port vnic_type for SR-IOV
+        self._add_a_version(versions, 'v2.28', 'v2', 'CURRENT',
+                            '2023-11-08T00:00:00Z', host_url)
         return {'versions': versions}
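A quick probe for the new version from a client, sketched with `requests`; the endpoint URL is a placeholder and the per-entry `id`/`status` keys are inferred from the `_add_a_version` arguments above:

```python
# Probe the Octavia root endpoint for v2.28 support (illustrative URL).
import requests

resp = requests.get("https://octavia.example.com/")
versions = resp.json()["versions"]
v228 = [v for v in versions if v.get("id") == "v2.28"]
print("v2.28 CURRENT" if v228 and v228[0].get("status") == "CURRENT"
      else "v2.28 not available")
```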
@@ -496,6 +496,13 @@ class LoadBalancersController(base.BaseController):
                 load_balancer.vip_network_id,
                 valid_networks=az_dict.get(constants.VALID_VIP_NETWORKS))
 
+            # Apply the anticipated vNIC type so the create will return the
+            # right vip_vnic_type
+            if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False):
+                vip_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_DIRECT
+            else:
+                vip_dict[constants.VNIC_TYPE] = constants.VNIC_TYPE_NORMAL
+
             db_lb = self.repositories.create_load_balancer_and_vip(
                 lock_session, lb_dict, vip_dict, additional_vip_dicts)
 
@@ -25,13 +25,15 @@ class BaseLoadBalancerType(types.BaseType):
                           'vip_port_id': 'vip.port_id',
                           'vip_network_id': 'vip.network_id',
                           'vip_qos_policy_id': 'vip.qos_policy_id',
+                          'vip_vnic_type': 'vip.vnic_type',
                           'admin_state_up': 'enabled'}
     _child_map = {'vip': {
         'ip_address': 'vip_address',
         'subnet_id': 'vip_subnet_id',
         'port_id': 'vip_port_id',
         'network_id': 'vip_network_id',
-        'qos_policy_id': 'vip_qos_policy_id'}}
+        'qos_policy_id': 'vip_qos_policy_id',
+        'vnic_type': 'vip_vnic_type'}}
 
 
 class AdditionalVipsType(types.BaseType):
@@ -63,6 +65,7 @@ class LoadBalancerResponse(BaseLoadBalancerType):
     vip_qos_policy_id = wtypes.wsattr(wtypes.UuidType())
     tags = wtypes.wsattr(wtypes.ArrayType(wtypes.StringType()))
     availability_zone = wtypes.wsattr(wtypes.StringType())
+    vip_vnic_type = wtypes.wsattr(wtypes.StringType())
 
     @classmethod
     def from_data_model(cls, data_model, children=False):
@@ -74,6 +77,7 @@ class LoadBalancerResponse(BaseLoadBalancerType):
         result.vip_address = data_model.vip.ip_address
         result.vip_network_id = data_model.vip.network_id
         result.vip_qos_policy_id = data_model.vip.qos_policy_id
+        result.vip_vnic_type = data_model.vip.vnic_type
         result.additional_vips = [
             AdditionalVipsType.from_data_model(i)
             for i in data_model.additional_vips]
@@ -318,6 +318,8 @@ AMPS_DATA = 'amps_data'
 ANTI_AFFINITY = 'anti-affinity'
 ATTEMPT_NUMBER = 'attempt_number'
 BASE_PORT = 'base_port'
+BINDING_VNIC_TYPE = 'binding_vnic_type'
+BUILD_AMP_DATA = 'build_amp_data'
 BYTES_IN = 'bytes_in'
 BYTES_OUT = 'bytes_out'
 CACHED_ZONE = 'cached_zone'
@@ -388,6 +390,7 @@ MESSAGE = 'message'
 NAME = 'name'
 NETWORK = 'network'
 NETWORK_ID = 'network_id'
+NEW_AMPHORAE = 'new_amphorae'
 NEW_AMPHORA_ID = 'new_amphora_id'
 NEXTHOP = 'nexthop'
 NICS = 'nics'
@@ -406,6 +409,7 @@ POOL_CHILD_COUNT = 'pool_child_count'
 POOL_ID = 'pool_id'
 POOL_UPDATES = 'pool_updates'
 PORT = 'port'
+PORT_DATA = 'port_data'
 PORT_ID = 'port_id'
 PORTS = 'ports'
 PROJECT_ID = 'project_id'
@@ -451,6 +455,10 @@ VIP_QOS_POLICY_ID = 'vip_qos_policy_id'
 VIP_SG_ID = 'vip_sg_id'
 VIP_SUBNET = 'vip_subnet'
 VIP_SUBNET_ID = 'vip_subnet_id'
+VIP_VNIC_TYPE = 'vip_vnic_type'
+VNIC_TYPE = 'vnic_type'
+VNIC_TYPE_DIRECT = 'direct'
+VNIC_TYPE_NORMAL = 'normal'
 VRRP_ID = 'vrrp_id'
 VRRP_IP = 'vrrp_ip'
 VRRP_GROUP = 'vrrp_group'
@@ -564,6 +572,7 @@ DELETE_MEMBER_INDB = 'octavia-delete-member-indb'
 ADMIN_DOWN_PORT = 'admin-down-port'
 AMPHORA_POST_VIP_PLUG = 'amphora-post-vip-plug'
 AMPHORA_RELOAD_LISTENER = 'amphora-reload-listener'
+AMPHORA_TO_AMPHORAE_VRRP_IP = 'amphora-to-amphorae-vrrp-ip'
 AMPHORA_TO_ERROR_ON_REVERT = 'amphora-to-error-on-revert'
 AMPHORAE_GET_CONNECTIVITY_STATUS = 'amphorae-get-connectivity-status'
 AMPHORAE_POST_NETWORK_PLUG = 'amphorae-post-network-plug'
@@ -575,6 +584,7 @@ DELETE_PORT = 'delete-port'
 DISABLE_AMP_HEALTH_MONITORING = 'disable-amphora-health-monitoring'
 GET_AMPHORA_NETWORK_CONFIGS_BY_ID = 'get-amphora-network-configs-by-id'
 GET_AMPHORAE_FROM_LB = 'get-amphorae-from-lb'
+GET_SUBNET_FROM_VIP = 'get-subnet-from-vip'
 HANDLE_NETWORK_DELTA = 'handle-network-delta'
 MARK_AMPHORA_DELETED = 'mark-amphora-deleted'
 MARK_AMPHORA_PENDING_DELETE = 'mark-amphora-pending-delete'
@@ -900,6 +910,7 @@ VIP_SECURITY_GROUP_PREFIX = 'lb-'
 
 AMP_BASE_PORT_PREFIX = 'octavia-lb-vrrp-'
 OCTAVIA_OWNED = 'octavia_owned'
+OCTAVIA_OWNER = 'Octavia'
 
 # Sadly in the LBaaS v2 API, header insertions are on the listener objects
 # but they should be on the pool. Dealing with it until v3.
@@ -914,6 +925,8 @@ AMPHORA_SUPPORTED_ALPN_PROTOCOLS = [lib_consts.ALPN_PROTOCOL_HTTP_2,
                                     lib_consts.ALPN_PROTOCOL_HTTP_1_1,
                                     lib_consts.ALPN_PROTOCOL_HTTP_1_0]
 
+SRIOV_VIP = 'sriov_vip'
+
 # Amphora interface fields
 IF_TYPE = 'if_type'
 BACKEND = 'backend'
@@ -557,7 +557,8 @@ class Vip(BaseDataModel):
 
     def __init__(self, load_balancer_id=None, ip_address=None,
                  subnet_id=None, network_id=None, port_id=None,
-                 load_balancer=None, qos_policy_id=None, octavia_owned=None):
+                 load_balancer=None, qos_policy_id=None, octavia_owned=None,
+                 vnic_type=None):
         self.load_balancer_id = load_balancer_id
         self.ip_address = ip_address
         self.subnet_id = subnet_id
@@ -566,6 +567,7 @@ class Vip(BaseDataModel):
         self.load_balancer = load_balancer
         self.qos_policy_id = qos_policy_id
         self.octavia_owned = octavia_owned
+        self.vnic_type = vnic_type
 
 
 class AdditionalVip(BaseDataModel):
@@ -254,6 +254,11 @@ class ComputePortInUseException(OctaviaException):
     message = _('Compute driver reports port %(port)s is already in use.')
 
 
+class ComputeNoResourcesException(OctaviaException):
+    message = _('The compute service does not have the resources available to '
+                'fulfill the request')
+
+
 class ComputeUnknownException(OctaviaException):
     message = _('Unknown exception from the compute driver: %(exc)s.')
 
@@ -348,15 +348,33 @@ class VirtualMachineManager(compute_base.ComputeBase):
             if 'Port' in str(e):
                 raise exceptions.NotFound(resource='Port', id=port_id)
             raise exceptions.NotFound(resource=str(e), id=compute_id)
+        except nova_exceptions.BadRequest as e:
+            if 'Failed to claim PCI device' in str(e):
+                message = ('Nova failed to claim PCI devices during '
+                           f'interface attach for port {port_id} on '
+                           f'instance {compute_id}')
+                LOG.error(message)
+                raise exceptions.ComputeNoResourcesException(message,
+                                                             exc=str(e))
+            raise
+        except nova_exceptions.ClientException as e:
+            if 'PortBindingFailed' in str(e):
+                message = ('Nova failed to bind the port during '
+                           f'interface attach for port {port_id} on '
+                           f'instance {compute_id}')
+                LOG.error(message)
+                raise exceptions.ComputeNoResourcesException(message,
+                                                             exc=str(e))
+            raise
         except Exception as e:
             LOG.error('Error attaching network %(network_id)s with ip '
-                      '%(ip_address)s and port %(port)s to amphora '
+                      '%(ip_address)s and port %(port_id)s to amphora '
                       '(compute_id: %(compute_id)s) ',
                       {
-                          'compute_id': compute_id,
-                          'network_id': network_id,
-                          'ip_address': ip_address,
-                          'port': port_id
+                          constants.COMPUTE_ID: compute_id,
+                          constants.NETWORK_ID: network_id,
+                          constants.IP_ADDRESS: ip_address,
+                          constants.PORT_ID: port_id
                       })
             raise exceptions.ComputeUnknownException(exc=str(e))
         return interface
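The two new handlers translate PCI claim and port binding failures into the new `ComputeNoResourcesException` and re-raise everything else unchanged; a toy restatement of that matching contract with stand-in exception classes (not the real novaclient types):

```python
# Stand-in illustration of the translation rules added above.
class BadRequest(Exception): pass        # stands in for nova_exceptions.BadRequest
class ClientException(Exception): pass   # stands in for nova_exceptions.ClientException
class ComputeNoResourcesException(Exception): pass

def translate(exc):
    if isinstance(exc, BadRequest) and 'Failed to claim PCI device' in str(exc):
        raise ComputeNoResourcesException(str(exc))
    if isinstance(exc, ClientException) and 'PortBindingFailed' in str(exc):
        raise ComputeNoResourcesException(str(exc))
    raise exc  # any other message passes through untouched
```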
@@ -375,7 +375,7 @@ class ControllerWorker(object):
         }
         self.run_flow(
             flow_utils.get_create_load_balancer_flow,
-            topology, listeners=listeners_dicts,
+            topology, listeners=listeners_dicts, flavor_dict=flavor,
             store=store)
 
     def delete_load_balancer(self, load_balancer, cascade=False):
@@ -1035,7 +1035,7 @@ class ControllerWorker(object):
 
         self.run_flow(
             flow_utils.get_failover_amphora_flow,
-            amphora.to_dict(), lb_amp_count,
+            amphora.to_dict(), lb_amp_count, flavor_dict=flavor_dict,
             store=stored_params)
 
         LOG.info("Successfully completed the failover for an amphora: %s",
@@ -371,7 +371,8 @@ class AmphoraFlows(object):
 
     def get_amphora_for_lb_failover_subflow(
             self, prefix, role=constants.ROLE_STANDALONE,
-            failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False):
+            failed_amp_vrrp_port_id=None, is_vrrp_ipv6=False,
+            flavor_dict=None):
         """Creates a new amphora that will be used in a failover flow.
 
         :requires: loadbalancer_id, flavor, vip, vip_sg_id, loadbalancer
@@ -392,13 +393,24 @@ class AmphoraFlows(object):
             prefix=prefix + '-' + constants.FAILOVER_LOADBALANCER_FLOW,
             role=role))
 
-        # Create the VIP base (aka VRRP) port for the amphora.
-        amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
-            name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
-            requires=(constants.VIP, constants.VIP_SG_ID,
-                      constants.AMPHORA_ID,
-                      constants.ADDITIONAL_VIPS),
-            provides=constants.BASE_PORT))
+        if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False):
+            amp_for_failover_flow.add(network_tasks.GetSubnetFromVIP(
+                name=prefix + '-' + constants.GET_SUBNET_FROM_VIP,
+                requires=constants.LOADBALANCER,
+                provides=constants.SUBNET))
+            amp_for_failover_flow.add(network_tasks.CreateSRIOVBasePort(
+                name=prefix + '-' + constants.PLUG_VIP_AMPHORA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.SUBNET),
+                provides=constants.BASE_PORT))
+        else:
+            # Create the VIP base (aka VRRP) port for the amphora.
+            amp_for_failover_flow.add(network_tasks.CreateVIPBasePort(
+                name=prefix + '-' + constants.CREATE_VIP_BASE_PORT,
+                requires=(constants.VIP, constants.VIP_SG_ID,
+                          constants.AMPHORA_ID,
+                          constants.ADDITIONAL_VIPS),
+                provides=constants.BASE_PORT))
 
         # Attach the VIP base (aka VRRP) port to the amphora.
         amp_for_failover_flow.add(compute_tasks.AttachPort(
@@ -449,7 +461,8 @@ class AmphoraFlows(object):
 
         return amp_for_failover_flow
 
-    def get_failover_amphora_flow(self, failed_amphora, lb_amp_count):
+    def get_failover_amphora_flow(self, failed_amphora, lb_amp_count,
+                                  flavor_dict=None):
         """Get a Taskflow flow to failover an amphora.
 
         1. Build a replacement amphora.
@@ -459,6 +472,7 @@ class AmphoraFlows(object):
 
         :param failed_amphora: The amphora dict to failover.
         :param lb_amp_count: The number of amphora on this load balancer.
+        :param flavor_dict: The load balancer flavor dictionary.
         :returns: The flow that will provide the failover.
         """
         failover_amp_flow = linear_flow.Flow(
@@ -519,7 +533,7 @@ class AmphoraFlows(object):
                 role=failed_amphora[constants.ROLE],
                 failed_amp_vrrp_port_id=failed_amphora.get(
                     constants.VRRP_PORT_ID),
-                is_vrrp_ipv6=is_vrrp_ipv6))
+                is_vrrp_ipv6=is_vrrp_ipv6, flavor_dict=flavor_dict))
 
             failover_amp_flow.add(
                 self.get_delete_amphora_flow(
@@ -32,9 +32,9 @@ M_FLOWS = member_flows.MemberFlows()
 P_FLOWS = pool_flows.PoolFlows()
 
 
-def get_create_load_balancer_flow(topology, listeners=None):
-    return LB_FLOWS.get_create_load_balancer_flow(topology,
-                                                  listeners=listeners)
+def get_create_load_balancer_flow(topology, listeners=None, flavor_dict=None):
+    return LB_FLOWS.get_create_load_balancer_flow(
+        topology, listeners=listeners, flavor_dict=flavor_dict)
 
 
 def get_delete_load_balancer_flow(lb):
@@ -90,8 +90,9 @@ def get_failover_LB_flow(amps, lb):
     return LB_FLOWS.get_failover_LB_flow(amps, lb)
 
 
-def get_failover_amphora_flow(amphora_dict, lb_amp_count):
-    return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count)
+def get_failover_amphora_flow(amphora_dict, lb_amp_count, flavor_dict=None):
+    return AMP_FLOWS.get_failover_amphora_flow(amphora_dict, lb_amp_count,
+                                               flavor_dict=flavor_dict)
 
 
 def cert_rotate_amphora_flow():
@@ -13,7 +13,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 #
-
 from oslo_config import cfg
 from oslo_log import log as logging
 from taskflow.patterns import linear_flow
@@ -47,7 +46,8 @@ class LoadBalancerFlows(object):
         self.member_flows = member_flows.MemberFlows()
         self.lb_repo = repo.LoadBalancerRepository()
 
-    def get_create_load_balancer_flow(self, topology, listeners=None):
+    def get_create_load_balancer_flow(self, topology, listeners=None,
+                                      flavor_dict=None):
         """Creates a conditional graph flow that allocates a loadbalancer.
 
         :raises InvalidTopology: Invalid topology specified
@@ -59,7 +59,7 @@ class LoadBalancerFlows(object):
         lb_create_flow.add(lifecycle_tasks.LoadBalancerIDToErrorOnRevertTask(
             requires=constants.LOADBALANCER_ID))
 
-        # allocate VIP
+        # allocate VIP - Saves the VIP IP(s) in neutron
         lb_create_flow.add(database_tasks.ReloadLoadBalancer(
             name=constants.RELOAD_LB_BEFOR_ALLOCATE_VIP,
             requires=constants.LOADBALANCER_ID,
@@ -81,9 +81,11 @@ class LoadBalancerFlows(object):
             provides=constants.SUBNET))
 
         if topology == constants.TOPOLOGY_ACTIVE_STANDBY:
-            lb_create_flow.add(*self._create_active_standby_topology())
+            lb_create_flow.add(*self._create_active_standby_topology(
+                flavor_dict=flavor_dict))
         elif topology == constants.TOPOLOGY_SINGLE:
-            lb_create_flow.add(*self._create_single_topology())
+            lb_create_flow.add(*self._create_single_topology(
+                flavor_dict=flavor_dict))
         else:
             LOG.error("Unknown topology: %s. Unable to build load balancer.",
                       topology)
@@ -112,7 +114,7 @@ class LoadBalancerFlows(object):
 
         return lb_create_flow
 
-    def _create_single_topology(self):
+    def _create_single_topology(self, flavor_dict=None):
         sf_name = (constants.ROLE_STANDALONE + '-' +
                    constants.AMP_PLUG_NET_SUBFLOW)
         amp_for_lb_net_flow = linear_flow.Flow(sf_name)
@@ -120,11 +122,13 @@ class LoadBalancerFlows(object):
             prefix=constants.ROLE_STANDALONE,
             role=constants.ROLE_STANDALONE)
         amp_for_lb_net_flow.add(amp_for_lb_flow)
-        amp_for_lb_net_flow.add(*self._get_amp_net_subflow(sf_name))
+        amp_for_lb_net_flow.add(*self._get_amp_net_subflow(
+            sf_name, flavor_dict=flavor_dict))
         return amp_for_lb_net_flow
 
     def _create_active_standby_topology(
-            self, lf_name=constants.CREATE_LOADBALANCER_FLOW):
+            self, lf_name=constants.CREATE_LOADBALANCER_FLOW,
+            flavor_dict=None):
         # When we boot up amphora for an active/standby topology,
         # we should leverage the Nova anti-affinity capabilities
         # to place the amphora on different hosts, also we need to check
@@ -156,26 +160,45 @@ class LoadBalancerFlows(object):
         master_amp_sf = linear_flow.Flow(master_sf_name)
         master_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
             prefix=constants.ROLE_MASTER, role=constants.ROLE_MASTER))
-        master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name))
+        master_amp_sf.add(*self._get_amp_net_subflow(master_sf_name,
+                                                     flavor_dict=flavor_dict))
 
         backup_sf_name = (constants.ROLE_BACKUP + '-' +
                           constants.AMP_PLUG_NET_SUBFLOW)
         backup_amp_sf = linear_flow.Flow(backup_sf_name)
         backup_amp_sf.add(self.amp_flows.get_amphora_for_lb_subflow(
             prefix=constants.ROLE_BACKUP, role=constants.ROLE_BACKUP))
-        backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name))
+        backup_amp_sf.add(*self._get_amp_net_subflow(backup_sf_name,
+                                                     flavor_dict=flavor_dict))
 
         amps_flow.add(master_amp_sf, backup_amp_sf)
 
         return flows + [amps_flow]
 
-    def _get_amp_net_subflow(self, sf_name):
+    def _get_amp_net_subflow(self, sf_name, flavor_dict=None):
         flows = []
-        flows.append(network_tasks.PlugVIPAmphora(
-            name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
-            requires=(constants.LOADBALANCER, constants.AMPHORA,
-                      constants.SUBNET),
-            provides=constants.AMP_DATA))
+        if flavor_dict and flavor_dict.get(constants.SRIOV_VIP, False):
+            flows.append(network_tasks.CreateSRIOVBasePort(
+                name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.SUBNET),
+                provides=constants.PORT_DATA))
+            flows.append(compute_tasks.AttachPort(
+                name=sf_name + '-' + constants.ATTACH_PORT,
+                requires=(constants.AMPHORA),
+                rebind={constants.PORT: constants.PORT_DATA}))
+            flows.append(network_tasks.BuildAMPData(
+                name=sf_name + '-' + constants.BUILD_AMP_DATA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.PORT_DATA),
+                provides=constants.AMP_DATA))
+            # TODO(johnsom) nftables need to be handled here in the SG patch
+        else:
+            flows.append(network_tasks.PlugVIPAmphora(
+                name=sf_name + '-' + constants.PLUG_VIP_AMPHORA,
+                requires=(constants.LOADBALANCER, constants.AMPHORA,
+                          constants.SUBNET),
+                provides=constants.AMP_DATA))
 
         flows.append(network_tasks.ApplyQosAmphora(
             name=sf_name + '-' + constants.APPLY_QOS_AMP,
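One detail worth calling out in the SR-IOV branch above is the `rebind` on `AttachPort`: taskflow feeds the flow-level `port_data` result into the task's `port` parameter. A rough sketch of the mechanism, with a hypothetical task body:

```python
# Rough illustration of taskflow's rebind, as used by AttachPort above.
from taskflow import task

class AttachPortSketch(task.Task):
    def execute(self, amphora, port):
        # a real task would hot-plug ``port`` into ``amphora`` via the
        # compute driver; this body is illustrative only
        print(f"attaching {port['id']} to {amphora['id']}")

# rebind maps the flow-level name 'port_data' onto the 'port' argument:
# AttachPortSketch(requires=('amphora',), rebind={'port': 'port_data'})
```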
@@ -466,12 +489,13 @@ class LoadBalancerFlows(object):
                     role=new_amp_role,
                     failed_amp_vrrp_port_id=failed_amp.get(
                         constants.VRRP_PORT_ID),
-                    is_vrrp_ipv6=failed_vrrp_is_ipv6))
+                    is_vrrp_ipv6=failed_vrrp_is_ipv6,
+                    flavor_dict=lb[constants.FLAVOR]))
         else:
             failover_LB_flow.add(
                 self.amp_flows.get_amphora_for_lb_failover_subflow(
                     prefix=constants.FAILOVER_LOADBALANCER_FLOW,
-                    role=new_amp_role))
+                    role=new_amp_role, flavor_dict=lb[constants.FLAVOR]))
 
         if lb_topology == constants.TOPOLOGY_ACTIVE_STANDBY:
             failover_LB_flow.add(database_tasks.MarkAmphoraBackupInDB(
@@ -593,7 +617,8 @@ class LoadBalancerFlows(object):
                 self.amp_flows.get_amphora_for_lb_failover_subflow(
                     prefix=(new_amp_role + '-' +
                             constants.FAILOVER_LOADBALANCER_FLOW),
-                    role=new_amp_role))
+                    role=new_amp_role,
+                    flavor_dict=lb[constants.FLAVOR]))
 
         failover_LB_flow.add(database_tasks.MarkAmphoraMasterInDB(
             name=constants.MARK_AMP_MASTER_INDB,
@@ -1038,3 +1038,60 @@ class GetVIPSecurityGroupID(BaseNetworkTask):
             else:
                 ctxt.reraise = False
         return None
+
+
+class CreateSRIOVBasePort(BaseNetworkTask):
+    """Task to create a SRIOV base port for an amphora."""
+
+    @tenacity.retry(retry=tenacity.retry_if_exception_type(),
+                    stop=tenacity.stop_after_attempt(
+                        CONF.networking.max_retries),
+                    wait=tenacity.wait_exponential(
+                        multiplier=CONF.networking.retry_backoff,
+                        min=CONF.networking.retry_interval,
+                        max=CONF.networking.retry_max), reraise=True)
+    def execute(self, loadbalancer, amphora, subnet):
+        session = db_apis.get_session()
+        with session.begin():
+            db_lb = self.loadbalancer_repo.get(
+                session, id=loadbalancer[constants.LOADBALANCER_ID])
+        port_name = constants.AMP_BASE_PORT_PREFIX + amphora[constants.ID]
+        fixed_ips = [{constants.SUBNET_ID: subnet[constants.ID]}]
+        addl_vips = [obj.ip_address for obj in db_lb.additional_vips]
+        addl_vips.append(loadbalancer[constants.VIP_ADDRESS])
+        port = self.network_driver.create_port(
+            loadbalancer[constants.VIP_NETWORK_ID],
+            name=port_name, fixed_ips=fixed_ips,
+            secondary_ips=addl_vips,
+            qos_policy_id=loadbalancer[constants.VIP_QOS_POLICY_ID],
+            vnic_type=constants.VNIC_TYPE_DIRECT)
+        LOG.info('Created port %s with ID %s for amphora %s',
+                 port_name, port.id, amphora[constants.ID])
+        return port.to_dict(recurse=True)
+
+    def revert(self, result, loadbalancer, amphora, subnet, *args, **kwargs):
+        if isinstance(result, failure.Failure):
+            return
+        try:
+            port_name = constants.AMP_BASE_PORT_PREFIX + amphora['id']
+            self.network_driver.delete_port(result[constants.ID])
+            LOG.info('Deleted port %s with ID %s for amphora %s due to a '
+                     'revert.', port_name, result[constants.ID], amphora['id'])
+        except Exception as e:
+            LOG.error('Failed to delete port %s. Resources may still be in '
+                      'use for a port intended for amphora %s due to error '
+                      '%s. Search for a port named %s',
+                      result, amphora['id'], str(e), port_name)
+
+
+class BuildAMPData(BaseNetworkTask):
+    """Glue task to store the AMP_DATA dict from network port information."""
+
+    def execute(self, loadbalancer, amphora, port_data):
+        amphora[constants.HA_IP] = loadbalancer[constants.VIP_ADDRESS]
+        amphora[constants.HA_PORT_ID] = loadbalancer[constants.VIP_PORT_ID]
+        amphora[constants.VRRP_ID] = 1
+        amphora[constants.VRRP_PORT_ID] = port_data[constants.ID]
+        amphora[constants.VRRP_IP] = port_data[
+            constants.FIXED_IPS][0][constants.IP_ADDRESS]
+        return amphora
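For reference, the call that `CreateSRIOVBasePort.execute()` assembles for the network driver would look roughly like this for a load balancer with one additional VIP; every value below is illustrative:

```python
# Illustrative kwargs assembled by CreateSRIOVBasePort.execute() above.
create_port_kwargs = dict(
    # first positional arg: loadbalancer[constants.VIP_NETWORK_ID]
    name="octavia-lb-vrrp-<amphora-id>",             # AMP_BASE_PORT_PREFIX + id
    fixed_ips=[{"subnet_id": "<vip-subnet-id>"}],
    secondary_ips=["203.0.113.99", "203.0.113.10"],  # additional VIPs + the VIP
    qos_policy_id=None,
    vnic_type="direct",                              # constants.VNIC_TYPE_DIRECT
)
```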
@@ -0,0 +1,36 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""Add vnic_type for VIP
+
+Revision ID: db2a73e82626
+Revises: 632152d2d32e
+Create Date: 2023-11-09 21:57:05.302435
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+from octavia.common import constants
+
+# revision identifiers, used by Alembic.
+revision = 'db2a73e82626'
+down_revision = '632152d2d32e'
+
+
+def upgrade():
+    op.add_column(
+        u'vip',
+        sa.Column(u'vnic_type', sa.String(64), nullable=False,
+                  server_default=constants.VNIC_TYPE_NORMAL)
+    )
@@ -506,6 +506,7 @@ class Vip(base_models.BASE):
     network_id = sa.Column(sa.String(36), nullable=True)
     qos_policy_id = sa.Column(sa.String(36), nullable=True)
     octavia_owned = sa.Column(sa.Boolean(), nullable=True)
+    vnic_type = sa.Column(sa.String(64), nullable=True)
 
 
 class AdditionalVip(base_models.BASE):
@@ -14,6 +14,7 @@
 
 import abc
 
+from octavia.common import constants
 from octavia.common import exceptions
 
 
@@ -108,7 +109,8 @@ class AbstractNetworkDriver(object, metaclass=abc.ABCMeta):
     @abc.abstractmethod
     def create_port(self, network_id, name=None, fixed_ips=(),
                     secondary_ips=(), security_group_ids=(),
-                    admin_state_up=True, qos_policy_id=None):
+                    admin_state_up=True, qos_policy_id=None,
+                    vnic_type=constants.VNIC_TYPE_NORMAL):
         """Creates a network port.
 
         fixed_ips = [{'subnet_id': <id>, ('ip_address': <IP>')},]
@@ -12,6 +12,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from octavia.common import constants
 from octavia.common import data_models
 
 
@@ -81,7 +82,8 @@ class Port(data_models.BaseDataModel):
     def __init__(self, id=None, name=None, device_id=None, device_owner=None,
                  mac_address=None, network_id=None, status=None,
                  project_id=None, admin_state_up=None, fixed_ips=None,
-                 network=None, qos_policy_id=None, security_group_ids=None):
+                 network=None, qos_policy_id=None, security_group_ids=None,
+                 vnic_type=constants.VNIC_TYPE_NORMAL):
         self.id = id
         self.name = name
         self.device_id = device_id
@@ -95,6 +97,7 @@ class Port(data_models.BaseDataModel):
         self.network = network
         self.qos_policy_id = qos_policy_id
         self.security_group_ids = security_group_ids or []
+        self.vnic_type = vnic_type
 
     def get_subnet_id(self, fixed_ip_address):
         for fixed_ip in self.fixed_ips:
@@ -34,7 +34,6 @@ from octavia.network.drivers.neutron import utils
 LOG = logging.getLogger(__name__)
 AAP_EXT_ALIAS = 'allowed-address-pairs'
 PROJECT_ID_ALIAS = 'project-id'
-OCTAVIA_OWNER = 'Octavia'
 
 CONF = cfg.CONF
 
@@ -89,7 +88,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
             constants.NETWORK_ID: subnet.network_id,
             constants.FIXED_IPS: [{'subnet_id': subnet.id}],
             constants.ADMIN_STATE_UP: True,
-            constants.DEVICE_OWNER: OCTAVIA_OWNER,
+            constants.DEVICE_OWNER: constants.OCTAVIA_OWNER,
         }
         new_port = self.network_proxy.create_port(**port)
         new_port = utils.convert_port_to_model(new_port)
@@ -385,7 +384,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
 
         self._delete_security_group(vip, port)
 
-        if port and port.device_owner == OCTAVIA_OWNER:
+        if port and port.device_owner == constants.OCTAVIA_OWNER:
             try:
                 self.network_proxy.delete_port(vip.port_id)
             except os_exceptions.ResourceNotFound:
@@ -468,6 +467,16 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         return list_of_dicts
 
     def allocate_vip(self, load_balancer):
+        """Allocates a virtual ip.
+
+        Reserves the IP for later use as the frontend connection of a load
+        balancer.
+
+        :param load_balancer: octavia.common.data_models.LoadBalancer instance
+        :return: octavia.common.data_models.Vip,
+                 list(octavia.common.data_models.AdditionalVip)
+        :raises: AllocateVIPException, PortNotFound, SubnetNotFound
+        """
         if load_balancer.vip.port_id:
             try:
                 port = self.get_port(load_balancer.vip.port_id)
@@ -512,7 +521,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
 
         fixed_ip = {}
         if load_balancer.vip.subnet_id:
-            fixed_ip['subnet_id'] = load_balancer.vip.subnet_id
+            fixed_ip[constants.SUBNET_ID] = load_balancer.vip.subnet_id
         if load_balancer.vip.ip_address:
             fixed_ip[constants.IP_ADDRESS] = load_balancer.vip.ip_address
 
@@ -544,7 +553,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                 constants.NETWORK_ID: load_balancer.vip.network_id,
                 constants.ADMIN_STATE_UP: False,
                 'device_id': 'lb-{0}'.format(load_balancer.id),
-                constants.DEVICE_OWNER: OCTAVIA_OWNER,
+                constants.DEVICE_OWNER: constants.OCTAVIA_OWNER,
                 project_id_key: load_balancer.project_id}
 
             if fixed_ips:
@@ -817,7 +826,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
 
     def create_port(self, network_id, name=None, fixed_ips=(),
                     secondary_ips=(), security_group_ids=(),
-                    admin_state_up=True, qos_policy_id=None):
+                    admin_state_up=True, qos_policy_id=None,
+                    vnic_type=constants.VNIC_TYPE_NORMAL):
         """Creates a network port.
 
         fixed_ips = [{'subnet_id': <id>, ('ip_addrss': <IP>')},]
@@ -829,6 +839,7 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
         :param secondary_ips: A list of secondary IPs to add to the port.
         :param security_group_ids: A list of security group IDs for the port.
         :param qos_policy_id: The QoS policy ID to apply to the port.
+        :param vnic_type: The vNIC type this port should attach to.
         :returns port: A port data model object.
         """
         try:
@@ -837,7 +848,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
                 aap_list.append({constants.IP_ADDRESS: ip})
             port = {constants.NETWORK_ID: network_id,
                     constants.ADMIN_STATE_UP: admin_state_up,
-                    constants.DEVICE_OWNER: OCTAVIA_OWNER}
+                    constants.DEVICE_OWNER: constants.OCTAVIA_OWNER,
+                    constants.BINDING_VNIC_TYPE: vnic_type}
             if aap_list:
                 port[constants.ALLOWED_ADDRESS_PAIRS] = aap_list
             if fixed_ips:
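With `vnic_type` threaded through, the body handed to the neutron proxy gains a `binding_vnic_type` key; roughly, for an SR-IOV VIP (values illustrative):

```python
# Illustrative port body produced by create_port() above for an SR-IOV VIP.
port_body = {
    "network_id": "<vip-network-id>",
    "admin_state_up": True,
    "device_owner": "Octavia",       # constants.OCTAVIA_OWNER
    "binding_vnic_type": "direct",   # constants.BINDING_VNIC_TYPE
    "allowed_address_pairs": [{"ip_address": "203.0.113.10"}],
    "fixed_ips": [{"subnet_id": "<vip-subnet-id>"}],
}
```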
@@ -15,6 +15,7 @@
 from oslo_log import log as logging
 from oslo_utils import uuidutils
 
+from octavia.common import constants
 from octavia.common import data_models
 from octavia.network import base as driver_base
 from octavia.network import data_models as network_models
@@ -381,7 +382,8 @@ class NoopManager(object):
 
     def create_port(self, network_id, name=None, fixed_ips=(),
                     secondary_ips=(), security_group_ids=(),
-                    admin_state_up=True, qos_policy_id=None):
+                    admin_state_up=True, qos_policy_id=None,
+                    vnic_type=constants.VNIC_TYPE_NORMAL):
         LOG.debug("Network %s no-op, create_port network_id %s",
                   self.__class__.__name__, network_id)
         if not name:
@@ -407,13 +409,14 @@ class NoopManager(object):
 
         self.networkconfigconfig[(network_id, 'create_port')] = (
             network_id, name, fixed_ip_obj_list, secondary_ips,
-            security_group_ids, admin_state_up, qos_policy_id)
+            security_group_ids, admin_state_up, qos_policy_id, vnic_type)
         return network_models.Port(
             id=port_id, name=name, device_id='no-op-device-id',
             device_owner='Octavia', mac_address='00:00:5E:00:53:05',
             network_id=network_id, status='UP', project_id=project_id,
             admin_state_up=admin_state_up, fixed_ips=fixed_ip_obj_list,
-            qos_policy_id=qos_policy_id, security_group_ids=security_group_ids)
+            qos_policy_id=qos_policy_id, security_group_ids=security_group_ids,
+            vnic_type=vnic_type)
 
     def plug_fixed_ip(self, port_id, subnet_id, ip_address=None):
         LOG.debug("Network %s no-op, plug_fixed_ip port_id %s, subnet_id "
@@ -525,10 +528,11 @@ class NoopNetworkDriver(driver_base.AbstractNetworkDriver):
 
     def create_port(self, network_id, name=None, fixed_ips=(),
                     secondary_ips=(), security_group_ids=(),
-                    admin_state_up=True, qos_policy_id=None):
+                    admin_state_up=True, qos_policy_id=None,
+                    vnic_type=constants.VNIC_TYPE_NORMAL):
         return self.driver.create_port(
             network_id, name, fixed_ips, secondary_ips, security_group_ids,
-            admin_state_up, qos_policy_id)
+            admin_state_up, qos_policy_id, vnic_type)
 
     def plug_fixed_ip(self, port_id, subnet_id, ip_address=None):
         return self.driver.plug_fixed_ip(port_id, subnet_id, ip_address)
@@ -2829,6 +2829,7 @@ class TestLoadBalancerGraph(base.BaseAPITest):
             'flavor_id': None,
             'provider': 'noop_driver',
             'tags': [],
+            'vip_vnic_type': constants.VNIC_TYPE_NORMAL,
         }
         expected_lb.update(create_lb)
         expected_lb['listeners'] = expected_listeners
@@ -3194,6 +3195,22 @@ class TestLoadBalancerGraph(base.BaseAPITest):
         self.assertIn('All VIP subnets must belong to the same network.',
                       error_text)
 
+    @mock.patch('octavia.api.v2.controllers.load_balancer.'
+                'LoadBalancersController._apply_flavor_to_lb_dict',
+                return_value={constants.SRIOV_VIP: True})
+    def test_with_vip_vnic_type_direct(self, mock_flavor_dict):
+        create_lb, expected_lb = self._get_lb_bodies(
+            [], [])
+        expected_lb[constants.VIP_VNIC_TYPE] = constants.VNIC_TYPE_DIRECT
+
+        body = self._build_body(create_lb)
+
+        response = self.post(self.LBS_PATH, body)
+        self._assert_graphs_equal(expected_lb, response.json['loadbalancer'])
+
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
     def test_with_one_listener(self):
         create_listener, expected_listener = self._get_listener_bodies()
         create_lb, expected_lb = self._get_lb_bodies([create_listener],
@@ -622,7 +622,8 @@ class VipModelTest(base.OctaviaDBTestBase, ModelTestMixin):
         self.assertEqual(f"Vip(ip_address=None, "
                          f"load_balancer_id={obj.load_balancer_id!r}, "
                          f"network_id=None, octavia_owned=None, port_id=None, "
-                         f"qos_policy_id=None, subnet_id=None)", str(obj))
+                         f"qos_policy_id=None, subnet_id=None, "
+                         f"vnic_type=None)", str(obj))

     def test_update(self):
         vip = self.create_vip(self.session, self.load_balancer.id)
@@ -162,7 +162,8 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
                'port_id': uuidutils.generate_uuid(),
                'subnet_id': uuidutils.generate_uuid(),
                'network_id': uuidutils.generate_uuid(),
-               'qos_policy_id': None, 'octavia_owned': True}
+               'qos_policy_id': None, 'octavia_owned': True,
+               'vnic_type': None}
        additional_vips = [{'subnet_id': uuidutils.generate_uuid(),
                            'ip_address': '192.0.2.2'}]
        lb_dm = self.repos.create_load_balancer_and_vip(self.session, lb, vip,
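The repository test now persists a vnic_type key on the VIP, which implies
a new nullable column on the vip table. A hedged sketch of the kind of
migration that implies (revision identifiers and the column width are
assumptions; the actual migration file is not part of this excerpt):

    # Hypothetical alembic migration for the implied vip.vnic_type column.
    from alembic import op
    import sqlalchemy as sa

    # Placeholder revision identifiers, not the real ones.
    revision = '0000000000aa'
    down_revision = '00000000009f'


    def upgrade():
        op.add_column(
            'vip',
            sa.Column('vnic_type', sa.String(64), nullable=True))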
@@ -457,6 +457,28 @@ class TestNovaClient(base.TestCase):
                           self.manager.attach_network_or_port,
                           self.compute_id, self.network_id)

+    def test_attach_network_or_port_fail_claim_pci_exception(self):
+        self.manager.manager.interface_attach.side_effect = [
+            nova_exceptions.BadRequest('Failed to claim PCI device'),
+            nova_exceptions.BadRequest('NotAClaimFailure')]
+        self.assertRaises(exceptions.ComputeNoResourcesException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+        self.assertRaises(nova_exceptions.BadRequest,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
+    def test_attach_network_or_port_port_bind_fail_exception(self):
+        self.manager.manager.interface_attach.side_effect = [
+            nova_exceptions.ClientException('PortBindingFailed'),
+            nova_exceptions.ClientException('NotABindFailure')]
+        self.assertRaises(exceptions.ComputeNoResourcesException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+        self.assertRaises(nova_exceptions.ClientException,
+                          self.manager.attach_network_or_port,
+                          self.compute_id, self.network_id)
+
     def test_attach_network_or_port_unknown_exception(self):
         self.manager.manager.interface_attach.side_effect = [Exception('boom')]
         self.assertRaises(exceptions.ComputeUnknownException,
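These two tests pin down message-based classification of Nova attach
failures: a BadRequest mentioning 'Failed to claim PCI device' (the host
is out of SR-IOV VFs) and a ClientException mentioning 'PortBindingFailed'
both surface as ComputeNoResourcesException, while any other message is
re-raised unchanged. A hedged sketch of that classification; the helper
name is an assumption, not the shipped code:

    # Sketch of the exception classification the tests above expect.
    from novaclient import exceptions as nova_exceptions

    from octavia.common import exceptions


    def _classify_attach_error(err):
        # Hypothetical helper: map resource-exhaustion style failures to
        # ComputeNoResourcesException, re-raise everything else.
        if isinstance(err, nova_exceptions.BadRequest):
            if 'Failed to claim PCI device' in str(err):
                raise exceptions.ComputeNoResourcesException() from err
        elif isinstance(err, nova_exceptions.ClientException):
            if 'PortBindingFailed' in str(err):
                raise exceptions.ComputeNoResourcesException() from err
        raise err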
@@ -96,6 +96,32 @@ class TestLoadBalancerFlows(base.TestCase):
                           self.LBFlow.get_create_load_balancer_flow,
                           'BOGUS')

+    @mock.patch('octavia.common.rpc.NOTIFIER',
+                new_callable=MockNOTIFIER)
+    def test_get_create_load_balancer_flow_SRIOV(self, mock_get_net_driver,
+                                                 mock_notifier):
+        amp_flow = self.LBFlow.get_create_load_balancer_flow(
+            constants.TOPOLOGY_SINGLE, flavor_dict={constants.SRIOV_VIP: True})
+        self.assertIsInstance(amp_flow, flow.Flow)
+        self.assertIn(constants.AVAILABILITY_ZONE, amp_flow.requires)
+        self.assertIn(constants.BUILD_TYPE_PRIORITY, amp_flow.requires)
+        self.assertIn(constants.FLAVOR, amp_flow.requires)
+        self.assertIn(constants.LOADBALANCER_ID, amp_flow.requires)
+        self.assertIn(constants.SERVER_GROUP_ID, amp_flow.requires)
+        self.assertIn(constants.UPDATE_DICT, amp_flow.requires)
+        self.assertIn(constants.ADDITIONAL_VIPS, amp_flow.provides)
+        self.assertIn(constants.AMP_DATA, amp_flow.provides)
+        self.assertIn(constants.AMPHORA, amp_flow.provides)
+        self.assertIn(constants.AMPHORA_ID, amp_flow.provides)
+        self.assertIn(constants.AMPHORA_NETWORK_CONFIG, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_ID, amp_flow.provides)
+        self.assertIn(constants.COMPUTE_OBJ, amp_flow.provides)
+        self.assertIn(constants.LOADBALANCER, amp_flow.provides)
+        self.assertIn(constants.PORT_DATA, amp_flow.provides)
+        self.assertIn(constants.SERVER_PEM, amp_flow.provides)
+        self.assertIn(constants.SUBNET, amp_flow.provides)
+        self.assertIn(constants.VIP, amp_flow.provides)
+
     @mock.patch('octavia.common.rpc.NOTIFIER',
                 new_callable=MockNOTIFIER)
     def test_get_delete_load_balancer_flow(self, mock_get_net_driver,
@@ -336,7 +362,7 @@ class TestLoadBalancerFlows(base.TestCase):

         self.assertEqual(6, len(failover_flow.requires),
                          failover_flow.requires)
-        self.assertEqual(13, len(failover_flow.provides),
+        self.assertEqual(14, len(failover_flow.provides),
                          failover_flow.provides)

     @mock.patch('octavia.common.rpc.NOTIFIER',
@@ -412,7 +438,7 @@ class TestLoadBalancerFlows(base.TestCase):

         self.assertEqual(6, len(failover_flow.requires),
                          failover_flow.requires)
-        self.assertEqual(13, len(failover_flow.provides),
+        self.assertEqual(14, len(failover_flow.provides),
                          failover_flow.provides)

     @mock.patch('octavia.common.rpc.NOTIFIER',
@@ -1800,3 +1800,108 @@ class TestNetworkTasks(base.TestCase):
         self.assertIsNone(result)
         mock_driver.get_security_group.assert_called_once_with(SG_NAME)
         mock_get_sg_name.assert_called_once_with(LB_ID)
+
+    @mock.patch('octavia.db.repositories.LoadBalancerRepository.get')
+    @mock.patch('octavia.db.api.get_session', return_value=_session_mock)
+    def test_create_SRIOV_base_port(self, mock_get_session, mock_lb_repo_get,
+                                    mock_get_net_driver):
+        AMP_ID = uuidutils.generate_uuid()
+        LB_ID = uuidutils.generate_uuid()
+        PORT_ID = uuidutils.generate_uuid()
+        VIP_NETWORK_ID = uuidutils.generate_uuid()
+        VIP_QOS_ID = uuidutils.generate_uuid()
+        VIP_SUBNET_ID = uuidutils.generate_uuid()
+        VIP_IP_ADDRESS = '203.0.113.81'
+        VIP_IP_ADDRESS2 = 'fd08::1'
+        mock_driver = mock.MagicMock()
+        mock_get_net_driver.return_value = mock_driver
+        port_mock = mock.MagicMock()
+        port_mock.id = PORT_ID
+        subnet_dict = {constants.ID: VIP_SUBNET_ID}
+        amphora_dict = {constants.ID: AMP_ID}
+        lb_dict = {constants.LOADBALANCER_ID: LB_ID,
+                   constants.VIP_ADDRESS: VIP_IP_ADDRESS,
+                   constants.VIP_NETWORK_ID: VIP_NETWORK_ID,
+                   constants.VIP_QOS_POLICY_ID: VIP_QOS_ID}
+        addl_vips = [o_data_models.AdditionalVip(
+            ip_address=VIP_IP_ADDRESS2)]
+        lb_mock = mock.MagicMock()
+        lb_mock.additional_vips = addl_vips
+        mock_lb_repo_get.return_value = lb_mock
+
+        mock_driver.create_port.side_effect = [
+            port_mock, exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom'),
+            exceptions.OctaviaException('boom')]
+        mock_driver.delete_port.side_effect = [mock.DEFAULT, Exception('boom')]
+
+        net_task = network_tasks.CreateSRIOVBasePort()
+
+        # Limit the retry attempts for the test run to save time
+        net_task.execute.retry.stop = tenacity.stop_after_attempt(2)
+
+        # Test execute
+        result = net_task.execute(lb_dict, amphora_dict, subnet_dict)
+
+        self.assertEqual(port_mock.to_dict(), result)
+        mock_driver.create_port.assert_called_once_with(
+            VIP_NETWORK_ID, name=constants.AMP_BASE_PORT_PREFIX + AMP_ID,
+            fixed_ips=[{constants.SUBNET_ID: VIP_SUBNET_ID}],
+            secondary_ips=[VIP_IP_ADDRESS2, VIP_IP_ADDRESS],
+            qos_policy_id=VIP_QOS_ID, vnic_type=constants.VNIC_TYPE_DIRECT)
+
+        # Test execute exception
+        mock_driver.reset_mock()
+
+        self.assertRaises(exceptions.OctaviaException, net_task.execute,
+                          lb_dict, amphora_dict, subnet_dict)
+
+        # Test revert when this task failed
+        mock_driver.reset_mock()
+
+        net_task.revert(failure.Failure.from_exception(Exception('boom')),
+                        lb_dict, amphora_dict, subnet_dict)
+
+        mock_driver.delete_port.assert_not_called()
+
+        # Test revert
+        mock_driver.reset_mock()
+
+        # The execute path generates a port dict, so this will be the result
+        # passed into the revert method by Taskflow
+        port_dict = {constants.ID: PORT_ID}
+
+        net_task.revert(port_dict, lb_dict, amphora_dict, subnet_dict)
+
+        mock_driver.delete_port.assert_called_once_with(PORT_ID)
+
+        # Test revert exception
+        mock_driver.reset_mock()
+
+        net_task.revert(port_dict, lb_dict, amphora_dict, subnet_dict)
+
+        mock_driver.delete_port.assert_called_once_with(PORT_ID)
+
+    def test_build_amp_data(self, mock_get_net_driver):
+        VIP_ADDRESS = '203.0.113.33'
+        VIP_PORT_ID = uuidutils.generate_uuid()
+        lb_dict = {constants.VIP_ADDRESS: VIP_ADDRESS,
+                   constants.VIP_PORT_ID: VIP_PORT_ID}
+        amphora_dict = {}
+        BASE_PORT_ID = uuidutils.generate_uuid()
+        BASE_PORT_IP = '203.0.113.50'
+        port_data_dict = {
+            constants.ID: BASE_PORT_ID,
+            constants.FIXED_IPS: [{constants.IP_ADDRESS: BASE_PORT_IP}]}
+
+        expected_amp_data = {constants.HA_IP: VIP_ADDRESS,
+                             constants.HA_PORT_ID: VIP_PORT_ID,
+                             constants.VRRP_ID: 1,
+                             constants.VRRP_PORT_ID: BASE_PORT_ID,
+                             constants.VRRP_IP: BASE_PORT_IP}
+
+        net_task = network_tasks.BuildAMPData()
+
+        result = net_task.execute(lb_dict, amphora_dict, port_data_dict)
+
+        self.assertEqual(expected_amp_data, result)
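test_create_SRIOV_base_port fixes the contract of the new
CreateSRIOVBasePort task: execute creates the amphora base port with
vnic_type direct and returns it as a dict, and revert deletes that port
unless execute itself was the failure. An illustrative reduction of that
contract follows; the real task lives in
octavia.controller.worker.v2.tasks.network_tasks, and this body is a
sketch, not the shipped implementation:

    # Hedged sketch of the CreateSRIOVBasePort execute/revert contract.
    from taskflow.types import failure as tf_failure

    from octavia.common import constants


    class SRIOVBasePortSketch:
        def __init__(self, network_driver):
            self.network_driver = network_driver

        def execute(self, loadbalancer, amphora, subnet):
            # The VIP (and any additional VIPs) ride on the base port as
            # secondary IPs; the port itself is bound as an SR-IOV VF.
            port = self.network_driver.create_port(
                loadbalancer[constants.VIP_NETWORK_ID],
                name=constants.AMP_BASE_PORT_PREFIX + amphora[constants.ID],
                fixed_ips=[{constants.SUBNET_ID: subnet[constants.ID]}],
                secondary_ips=[loadbalancer[constants.VIP_ADDRESS]],
                qos_policy_id=loadbalancer[constants.VIP_QOS_POLICY_ID],
                vnic_type=constants.VNIC_TYPE_DIRECT)
            return port.to_dict()

        def revert(self, result, loadbalancer, amphora, subnet):
            # A Failure result means execute raised before a port existed,
            # so there is nothing to clean up.
            if isinstance(result, tf_failure.Failure):
                return
            self.network_driver.delete_port(result[constants.ID])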
@@ -522,7 +522,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_with(
             flow_utils.get_create_load_balancer_flow,
-            constants.TOPOLOGY_SINGLE, listeners=[], store=store)
+            constants.TOPOLOGY_SINGLE, listeners=[],
+            flavor_dict=None, store=store)
         self.assertEqual(4, mock_lb_repo_get.call_count)

     def test_create_load_balancer_active_standby(
@@ -561,7 +562,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_with(
             flow_utils.get_create_load_balancer_flow,
-            constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[], store=store)
+            constants.TOPOLOGY_ACTIVE_STANDBY, listeners=[],
+            flavor_dict=None, store=store)

     def test_create_load_balancer_full_graph_single(
             self,
@@ -603,7 +605,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_with(
             flow_utils.get_create_load_balancer_flow,
-            constants.TOPOLOGY_SINGLE, listeners=dict_listeners, store=store)
+            constants.TOPOLOGY_SINGLE, listeners=dict_listeners,
+            flavor_dict=None, store=store)

     def test_create_load_balancer_full_graph_active_standby(
             self,
@@ -650,7 +653,7 @@ class TestControllerWorker(base.TestCase):
         cw.services_controller.run_poster.assert_called_with(
             flow_utils.get_create_load_balancer_flow,
             constants.TOPOLOGY_ACTIVE_STANDBY, listeners=dict_listeners,
-            store=store)
+            store=store, flavor_dict=None)

     @mock.patch('octavia.controller.worker.v2.flows.load_balancer_flows.'
                 'LoadBalancerFlows.get_create_load_balancer_flow')
@@ -695,7 +698,8 @@ class TestControllerWorker(base.TestCase):
         cw.create_load_balancer(_load_balancer_mock)

         mock_get_create_load_balancer_flow.assert_called_with(
-            constants.TOPOLOGY_SINGLE, listeners=dict_listeners)
+            constants.TOPOLOGY_SINGLE, listeners=dict_listeners,
+            flavor_dict=None)
         mock_base_taskflow_load.assert_called_with(
             mock_get_create_load_balancer_flow.return_value, store=store)

@@ -1461,12 +1465,13 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+                       constants.TOPOLOGY_SINGLE}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               constants.TOPOLOGY_SINGLE},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: None,
@@ -1479,7 +1484,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), 1, store=expected_stored_params)
+            mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
                 'get_availability_zone_metadata_dict', return_value={})
@@ -1516,12 +1522,13 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+                       constants.TOPOLOGY_ACTIVE_STANDBY}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               constants.TOPOLOGY_ACTIVE_STANDBY},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: None,
@@ -1534,7 +1541,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), 2, store=expected_stored_params)
+            mock_amphora.to_dict(), 2, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
                 'get_availability_zone_metadata_dict', return_value={})
@@ -1571,12 +1579,13 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+                       constants.TOPOLOGY_ACTIVE_STANDBY}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               constants.TOPOLOGY_ACTIVE_STANDBY},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: SERVER_GROUP_ID,
@@ -1589,7 +1598,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), 2, store=expected_stored_params)
+            mock_amphora.to_dict(), 2, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.api.drivers.utils.'
                 'db_loadbalancer_to_provider_loadbalancer')
@@ -1623,12 +1633,12 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY: mock_lb.topology}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               mock_lb.topology},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: SERVER_GROUP_ID,
@@ -1641,7 +1651,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), None, store=expected_stored_params)
+            mock_amphora.to_dict(), None, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.db.repositories.FlavorRepository.'
                 'get_flavor_metadata_dict', return_value={})
@@ -1678,13 +1689,13 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+                       constants.TOPOLOGY_SINGLE, 'taste': 'spicy'}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               constants.TOPOLOGY_SINGLE,
-                               'taste': 'spicy'},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: None,
@@ -1698,7 +1709,8 @@ class TestControllerWorker(base.TestCase):

         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), 1, store=expected_stored_params)
+            mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.db.repositories.AvailabilityZoneRepository.'
                 'get_availability_zone_metadata_dict', return_value={})
@@ -1735,12 +1747,13 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+                       constants.TOPOLOGY_SINGLE}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {'planet': 'jupiter'},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               constants.TOPOLOGY_SINGLE},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: None,
@@ -1752,12 +1765,10 @@ class TestControllerWorker(base.TestCase):
         cw.services_controller.reset_mock()
         cw.failover_amphora(AMP_ID)

-        print(cw, flush=True)
-        print(cw.services_controller, flush=True)
-        print(cw.services_controller.run_poster, flush=True)
         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), 1, store=expected_stored_params)
+            mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.api.drivers.utils.'
                 'db_loadbalancer_to_provider_loadbalancer')
@@ -1794,12 +1805,13 @@ class TestControllerWorker(base.TestCase):
         mock_amphora.load_balancer_id = LB_ID
         mock_amphora.status = constants.AMPHORA_READY
         mock_amp_repo_get.return_value = mock_amphora
+        flavor_dict = {constants.LOADBALANCER_TOPOLOGY:
+                       constants.TOPOLOGY_SINGLE}
         expected_stored_params = {
             constants.AVAILABILITY_ZONE: {},
             constants.BUILD_TYPE_PRIORITY:
                 constants.LB_CREATE_FAILOVER_PRIORITY,
-            constants.FLAVOR: {constants.LOADBALANCER_TOPOLOGY:
-                               constants.TOPOLOGY_SINGLE},
+            constants.FLAVOR: flavor_dict,
             constants.LOADBALANCER: mock_provider_lb.to_dict(),
             constants.LOADBALANCER_ID: LB_ID,
             constants.SERVER_GROUP_ID: None,
@@ -1813,12 +1825,10 @@ class TestControllerWorker(base.TestCase):
         cw.services_controller.reset_mock()
         cw.failover_amphora(AMP_ID)

-        print(cw, flush=True)
-        print(cw.services_controller, flush=True)
-        print(cw.services_controller.run_poster, flush=True)
         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
-            mock_amphora.to_dict(), 1, store=expected_stored_params)
+            mock_amphora.to_dict(), 1, flavor_dict=flavor_dict,
+            store=expected_stored_params)

     @mock.patch('octavia.controller.worker.v2.flows.amphora_flows.'
                 'AmphoraFlows.get_failover_amphora_flow')
@@ -1931,7 +1941,7 @@ class TestControllerWorker(base.TestCase):
         cw.services_controller.run_poster.assert_called_once_with(
             flow_utils.get_failover_amphora_flow,
             mock_amphora.to_dict(),
-            None, store=expected_stored_params)
+            None, flavor_dict={}, store=expected_stored_params)

     @mock.patch('octavia.db.repositories.AmphoraHealthRepository.delete')
     def test_failover_deleted_amphora(self,
@@ -109,8 +109,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         vip = lb.vip
         sec_grp_id = 'lb-sec-grp1'
         show_port = self.driver.network_proxy.get_port
-        show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+        show_port.return_value = Port(device_owner=constants.OCTAVIA_OWNER)
         delete_port = self.driver.network_proxy.delete_port
         delete_sec_grp = self.driver.network_proxy.delete_security_group
         list_security_groups = self.driver.network_proxy.find_security_group
@@ -131,7 +130,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         sec_grp_id = 'lb-sec-grp1'
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+            device_owner=constants.OCTAVIA_OWNER)
         delete_port = self.driver.network_proxy.delete_port
         delete_sec_grp = self.driver.network_proxy.delete_security_group
         list_security_groups = self.driver.network_proxy.find_security_group
@@ -150,7 +149,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         vip = lb.vip
         sec_grp_id = 'lb-sec-grp1'
         show_port = self.driver.network_proxy.get_port
-        port = Port(device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+        port = Port(device_owner=constants.OCTAVIA_OWNER)
         show_port.side_effect = [port, Exception]
         list_security_groups = self.driver.network_proxy.find_security_group
         list_security_groups.return_value = SecurityGroup(id=sec_grp_id)
@@ -164,7 +163,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         sec_grp_id = 'lb-sec-grp1'
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+            device_owner=constants.OCTAVIA_OWNER)
         delete_port = self.driver.network_proxy.delete_port
         delete_port.side_effect = os_exceptions.ResourceNotFound
         delete_sec_grp = self.driver.network_proxy.delete_security_group
@@ -183,7 +182,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         vip = lb.vip
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+            device_owner=constants.OCTAVIA_OWNER)
         delete_port = self.driver.network_proxy.delete_port
         delete_sec_grp = self.driver.network_proxy.delete_security_group
         list_security_groups = self.driver.network_proxy.find_security_group
@@ -198,7 +197,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         vip.load_balancer = lb
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+            device_owner=constants.OCTAVIA_OWNER)
         delete_port = self.driver.network_proxy.delete_port
         delete_port.side_effect = [None, None, TypeError]
         self.assertRaises(network_base.DeallocateVIPException,
@@ -214,7 +213,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         vip = lb.vip
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+            device_owner=constants.OCTAVIA_OWNER)
         delete_port = self.driver.network_proxy.delete_port
         list_ports = self.driver.network_proxy.ports
         find_security_group = self.driver.network_proxy.find_security_group
@@ -256,7 +255,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         vip.load_balancer = lb
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = Port(
-            device_owner=allowed_address_pairs.OCTAVIA_OWNER)
+            device_owner=constants.OCTAVIA_OWNER)
         update_port = self.driver.network_proxy.update_port
         update_port.side_effect = os_exceptions.ResourceNotFound
         self.driver.deallocate_vip(vip)
@@ -557,8 +556,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         show_port = self.driver.network_proxy.get_port
         show_port.return_value = bad_existing_port
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -578,7 +576,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'project_id': 'test-project',
             'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
@@ -600,8 +598,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
     def test_allocate_vip_when_port_not_found(self, mock_check_ext,
                                               mock_get_port):
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -620,7 +617,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'project_id': 'test-project',
             'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID}]
@@ -660,8 +657,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 '_check_extension_enabled', return_value=True)
     def test_allocate_vip_when_no_port_provided(self, mock_check_ext):
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -680,7 +676,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'project_id': 'test-project',
             'fixed_ips': [{'ip_address': t_constants.MOCK_IP_ADDRESS,
@@ -698,8 +694,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 '_check_extension_enabled', return_value=True)
     def test_allocate_vip_when_no_port_fixed_ip(self, mock_check_ext):
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -718,7 +713,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'project_id': 'test-project',
             'fixed_ips': [{'subnet_id': t_constants.MOCK_SUBNET_ID,
@@ -736,8 +731,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 '_check_extension_enabled', return_value=True)
     def test_allocate_vip_when_no_port_no_fixed_ip(self, mock_check_ext):
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -754,7 +748,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'project_id': 'test-project'}
         create_port.assert_called_once_with(**exp_create_port_call)
@@ -767,8 +761,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 '_check_extension_enabled', return_value=False)
     def test_allocate_vip_when_no_port_provided_tenant(self, mock_check_ext):
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -787,7 +780,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'tenant_id': 'test-project',
             'fixed_ips': [{'ip_address': t_constants.MOCK_IP_ADDRESS,
@@ -805,8 +798,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 '_check_extension_enabled', return_value=False)
     def test_allocate_vip_with_additional_vips(self, mock_check_ext):
         port_create_dict = Port(**t_constants.MOCK_NEUTRON_PORT.to_dict())
-        port_create_dict['device_owner'] = (
-            allowed_address_pairs.OCTAVIA_OWNER)
+        port_create_dict['device_owner'] = constants.OCTAVIA_OWNER
         port_create_dict['device_id'] = 'lb-1'
         create_port = self.driver.network_proxy.create_port
         create_port.return_value = port_create_dict
@@ -830,7 +822,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             'name': 'octavia-lb-1',
             'network_id': t_constants.MOCK_NETWORK_ID,
             'device_id': 'lb-1',
-            'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+            'device_owner': constants.OCTAVIA_OWNER,
             'admin_state_up': False,
             'tenant_id': 'test-project',
             'fixed_ips': [
@@ -1579,7 +1571,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                          'project_id': t_constants.MOCK_PROJECT_ID,
                          'qos_policy_id': QOS_POLICY_ID,
                          'security_group_ids': [],
-                         'status': t_constants.MOCK_STATUS}
+                         'status': t_constants.MOCK_STATUS,
+                         'vnic_type': constants.VNIC_TYPE_NORMAL}

         self.driver.network_proxy.create_port.side_effect = [
             MOCK_NEUTRON_PORT, MOCK_NEUTRON_PORT, Exception('boom')]
@@ -1595,13 +1588,14 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         self.driver.network_proxy.create_port.assert_called_once_with(
             **{
                 'network_id': NETWORK_ID, 'admin_state_up': ADMIN_STATE_UP,
-                'device_owner': allowed_address_pairs.OCTAVIA_OWNER,
+                'device_owner': constants.OCTAVIA_OWNER,
                 'allowed_address_pairs': [
                     {'ip_address': IP_ADDRESS2}, {'ip_address': IP_ADDRESS3}],
                 'fixed_ips': [{
                     'subnet_id': SUBNET1_ID, 'ip_address': IP_ADDRESS1}],
                 'name': FAKE_NAME, 'qos_policy_id': QOS_POLICY_ID,
-                'security_groups': [SECURITY_GROUP_ID]})
+                'security_groups': [SECURITY_GROUP_ID],
+                'binding_vnic_type': constants.VNIC_TYPE_NORMAL})

         # Test minimal successful path
         result = self.driver.create_port(NETWORK_ID)
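For the Neutron driver, the new vnic_type argument is forwarded as the
port's binding:vnic_type attribute ('binding_vnic_type' in the
openstacksdk call, as the assertion above shows). A sketch of the
resulting port-create body for an SR-IOV VIP; all values here are
illustrative placeholders, not from the diff:

    # Illustrative openstacksdk port-create body for an SR-IOV base port.
    port_body = {
        'network_id': '6f5f4757-95fa-44f4-9b2f-22a1000b9071',
        'name': 'octavia-lb-vrrp-<amphora-id>',  # placeholder name
        'admin_state_up': True,
        'binding_vnic_type': 'direct',  # 'normal' for standard virtio ports
        'fixed_ips': [{'subnet_id': '8a1b2c3d-0000-4000-8000-000000000000'}],
    }
    # network_proxy.create_port(**port_body) mirrors the assertion above.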
@@ -11,6 +11,7 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
+from octavia.common import constants
 from octavia.network.drivers.neutron import utils
 from octavia.tests.common import constants as t_constants
 from octavia.tests.unit import base
@@ -67,6 +68,7 @@ class TestNeutronUtils(base.TestCase):
             admin_state_up=t_constants.MOCK_ADMIN_STATE_UP,
             fixed_ips=[],
             security_group_ids=[],
+            vnic_type=constants.VNIC_TYPE_NORMAL,
         )
         self._compare_ignore_value_none(model_obj.to_dict(), assert_dict)
         fixed_ips = t_constants.MOCK_NEUTRON_PORT['fixed_ips']
@@ -77,7 +77,6 @@ octavia.driver_agent.provider_agents =
 octavia.network.drivers =
     network_noop_driver = octavia.network.drivers.noop_driver.driver:NoopNetworkDriver
     allowed_address_pairs_driver = octavia.network.drivers.neutron.allowed_address_pairs:AllowedAddressPairsDriver
-    containers_driver = octavia.network.drivers.neutron.containers:ContainersDriver
 octavia.volume.drivers =
     volume_noop_driver = octavia.volume.drivers.noop_driver.driver:NoopVolumeDriver
    volume_cinder_driver = octavia.volume.drivers.cinder_driver:VolumeManager