Add VIP access control list

This patch extends the listener API with a new parameter, 'allowed_cidrs',
which takes a list of IPv4 or IPv6 CIDRs. Leaving the list unset keeps the
traditional behavior of allowing all ingress traffic to the listener.
Setting it denies all traffic except sources that match one of the CIDRs in
the 'allowed_cidrs' list. Note that the API validates that all CIDRs match
the IP version of the VIP. This may change later as part of the work to
allow multiple VIPs per load balancer
(Change-Id Id7153dbf33b9616d7af685fcf13ad9a79793c06b).

Task: 26210
Story: 2003686
Change-Id: Id2b560df1cde9ce9403afbd593bbaa6cae5f06d6
parent: 05f5257075
commit: f3b48bc2f7
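The access-control semantics described above can be summarized in a small sketch (not part of the patch; a simplified illustration using netaddr, which Octavia already depends on). An unset or empty list allows every source, while a non-empty list switches the default to deny and lets only listed CIDRs through:

    import netaddr

    def is_source_allowed(source_ip, allowed_cidrs):
        # Unset/empty list keeps the traditional allow-all behavior.
        if not allowed_cidrs:
            return True
        # Otherwise only sources inside one of the CIDRs are accepted.
        src = netaddr.IPAddress(source_ip)
        return any(src in netaddr.IPNetwork(cidr) for cidr in allowed_cidrs)

    # Example: only 192.0.2.0/24 and 198.51.100.0/24 may reach the listener.
    assert is_source_allowed('192.0.2.10', ['192.0.2.0/24', '198.51.100.0/24'])
    assert not is_source_allowed('203.0.113.7', ['192.0.2.0/24', '198.51.100.0/24'])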
@@ -155,6 +155,21 @@ admin_state_up-optional:
   in: body
   required: false
   type: boolean
+allowed_cidrs:
+  description: |
+    A list of IPv4, IPv6 or mix of both CIDRs.
+  in: body
+  min_version: 2.12
+  required: true
+  type: array
+allowed_cidrs-optional:
+  description: |
+    A list of IPv4, IPv6 or mix of both CIDRs. The default is all allowed.
+    When a list of CIDRs is provided, the default switches to deny all.
+  in: body
+  min_version: 2.12
+  required: false
+  type: array
 amphora-id:
   description: |
     The associated amphora ID.
@@ -1 +1 @@
-curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"listener": {"protocol": "TERMINATED_HTTPS", "description": "A great TLS listener", "admin_state_up": true, "connection_limit": 200, "protocol_port": "443", "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "name": "great_tls_listener", "insert_headers": {"X-Forwarded-For": "true", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c"}}' http://198.51.100.10:9876/v2/lbaas/listeners
+curl -X POST -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"listener": {"protocol": "TERMINATED_HTTPS", "description": "A great TLS listener", "admin_state_up": true, "connection_limit": 200, "protocol_port": "443", "loadbalancer_id": "607226db-27ef-4d41-ae89-f2a800e9c2db", "name": "great_tls_listener", "insert_headers": {"X-Forwarded-For": "true", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 50000, "timeout_member_connect": 5000, "timeout_member_data": 50000, "timeout_tcp_inspect": 0, "tags": ["test_tag"], "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5", "client_authentication": "MANDATORY", "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c", "allowed_cidrs": ["192.0.2.0/24", "198.51.100.0/24"]}}' http://198.51.100.10:9876/v2/lbaas/listeners
@@ -23,6 +23,10 @@
         "tags": ["test_tag"],
         "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5",
         "client_authentication": "MANDATORY",
-        "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c"
+        "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c",
+        "allowed_cidrs": [
+            "192.0.2.0/24",
+            "198.51.100.0/24"
+        ]
     }
 }
@@ -38,6 +38,10 @@
         "tags": ["test_tag"],
         "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5",
         "client_authentication": "MANDATORY",
-        "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c"
+        "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c",
+        "allowed_cidrs": [
+            "192.0.2.0/24",
+            "198.51.100.0/24"
+        ]
     }
 }
@@ -38,6 +38,10 @@
         "tags": ["test_tag"],
         "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5",
         "client_authentication": "MANDATORY",
-        "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c"
+        "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c",
+        "allowed_cidrs": [
+            "192.0.2.0/24",
+            "198.51.100.0/24"
+        ]
     }
 }
@@ -1 +1 @@
-curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"listener": {"description": "An updated great TLS listener", "admin_state_up": true, "connection_limit": 200, "name": "great_updated_tls_listener", "insert_headers": {"X-Forwarded-For": "false", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 100000, "timeout_member_connect": 1000, "timeout_member_data": 100000, "timeout_tcp_inspect": 5, "tags": ["updated_tag"], "client_ca_tls_container_ref": null}}' http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d
+curl -X PUT -H "Content-Type: application/json" -H "X-Auth-Token: <token>" -d '{"listener": {"description": "An updated great TLS listener", "admin_state_up": true, "connection_limit": 200, "name": "great_updated_tls_listener", "insert_headers": {"X-Forwarded-For": "false", "X-Forwarded-Port": "true"}, "default_tls_container_ref": "http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "sni_container_refs": ["http://198.51.100.10:9311/v1/containers/a570068c-d295-4780-91d4-3046a325db51", "http://198.51.100.10:9311/v1/containers/aaebb31e-7761-4826-8cb4-2b829caca3ee"], "timeout_client_data": 100000, "timeout_member_connect": 1000, "timeout_member_data": 100000, "timeout_tcp_inspect": 5, "tags": ["updated_tag"], "client_ca_tls_container_ref": null, "allowed_cidrs": ["192.0.2.0/24", "198.51.100.0/24"]}}' http://198.51.100.10:9876/v2/lbaas/listeners/023f2e34-7806-443b-bfae-16c324569a3d
@@ -19,6 +19,10 @@
         "timeout_member_data": 100000,
         "timeout_tcp_inspect": 5,
         "tags": ["updated_tag"],
-        "client_ca_tls_container_ref": null
+        "client_ca_tls_container_ref": null,
+        "allowed_cidrs": [
+            "192.0.2.0/24",
+            "198.51.100.0/24"
+        ]
     }
 }
@@ -38,6 +38,10 @@
         "tags": ["updated_tag"],
         "client_ca_tls_container_ref": null,
         "client_authentication": "NONE",
-        "client_crl_container_ref": null
+        "client_crl_container_ref": null,
+        "allowed_cidrs": [
+            "192.0.2.0/24",
+            "198.51.100.0/24"
+        ]
     }
 }
@@ -40,7 +40,11 @@
             "tags": ["test_tag"],
             "client_ca_tls_container_ref": "http://198.51.100.10:9311/v1/containers/35649991-49f3-4625-81ce-2465fe8932e5",
             "client_authentication": "NONE",
-            "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c"
+            "client_crl_container_ref": "http://198.51.100.10:9311/v1/containers/e222b065-b93b-4e2a-9a02-804b7a118c3c",
+            "allowed_cidrs": [
+                "192.0.2.0/24",
+                "198.51.100.0/24"
+            ]
         }
     ]
 }
@@ -46,6 +46,7 @@ Response Parameters
 .. rest_parameters:: ../parameters.yaml

    - admin_state_up: admin_state_up
+   - allowed_cidrs: allowed_cidrs
    - client_authentication: client_authentication
    - client_ca_tls_container_ref: client_ca_tls_container_ref
    - client_crl_container_ref: client_crl_container_ref
@@ -139,6 +140,7 @@ Request
 .. rest_parameters:: ../parameters.yaml

    - admin_state_up: admin_state_up-default-optional
+   - allowed_cidrs: allowed_cidrs-optional
    - client_authentication: client_authentication-optional
    - client_ca_tls_container_ref: client_ca_tls_container_ref-optional
    - client_crl_container_ref: client_crl_container_ref-optional
@@ -259,6 +261,7 @@ Response Parameters
 .. rest_parameters:: ../parameters.yaml

    - admin_state_up: admin_state_up
+   - allowed_cidrs: allowed_cidrs
    - client_authentication: client_authentication
    - client_ca_tls_container_ref: client_ca_tls_container_ref
    - client_crl_container_ref: client_crl_container_ref
@@ -336,6 +339,7 @@ Response Parameters
 .. rest_parameters:: ../parameters.yaml

    - admin_state_up: admin_state_up
+   - allowed_cidrs: allowed_cidrs
    - client_authentication: client_authentication
    - client_ca_tls_container_ref: client_ca_tls_container_ref
    - client_crl_container_ref: client_crl_container_ref
@@ -403,6 +407,7 @@ Request
 .. rest_parameters:: ../parameters.yaml

    - admin_state_up: admin_state_up-default-optional
+   - allowed_cidrs: allowed_cidrs-optional
    - client_authentication: client_authentication-optional
    - client_ca_tls_container_ref: client_ca_tls_container_ref-optional
    - client_crl_container_ref: client_crl_container_ref-optional
@@ -438,6 +443,7 @@ Response Parameters
 .. rest_parameters:: ../parameters.yaml

    - admin_state_up: admin_state_up
+   - allowed_cidrs: allowed_cidrs
    - client_authentication: client_authentication
    - client_ca_tls_container_ref: client_ca_tls_container_ref
    - client_crl_container_ref: client_crl_container_ref
@@ -454,6 +454,8 @@ contain the following:
 |                              |        | additional TCP packets for content  |
 |                              |        | inspection.                         |
 +------------------------------+--------+-------------------------------------+
+| allowed_cidrs                | list   | List of IPv4 or IPv6 CIDRs.         |
++------------------------------+--------+-------------------------------------+

 .. _TLS container:

@@ -649,6 +651,8 @@ contain the following:
 |                            |        | additional TCP packets for content  |
 |                            |        | inspection.                         |
 +----------------------------+--------+-------------------------------------+
+| allowed_cidrs              | list   | List of IPv4 or IPv6 CIDRs.         |
++----------------------------+--------+-------------------------------------+

 The listener will be in the ``PENDING_UPDATE`` provisioning_status when
 it is passed to the driver. The driver will update the provisioning_status
@@ -304,6 +304,43 @@ incoming or outgoing traffic.
    openstack loadbalancer member create --subnet-id <private_subnet_id> --address 192.0.2.11 --protocol-port 80 pool1


+Deploy a load balancer with access control list
+-----------------------------------------------
+This solution limits incoming traffic to a listener to a set of allowed
+source IP addresses. Any other incoming traffic will be rejected.
+
+
+**Scenario description**:
+
+* Back-end servers 192.0.2.10 and 192.0.2.11 on subnet *private-subnet* have
+  been configured with a custom application on TCP port 23456.
+* Subnet *public-subnet* is a shared external subnet created by the cloud
+  operator which is reachable from the internet.
+* We want to configure a basic load balancer that is accessible from the
+  internet, which distributes requests to the back-end servers.
+* The application on TCP port 23456 is accessible only to a limited set of
+  source IP addresses (192.0.2.0/24 and 198.51.100.0/24).
+
+**Solution**:
+
+1. Create load balancer *lb1* on subnet *public-subnet*.
+2. Create listener *listener1* with allowed CIDRs.
+3. Create pool *pool1* as *listener1*'s default pool.
+4. Add members 192.0.2.10 and 192.0.2.11 on *private-subnet* to *pool1*.
+
+**CLI commands**:
+
+::
+
+    openstack loadbalancer create --name lb1 --vip-subnet-id public-subnet
+    # Re-run the following until lb1 shows ACTIVE and ONLINE statuses:
+    openstack loadbalancer show lb1
+    openstack loadbalancer listener create --name listener1 --protocol TCP --protocol-port 23456 --allowed-cidr 192.0.2.0/24 --allowed-cidr 198.51.100.0/24 lb1
+    openstack loadbalancer pool create --name pool1 --lb-algorithm ROUND_ROBIN --listener listener1 --protocol TCP
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.10 --protocol-port 80 pool1
+    openstack loadbalancer member create --subnet-id private-subnet --address 192.0.2.11 --protocol-port 80 pool1
+
+
 Deploy a non-terminated HTTPS load balancer
 -------------------------------------------
 A non-terminated HTTPS load balancer acts effectively like a generic TCP load
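Once *listener1* is ACTIVE, a quick way to exercise the ACL end to end (illustrative only, not part of the cookbook; the VIP address below is an example) is to try a TCP connection from an allowed and from a blocked client:

    import socket

    def can_connect(vip, port=23456, timeout=5):
        # Returns True when the listener accepts the TCP connection.
        try:
            with socket.create_connection((vip, port), timeout=timeout):
                return True
        except OSError:
            # Connection refused or filtered/timed out.
            return False

    # Run from a host inside 192.0.2.0/24 or 198.51.100.0/24 -> expected True;
    # from any other network -> expected False (traffic is filtered).
    print(can_connect('203.0.113.50'))  # hypothetical VIP address of lb1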
@@ -14,6 +14,7 @@

 import copy

+import netaddr
 import six
 from wsme import types as wtypes

@@ -43,6 +44,20 @@ class IPAddressType(wtypes.UserType):
             raise ValueError(error)


+class CidrType(wtypes.UserType):
+    basetype = unicode
+    name = 'cidr'
+
+    @staticmethod
+    def validate(value):
+        """Validates whether value is an IPv4 or IPv6 CIDR."""
+        try:
+            return str(netaddr.IPNetwork(value).cidr)
+        except (ValueError, netaddr.core.AddrFormatError):
+            error = 'Value should be IPv4 or IPv6 CIDR format'
+            raise ValueError(error)
+
+
 class URLType(wtypes.UserType):
     basetype = unicode
     name = 'url'
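The new wsme user type normalizes whatever it accepts to a canonical CIDR string via netaddr. A quick illustration of the behaviour (standalone sketch, assuming Python 3 where the file's 'unicode' basetype would read 'str'):

    import netaddr

    def validate_cidr(value):
        # Mirrors CidrType.validate(): returns the canonical network, or raises.
        try:
            return str(netaddr.IPNetwork(value).cidr)
        except (ValueError, netaddr.core.AddrFormatError):
            raise ValueError('Value should be IPv4 or IPv6 CIDR format')

    print(validate_cidr('192.0.2.5/24'))    # '192.0.2.0/24' (host bits masked off)
    print(validate_cidr('2001:db8::/64'))   # '2001:db8::/64'
    # validate_cidr('10.0.1.0/33') raises ValueError (invalid prefix length)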
@@ -266,6 +266,14 @@ def listener_dict_to_provider_dict(listener_dict):
                 listener_obj.client_crl_container_id)
             new_listener_dict['client_crl_container_data'] = crl_file

+    # Format the allowed_cidrs
+    if ('allowed_cidrs' in new_listener_dict and
+            new_listener_dict['allowed_cidrs'] and
+            'cidr' in new_listener_dict['allowed_cidrs'][0]):
+        cidrs_dict_list = new_listener_dict.pop('allowed_cidrs')
+        new_listener_dict['allowed_cidrs'] = [cidr_dict['cidr'] for
+                                              cidr_dict in cidrs_dict_list]
+
     # Remove the DB back references
     if 'load_balancer' in new_listener_dict:
         del new_listener_dict['load_balancer']
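The block above flattens the DB representation of allowed CIDRs (a list of ListenerCidr rows rendered as dicts) into the plain list of strings the provider driver API expects. Roughly, with illustrative data:

    # What comes out of the DB layer for the listener:
    db_form = [{'cidr': '192.0.2.0/24', 'listener_id': 'LISTENER-ID'},
               {'cidr': '198.51.100.0/24', 'listener_id': 'LISTENER-ID'}]

    # What listener_dict_to_provider_dict() hands to the provider driver:
    provider_form = [d['cidr'] for d in db_form]
    # ['192.0.2.0/24', '198.51.100.0/24']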
@@ -82,6 +82,9 @@ class RootController(rest.RestController):
         self._add_a_version(versions, 'v2.10', 'v2', 'SUPPORTED',
                             '2019-03-05T00:00:00Z', host_url)
         # Additive batch member update
-        self._add_a_version(versions, 'v2.11', 'v2', 'CURRENT',
+        self._add_a_version(versions, 'v2.11', 'v2', 'SUPPORTED',
                             '2019-06-24T00:00:00Z', host_url)
+        # VIP ACL
+        self._add_a_version(versions, 'v2.12', 'v2', 'CURRENT',
+                            '2019-09-11T00:00:00Z', host_url)
         return {'versions': versions}
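With v2.12 registered as the CURRENT version, clients can discover whether 'allowed_cidrs' is available from the API root. A hedged sketch (the endpoint address is an example, not from the patch):

    import requests

    # Hypothetical API address; substitute the real Octavia endpoint.
    resp = requests.get('http://198.51.100.10:9876/').json()
    version_ids = [v['id'] for v in resp['versions']]
    supports_acl = 'v2.12' in version_ids  # allowed_cidrs exists from v2.12 on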
@@ -31,6 +31,7 @@ from octavia.common import constants
 from octavia.common import data_models
 from octavia.common import exceptions
 from octavia.common import stats
+from octavia.common import utils as common_utils
 from octavia.db import api as db_api
 from octavia.db import prepare as db_prepare
 from octavia.i18n import _
@@ -151,6 +152,15 @@ class ListenersController(base.BaseController):
                 value=headers,
                 option=('%s protocol listener.' % listener_protocol))

+    def _validate_cidr_compatible_with_vip(self, vip, allowed_cidrs):
+        for cidr in allowed_cidrs:
+            # Check if CIDR IP version matches VIP IP version
+            if common_utils.is_cidr_ipv6(cidr) != common_utils.is_ipv6(vip):
+                msg = _("CIDR %(cidr)s IP version incompatible with VIP "
+                        "%(vip)s IP version.")
+                raise exceptions.ValidationException(
+                    detail=msg % {'cidr': cidr, 'vip': vip})
+
     def _validate_create_listener(self, lock_session, listener_dict):
         """Validate listener for wrong protocol or duplicate listeners

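For illustration (not part of the patch; addresses are examples), the version check above reduces to a netaddr comparison, so a mixed-version request such as an IPv6 CIDR against an IPv4 VIP is rejected with a 400:

    import netaddr

    vip = '198.51.100.5'                 # IPv4 VIP
    cidr = '2001:db8:a0b:12f0::/64'      # IPv6 CIDR from the request

    # Equivalent of common_utils.is_cidr_ipv6(cidr) != common_utils.is_ipv6(vip)
    mismatch = ((netaddr.IPNetwork(cidr).version == 6) !=
                (netaddr.IPAddress(vip).version == 6))
    # mismatch is True, so the API raises ValidationException with a message like:
    # "CIDR 2001:db8:a0b:12f0::/64 IP version incompatible with VIP 198.51.100.5 IP version."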
@@ -263,6 +273,14 @@ class ListenersController(base.BaseController):
                 protocol=db_l.protocol,
                 port=listener_dict.get('protocol_port'))

+        # Validate allowed CIDRs
+        allowed_cidrs = listener_dict.get('allowed_cidrs', []) or []
+        lb_id = listener_dict.get('load_balancer_id')
+        vip_db = self.repositories.vip.get(
+            lock_session, load_balancer_id=lb_id)
+        vip_address = vip_db.ip_address
+        self._validate_cidr_compatible_with_vip(vip_address, allowed_cidrs)
+
         try:
             db_listener = self.repositories.listener.create(
                 lock_session, **listener_dict)
@@ -272,8 +290,10 @@ class ListenersController(base.BaseController):
                             'tls_container_id': container.get(
                                 'tls_container_id')}
                 self.repositories.sni.create(lock_session, **sni_dict)
+            # DB listener needs to be refreshed
             db_listener = self.repositories.listener.get(
                 lock_session, id=db_listener.id)
+
             return db_listener
         except odb_exceptions.DBDuplicateEntry as de:
             column_list = ['load_balancer_id', 'protocol', 'protocol_port']
@@ -452,6 +472,12 @@ class ListenersController(base.BaseController):
         if ca_ref or crl_ref:
             self._validate_client_ca_and_crl_refs(ca_ref, crl_ref)

+        # Validate allowed CIDRs
+        if (listener.allowed_cidrs and listener.allowed_cidrs != wtypes.Unset):
+            vip_address = db_listener.load_balancer.vip.ip_address
+            self._validate_cidr_compatible_with_vip(
+                vip_address, listener.allowed_cidrs)
+
     def _set_default_on_none(self, listener):
         """Reset settings to their default values if None/null was passed in

@ -513,6 +539,7 @@ class ListenersController(base.BaseController):
|
|||||||
# Prepare the data for the driver data model
|
# Prepare the data for the driver data model
|
||||||
listener_dict = listener.to_dict(render_unsets=False)
|
listener_dict = listener.to_dict(render_unsets=False)
|
||||||
listener_dict['id'] = id
|
listener_dict['id'] = id
|
||||||
|
|
||||||
provider_listener_dict = (
|
provider_listener_dict = (
|
||||||
driver_utils.listener_dict_to_provider_dict(listener_dict))
|
driver_utils.listener_dict_to_provider_dict(listener_dict))
|
||||||
|
|
||||||
|
@ -61,6 +61,7 @@ class ListenerResponse(BaseListenerType):
|
|||||||
client_ca_tls_container_ref = wtypes.StringType()
|
client_ca_tls_container_ref = wtypes.StringType()
|
||||||
client_authentication = wtypes.wsattr(wtypes.StringType())
|
client_authentication = wtypes.wsattr(wtypes.StringType())
|
||||||
client_crl_container_ref = wtypes.wsattr(wtypes.StringType())
|
client_crl_container_ref = wtypes.wsattr(wtypes.StringType())
|
||||||
|
allowed_cidrs = wtypes.wsattr([types.CidrType()])
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def from_data_model(cls, data_model, children=False):
|
def from_data_model(cls, data_model, children=False):
|
||||||
@ -69,6 +70,8 @@ class ListenerResponse(BaseListenerType):
|
|||||||
|
|
||||||
listener.sni_container_refs = [
|
listener.sni_container_refs = [
|
||||||
sni_c.tls_container_id for sni_c in data_model.sni_containers]
|
sni_c.tls_container_id for sni_c in data_model.sni_containers]
|
||||||
|
listener.allowed_cidrs = [
|
||||||
|
c.cidr for c in data_model.allowed_cidrs] or None
|
||||||
if cls._full_response():
|
if cls._full_response():
|
||||||
del listener.loadbalancers
|
del listener.loadbalancers
|
||||||
l7policy_type = l7policy.L7PolicyFullResponse
|
l7policy_type = l7policy.L7PolicyFullResponse
|
||||||
@ -146,6 +149,7 @@ class ListenerPOST(BaseListenerType):
|
|||||||
wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES),
|
wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES),
|
||||||
default=constants.CLIENT_AUTH_NONE)
|
default=constants.CLIENT_AUTH_NONE)
|
||||||
client_crl_container_ref = wtypes.StringType(max_length=255)
|
client_crl_container_ref = wtypes.StringType(max_length=255)
|
||||||
|
allowed_cidrs = wtypes.wsattr([types.CidrType()])
|
||||||
|
|
||||||
|
|
||||||
class ListenerRootPOST(types.BaseType):
|
class ListenerRootPOST(types.BaseType):
|
||||||
@ -182,6 +186,7 @@ class ListenerPUT(BaseListenerType):
|
|||||||
client_authentication = wtypes.wsattr(
|
client_authentication = wtypes.wsattr(
|
||||||
wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES))
|
wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES))
|
||||||
client_crl_container_ref = wtypes.StringType(max_length=255)
|
client_crl_container_ref = wtypes.StringType(max_length=255)
|
||||||
|
allowed_cidrs = wtypes.wsattr([types.CidrType()])
|
||||||
|
|
||||||
|
|
||||||
class ListenerRootPUT(types.BaseType):
|
class ListenerRootPUT(types.BaseType):
|
||||||
@ -231,6 +236,7 @@ class ListenerSingleCreate(BaseListenerType):
|
|||||||
wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES),
|
wtypes.Enum(str, *constants.SUPPORTED_CLIENT_AUTH_MODES),
|
||||||
default=constants.CLIENT_AUTH_NONE)
|
default=constants.CLIENT_AUTH_NONE)
|
||||||
client_crl_container_ref = wtypes.StringType(max_length=255)
|
client_crl_container_ref = wtypes.StringType(max_length=255)
|
||||||
|
allowed_cidrs = wtypes.wsattr([types.CidrType()])
|
||||||
|
|
||||||
|
|
||||||
class ListenerStatusResponse(BaseListenerType):
|
class ListenerStatusResponse(BaseListenerType):
|
||||||
|
@ -108,6 +108,8 @@ class BaseDataModel(object):
|
|||||||
return obj.__class__.__name__ + obj.pool_id
|
return obj.__class__.__name__ + obj.pool_id
|
||||||
if obj.__class__.__name__ in ['ListenerStatistics']:
|
if obj.__class__.__name__ in ['ListenerStatistics']:
|
||||||
return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
|
return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
|
||||||
|
if obj.__class__.__name__ in ['ListenerCidr']:
|
||||||
|
return obj.__class__.__name__ + obj.listener_id + obj.cidr
|
||||||
if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
|
if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
|
||||||
return obj.__class__.__name__ + obj.load_balancer_id
|
return obj.__class__.__name__ + obj.load_balancer_id
|
||||||
if obj.__class__.__name__ in ['AmphoraHealth']:
|
if obj.__class__.__name__ in ['AmphoraHealth']:
|
||||||
@ -384,7 +386,8 @@ class Listener(BaseDataModel):
|
|||||||
timeout_client_data=None, timeout_member_connect=None,
|
timeout_client_data=None, timeout_member_connect=None,
|
||||||
timeout_member_data=None, timeout_tcp_inspect=None,
|
timeout_member_data=None, timeout_tcp_inspect=None,
|
||||||
tags=None, client_ca_tls_certificate_id=None,
|
tags=None, client_ca_tls_certificate_id=None,
|
||||||
client_authentication=None, client_crl_container_id=None):
|
client_authentication=None, client_crl_container_id=None,
|
||||||
|
allowed_cidrs=None):
|
||||||
self.id = id
|
self.id = id
|
||||||
self.project_id = project_id
|
self.project_id = project_id
|
||||||
self.name = name
|
self.name = name
|
||||||
@ -416,6 +419,7 @@ class Listener(BaseDataModel):
|
|||||||
self.client_ca_tls_certificate_id = client_ca_tls_certificate_id
|
self.client_ca_tls_certificate_id = client_ca_tls_certificate_id
|
||||||
self.client_authentication = client_authentication
|
self.client_authentication = client_authentication
|
||||||
self.client_crl_container_id = client_crl_container_id
|
self.client_crl_container_id = client_crl_container_id
|
||||||
|
self.allowed_cidrs = allowed_cidrs or []
|
||||||
|
|
||||||
def update(self, update_dict):
|
def update(self, update_dict):
|
||||||
for key, value in update_dict.items():
|
for key, value in update_dict.items():
|
||||||
@@ -775,3 +779,16 @@ class FlavorProfile(BaseDataModel):
         self.name = name
         self.provider_name = provider_name
         self.flavor_data = flavor_data
+
+
+class ListenerCidr(BaseDataModel):
+
+    def __init__(self, listener_id=None, cidr=None):
+        self.listener_id = listener_id
+        self.cidr = cidr
+
+    # SQLAlchemy kindly attaches the whole listener object so
+    # let's keep this simple by overriding the to_dict for this
+    # object. Otherwise we recurse down the "ghost" listener object.
+    def to_dict(self, **kwargs):
+        return {'cidr': self.cidr, 'listener_id': self.listener_id}
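The to_dict() override keeps the serialized form to the two real columns and avoids recursing into the back-referenced listener object; a quick illustration (assuming the class is imported from octavia.common.data_models, with an example listener ID):

    from octavia.common import data_models

    cidr_obj = data_models.ListenerCidr(
        listener_id='023f2e34-7806-443b-bfae-16c324569a3d',
        cidr='192.0.2.0/24')
    print(cidr_obj.to_dict())
    # {'cidr': '192.0.2.0/24',
    #  'listener_id': '023f2e34-7806-443b-bfae-16c324569a3d'}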
@@ -64,6 +64,12 @@ def is_ipv6(ip_address):
     return ip.version == 6


+def is_cidr_ipv6(cidr):
+    """Check if CIDR is IPv6 address with subnet prefix."""
+    ip = netaddr.IPNetwork(cidr)
+    return ip.version == 6
+
+
 def is_ipv6_lla(ip_address):
     """Check if ip address is IPv6 link local address."""
     ip = netaddr.IPAddress(ip_address)
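A couple of illustrative calls to the new helper (the import path assumes the module shown in this hunk, octavia/common/utils.py):

    from octavia.common import utils

    utils.is_cidr_ipv6('2001:db8::/64')   # True
    utils.is_cidr_ipv6('192.0.2.0/24')    # False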
@ -116,6 +116,8 @@ class ListenerFlows(object):
|
|||||||
requires=[constants.LOADBALANCER, constants.LISTENERS]))
|
requires=[constants.LOADBALANCER, constants.LISTENERS]))
|
||||||
update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
|
update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
|
||||||
requires=constants.LOADBALANCER))
|
requires=constants.LOADBALANCER))
|
||||||
|
update_listener_flow.add(network_tasks.UpdateVIP(
|
||||||
|
requires=constants.LOADBALANCER))
|
||||||
update_listener_flow.add(database_tasks.UpdateListenerInDB(
|
update_listener_flow.add(database_tasks.UpdateListenerInDB(
|
||||||
requires=[constants.LISTENER, constants.UPDATE_DICT]))
|
requires=[constants.LISTENER, constants.UPDATE_DICT]))
|
||||||
update_listener_flow.add(database_tasks.
|
update_listener_flow.add(database_tasks.
|
||||||
|
@ -116,6 +116,8 @@ class ListenerFlows(object):
|
|||||||
requires=[constants.LOADBALANCER, constants.LISTENERS]))
|
requires=[constants.LOADBALANCER, constants.LISTENERS]))
|
||||||
update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
|
update_listener_flow.add(amphora_driver_tasks.ListenersUpdate(
|
||||||
requires=constants.LOADBALANCER))
|
requires=constants.LOADBALANCER))
|
||||||
|
update_listener_flow.add(network_tasks.UpdateVIP(
|
||||||
|
requires=constants.LOADBALANCER))
|
||||||
update_listener_flow.add(database_tasks.UpdateListenerInDB(
|
update_listener_flow.add(database_tasks.UpdateListenerInDB(
|
||||||
requires=[constants.LISTENER, constants.UPDATE_DICT]))
|
requires=[constants.LISTENER, constants.UPDATE_DICT]))
|
||||||
update_listener_flow.add(database_tasks.
|
update_listener_flow.add(database_tasks.
|
||||||
|
@ -37,6 +37,8 @@ class OctaviaBase(models.ModelBase):
|
|||||||
return obj.__class__.__name__ + obj.pool_id
|
return obj.__class__.__name__ + obj.pool_id
|
||||||
if obj.__class__.__name__ in ['ListenerStatistics']:
|
if obj.__class__.__name__ in ['ListenerStatistics']:
|
||||||
return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
|
return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
|
||||||
|
if obj.__class__.__name__ in ['ListenerCidr']:
|
||||||
|
return obj.__class__.__name__ + obj.listener_id + obj.cidr
|
||||||
if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
|
if obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
|
||||||
return obj.__class__.__name__ + obj.load_balancer_id
|
return obj.__class__.__name__ + obj.load_balancer_id
|
||||||
if obj.__class__.__name__ in ['AmphoraHealth']:
|
if obj.__class__.__name__ in ['AmphoraHealth']:
|
||||||
|
@@ -0,0 +1,40 @@
+# Copyright 2018 Red Hat, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+"""Add CIDRs for listeners
+
+Revision ID: da371b422669
+Revises: a5762a99609a
+Create Date: 2018-11-22 12:31:39.864238
+
+"""
+
+from alembic import op
+import sqlalchemy as sa
+
+# revision identifiers, used by Alembic.
+revision = 'da371b422669'
+down_revision = 'a5762a99609a'
+
+
+def upgrade():
+    op.create_table(
+        u'listener_cidr',
+        sa.Column(u'listener_id', sa.String(36), nullable=False),
+        sa.Column(u'cidr', sa.String(64), nullable=False),
+
+        sa.ForeignKeyConstraint([u'listener_id'],
+                                [u'listener.id'],
+                                name=u'fk_listener_cidr_listener_id'),
+        sa.PrimaryKeyConstraint(u'listener_id', u'cidr')
+    )
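For a listener with two allowed CIDRs, the new listener_cidr table simply holds one row per (listener_id, cidr) pair. Illustrative contents rendered as Python dicts (values are examples, not from the patch):

    rows = [
        {'listener_id': '023f2e34-7806-443b-bfae-16c324569a3d',
         'cidr': '192.0.2.0/24'},
        {'listener_id': '023f2e34-7806-443b-bfae-16c324569a3d',
         'cidr': '198.51.100.0/24'},
    ]
    # The composite primary key (listener_id, cidr) forbids duplicates, which is
    # why the repository create() further below de-duplicates input with set().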
@ -542,6 +542,10 @@ class Listener(base_models.BASE, base_models.IdMixin,
|
|||||||
_p_ids.append(p.id)
|
_p_ids.append(p.id)
|
||||||
return _pools
|
return _pools
|
||||||
|
|
||||||
|
allowed_cidrs = orm.relationship(
|
||||||
|
'ListenerCidr', cascade='all,delete-orphan',
|
||||||
|
uselist=True, backref=orm.backref('listener', uselist=False))
|
||||||
|
|
||||||
|
|
||||||
class SNI(base_models.BASE):
|
class SNI(base_models.BASE):
|
||||||
|
|
||||||
@ -790,3 +794,19 @@ class SparesPool(base_models.BASE):
|
|||||||
__tablename__ = "spares_pool"
|
__tablename__ = "spares_pool"
|
||||||
|
|
||||||
updated_at = sa.Column(sa.DateTime, primary_key=True, nullable=True)
|
updated_at = sa.Column(sa.DateTime, primary_key=True, nullable=True)
|
||||||
|
|
||||||
|
|
||||||
|
class ListenerCidr(base_models.BASE):
|
||||||
|
|
||||||
|
__data_model__ = data_models.ListenerCidr
|
||||||
|
|
||||||
|
__tablename__ = "listener_cidr"
|
||||||
|
__table_args__ = (
|
||||||
|
sa.PrimaryKeyConstraint('listener_id', 'cidr'),
|
||||||
|
)
|
||||||
|
|
||||||
|
listener_id = sa.Column(
|
||||||
|
sa.String(36),
|
||||||
|
sa.ForeignKey("listener.id", name="fk_listener_cidr_listener_id"),
|
||||||
|
nullable=False)
|
||||||
|
cidr = sa.Column(sa.String(64), nullable=False)
|
||||||
|
@ -214,6 +214,7 @@ class Repositories(object):
|
|||||||
self.pool = PoolRepository()
|
self.pool = PoolRepository()
|
||||||
self.member = MemberRepository()
|
self.member = MemberRepository()
|
||||||
self.listener = ListenerRepository()
|
self.listener = ListenerRepository()
|
||||||
|
self.listener_cidr = ListenerCidrRepository()
|
||||||
self.listener_stats = ListenerStatisticsRepository()
|
self.listener_stats = ListenerStatisticsRepository()
|
||||||
self.amphora = AmphoraRepository()
|
self.amphora = AmphoraRepository()
|
||||||
self.sni = SNIRepository()
|
self.sni = SNIRepository()
|
||||||
@ -889,6 +890,25 @@ class SessionPersistenceRepository(BaseRepository):
|
|||||||
pool_id=pool_id).first())
|
pool_id=pool_id).first())
|
||||||
|
|
||||||
|
|
||||||
|
class ListenerCidrRepository(BaseRepository):
|
||||||
|
model_class = models.ListenerCidr
|
||||||
|
|
||||||
|
def create(self, session, listener_id, allowed_cidrs):
|
||||||
|
if allowed_cidrs:
|
||||||
|
with session.begin(subtransactions=True):
|
||||||
|
for cidr in set(allowed_cidrs):
|
||||||
|
cidr_dict = {'listener_id': listener_id, 'cidr': cidr}
|
||||||
|
model = self.model_class(**cidr_dict)
|
||||||
|
session.add(model)
|
||||||
|
|
||||||
|
def update(self, session, listener_id, allowed_cidrs):
|
||||||
|
"""Updates allowed CIDRs in the database by listener_id."""
|
||||||
|
with session.begin(subtransactions=True):
|
||||||
|
session.query(self.model_class).filter_by(
|
||||||
|
listener_id=listener_id).delete()
|
||||||
|
self.create(session, listener_id, allowed_cidrs)
|
||||||
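A sketch of how the new ListenerCidrRepository is meant to be used (the session setup and listener ID are placeholders; create() de-duplicates the input with set(), and update() clears the existing rows before re-creating them):

    from octavia.db import api as db_api
    from octavia.db import repositories

    repos = repositories.Repositories()
    session = db_api.get_session()

    # Initial set of CIDRs; the duplicate entry is collapsed by set():
    repos.listener_cidr.create(session, 'LISTENER-ID',
                               ['192.0.2.0/24', '192.0.2.0/24'])
    # Replace the whole list: delete existing rows, then re-create.
    repos.listener_cidr.update(session, 'LISTENER-ID', ['198.51.100.0/24'])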
|
|
||||||
|
|
||||||
class PoolRepository(BaseRepository):
|
class PoolRepository(BaseRepository):
|
||||||
model_class = models.Pool
|
model_class = models.Pool
|
||||||
|
|
||||||
@ -993,6 +1013,7 @@ class ListenerRepository(BaseRepository):
|
|||||||
subqueryload(models.Listener.load_balancer),
|
subqueryload(models.Listener.load_balancer),
|
||||||
subqueryload(models.Listener.sni_containers),
|
subqueryload(models.Listener.sni_containers),
|
||||||
subqueryload(models.Listener._tags),
|
subqueryload(models.Listener._tags),
|
||||||
|
subqueryload(models.Listener.allowed_cidrs),
|
||||||
noload('*'))
|
noload('*'))
|
||||||
|
|
||||||
return super(ListenerRepository, self).get_all(
|
return super(ListenerRepository, self).get_all(
|
||||||
@ -1054,11 +1075,25 @@ class ListenerRepository(BaseRepository):
|
|||||||
sni = models.SNI(listener_id=id,
|
sni = models.SNI(listener_id=id,
|
||||||
tls_certificate_id=container_ref)
|
tls_certificate_id=container_ref)
|
||||||
listener_db.sni_containers.append(sni)
|
listener_db.sni_containers.append(sni)
|
||||||
|
if 'allowed_cidrs' in model_kwargs:
|
||||||
|
# allowed_cidrs is being updated. It is either being set or
|
||||||
|
# unset/cleared. We need to update in DB side.
|
||||||
|
allowed_cidrs = model_kwargs.pop('allowed_cidrs', []) or []
|
||||||
|
listener_db.allowed_cidrs = []
|
||||||
|
if allowed_cidrs:
|
||||||
|
listener_db.allowed_cidrs = [
|
||||||
|
models.ListenerCidr(listener_id=id, cidr=cidr)
|
||||||
|
for cidr in allowed_cidrs]
|
||||||
listener_db.update(model_kwargs)
|
listener_db.update(model_kwargs)
|
||||||
|
|
||||||
def create(self, session, **model_kwargs):
|
def create(self, session, **model_kwargs):
|
||||||
"""Creates a new Listener with some validation."""
|
"""Creates a new Listener with some validation."""
|
||||||
with session.begin(subtransactions=True):
|
with session.begin(subtransactions=True):
|
||||||
|
listener_id = model_kwargs.get('id')
|
||||||
|
allowed_cidrs = set(model_kwargs.pop('allowed_cidrs', []) or [])
|
||||||
|
model_kwargs['allowed_cidrs'] = [
|
||||||
|
models.ListenerCidr(listener_id=listener_id, cidr=cidr)
|
||||||
|
for cidr in allowed_cidrs]
|
||||||
model = self.model_class(**model_kwargs)
|
model = self.model_class(**model_kwargs)
|
||||||
if model.default_pool_id:
|
if model.default_pool_id:
|
||||||
model.default_pool = self._pool_check(
|
model.default_pool = self._pool_check(
|
||||||
|
@ -133,42 +133,53 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
def _update_security_group_rules(self, load_balancer, sec_grp_id):
|
def _update_security_group_rules(self, load_balancer, sec_grp_id):
|
||||||
rules = self.neutron_client.list_security_group_rules(
|
rules = self.neutron_client.list_security_group_rules(
|
||||||
security_group_id=sec_grp_id)
|
security_group_id=sec_grp_id)
|
||||||
updated_ports = [
|
|
||||||
(listener.protocol_port,
|
updated_ports = []
|
||||||
constants.PROTOCOL_TCP.lower()
|
for l in load_balancer.listeners:
|
||||||
if listener.protocol != constants.PROTOCOL_UDP else
|
if (l.provisioning_status in [constants.PENDING_DELETE,
|
||||||
constants.PROTOCOL_UDP.lower())
|
constants.DELETED]):
|
||||||
for listener in load_balancer.listeners
|
continue
|
||||||
if listener.provisioning_status != constants.PENDING_DELETE and
|
|
||||||
listener.provisioning_status != constants.DELETED]
|
protocol = constants.PROTOCOL_TCP.lower()
|
||||||
|
if l.protocol == constants.PROTOCOL_UDP:
|
||||||
|
protocol = constants.PROTOCOL_UDP.lower()
|
||||||
|
|
||||||
|
if l.allowed_cidrs:
|
||||||
|
for ac in l.allowed_cidrs:
|
||||||
|
port = (l.protocol_port, protocol, ac.cidr)
|
||||||
|
updated_ports.append(port)
|
||||||
|
else:
|
||||||
|
port = (l.protocol_port, protocol, None)
|
||||||
|
updated_ports.append(port)
|
||||||
|
|
||||||
# As the peer port will hold the tcp connection for keepalived and
|
# As the peer port will hold the tcp connection for keepalived and
|
||||||
# haproxy session synchronization, so here the security group rule
|
# haproxy session synchronization, so here the security group rule
|
||||||
# should be just related with tcp protocol only.
|
# should be just related with tcp protocol only.
|
||||||
peer_ports = [
|
updated_ports.append(
|
||||||
(listener.peer_port,
|
(l.peer_port, constants.PROTOCOL_TCP.lower(), None))
|
||||||
constants.PROTOCOL_TCP.lower())
|
|
||||||
for listener in load_balancer.listeners
|
|
||||||
if listener.provisioning_status != constants.PENDING_DELETE and
|
|
||||||
listener.provisioning_status != constants.DELETED]
|
|
||||||
updated_ports.extend(peer_ports)
|
|
||||||
# Just going to use port_range_max for now because we can assume that
|
# Just going to use port_range_max for now because we can assume that
|
||||||
# port_range_max and min will be the same since this driver is
|
# port_range_max and min will be the same since this driver is
|
||||||
# responsible for creating these rules
|
# responsible for creating these rules
|
||||||
old_ports = [(rule.get('port_range_max'), rule.get('protocol'))
|
old_ports = []
|
||||||
for rule in rules.get('security_group_rules', [])
|
for rule in rules.get('security_group_rules', []):
|
||||||
# Don't remove egress rules and don't
|
# Don't remove egress rules and don't confuse other protocols with
|
||||||
# confuse other protocols with None ports
|
# None ports with the egress rules. VRRP uses protocol 51 and 112
|
||||||
# with the egress rules. VRRP uses protocol
|
if (rule.get('direction') == 'egress' or
|
||||||
# 51 and 112
|
rule.get('protocol').upper() not in
|
||||||
if rule.get('direction') != 'egress' and
|
[constants.PROTOCOL_TCP, constants.PROTOCOL_UDP]):
|
||||||
rule.get('protocol', '').lower() in ['tcp', 'udp']]
|
continue
|
||||||
|
old_ports.append((rule.get('port_range_max'),
|
||||||
|
rule.get('protocol').lower(),
|
||||||
|
rule.get('remote_ip_prefix')))
|
||||||
|
|
||||||
add_ports = set(updated_ports) - set(old_ports)
|
add_ports = set(updated_ports) - set(old_ports)
|
||||||
del_ports = set(old_ports) - set(updated_ports)
|
del_ports = set(old_ports) - set(updated_ports)
|
||||||
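The security-group reconciliation now keys rules on (port, protocol, remote CIDR) triples instead of (port, protocol) pairs, so changing a listener's ACL only touches the rules that actually differ. A small illustration with example data:

    updated_ports = [(23456, 'tcp', '192.0.2.0/24'),
                     (23456, 'tcp', '198.51.100.0/24')]
    old_ports = [(23456, 'tcp', '192.0.2.0/24'),
                 (23456, 'tcp', None)]

    add_ports = set(updated_ports) - set(old_ports)
    del_ports = set(old_ports) - set(updated_ports)
    # add_ports -> {(23456, 'tcp', '198.51.100.0/24')}  (new allowed CIDR)
    # del_ports -> {(23456, 'tcp', None)}               (old allow-all rule removed)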
for rule in rules.get('security_group_rules', []):
|
for rule in rules.get('security_group_rules', []):
|
||||||
if (rule.get('protocol', '') and
|
if (rule.get('protocol', '') and
|
||||||
rule.get('protocol', '').lower() in ['tcp', 'udp'] and
|
rule.get('protocol', '').lower() in ['tcp', 'udp'] and
|
||||||
(rule.get('port_range_max'),
|
(rule.get('port_range_max'), rule.get('protocol'),
|
||||||
rule.get('protocol')) in del_ports):
|
rule.get('remote_ip_prefix')) in del_ports):
|
||||||
rule_id = rule.get('id')
|
rule_id = rule.get('id')
|
||||||
try:
|
try:
|
||||||
self.neutron_client.delete_security_group_rule(rule_id)
|
self.neutron_client.delete_security_group_rule(rule_id)
|
||||||
@ -181,7 +192,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
|
|||||||
self._create_security_group_rule(sec_grp_id, port_protocol[1],
|
self._create_security_group_rule(sec_grp_id, port_protocol[1],
|
||||||
port_min=port_protocol[0],
|
port_min=port_protocol[0],
|
||||||
port_max=port_protocol[0],
|
port_max=port_protocol[0],
|
||||||
ethertype=ethertype)
|
ethertype=ethertype,
|
||||||
|
cidr=port_protocol[2])
|
||||||
|
|
||||||
# Currently we are using the VIP network for VRRP
|
# Currently we are using the VIP network for VRRP
|
||||||
# so we need to open up the protocols for it
|
# so we need to open up the protocols for it
|
||||||
|
@ -134,7 +134,8 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
|
|||||||
|
|
||||||
def _create_security_group_rule(self, sec_grp_id, protocol,
|
def _create_security_group_rule(self, sec_grp_id, protocol,
|
||||||
direction='ingress', port_min=None,
|
direction='ingress', port_min=None,
|
||||||
port_max=None, ethertype='IPv6'):
|
port_max=None, ethertype='IPv6',
|
||||||
|
cidr=None):
|
||||||
rule = {
|
rule = {
|
||||||
'security_group_rule': {
|
'security_group_rule': {
|
||||||
'security_group_id': sec_grp_id,
|
'security_group_id': sec_grp_id,
|
||||||
@ -143,8 +144,10 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
|
|||||||
'port_range_min': port_min,
|
'port_range_min': port_min,
|
||||||
'port_range_max': port_max,
|
'port_range_max': port_max,
|
||||||
'ethertype': ethertype,
|
'ethertype': ethertype,
|
||||||
|
'remote_ip_prefix': cidr,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
self.neutron_client.create_security_group_rule(rule)
|
self.neutron_client.create_security_group_rule(rule)
|
||||||
|
|
||||||
def apply_qos_on_port(self, qos_id, port_id):
|
def apply_qos_on_port(self, qos_id, port_id):
|
||||||
|
@ -462,7 +462,8 @@ class SampleDriverDataModels(object):
|
|||||||
constants.CLIENT_CA_TLS_CERTIFICATE_ID:
|
constants.CLIENT_CA_TLS_CERTIFICATE_ID:
|
||||||
self.client_ca_tls_certificate_ref,
|
self.client_ca_tls_certificate_ref,
|
||||||
lib_consts.CLIENT_AUTHENTICATION: constants.CLIENT_AUTH_NONE,
|
lib_consts.CLIENT_AUTHENTICATION: constants.CLIENT_AUTH_NONE,
|
||||||
constants.CLIENT_CRL_CONTAINER_ID: self.client_crl_container_ref
|
constants.CLIENT_CRL_CONTAINER_ID: self.client_crl_container_ref,
|
||||||
|
lib_consts.ALLOWED_CIDRS: ['192.0.2.0/24', '198.51.100.0/24']
|
||||||
}
|
}
|
||||||
|
|
||||||
self.test_listener1_dict.update(self._common_test_dict)
|
self.test_listener1_dict.update(self._common_test_dict)
|
||||||
@ -501,7 +502,7 @@ class SampleDriverDataModels(object):
|
|||||||
|
|
||||||
self.provider_listener1_dict = {
|
self.provider_listener1_dict = {
|
||||||
lib_consts.ADMIN_STATE_UP: True,
|
lib_consts.ADMIN_STATE_UP: True,
|
||||||
lib_consts.ALLOWED_CIDRS: None,
|
lib_consts.ALLOWED_CIDRS: ['192.0.2.0/24', '198.51.100.0/24'],
|
||||||
lib_consts.CONNECTION_LIMIT: 10000,
|
lib_consts.CONNECTION_LIMIT: 10000,
|
||||||
lib_consts.DEFAULT_POOL: self.provider_pool1_dict,
|
lib_consts.DEFAULT_POOL: self.provider_pool1_dict,
|
||||||
lib_consts.DEFAULT_POOL_ID: self.pool1_id,
|
lib_consts.DEFAULT_POOL_ID: self.pool1_id,
|
||||||
|
@ -228,7 +228,6 @@ class DriverAgentTest(base.OctaviaDBTestBase):
|
|||||||
# Add our live certs in that differ from the fake certs in sample_data
|
# Add our live certs in that differ from the fake certs in sample_data
|
||||||
self.provider_listener_dict = copy.deepcopy(
|
self.provider_listener_dict = copy.deepcopy(
|
||||||
self.sample_data.provider_listener1_dict)
|
self.sample_data.provider_listener1_dict)
|
||||||
self.provider_listener_dict['allowed_cidrs'] = None
|
|
||||||
self.provider_listener_dict[
|
self.provider_listener_dict[
|
||||||
lib_consts.DEFAULT_TLS_CONTAINER_REF] = self.cert_ref
|
lib_consts.DEFAULT_TLS_CONTAINER_REF] = self.cert_ref
|
||||||
self.provider_listener_dict[
|
self.provider_listener_dict[
|
||||||
|
@ -43,7 +43,7 @@ class TestRootController(base_db_test.OctaviaDBTestBase):
|
|||||||
def test_api_versions(self):
|
def test_api_versions(self):
|
||||||
versions = self._get_versions_with_config()
|
versions = self._get_versions_with_config()
|
||||||
version_ids = tuple(v.get('id') for v in versions)
|
version_ids = tuple(v.get('id') for v in versions)
|
||||||
self.assertEqual(12, len(version_ids))
|
self.assertEqual(13, len(version_ids))
|
||||||
self.assertIn('v2.0', version_ids)
|
self.assertIn('v2.0', version_ids)
|
||||||
self.assertIn('v2.1', version_ids)
|
self.assertIn('v2.1', version_ids)
|
||||||
self.assertIn('v2.2', version_ids)
|
self.assertIn('v2.2', version_ids)
|
||||||
@ -56,6 +56,7 @@ class TestRootController(base_db_test.OctaviaDBTestBase):
|
|||||||
self.assertIn('v2.9', version_ids)
|
self.assertIn('v2.9', version_ids)
|
||||||
self.assertIn('v2.10', version_ids)
|
self.assertIn('v2.10', version_ids)
|
||||||
self.assertIn('v2.11', version_ids)
|
self.assertIn('v2.11', version_ids)
|
||||||
|
self.assertIn('v2.12', version_ids)
|
||||||
|
|
||||||
# Each version should have a 'self' 'href' to the API version URL
|
# Each version should have a 'self' 'href' to the API version URL
|
||||||
# [{u'rel': u'self', u'href': u'http://localhost/v2'}]
|
# [{u'rel': u'self', u'href': u'http://localhost/v2'}]
|
||||||
|
@ -1136,6 +1136,101 @@ class TestListener(base.BaseAPITest):
|
|||||||
"It must be a valid x509 PEM format certificate.",
|
"It must be a valid x509 PEM format certificate.",
|
||||||
response['faultstring'])
|
response['faultstring'])
|
||||||
|
|
||||||
|
def _test_create_with_allowed_cidrs(self, allowed_cidrs):
|
||||||
|
listener = self.create_listener(constants.PROTOCOL_TCP,
|
||||||
|
80, self.lb_id,
|
||||||
|
allowed_cidrs=allowed_cidrs)
|
||||||
|
listener_path = self.LISTENER_PATH.format(
|
||||||
|
listener_id=listener['listener']['id'])
|
||||||
|
get_listener = self.get(listener_path).json['listener']
|
||||||
|
self.assertEqual(allowed_cidrs, get_listener.get('allowed_cidrs'))
|
||||||
|
|
||||||
|
def test_create_with_allowed_cidrs_ipv4(self):
|
||||||
|
allowed_cidrs = ['10.0.1.0/24', '172.16.55.0/25']
|
||||||
|
self._test_create_with_allowed_cidrs(allowed_cidrs)
|
||||||
|
|
||||||
|
def test_create_with_allowed_cidrs_ipv6(self):
|
||||||
|
allowed_cidrs = ['2001:db8:a0b:12f0::/64', '2a02:8071:69e::/64']
|
||||||
|
with mock.patch('octavia.db.repositories.VipRepository.'
|
||||||
|
'get') as repo_mock:
|
||||||
|
repo_mock.return_value.ip_address = "2001:db9:a1b:13f0::1"
|
||||||
|
self._test_create_with_allowed_cidrs(allowed_cidrs)
|
||||||
|
|
||||||
|
def test_create_with_bad_allowed_cidrs(self):
|
||||||
|
allowed_cidrs = [u'10.0.1.0/33', u'172.16.55.1.0/25']
|
||||||
|
lb_listener = {
|
||||||
|
'protocol': constants.PROTOCOL_TCP,
|
||||||
|
'protocol_port': 80,
|
||||||
|
'project_id': self.project_id,
|
||||||
|
'loadbalancer_id': self.lb_id,
|
||||||
|
'allowed_cidrs': allowed_cidrs}
|
||||||
|
body = self._build_body(lb_listener)
|
||||||
|
response = self.post(self.LISTENERS_PATH, body, status=400).json
|
||||||
|
self.assertIn("Invalid input for field/attribute allowed_cidrs. "
|
||||||
|
"Value: '%s'. Value should be IPv4 or IPv6 CIDR format"
|
||||||
|
% allowed_cidrs, response['faultstring'])
|
||||||
|
|
||||||
|
def test_create_with_incompatible_allowed_cidrs_ipv6(self):
|
||||||
|
lb_listener = {
|
||||||
|
'protocol': constants.PROTOCOL_TCP,
|
||||||
|
'protocol_port': 80,
|
||||||
|
'project_id': self.project_id,
|
||||||
|
'loadbalancer_id': self.lb_id,
|
||||||
|
'allowed_cidrs': ['2001:db8:a0b:12f0::/64']}
|
||||||
|
body = self._build_body(lb_listener)
|
||||||
|
response = self.post(self.LISTENERS_PATH, body, status=400).json
|
||||||
|
self.assertIn("Validation failure: CIDR 2001:db8:a0b:12f0::/64 IP "
|
||||||
|
"version incompatible with VIP 198.0.2.5 IP version.",
|
||||||
|
response['faultstring'])
|
||||||
|
|
||||||
|
def test_create_with_incompatible_allowed_cidrs_ipv4(self):
|
||||||
|
lb_listener = {
|
||||||
|
'protocol': constants.PROTOCOL_TCP,
|
||||||
|
'protocol_port': 80,
|
||||||
|
'project_id': self.project_id,
|
||||||
|
'loadbalancer_id': self.lb_id,
|
||||||
|
'allowed_cidrs': ['10.0.1.0/24']}
|
||||||
|
with mock.patch('octavia.db.repositories.VipRepository.'
|
||||||
|
'get') as repo_mock:
|
||||||
|
repo_mock.return_value.ip_address = "2001:db9:a1b:13f0::1"
|
||||||
|
body = self._build_body(lb_listener)
|
||||||
|
response = self.post(self.LISTENERS_PATH, body, status=400).json
|
||||||
|
self.assertIn("Validation failure: CIDR 10.0.1.0/24 IP version "
|
||||||
|
"incompatible with VIP 2001:db9:a1b:13f0::1 IP "
|
||||||
|
"version.", response['faultstring'])
|
||||||
|
|
||||||
|
def test_create_with_duplicated_allowed_cidrs(self):
|
||||||
|
allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24', '10.0.2.0/24']
|
||||||
|
self.create_listener(constants.PROTOCOL_TCP, 80,
|
||||||
|
self.lb_id, allowed_cidrs=allowed_cidrs)
|
||||||
|
|
||||||
|
def test_update_allowed_cidrs(self):
|
||||||
|
allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24']
|
||||||
|
new_cidrs = ['10.0.1.0/24', '10.0.3.0/24']
|
||||||
|
listener = self.create_listener(constants.PROTOCOL_TCP,
|
||||||
|
80, self.lb_id,
|
||||||
|
allowed_cidrs=allowed_cidrs)
|
||||||
|
self.set_lb_status(self.lb_id)
|
||||||
|
listener_path = self.LISTENER_PATH.format(
|
||||||
|
listener_id=listener['listener']['id'])
|
||||||
|
lb_listener = {'allowed_cidrs': new_cidrs}
|
||||||
|
body = self._build_body(lb_listener)
|
||||||
|
response = self.put(listener_path, body).json.get(self.root_tag)
|
||||||
|
self.assertEqual(new_cidrs, response.get('allowed_cidrs'))
|
||||||
|
|
||||||
|
def test_update_unset_allowed_cidrs(self):
|
||||||
|
allowed_cidrs = ['10.0.1.0/24', '10.0.2.0/24']
|
||||||
|
listener = self.create_listener(constants.PROTOCOL_TCP,
|
||||||
|
80, self.lb_id,
|
||||||
|
allowed_cidrs=allowed_cidrs)
|
||||||
|
self.set_lb_status(self.lb_id)
|
||||||
|
listener_path = self.LISTENER_PATH.format(
|
||||||
|
listener_id=listener['listener']['id'])
|
||||||
|
lb_listener = {'allowed_cidrs': None}
|
||||||
|
body = self._build_body(lb_listener)
|
||||||
|
api_listener = self.put(listener_path, body).json.get(self.root_tag)
|
||||||
|
self.assertIsNone(api_listener.get('allowed_cidrs'))
|
||||||
|
|
||||||
@mock.patch('octavia.api.drivers.utils.call_provider')
|
@mock.patch('octavia.api.drivers.utils.call_provider')
|
||||||
def test_update_with_bad_provider(self, mock_provider):
|
def test_update_with_bad_provider(self, mock_provider):
|
||||||
api_listener = self.create_listener(
|
api_listener = self.create_listener(
|
||||||
|
@@ -2374,7 +2374,9 @@ class TestLoadBalancerGraph(base.BaseAPITest):
                              create_client_authentication=None,
                              expected_client_authentication=constants.CLIENT_AUTH_NONE,
                              create_client_crl_container=None,
-                             expected_client_crl_container=None):
+                             expected_client_crl_container=None,
+                             create_allowed_cidrs=None,
+                             expected_allowed_cidrs=None):
         create_listener = {
             'name': name,
             'protocol_port': protocol_port,
@@ -2397,7 +2399,8 @@ class TestLoadBalancerGraph(base.BaseAPITest):
             'tags': [],
             'client_ca_tls_container_ref': None,
             'client_authentication': constants.CLIENT_AUTH_NONE,
-            'client_crl_container_ref': None
+            'client_crl_container_ref': None,
+            'allowed_cidrs': None
         }
         if create_sni_containers:
             create_listener['sni_container_refs'] = create_sni_containers
@@ -2422,6 +2425,8 @@ class TestLoadBalancerGraph(base.BaseAPITest):
         if create_client_crl_container:
             create_listener['client_crl_container_ref'] = (
                 create_client_crl_container)
+        if create_allowed_cidrs:
+            create_listener['allowed_cidrs'] = create_allowed_cidrs
         if expected_sni_containers:
             expected_listener['sni_container_refs'] = expected_sni_containers
         if expected_l7policies:
@@ -2439,6 +2444,9 @@ class TestLoadBalancerGraph(base.BaseAPITest):
         if expected_client_crl_container:
             expected_listener['client_crl_container_ref'] = (
                 expected_client_crl_container)
+        if expected_allowed_cidrs:
+            expected_listener['allowed_cidrs'] = expected_allowed_cidrs
+
         return create_listener, expected_listener

     def _get_pool_bodies(self, name='pool1', create_members=None,
@@ -2666,6 +2674,18 @@ class TestLoadBalancerGraph(base.BaseAPITest):
         api_lb = response.json.get(self.root_tag)
         self._assert_graphs_equal(expected_lb, api_lb)

+    def test_with_one_listener_allowed_cidrs(self):
+        allowed_cidrs = ['10.0.1.0/24', '172.16.0.0/16']
+        create_listener, expected_listener = self._get_listener_bodies(
+            create_allowed_cidrs=allowed_cidrs,
+            expected_allowed_cidrs=allowed_cidrs)
+        create_lb, expected_lb = self._get_lb_bodies([create_listener],
+                                                     [expected_listener])
+        body = self._build_body(create_lb)
+        response = self.post(self.LBS_PATH, body)
+        api_lb = response.json.get(self.root_tag)
+        self._assert_graphs_equal(expected_lb, api_lb)
+
     # TODO(johnsom) Fix this when there is a noop certificate manager
     @mock.patch('octavia.common.tls_utils.cert_parser.load_certificates_data')
     def test_with_one_listener_sni_containers(self, mock_cert_data):
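The new test_with_one_listener_allowed_cidrs case above creates a full load balancer graph in one call with the ACL set on the listener. For reference, the kind of single-call request body it builds looks roughly like this; the field values are illustrative, and a real request also needs a valid VIP subnet reference:

graph_body = {
    'loadbalancer': {
        'name': 'graph_lb',
        'vip_subnet_id': '<subnet-uuid>',
        'listeners': [{
            'name': 'listener1',
            'protocol': 'TCP',
            'protocol_port': 80,
            # Only these source networks may reach the listener.
            'allowed_cidrs': ['10.0.1.0/24', '172.16.0.0/16'],
        }],
    },
}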
@@ -193,6 +193,10 @@ class ModelTestMixin(object):
         kwargs.update(overrides)
         return self._insert(session, models.L7Rule, kwargs)

+    def create_listener_cidr(self, session, listener_id, cidr):
+        kwargs = {'listener_id': listener_id, 'cidr': cidr}
+        return self._insert(session, models.ListenerCidr, kwargs)
+

 class PoolModelTest(base.OctaviaDBTestBase, ModelTestMixin):

@@ -893,6 +897,8 @@ class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
                                          redirect_pool_id=self.pool.id)
         self.l7rule = self.create_l7rule(self.session,
                                          l7policy_id=self.l7policy.id)
+        self.listener_cidr = self.create_listener_cidr(
+            self.session, listener_id=self.listener.id, cidr='10.0.1.0/24')

     @staticmethod
     def _get_unique_key(obj):
@@ -907,6 +913,8 @@ class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
             return obj.__class__.__name__ + obj.pool_id
         elif obj.__class__.__name__ in ['ListenerStatistics']:
             return obj.__class__.__name__ + obj.listener_id + obj.amphora_id
+        elif obj.__class__.__name__ in ['ListenerCidr']:
+            return obj.__class__.__name__ + obj.listener_id + obj.cidr
         elif obj.__class__.__name__ in ['VRRPGroup', 'Vip']:
             return obj.__class__.__name__ + obj.load_balancer_id
         elif obj.__class__.__name__ in ['AmphoraHealth']:
@@ -972,6 +980,8 @@ class TestDataModelConversionTest(base.OctaviaDBTestBase, ModelTestMixin):
                          self.l7policy.to_data_model()._get_unique_key())
         self.assertEqual(self._get_unique_key(self.l7rule),
                          self.l7rule.to_data_model()._get_unique_key())
+        self.assertEqual(self._get_unique_key(self.listener_cidr),
+                         self.listener_cidr.to_data_model()._get_unique_key())

     def test_graph_completeness(self):
         # Generate equivalent graphs starting arbitrarily from different
@@ -119,7 +119,8 @@ class AllRepositoriesTest(base.OctaviaDBTestBase):
                            'listener_stats', 'amphora', 'sni',
                            'amphorahealth', 'vrrpgroup', 'l7rule', 'l7policy',
                            'amp_build_slots', 'amp_build_req', 'quotas',
-                           'flavor', 'flavor_profile', 'spares_pool')
+                           'flavor', 'flavor_profile', 'spares_pool',
+                           'listener_cidr')
         for repo_attr in repo_attr_names:
             single_repo = getattr(self.repos, repo_attr, None)
             message = ("Class Repositories should have %s instance"
@@ -141,13 +141,6 @@ class TestUtils(base.TestCase):
             'flavor_id': 'flavor_id',
             'provider': 'noop_driver'}
         ref_listeners = copy.deepcopy(self.sample_data.provider_listeners)
-        # TODO(johnsom) Remove this once the listener ACLs patch merges
-        # https://review.opendev.org/#/c/659626/
-        for listener in ref_listeners:
-            try:
-                del listener.allowed_cidrs
-            except AttributeError:
-                pass
         ref_prov_lb_dict = {
             'vip_address': self.sample_data.ip_address,
             'admin_state_up': True,
@@ -220,13 +213,6 @@
         provider_listeners = utils.db_listeners_to_provider_listeners(
             self.sample_data.test_db_listeners)
         ref_listeners = copy.deepcopy(self.sample_data.provider_listeners)
-        # TODO(johnsom) Remove this once the listener ACLs patch merges
-        # https://review.opendev.org/#/c/659626/
-        for listener in ref_listeners:
-            try:
-                del listener.allowed_cidrs
-            except AttributeError:
-                pass
         self.assertEqual(ref_listeners, provider_listeners)

     @mock.patch('octavia.api.drivers.utils._get_secret_data')
@@ -253,9 +239,6 @@
         expect_prov['default_pool'] = expect_pool_prov
         provider_listener = utils.listener_dict_to_provider_dict(
             self.sample_data.test_listener1_dict)
-        # TODO(johnsom) Remove this once the listener ACLs patch merges
-        # https://review.opendev.org/#/c/659626/
-        del expect_prov['allowed_cidrs']
         self.assertEqual(expect_prov, provider_listener)

     @mock.patch('octavia.api.drivers.utils._get_secret_data')
@@ -659,10 +659,15 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             compute_id=t_constants.MOCK_COMPUTE_ID, port_id=port2.get('id'))

     def test_update_vip(self):
+        lc_1 = data_models.ListenerCidr('l1', '10.0.101.0/24')
+        lc_2 = data_models.ListenerCidr('l2', '10.0.102.0/24')
+        lc_3 = data_models.ListenerCidr('l2', '10.0.103.0/24')
         listeners = [data_models.Listener(protocol_port=80, peer_port=1024,
-                                          protocol=constants.PROTOCOL_TCP),
+                                          protocol=constants.PROTOCOL_TCP,
+                                          allowed_cidrs=[lc_1]),
                      data_models.Listener(protocol_port=443, peer_port=1025,
-                                          protocol=constants.PROTOCOL_TCP),
+                                          protocol=constants.PROTOCOL_TCP,
+                                          allowed_cidrs=[lc_2, lc_3]),
                      data_models.Listener(protocol_port=50, peer_port=1026,
                                           protocol=constants.PROTOCOL_UDP)]
         vip = data_models.Vip(ip_address='10.0.0.2')
@@ -671,7 +676,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
         list_sec_grps.return_value = {'security_groups': [{'id': 'secgrp-1'}]}
         fake_rules = {
             'security_group_rules': [
-                {'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp'},
+                {'id': 'rule-80', 'port_range_max': 80, 'protocol': 'tcp',
+                 'remote_ip_prefix': '10.0.101.0/24'},
                 {'id': 'rule-22', 'port_range_max': 22, 'protocol': 'tcp'}
             ]
         }
@@ -688,7 +694,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 'protocol': 'tcp',
                 'port_range_min': 1024,
                 'port_range_max': 1024,
-                'ethertype': 'IPv4'
+                'ethertype': 'IPv4',
+                'remote_ip_prefix': None
             }
         }
         expected_create_rule_udp_peer = {
@@ -698,7 +705,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 'protocol': 'tcp',
                 'port_range_min': 1026,
                 'port_range_max': 1026,
-                'ethertype': 'IPv4'
+                'ethertype': 'IPv4',
+                'remote_ip_prefix': None
             }
         }
         expected_create_rule_2 = {
@@ -708,7 +716,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 'protocol': 'tcp',
                 'port_range_min': 1025,
                 'port_range_max': 1025,
-                'ethertype': 'IPv4'
+                'ethertype': 'IPv4',
+                'remote_ip_prefix': None
             }
         }
         expected_create_rule_3 = {
@@ -718,7 +727,19 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 'protocol': 'tcp',
                 'port_range_min': 443,
                 'port_range_max': 443,
-                'ethertype': 'IPv4'
+                'ethertype': 'IPv4',
+                'remote_ip_prefix': '10.0.102.0/24'
+            }
+        }
+        expected_create_rule_4 = {
+            'security_group_rule': {
+                'security_group_id': 'secgrp-1',
+                'direction': 'ingress',
+                'protocol': 'tcp',
+                'port_range_min': 443,
+                'port_range_max': 443,
+                'ethertype': 'IPv4',
+                'remote_ip_prefix': '10.0.103.0/24'
             }
         }
         expected_create_rule_udp = {
@@ -728,7 +749,8 @@ class TestAllowedAddressPairsDriver(base.TestCase):
                 'protocol': 'udp',
                 'port_range_min': 50,
                 'port_range_max': 50,
-                'ethertype': 'IPv4'
+                'ethertype': 'IPv4',
+                'remote_ip_prefix': None
             }
         }
@@ -736,6 +758,7 @@ class TestAllowedAddressPairsDriver(base.TestCase):
             mock.call(expected_create_rule_udp_peer),
             mock.call(expected_create_rule_2),
             mock.call(expected_create_rule_3),
+            mock.call(expected_create_rule_4),
             mock.call(expected_create_rule_udp)],
             any_order=True)
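The expected_create_rule_* dictionaries above show how the allowed-address-pairs driver is expected to translate listener ACLs into Neutron security group rules: one ingress rule per listener port per allowed CIDR, with the CIDR carried in remote_ip_prefix, and a single unrestricted rule (remote_ip_prefix of None) when no ACL is set. A rough sketch of that expansion, with illustrative names rather than the driver's real methods:

def build_ingress_rules(sec_grp_id, port, protocol, allowed_cidrs=None):
    # One ingress rule per allowed CIDR; a single unrestricted rule when
    # the listener has no ACL (remote_ip_prefix stays None in that case).
    rules = []
    for cidr in (allowed_cidrs or [None]):
        rules.append({
            'security_group_rule': {
                'security_group_id': sec_grp_id,
                'direction': 'ingress',
                'protocol': protocol,
                'port_range_min': port,
                'port_range_max': port,
                'ethertype': 'IPv4',
                'remote_ip_prefix': cidr,
            }
        })
    return rules

# For the 443 listener above this yields two rules, one per allowed CIDR.
rules_443 = build_ingress_rules('secgrp-1', 443, 'tcp',
                                ['10.0.102.0/24', '10.0.103.0/24'])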
@@ -137,7 +137,8 @@ class TestBaseNeutronNetworkDriver(base.TestCase):
             protocol=2,
             port_min=3,
             port_max=4,
-            ethertype=5)
+            ethertype=5,
+            cidr="10.0.0.0/24")
         expected_sec_grp_rule_dict = {
             'security_group_rule': {
                 'security_group_id': t_constants.MOCK_SECURITY_GROUP_ID,
@@ -145,7 +146,8 @@ class TestBaseNeutronNetworkDriver(base.TestCase):
                 'protocol': 2,
                 'port_range_min': 3,
                 'port_range_max': 4,
-                'ethertype': 5}}
+                'ethertype': 5,
+                'remote_ip_prefix': '10.0.0.0/24'}}
         self.driver.neutron_client.create_security_group_rule.assert_has_calls(
             [mock.call(expected_sec_grp_rule_dict)])
releasenotes/notes/add-vip-acl-4a7e20d167fe4a49.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+---
+features:
+  - |
+    Added support for VIP access control lists. Users can now limit incoming
+    traffic to a set of allowed CIDRs.
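A minimal sketch of driving the new field from Python against the v2 listener API; the endpoint, token, and listener ID are placeholders, and the request shape mirrors the listener update body exercised by the tests above:

import requests

# Placeholder values; substitute a real endpoint, token and listener ID.
octavia_endpoint = 'http://198.51.100.10:9876'
listener_id = '023f2e34-7806-443b-bfae-16c324569a3d'
token = '<token>'

# Restrict the listener to one source network. Sending
# {"allowed_cidrs": null} instead clears the ACL again.
body = {'listener': {'allowed_cidrs': ['203.0.113.0/24']}}
resp = requests.put(
    '%s/v2/lbaas/listeners/%s' % (octavia_endpoint, listener_id),
    json=body, headers={'X-Auth-Token': token})
resp.raise_for_status()
print(resp.json()['listener']['allowed_cidrs'])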