Clean useless tables in Tricircle after splitting

1. What is the problem?
The Tricircle is now dedicated to networking automation across Neutron. Some
tables that were only used by the API gateway, such as the aggregate and pod
binding tables, should be removed; they should no longer reside in the
Tricircle. Other tables whose names carry the old meaning but which are still
in use should be renamed for better understanding. See the blueprint [1] for
further explanation.

2. What is the solution to the problem?
The data models, tables and APIs for aggregates, pod bindings, etc. should be
removed. After the pod binding table is removed, the az_hint used for
external network creation is hard to match, so special handling needs to be
implemented. Other tables will have vague names after this splitting but are
still in use in the Tricircle, so they should be renamed for better
understanding. What's more, pod_name in the pod table is renamed to
region_name, which better reflects that it must match the region name
registered in Keystone. The tables concerned are listed below; a rough sketch
of the renamed models follows the lists.

  1) Tables to be removed:
    * aggregates
    * aggregate_metadata
    * instance_types
    * instance_type_projects
    * instance_type_extra_specs
    * key_pairs
    * pod_binding
  2) Tables to be renamed:
    * cascaded_pod_service_configuration (new name: cached_endpoints)
    * cascaded_pods (new name: pods)
    * cascaded_pods_resource_routing (new name: resource_routings)
    * job (new name: async_jobs)
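
Although tricircle/db/models.py itself is not among the hunks shown below,
the renamed tables imply model classes roughly like the following. This is
only an illustrative sketch in plain declarative SQLAlchemy (not the
project's actual base classes); resource_routings is omitted and the column
lengths that are not visible in the migration hunk at the end of this commit
are guesses:

import sqlalchemy as sql
from sqlalchemy.ext import declarative

Base = declarative.declarative_base()


class Pod(Base):
    """Former cascaded_pods table, now simply pods."""
    __tablename__ = 'pods'
    pod_id = sql.Column('pod_id', sql.String(36), primary_key=True)
    # pod_name is renamed to region_name; it must match the region name
    # registered in Keystone
    region_name = sql.Column('region_name', sql.String(255),
                             unique=True, nullable=False)
    pod_az_name = sql.Column('pod_az_name', sql.String(255), nullable=True)
    dc_name = sql.Column('dc_name', sql.String(255), nullable=True)
    # nullability assumed, not visible in the migration hunk
    az_name = sql.Column('az_name', sql.String(255), nullable=True)


class CachedEndpoint(Base):
    """Former cascaded_pod_service_configuration table."""
    __tablename__ = 'cached_endpoints'
    service_id = sql.Column('service_id', sql.String(64), primary_key=True)
    pod_id = sql.Column('pod_id', sql.String(64), nullable=False)
    service_type = sql.Column('service_type', sql.String(64), nullable=False)
    # length assumed, the migration hunk is truncated here
    service_url = sql.Column('service_url', sql.String(512), nullable=False)


class AsyncJob(Base):
    """Former job table, now async_jobs."""
    __tablename__ = 'async_jobs'
    # the fields (type, status, resource_id, extra_id, timestamp) appear in
    # the db api hunks of this commit; the column widths are assumptions
    id = sql.Column('id', sql.String(36), primary_key=True)
    type = sql.Column('type', sql.String(36))
    timestamp = sql.Column('timestamp', sql.TIMESTAMP)
    status = sql.Column('status', sql.String(36))
    resource_id = sql.Column('resource_id', sql.String(127))
    extra_id = sql.Column('extra_id', sql.String(36))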

3. What features need to be implemented in the Tricircle to realize
the solution?
After the pod binding table is removed, the az_hint used for external
network creation can no longer be matched through pod bindings. A new lookup
against the pod table (find_pod_by_az), together with the new
default_region_for_external_network option, will be implemented to solve
this problem.
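
As a purely illustrative sketch of how a caller such as the central plugin
could use the new lookup: find_pod_by_az and the
default_region_for_external_network option come from this commit, while the
helper function, the hint handling and the [tricircle] config group are
assumptions:

from oslo_config import cfg

from tricircle.db import api as db_api


def resolve_region_for_external_network(context, availability_zone_hints):
    # hypothetical helper, not part of this commit
    az_hint = availability_zone_hints[0] if availability_zone_hints else ''
    # find_pod_by_az returns None for an empty hint and raises PodNotFound
    # when the hint matches neither a region_name nor an az_name
    pod = db_api.find_pod_by_az(context, az_hint)
    if pod:
        return pod['region_name']
    # no az hint given: fall back to the new configuration item
    return cfg.CONF.tricircle.default_region_for_external_network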

[1] https://blueprints.launchpad.net/tricircle/+spec/clean-legacy-tables
Change-Id: I025b4fb48c70abf424bd458fac0dc888e5fa19fd
Dongfeng Huang 2016-12-19 09:28:35 +08:00
parent a88640d044
commit d65601a4ff
42 changed files with 508 additions and 1906 deletions

View File

@ -75,7 +75,7 @@ admin_username=admin
admin_password=$ADMIN_PASSWORD
admin_tenant=demo
auto_refresh_endpoint=True
top_pod_name=$CENTRAL_REGION_NAME
top_region_name=$CENTRAL_REGION_NAME
[tricircle]
real_core_plugin=neutron.plugins.ml2.plugin.Ml2Plugin

View File

@ -76,7 +76,7 @@ admin_username=admin
admin_password=$ADMIN_PASSWORD
admin_tenant=demo
auto_refresh_endpoint=True
top_pod_name=$CENTRAL_REGION_NAME
top_region_name=$CENTRAL_REGION_NAME
[tricircle]
real_core_plugin=neutron.plugins.ml2.plugin.Ml2Plugin

View File

@ -65,7 +65,7 @@ admin_username=admin
admin_password=$ADMIN_PASSWORD
admin_tenant=demo
auto_refresh_endpoint=True
top_pod_name=$CENTRAL_REGION_NAME
top_region_name=$CENTRAL_REGION_NAME
[tricircle]
real_core_plugin=neutron.plugins.ml2.plugin.Ml2Plugin

View File

@ -49,7 +49,7 @@ function init_common_tricircle_conf {
iniset $conf_file client admin_password $ADMIN_PASSWORD
iniset $conf_file client admin_tenant demo
iniset $conf_file client auto_refresh_endpoint True
iniset $conf_file client top_pod_name $CENTRAL_REGION_NAME
iniset $conf_file client top_region_name $CENTRAL_REGION_NAME
iniset $conf_file oslo_concurrency lock_path $TRICIRCLE_STATE_PATH/lock
}
@ -115,7 +115,7 @@ function start_central_neutron_server {
iniset $NEUTRON_CONF.$server_index client admin_password $ADMIN_PASSWORD
iniset $NEUTRON_CONF.$server_index client admin_tenant demo
iniset $NEUTRON_CONF.$server_index client auto_refresh_endpoint True
iniset $NEUTRON_CONF.$server_index client top_pod_name $CENTRAL_REGION_NAME
iniset $NEUTRON_CONF.$server_index client top_region_name $CENTRAL_REGION_NAME
if [ "$Q_ML2_PLUGIN_VLAN_TYPE_OPTIONS" != "" ]; then
iniset $NEUTRON_CONF.$server_index tricircle type_drivers local,shared_vlan

View File

@ -48,13 +48,13 @@ token=$(openstack token issue | awk 'NR==5 {print $4}')
echo $token
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "RegionOne"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "Pod1", "az_name": "az1"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod2", "az_name": "az2"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "Pod2", "az_name": "az2"}}'
echo "******************************"
echo "* Verify Nova *"

View File

@ -44,10 +44,10 @@ token=$(openstack token issue | awk 'NR==5 {print $4}')
echo $token
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "RegionOne"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "Pod1", "az_name": "az1"}}'
echo "******************************"
echo "* Verify Nova *"

View File

@ -58,18 +58,14 @@ following table.
+===========+=======+===============+=====================================================+
|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
+-----------+-------+---------------+-----------------------------------------------------+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
| | | |pod for local Neutron, the Tricircle automatically |
| | | |creates a host aggregation and assigns the new |
| | | |availability zone id to it. |
|region_name|body | string |region_name is specified by user but must match the |
| | | |region name registered in Keystone. |
+-----------+-------+---------------+-----------------------------------------------------+
|az_name |body | string |When az_name is empty, it means this is a pod for |
| | | |central Neutron, no host aggregation will be |
| | | |generated. If az_name is not empty, it means the pod |
| | | |will belong to this availability zone. Multiple pods |
| | | |with the same az_name means that these pods are under|
| | | |the same availability zone. |
| | | |central Neutron. If az_name is not empty, it means |
| | | |the pod will belong to this availability zone. |
| | | |Multiple pods with the same az_name means that these |
| | | |pods are under the same availability zone. |
+-----------+-------+---------------+-----------------------------------------------------+
|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
| | | |Neutron when creating network, router objects. It |
@ -97,21 +93,21 @@ This is an example of response information for GET /pods.
"pod_az_name": "",
"pod_id": "1a51bee7-10f0-47e8-bb4a-70f51394069c",
"az_name": "",
"pod_name": "RegionOne"
"region_name": "RegionOne"
},
{
"dc_name": "",
"pod_az_name": "",
"pod_id": "22cca6ad-b791-4805-af14-923c5224fcd2",
"az_name": "az2",
"pod_name": "Pod2"
"region_name": "Pod2"
},
{
"dc_name": "",
"pod_az_name": "",
"pod_id": "3c22e5d4-5fed-45ed-a1e9-d532668cedc2",
"az_name": "az1",
"pod_name": "Pod1"
"region_name": "Pod1"
}
]
}
@ -144,18 +140,14 @@ All of its attributes are described in the following table.
+===========+=======+===============+=====================================================+
|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
+-----------+-------+---------------+-----------------------------------------------------+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
| | | |pod for local Neutron, the Tricircle automatically |
| | | |creates a host aggregation and assigns the new |
| | | |availability zone id to it. |
|region_name|body | string |region_name is specified by user but must match the |
| | | |region name registered in Keystone. |
+-----------+-------+---------------+-----------------------------------------------------+
|az_name |body | string |When az_name is empty, it means this is a pod for |
| | | |central Neutron, no host aggregation will be |
| | | |generated. If az_name is not empty, it means the pod |
| | | |will belong to this availability zone. Multiple pods |
| | | |with the same az_name means that these pods are under|
| | | |the same availability zone. |
| | | |central Neutron. If az_name is not empty, it means |
| | | |the pod will belong to this availability zone. |
| | | |Multiple pods with the same az_name means that these |
| | | |pods are under the same availability zone. |
+-----------+-------+---------------+-----------------------------------------------------+
|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
| | | |Neutron when creating network, router objects. It |
@ -182,7 +174,7 @@ This is an example of response information for GET /pods/{pod_id}.
"pod_az_name": "",
"pod_id": "3c22e5d4-5fed-45ed-a1e9-d532668cedc2",
"az_name": "az1",
"pod_name": "Pod1"
"region_name": "Pod1"
}
}
@ -202,18 +194,14 @@ in the following table.
+-----------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+===========+=======+===============+=====================================================+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
| | | |pod for local Neutron, the Tricircle automatically |
| | | |creates a host aggregation and assigns the new |
| | | |availability zone id to it. |
|region_name|body | string |region_name is specified by user but must match the |
| | | |region name registered in Keystone. |
+-----------+-------+---------------+-----------------------------------------------------+
|az_name |body | string |When az_name is empty, it means this is a pod for |
| | | |central Neutron, no host aggregation will be |
| | | |generated. If az_name is not empty, it means the pod |
| | | |will belong to this availability zone. Multiple pods |
| | | |with the same az_name means that these pods are under|
| | | |the same availability zone. |
| | | |central Neutron. If az_name is not empty, it means |
| | | |the pod will belong to this availability zone. |
| | | |Multiple pods with the same az_name means that these |
| | | |pods are under the same availability zone. |
+-----------+-------+---------------+-----------------------------------------------------+
|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
| | | |Neutron when creating network, router objects. It |
@ -238,18 +226,14 @@ are listed below.
+===========+=======+===============+=====================================================+
|pod_id |body | string |pod_id is automatically generated when creating a pod|
+-----------+-------+---------------+-----------------------------------------------------+
|pod_name |body | string |pod_name is specified by user but must match the |
| | | |region name registered in Keystone. When creating a |
| | | |pod for local Neutron, the Tricircle automatically |
| | | |creates a host aggregation and assigns the new |
| | | |availability zone id to it. |
|region_name|body | string |region_name is specified by user but must match the |
| | | |region name registered in Keystone. |
+-----------+-------+---------------+-----------------------------------------------------+
|az_name |body | string |When az_name is empty, it means this is a pod for |
| | | |central Neutron, no host aggregation will be |
| | | |generated. If az_name is not empty, it means the pod |
| | | |will belong to this availability zone. Multiple pods |
| | | |with the same az_name means that these pods are under|
| | | |the same availability zone. |
| | | |central Neutron. If az_name is not empty, it means |
| | | |the pod will belong to this availability zone. |
| | | |Multiple pods with the same az_name means that these |
| | | |pods are under the same availability zone. |
+-----------+-------+---------------+-----------------------------------------------------+
|pod_az_name|body | string |pod_az_name is the az name used in the pod for local |
| | | |Neutron when creating network, router objects. It |
@ -272,7 +256,7 @@ This is an example of request information for POST /pods.
{
"pod": {
"pod_name": "Pod3",
"region_name": "Pod3",
"az_name": "az1",
"pod_az_name": "az1",
"dc_name": "data center 1"
@ -291,7 +275,7 @@ This is an example of response information for POST /pods.
"pod_az_name": "az1",
"pod_id": "e02e03b8-a94f-4eb1-991e-a8a271cc2313",
"az_name": "az1",
"pod_name": "Pod3"
"region_name": "Pod3"
}
}
@ -318,235 +302,6 @@ Normal Response Code: 200
There is no response. But we can list all the pods to verify whether the
specific pod has been deleted or not.
Pod Binding
===========
A pod binding represents a mapping relationship between tenant and pod. Pods
for local Neutron are classified into different categories. A tenant will be
bound to different pod groups for different purposes. Only the pod for local
Neutron could be bound with a tenant. Pod for central Neutron serves as the
coordinator of networking automation across local Neutron servers.
+------------------+------------+---------------------+-------------------------------------+
|**GET** |/bindings | |Retrieve Pod Binding List |
+------------------+------------+---------------------+-------------------------------------+
This fetches all the pod bindings.
Normal Response Code: 200
**Response**
Pod bindings contain one or more binding instances whose attributes
are listed in the following table.
+-------------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+=============+=======+===============+=====================================================+
|tenant_id |body | string |tenant_id is automatically generated when adding a |
| | | |uuid of a project object in KeyStone. "Tenant" is an |
| | | |old term for a project in Keystone. Starting in API |
| | | |version 3, "project" is the preferred term. |
| | | |Accordingly, project_id is used instead of tenant_id.|
+-------------+-------+---------------+-----------------------------------------------------+
|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
+-------------+-------+---------------+-----------------------------------------------------+
|id |body | string |id is a uuid attribute of the pod binding. It is |
| | | |automatically generated when new binding relation |
| | | |happens between tenant and pod. |
+-------------+-------+---------------+-----------------------------------------------------+
|created_at |body | timestamp |created time of the pod binding. |
+-------------+-------+---------------+-----------------------------------------------------+
|updated_at |body | timestamp |updated time of the pod binding. |
+-------------+-------+---------------+-----------------------------------------------------+
**Response Example**
This is an example of response information for GET /bindings.
::
{
"pod_bindings": [
{
"updated_at": null,
"tenant_id": "1782b3310f144836aa73c1ac5117d8da",
"created_at": "2016-06-03 07:37:50",
"id": "6ba7510c-baeb-44ad-8815-c4d229b52e46",
"pod_id": "22cca6ad-b791-4805-af14-923c5224fcd2"
},
{
"updated_at": null,
"tenant_id": "1782b3310f144836aa73c1ac5117d8da",
"created_at": "2016-06-03 07:37:06",
"id": "f0a54f30-6208-499d-b087-0ac64f6f2756",
"pod_id": "3c22e5d4-5fed-45ed-a1e9-d532668cedc2"
}
]
}
+------------------+---------------+-------------+---------------------------------------+
|**GET** |/bindings/{id} | |Retrieve a Single Pod Binding |
+------------------+---------------+-------------+---------------------------------------+
This fetches a single pod binding.
Normal Response Code: 200
**Request**
+-------------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+=============+=======+===============+=====================================================+
|id |path | string |id is a uuid attribute of the pod binding. It is |
| | | |automatically generated when new binding relation |
| | | |happens between tenant and pod. |
+-------------+-------+---------------+-----------------------------------------------------+
**Response**
Pod binding represents a mapping relationship between tenant and pod. All
of its attributes are described in the following table.
+-------------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+=============+=======+===============+=====================================================+
|tenant_id |body | string |tenant_id is automatically generated when adding a |
| | | |uuid of a project object in KeyStone. "Tenant" is an |
| | | |old term for a project in Keystone. Starting in API |
| | | |version 3, "project" is the preferred term. |
| | | |Accordingly, project_id is used instead of tenant_id.|
+-------------+-------+---------------+-----------------------------------------------------+
|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
+-------------+-------+---------------+-----------------------------------------------------+
|id |body | string |id is a uuid attribute of the pod binding. It is |
| | | |automatically generated when new binding relation |
| | | |happens between tenant and pod. |
+-------------+-------+---------------+-----------------------------------------------------+
|created_at |body | timestamp |created time of the pod binding. |
+-------------+-------+---------------+-----------------------------------------------------+
|updated_at |body | timestamp |updated time of the pod binding. |
+-------------+-------+---------------+-----------------------------------------------------+
**Response Example**
This is an example of response information for GET /bindings/{id}.
::
{
"pod_binding": {
"updated_at": null,
"tenant_id": "1782b3310f144836aa73c1ac5117d8da",
"created_at": "2016-06-03 07:37:06",
"id": "f0a54f30-6208-499d-b087-0ac64f6f2756",
"pod_id": "3c22e5d4-5fed-45ed-a1e9-d532668cedc2"
}
}
+---------------+-----------+--------------------+------------------------------------------+
|**POST** |/bindings | |Create a Pod Binding |
+---------------+-----------+--------------------+------------------------------------------+
This creates a pod binding.
Normal Response Code: 200
**Request**
Some essential attributes of the pod binding instance are required and
described in the following table.
+-------------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+=============+=======+===============+=====================================================+
|tenant_id |body | string |tenant_id is automatically generated when adding a |
| | | |uuid of a project object in KeyStone. "Tenant" is an |
| | | |old term for a project in Keystone. Starting in API |
| | | |version 3, "project" is the preferred term. |
| | | |Accordingly, project_id is used instead of tenant_id.|
+-------------+-------+---------------+-----------------------------------------------------+
|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
+-------------+-------+---------------+-----------------------------------------------------+
**Response**
An id is assigned to a pod binding instance when it is created, and some other
attribute values are given meanwhile. All of its fields are listed below.
+-------------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+=============+=======+===============+=====================================================+
|tenant_id |body | string |tenant_id is automatically generated when adding a |
| | | |uuid of a project object in KeyStone. "Tenant" is an |
| | | |old term for a project in Keystone. Starting in API |
| | | |version 3, "project" is the preferred term. |
| | | |Accordingly, project_id is used instead of tenant_id.|
+-------------+-------+---------------+-----------------------------------------------------+
|pod_id |body | string |pod_id is a uuid attribute of the pod object. |
+-------------+-------+---------------+-----------------------------------------------------+
|id |body | string |id is a uuid attribute of the pod binding. It is |
| | | |automatically generated when new binding relation |
| | | |happens between tenant and pod. |
+-------------+-------+---------------+-----------------------------------------------------+
|created_at |body | timestamp |created time of the pod binding. |
+-------------+-------+---------------+-----------------------------------------------------+
|updated_at |body | timestamp |updated time of the pod binding. |
+-------------+-------+---------------+-----------------------------------------------------+
**Request Example**
This is an example of request information for POST /bindings.
::
{
"pod_binding": {
"tenant_id": "1782b3310f144836aa73c1ac5117d8da",
"pod_id": "e02e03b8-a94f-4eb1-991e-a8a271cc2313"
}
}
**Response Example**
This is an example of response information for POST /bindings.
::
{
"pod_binding": {
"updated_at": null,
"tenant_id": "1782b3310f144836aa73c1ac5117d8da",
"created_at": "2016-08-18 14:06:33",
"id": "b17ac347-c898-4cea-a09d-7b0a6ec34f56",
"pod_id": "e02e03b8-a94f-4eb1-991e-a8a271cc2313"
}
}
+---------------+----------------+---------------+------------------------------------------+
|**DELETE** |/bindings/{id} | |Delete a Pod Binding |
+---------------+----------------+---------------+------------------------------------------+
This deletes a pod binding.
Normal Response Code: 200
**Request**
+-----------+-------+---------------+-----------------------------------------------------+
|Name |In | Type | Description |
+===========+=======+===============+=====================================================+
|id |path | string |id is a uuid attribute of the pod binding. It is |
| | | |automatically generated when new binding relation |
| | | |happens between tenant and pod. |
+-----------+-------+---------------+-----------------------------------------------------+
**Response**
There is no response. But we can list all the pod bindings to verify
whether the specific pod binding has been deleted or not.
Resource Routing
================
The Tricircle is responsible for resource(for example, network, subnet, port,
@ -954,3 +709,4 @@ from the database.
}
}

View File

@ -51,7 +51,7 @@ Central Plugin.
- (Integer) timeout for neutron client in seconds.
* - ``ns_bridge_cidr`` = ``100.128.0.0/9``
- (String) cidr pool of the north-south bridge network, for example, 100.128.0.0/9
* - ``top_pod_name`` = ``None``
* - ``top_region_name`` = ``None``
- (String) region name of Central Neutron in which client needs to access, for example, CentralRegion.
@ -146,6 +146,8 @@ configured in central Neutron's neutron.conf.
-
* - ``bridge_network_type`` = ``shared_vlan``
- (String) Type of l3 bridge network, this type should be enabled in tenant_network_types and is not local type, for example, shared_vlan.
* - ``default_region_for_external_network`` = ``RegionOne``
- (String) Default region where the external network belongs to, it must exist, for example, RegionOne.
* - ``network_vlan_ranges`` = ``None``
- (String) List of <physical_network>:<vlan_min>:<vlan_max> or <physical_network> specifying physical_network names usable for VLAN provider and tenant networks, as well as ranges of VLAN tags on each available for allocation to tenant networks, for example,bridge:2001:3000.
* - ``tenant_network_types`` = ``local,shared_vlan``

View File

@ -78,7 +78,7 @@ Installation with Central Neutron Server
[client] auth_url, "keystone authorization url", http://$keystone_service_host:5000/v3
[client] identity_url, "keystone service url", http://$keystone_service_host:35357/v3
[client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
[client] top_pod_name, "name of central region which client needs to access", CentralRegion
[client] top_region_name, "name of central region which client needs to access", CentralRegion
[client] admin_username, "username of admin account", admin
[client] admin_password, "password of admin account", password
[client] admin_tenant, "project name of admin account", demo
@ -118,7 +118,7 @@ Installation with Central Neutron Server
[client] auth_url, "keystone authorization url", http://$keystone_service_host:5000/v3
[client] identity_url, "keystone service url", http://$keystone_service_host:35357/v3
[client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
[client] top_pod_name, "name of central region which client needs to access", CentralRegion
[client] top_region_name, "name of central region which client needs to access", CentralRegion
[client] admin_username, "username of admin account", admin
[client] admin_password, "password of admin account", password
[client] admin_tenant, "project name of admin account", demo
@ -161,7 +161,7 @@ Installation with Central Neutron Server
[client] auth_url, "keystone authorization url", http://$keystone_service_host:5000/v3
[client] identity_url, "keystone service url", http://$keystone_service_host:35357/v3
[client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
[client] top_pod_name, "name of central region which client needs to access", CentralRegion
[client] top_region_name, "name of central region which client needs to access", CentralRegion
[client] admin_username, "username of admin account", admin
[client] admin_password, "password of admin account", password
[client] admin_tenant, "project name of admin account", demo
@ -211,7 +211,7 @@ Installation with Local Neutron Server
[client] auth_url, "keystone authorization url", http://$keystone_service_host:5000/v3
[client] identity_url, "keystone service url", http://$keystone_service_host:35357/v3
[client] auto_refresh_endpoint, "if set to True, endpoint will be automatically refreshed if timeout accessing", True
[client] top_pod_name, "name of central region which client needs to access", CentralRegion
[client] top_region_name, "name of central region which client needs to access", CentralRegion
[client] admin_username, "username of admin account", admin
[client] admin_password, "password of admin account", password
[client] admin_tenant, "project name of admin account", demo

View File

@ -261,15 +261,15 @@ How to play
availability zones and OpenStack instances, "$token" is obtained in step 4 ::
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "CentralRegion"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "CentralRegion"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne", "az_name": "az1"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "RegionOne", "az_name": "az1"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionTwo", "az_name": "az2"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "RegionTwo", "az_name": "az2"}}'
Pay attention to "pod_name" parameter we specify when creating pod. Pod name
Pay attention to "region_name" parameter we specify when creating pod. Pod name
should exactly match the region name registered in Keystone. In the above
commands, we create pods named "CentralRegion", "RegionOne" and "RegionTwo".

View File

@ -68,12 +68,12 @@ installing DevStack in virtual machine.
step 7::
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "CentralRegion"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "CentralRegion"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods -H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne", "az_name": "az1"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "RegionOne", "az_name": "az1"}}'
Pay attention to "pod_name" parameter we specify when creating pod. Pod name
Pay attention to "region_name" parameter we specify when creating pod. Pod name
should exactly match the region name registered in Keystone. In the above
commands, we create pods named "CentralRegion" and "RegionOne".

View File

@ -22,17 +22,16 @@ of external network creation, it will take 'availability_zone_hints' (AZ or
az will be used for short for availability zone) as a parameter. Previously
az_hints was searched in the pod binding table by az_name and tenant_id, now
the pod binding table is deprecated and new search strategy is needed to fix
the problem[2]. A function named find_by_az will be developed to find the
the problem[2]. A function named find_pod_by_az will be developed to find the
az_hints by az_name in the pod table. Given the az_name, if it is not empty,
we first match it with region_name in the pod table. When a pod with the same
region_name is found, it will be returned back. The search procedure is
complete. If no pod is found with the same region_name, then we try to match
it with az_name in the pod table. If multiple pods are found, then we will
raise an exception. If only one pod is found, this pod will be returned back.
However, if no pod is matched at the end of the previous search procedure or
the az_name is empty, a new configuration item
"default_region_for_external_network" will be used, a pod with this region_name
will be returned back.
An exception will be raised if no pod is matched at the end of the previous
search procedure. However, if the az_name is empty, we will return None, a new
configuration item "default_region_for_external_network" will be used.
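
A hedged sketch, not part of the commit, of how the search strategy described
above could be exercised once pods named "Pod1" (az_name "az1") and "Pod2"
(az_name "az2") exist, as in the verification scripts earlier in this commit;
only find_pod_by_az, its contract and PodNotFound are taken from the hunks:

from tricircle.common import exceptions
from tricircle.db import api as db_api


def check_find_pod_by_az(context):
    # a region name takes priority: a hint equal to a region_name
    # returns that pod directly
    pod = db_api.find_pod_by_az(context, 'Pod1')
    assert pod['region_name'] == 'Pod1'

    # otherwise the hint is matched against az_name; exactly one pod
    # with az_name 'az1' exists here, so it is returned
    pod = db_api.find_pod_by_az(context, 'az1')
    assert pod['az_name'] == 'az1'

    # an empty hint returns None so the caller can fall back to
    # default_region_for_external_network
    assert db_api.find_pod_by_az(context, '') is None

    # a hint matching neither region_name nor az_name raises PodNotFound
    try:
        db_api.find_pod_by_az(context, 'no-such-az')
        raise AssertionError('PodNotFound expected')
    except exceptions.PodNotFound:
        pass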
Proposed Change
===============

View File

@ -22,13 +22,11 @@ import oslo_db.exception as db_exc
from oslo_log import log as logging
from oslo_utils import uuidutils
from tricircle.common import az_ag
import tricircle.common.context as t_context
import tricircle.common.exceptions as t_exc
from tricircle.common.i18n import _
from tricircle.common.i18n import _LE
from tricircle.common import policy
from tricircle.common import utils
from tricircle.db import api as db_api
from tricircle.db import core
@ -57,18 +55,18 @@ class PodsController(rest.RestController):
pod = kw['pod']
# if az_name is null, and there is already one in db
pod_name = pod.get('pod_name', '').strip()
region_name = pod.get('region_name', '').strip()
pod_az_name = pod.get('pod_az_name', '').strip()
dc_name = pod.get('dc_name', '').strip()
az_name = pod.get('az_name', '').strip()
_uuid = uuidutils.generate_uuid()
if az_name == '' and pod_name == '':
return Response(_('Valid pod_name is required for top region'),
if az_name == '' and region_name == '':
return Response(_('Valid region_name is required for top region'),
422)
if az_name != '' and pod_name == '':
return Response(_('Valid pod_name is required for pod'), 422)
if az_name != '' and region_name == '':
return Response(_('Valid region_name is required for pod'), 422)
if pod.get('az_name') is None:
if self._get_top_region(context) != '':
@ -77,7 +75,8 @@ class PodsController(rest.RestController):
# if az_name is not null, then the pod region name should not
# be same as that the top region
if az_name != '':
if self._get_top_region(context) == pod_name and pod_name != '':
if (self._get_top_region(context) == region_name and
region_name != ''):
return Response(
_('Pod region name duplicated with the top region name'),
409)
@ -88,36 +87,26 @@ class PodsController(rest.RestController):
try:
with context.session.begin():
# if not top region,
# then add corresponding ag and az for the pod
if az_name != '':
ag_name = utils.get_ag_name(pod_name)
aggregate = az_ag.create_ag_az(context,
ag_name=ag_name,
az_name=az_name)
if aggregate is None:
return Response(_('Ag creation failure'), 400)
new_pod = core.create_resource(
context, models.Pod,
{'pod_id': _uuid,
'pod_name': pod_name,
'region_name': region_name,
'pod_az_name': pod_az_name,
'dc_name': dc_name,
'az_name': az_name})
except db_exc.DBDuplicateEntry as e1:
LOG.exception(_LE('Record already exists on %(pod_name)s: '
LOG.exception(_LE('Record already exists on %(region_name)s: '
'%(exception)s'),
{'pod_name': pod_name,
{'region_name': region_name,
'exception': e1})
return Response(_('Record already exists'), 409)
except Exception as e2:
LOG.exception(_LE('Failed to create pod: %(pod_name)s,'
LOG.exception(_LE('Failed to create pod: %(region_name)s,'
'pod_az_name: %(pod_az_name)s,'
'dc_name: %(dc_name)s,'
'az_name: %(az_name)s'
'%(exception)s '),
{'pod_name': pod_name,
{'region_name': region_name,
'pod_az_name': pod_az_name,
'dc_name': dc_name,
'az_name': az_name,
@ -167,12 +156,6 @@ class PodsController(rest.RestController):
try:
with context.session.begin():
pod = core.get_resource(context, models.Pod, _id)
if pod is not None:
ag_name = utils.get_ag_name(pod['pod_name'])
ag = az_ag.get_ag_by_name(context, ag_name)
if ag is not None:
az_ag.delete_ag(context, ag['id'])
core.delete_resource(context, models.Pod, _id)
pecan.response.status = 200
return {}
@ -193,8 +176,8 @@ class PodsController(rest.RestController):
pods = core.query_resource(ctx,
models.Pod, [], [])
for pod in pods:
if pod['az_name'] == '' and pod['pod_name'] != '':
return pod['pod_name']
if pod['az_name'] == '' and pod['region_name'] != '':
return pod['region_name']
except Exception as e:
LOG.exception(_LE('Failed to get top region: %(exception)s '),
{'exception': e})
@ -202,119 +185,3 @@ class PodsController(rest.RestController):
return top_region_name
return top_region_name
class BindingsController(rest.RestController):
def __init__(self):
pass
@expose(generic=True, template='json')
def post(self, **kw):
context = t_context.extract_context_from_environ()
if not policy.enforce(context, policy.ADMIN_API_BINDINGS_CREATE):
pecan.abort(401, _('Unauthorized to create bindings'))
return
if 'pod_binding' not in kw:
pecan.abort(400, _('Request body not found'))
return
pod_b = kw['pod_binding']
tenant_id = pod_b.get('tenant_id', '').strip()
pod_id = pod_b.get('pod_id', '').strip()
if tenant_id == '' or pod_id == '':
return Response(
_('Tenant_id and pod_id can not be empty'),
422)
# the az_pod_map_id should be exist for in the pod map table
try:
with context.session.begin():
pod = core.get_resource(context, models.Pod,
pod_id)
if pod.get('az_name') == '':
return Response(_('Top region can not be bound'), 422)
except t_exc.ResourceNotFound:
return Response(_('pod_id not found in pod'), 422)
except Exception as e:
LOG.exception(_LE('Failed to get_resource for pod_id: '
'%(pod_id)s ,'
'%(exception)s '),
{'pod_id': pod_id,
'exception': e})
pecan.abort(500, _('Failed to create pod binding'))
return
try:
pod_binding = db_api.create_pod_binding(
context, tenant_id, pod_id)
except db_exc.DBDuplicateEntry:
return Response(_('Pod binding already exists'), 409)
except db_exc.DBConstraintError:
return Response(_('pod_id not exists in pod'), 422)
except db_exc.DBReferenceError:
return Response(_('DB reference not exists in pod'), 422)
except Exception as e:
LOG.exception(_LE('Failed to create pod binding: %(exception)s '),
{'exception': e})
pecan.abort(500, _('Failed to create pod binding'))
return
return {'pod_binding': pod_binding}
@expose(generic=True, template='json')
def get_one(self, _id):
context = t_context.extract_context_from_environ()
if not policy.enforce(context, policy.ADMIN_API_BINDINGS_SHOW):
pecan.abort(401, _('Unauthorized to show bindings'))
return
try:
with context.session.begin():
pod_binding = core.get_resource(context,
models.PodBinding,
_id)
return {'pod_binding': pod_binding}
except t_exc.ResourceNotFound:
pecan.abort(404, _('Tenant pod binding not found'))
return
@expose(generic=True, template='json')
def get_all(self):
context = t_context.extract_context_from_environ()
if not policy.enforce(context, policy.ADMIN_API_BINDINGS_LIST):
pecan.abort(401, _('Unauthorized to list bindings'))
return
try:
with context.session.begin():
pod_bindings = core.query_resource(context,
models.PodBinding,
[], [])
except Exception:
pecan.abort(500, _('Fail to list tenant pod bindings'))
return
return {'pod_bindings': pod_bindings}
@expose(generic=True, template='json')
def delete(self, _id):
context = t_context.extract_context_from_environ()
if not policy.enforce(context, policy.ADMIN_API_BINDINGS_DELETE):
pecan.abort(401, _('Unauthorized to delete bindings'))
return
try:
with context.session.begin():
core.delete_resource(context, models.PodBinding, _id)
pecan.response.status = 200
return {}
except t_exc.ResourceNotFound:
pecan.abort(404, _('Pod binding not found'))
return

View File

@ -74,7 +74,6 @@ class V1Controller(object):
self.sub_controllers = {
"pods": pod.PodsController(),
"bindings": pod.BindingsController(),
"routings": routing.RoutingController()
}

View File

@ -60,22 +60,6 @@ class RoutingController(rest.RestController):
400, _("Field %(field)s can not be empty") % {
'field': field})
# verify the integrity: the pod_id and the project_id should be bound
pod_id = routing.get('pod_id').strip()
project_id = routing.get('project_id').strip()
bindings = db_api.list_pod_bindings(context,
[{'key': 'pod_id',
'comparator': 'eq',
'value': pod_id
},
{'key': 'tenant_id',
'comparator': 'eq',
'value': project_id}
], [])
if len(bindings) == 0:
return utils.format_api_error(
400, _('The pod_id and project_id have not been bound'))
# the resource type should be properly provisioned.
resource_type = routing.get('resource_type').strip()
if not constants.is_valid_resource_type(resource_type):
@ -85,6 +69,8 @@ class RoutingController(rest.RestController):
try:
top_id = routing.get('top_id').strip()
bottom_id = routing.get('bottom_id').strip()
pod_id = routing.get('pod_id').strip()
project_id = routing.get('project_id').strip()
routing = db_api.create_resource_mapping(context, top_id,
bottom_id, pod_id,
@ -184,7 +170,7 @@ class RoutingController(rest.RestController):
403, _('Unauthorized to update resource routing'))
try:
routing = db_api.get_resource_routing(context, _id)
db_api.get_resource_routing(context, _id)
except t_exc.ResourceNotFound:
return utils.format_api_error(404,
_('Resource routing not found'))
@ -210,32 +196,6 @@ class RoutingController(rest.RestController):
return utils.format_api_error(
400, _('There is no such resource type'))
# verify the integrity: the pod_id and project_id should be bound
if 'pod_id' in update_dict or 'project_id' in update_dict:
if 'pod_id' in update_dict:
pod_id = update_dict['pod_id']
else:
pod_id = routing['pod_id']
if 'project_id' in update_dict:
project_id = update_dict['project_id']
else:
project_id = routing['project_id']
bindings = db_api.list_pod_bindings(context,
[{'key': 'pod_id',
'comparator': 'eq',
'value': pod_id
},
{'key': 'tenant_id',
'comparator': 'eq',
'value': project_id}
], [])
if len(bindings) == 0:
return utils.format_api_error(
400, _('The pod_id and project_id have not been '
'bound'))
try:
routing_updated = db_api.update_resource_routing(
context, _id, update_dict)

View File

@ -1,178 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_log import log as logging
from oslo_utils import uuidutils
from tricircle.common.i18n import _LE
from tricircle.db import api as db_api
from tricircle.db import core
from tricircle.db import models
LOG = logging.getLogger(__name__)
def create_ag_az(context, ag_name, az_name):
aggregate = core.create_resource(context, models.Aggregate,
{'name': ag_name})
core.create_resource(
context, models.AggregateMetadata,
{'key': 'availability_zone',
'value': az_name,
'aggregate_id': aggregate['id']})
extra_fields = {
'availability_zone': az_name,
'metadata': {'availability_zone': az_name}
}
aggregate.update(extra_fields)
return aggregate
def get_one_ag(context, aggregate_id):
aggregate = core.get_resource(context, models.Aggregate, aggregate_id)
metadatas = core.query_resource(
context, models.AggregateMetadata,
[{'key': 'key', 'comparator': 'eq',
'value': 'availability_zone'},
{'key': 'aggregate_id', 'comparator': 'eq',
'value': aggregate['id']}], [])
if metadatas:
aggregate['availability_zone'] = metadatas[0]['value']
aggregate['metadata'] = {
'availability_zone': metadatas[0]['value']}
else:
aggregate['availability_zone'] = ''
aggregate['metadata'] = {}
return aggregate
def get_ag_by_name(context, ag_name):
filters = [{'key': 'name',
'comparator': 'eq',
'value': ag_name}]
aggregates = get_all_ag(context, filters)
if aggregates is not None:
if len(aggregates) == 1:
return aggregates[0]
return None
def delete_ag(context, aggregate_id):
core.delete_resources(context, models.AggregateMetadata,
[{'key': 'aggregate_id',
'comparator': 'eq',
'value': aggregate_id}])
core.delete_resource(context, models.Aggregate, aggregate_id)
return
def get_all_ag(context, filters=None, sorts=None):
aggregates = core.query_resource(context,
models.Aggregate,
filters or [],
sorts or [])
metadatas = core.query_resource(
context, models.AggregateMetadata,
[{'key': 'key',
'comparator': 'eq',
'value': 'availability_zone'}], [])
agg_meta_map = {}
for metadata in metadatas:
agg_meta_map[metadata['aggregate_id']] = metadata
for aggregate in aggregates:
extra_fields = {
'availability_zone': '',
'metadata': {}
}
if aggregate['id'] in agg_meta_map:
metadata = agg_meta_map[aggregate['id']]
extra_fields['availability_zone'] = metadata['value']
extra_fields['metadata'] = {
'availability_zone': metadata['value']}
aggregate.update(extra_fields)
return aggregates
def get_pod_by_az_tenant(context, az_name, tenant_id):
pod_bindings = core.query_resource(context,
models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': tenant_id}],
[])
for pod_b in pod_bindings:
pod = core.get_resource(context,
models.Pod,
pod_b['pod_id'])
if az_name and pod['az_name'] == az_name:
return pod, pod['pod_az_name']
elif az_name == '' and pod['az_name'] != '':
# if the az_name is not specified, a defult bottom
# pod will be selected
return pod, pod['pod_az_name']
else:
pass
# TODO(joehuang): schedule one dynamically in the future
if az_name != '':
filters = [{'key': 'az_name', 'comparator': 'eq', 'value': az_name}]
else:
filters = None
# if az_name is valid, select a pod under this az_name
# if az_name is '', select the first valid bottom pod.
# change to dynamic schedluing in the future
pods = db_api.list_pods(context, filters=filters)
for pod in pods:
if pod['pod_name'] != '' and pod['az_name'] != '':
try:
with context.session.begin():
core.create_resource(
context, models.PodBinding,
{'id': uuidutils.generate_uuid(),
'tenant_id': tenant_id,
'pod_id': pod['pod_id'],
'is_binding': True})
return pod, pod['pod_az_name']
except Exception as e:
LOG.error(_LE('Fail to create pod binding: %(exception)s'),
{'exception': e})
return None, None
return None, None
def list_pods_by_tenant(context, tenant_id):
pod_bindings = core.query_resource(context,
models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': tenant_id}],
[])
pods = []
if pod_bindings:
for pod_b in pod_bindings:
pod = core.get_resource(context,
models.Pod,
pod_b['pod_id'])
pods.append(pod)
return pods

View File

@ -45,7 +45,7 @@ client_opts = [
default=False,
help='if set to True, endpoint will be automatically'
'refreshed if timeout accessing endpoint'),
cfg.StrOpt('top_pod_name',
cfg.StrOpt('top_region_name',
help='name of top pod which client needs to access'),
cfg.StrOpt('admin_username',
help='username of admin account, needed when'
@ -167,14 +167,14 @@ class Client(object):
you can call create_resources(self, resource, cxt, body) directly to create
a network, or use create_networks(self, cxt, body) for short.
"""
def __init__(self, pod_name=None):
def __init__(self, region_name=None):
self.auth_url = cfg.CONF.client.auth_url
self.resource_service_map = {}
self.operation_resources_map = collections.defaultdict(set)
self.service_handle_map = {}
self.pod_name = pod_name
if not self.pod_name:
self.pod_name = cfg.CONF.client.top_pod_name
self.region_name = region_name
if not self.region_name:
self.region_name = cfg.CONF.client.top_region_name
for _, handle_class in inspect.getmembers(resource_handle):
if not inspect.isclass(handle_class):
continue
@ -238,7 +238,7 @@ class Client(object):
return region_service_endpoint_map
def _get_config_with_retry(self, cxt, filters, pod, service, retry):
conf_list = api.list_pod_service_configurations(cxt, filters)
conf_list = api.list_cached_endpoints(cxt, filters)
if len(conf_list) == 0:
if not retry:
raise exceptions.EndpointNotFound(pod, service)
@ -250,14 +250,14 @@ class Client(object):
def _ensure_endpoint_set(self, cxt, service):
handle = self.service_handle_map[service]
if not handle.is_endpoint_url_set():
pod_filters = [{'key': 'pod_name',
pod_filters = [{'key': 'region_name',
'comparator': 'eq',
'value': self.pod_name}]
'value': self.region_name}]
pod_list = api.list_pods(cxt, pod_filters)
if len(pod_list) == 0:
raise exceptions.ResourceNotFound(models.Pod,
self.pod_name)
# pod_name is unique key, safe to get the first element
self.region_name)
# region_name is unique key, safe to get the first element
pod_id = pod_list[0]['pod_id']
config_filters = [
{'key': 'pod_id', 'comparator': 'eq', 'value': pod_id},
@ -287,7 +287,7 @@ class Client(object):
for region in endpoint_map:
# use region name to query pod
pod_filters = [{'key': 'pod_name', 'comparator': 'eq',
pod_filters = [{'key': 'region_name', 'comparator': 'eq',
'value': region}]
pod_list = api.list_pods(cxt, pod_filters)
# skip region/pod not registered in cascade service
@ -299,7 +299,7 @@ class Client(object):
'value': pod_id},
{'key': 'service_type', 'comparator': 'eq',
'value': service}]
config_list = api.list_pod_service_configurations(
config_list = api.list_cached_endpoints(
cxt, config_filters)
if len(config_list) > 1:
@ -308,7 +308,7 @@ class Client(object):
config_id = config_list[0]['service_id']
update_dict = {
'service_url': endpoint_map[region][service]}
api.update_pod_service_configuration(
api.update_cached_endpoints(
cxt, config_id, update_dict)
else:
config_dict = {
@ -317,7 +317,7 @@ class Client(object):
'service_type': service,
'service_url': endpoint_map[region][service]
}
api.create_pod_service_configuration(
api.create_cached_endpoints(
cxt, config_dict)
def get_endpoint(self, cxt, pod_id, service):
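
Purely as a usage note (not in the commit): the renamed constructor argument
would be used as below; create_networks is taken from the docstring shown in
this hunk, and the context setup is omitted:

from tricircle.common import client

# explicit region: talk to the services registered under RegionOne
region_client = client.Client(region_name='RegionOne')

# no argument: fall back to cfg.CONF.client.top_region_name, i.e. the
# central region (formerly cfg.CONF.client.top_pod_name)
central_client = client.Client()

# resource calls keep the same form as before, e.g.
# central_client.create_networks(cxt, {'network': {'name': 'net1'}})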

View File

@ -191,10 +191,10 @@ class ExternalNetPodNotSpecify(TricircleException):
class PodNotFound(NotFound):
message = "Pod %(pod_name)s could not be found."
message = "Pod %(region_name)s could not be found."
def __init__(self, pod_name):
super(PodNotFound, self).__init__(pod_name=pod_name)
def __init__(self, region_name):
super(PodNotFound, self).__init__(region_name=region_name)
# parameter validation error

View File

@ -116,9 +116,9 @@ def get_bottom_url(t_ver, t_url, b_ver, b_endpoint):
return b_url
def get_pod_service_endpoint(context, pod_name, st):
def get_pod_service_endpoint(context, region_name, st):
pod = db_api.get_pod_by_name(context, pod_name)
pod = db_api.get_pod_by_name(context, region_name)
if pod:
c = client.Client()
@ -127,10 +127,10 @@ def get_pod_service_endpoint(context, pod_name, st):
return ''
def get_pod_service_ctx(context, t_url, pod_name, s_type=cons.ST_NOVA):
def get_pod_service_ctx(context, t_url, region_name, s_type=cons.ST_NOVA):
t_ver = get_version_from_url(t_url)
b_endpoint = get_pod_service_endpoint(context,
pod_name,
region_name,
s_type)
b_ver = get_version_from_url(b_endpoint)
b_url = ''
@ -169,14 +169,14 @@ def get_res_routing_ref(context, _id, t_url, s_type):
if not pod:
return None
pod_name = pod['pod_name']
region_name = pod['region_name']
s_ctx = get_pod_service_ctx(context, t_url, pod_name,
s_ctx = get_pod_service_ctx(context, t_url, region_name,
s_type=s_type)
if s_ctx['b_url'] == '':
LOG.error(_LE("bottom pod endpoint incorrect %s") %
pod_name)
region_name)
return s_ctx

View File

@ -52,11 +52,6 @@ ADMIN_API_PODS_DELETE = 'admin_api:pods:delete'
ADMIN_API_PODS_SHOW = 'admin_api:pods:show'
ADMIN_API_PODS_LIST = 'admin_api:pods:list'
ADMIN_API_BINDINGS_CREATE = 'admin_api:bindings:create'
ADMIN_API_BINDINGS_DELETE = 'admin_api:bindings:delete'
ADMIN_API_BINDINGS_SHOW = 'admin_api:bindings:show'
ADMIN_API_BINDINGS_LIST = 'admin_api:bindings:list'
ADMIN_API_ROUTINGS_CREATE = 'admin_api:routings:create'
ADMIN_API_ROUTINGS_DELETE = 'admin_api:routings:delete'
ADMIN_API_ROUTINGS_PUT = 'admin_api:routings:put'
@ -77,19 +72,6 @@ tricircle_admin_api_policies = [
'rule:admin_api',
description='List pods'),
policy.RuleDefault(ADMIN_API_BINDINGS_CREATE,
'rule:admin_api',
description='Create pod binding'),
policy.RuleDefault(ADMIN_API_BINDINGS_DELETE,
'rule:admin_api',
description='Delete pod binding'),
policy.RuleDefault(ADMIN_API_BINDINGS_SHOW,
'rule:admin_api',
description='Show pod binding detail'),
policy.RuleDefault(ADMIN_API_BINDINGS_LIST,
'rule:admin_api',
description='List pod bindings'),
policy.RuleDefault(ADMIN_API_ROUTINGS_CREATE,
'rule:admin_api',
description='Create resource routing'),

View File

@ -212,7 +212,6 @@ class NovaResourceHandle(ResourceHandle):
service_type = cons.ST_NOVA
support_resource = {'flavor': LIST,
'server': LIST | CREATE | DELETE | GET | ACTION,
'aggregate': LIST | CREATE | DELETE | ACTION,
'server_volume': ACTION}
def _get_client(self, cxt):

View File

@ -29,16 +29,16 @@ def get_import_path(cls):
return cls.__module__ + "." + cls.__name__
def get_ag_name(pod_name):
return 'ag_%s' % pod_name
def get_ag_name(region_name):
return 'ag_%s' % region_name
def get_az_name(pod_name):
return 'az_%s' % pod_name
def get_az_name(region_name):
return 'az_%s' % region_name
def get_node_name(pod_name):
return "cascade_%s" % pod_name
def get_node_name(region_name):
return "cascade_%s" % region_name
def validate_required_fields_set(body, fields):

View File

@ -61,69 +61,38 @@ def update_pod(context, pod_id, update_dict):
return core.update_resource(context, models.Pod, pod_id, update_dict)
def change_pod_binding(context, pod_binding, pod_id):
with context.session.begin():
core.update_resource(context, models.PodBinding,
pod_binding['id'], pod_binding)
core.create_resource(context, models.PodBinding,
{'id': uuidutils.generate_uuid(),
'tenant_id': pod_binding['tenant_id'],
'pod_id': pod_id,
'is_binding': True})
def get_pod_binding_by_tenant_id(context, filter_):
with context.session.begin():
return core.query_resource(context, models.PodBinding, filter_, [])
def get_pod_by_pod_id(context, pod_id):
with context.session.begin():
return core.get_resource(context, models.Pod, pod_id)
def create_pod_service_configuration(context, config_dict):
def create_cached_endpoints(context, config_dict):
with context.session.begin():
return core.create_resource(context, models.PodServiceConfiguration,
return core.create_resource(context, models.CachedEndpoint,
config_dict)
def create_pod_binding(context, tenant_id, pod_id):
def delete_cached_endpoints(context, config_id):
with context.session.begin():
return core.create_resource(context, models.PodBinding,
{'id': uuidutils.generate_uuid(),
'tenant_id': tenant_id,
'pod_id': pod_id,
'is_binding': True})
def list_pod_bindings(context, filters=None, sorts=None):
with context.session.begin():
return core.query_resource(context, models.PodBinding,
filters or [], sorts or [])
def delete_pod_service_configuration(context, config_id):
with context.session.begin():
return core.delete_resource(context, models.PodServiceConfiguration,
return core.delete_resource(context, models.CachedEndpoint,
config_id)
def get_pod_service_configuration(context, config_id):
def get_cached_endpoints(context, config_id):
with context.session.begin():
return core.get_resource(context, models.PodServiceConfiguration,
return core.get_resource(context, models.CachedEndpoint,
config_id)
def list_pod_service_configurations(context, filters=None, sorts=None):
return core.query_resource(context, models.PodServiceConfiguration,
def list_cached_endpoints(context, filters=None, sorts=None):
return core.query_resource(context, models.CachedEndpoint,
filters or [], sorts or [])
def update_pod_service_configuration(context, config_id, update_dict):
def update_cached_endpoints(context, config_id, update_dict):
with context.session.begin():
return core.update_resource(
context, models.PodServiceConfiguration, config_id, update_dict)
context, models.CachedEndpoint, config_id, update_dict)
def create_resource_mapping(context, top_id, bottom_id, pod_id, project_id,
@ -207,18 +176,19 @@ def delete_pre_created_resource_mapping(context, name):
entries[0]['id'])
def get_bottom_id_by_top_id_pod_name(context, top_id, pod_name, resource_type):
def get_bottom_id_by_top_id_region_name(context, top_id,
region_name, resource_type):
"""Get resource bottom id by top id and bottom pod name
:param context: context object
:param top_id: resource id on top
:param pod_name: name of bottom pod
:param region_name: name of bottom pod
:param resource_type: resource type
:return:
"""
mappings = get_bottom_mappings_by_top_id(context, top_id, resource_type)
for pod, bottom_id in mappings:
if pod['pod_name'] == pod_name:
if pod['region_name'] == region_name:
return bottom_id
return None
@ -290,26 +260,58 @@ def get_top_pod(context):
# only one should be searched
for pod in pods:
if (pod['pod_name'] != '') and \
if (pod['region_name'] != '') and \
(pod['az_name'] == ''):
return pod
return None
def get_pod_by_name(context, pod_name):
def get_pod_by_name(context, region_name):
filters = [{'key': 'pod_name', 'comparator': 'eq', 'value': pod_name}]
filters = [{'key': 'region_name',
'comparator': 'eq', 'value': region_name}]
pods = list_pods(context, filters=filters)
# only one should be searched
for pod in pods:
if pod['pod_name'] == pod_name:
if pod['region_name'] == region_name:
return pod
return None
def find_pod_by_az(context, az_name):
# if az_name is None or empty, returning None value directly.
if az_name is None or az_name == '':
return None
# if the az_name is not empty, first match it with the region_name in the
# pod table, if no pod is found, then match it with az_name
filters = [{'key': 'region_name',
'comparator': 'eq', 'value': az_name}]
pods = list_pods(context, filters=filters)
if pods:
return pods[0]
# if no pod with the same region_name is found, then match
# it with az_name
filters = [{'key': 'az_name',
'comparator': 'eq', 'value': az_name}]
pods = list_pods(context, filters=filters)
# if no pod is matched, then we will raise an exception
if len(pods) < 1:
raise exceptions.PodNotFound(az_name)
# if the pods list only contain one pod, then this pod will be
# returned back
if len(pods) == 1:
return pods[0]
# if the pods list contains more than one pod, then we will raise an
# exception
if len(pods) > 1:
raise Exception('Multiple pods with the same az_name are found')
def new_job(context, _type, resource_id):
with context.session.begin():
job_dict = {'id': uuidutils.generate_uuid(),
@ -317,7 +319,8 @@ def new_job(context, _type, resource_id):
'status': constants.JS_New,
'resource_id': resource_id,
'extra_id': uuidutils.generate_uuid()}
job = core.create_resource(context, models.Job, job_dict)
job = core.create_resource(context,
models.AsyncJob, job_dict)
return job
@ -329,7 +332,8 @@ def register_job(context, _type, resource_id):
'status': constants.JS_Running,
'resource_id': resource_id,
'extra_id': constants.SP_EXTRA_ID}
job = core.create_resource(context, models.Job, job_dict)
job = core.create_resource(context,
models.AsyncJob, job_dict)
context.session.commit()
return job
except db_exc.DBDuplicateEntry:
@ -344,15 +348,17 @@ def register_job(context, _type, resource_id):
def get_latest_failed_jobs(context):
jobs = []
query = context.session.query(models.Job.type, models.Job.resource_id,
sql.func.count(models.Job.id))
query = query.group_by(models.Job.type, models.Job.resource_id)
query = context.session.query(models.AsyncJob.type,
models.AsyncJob.resource_id,
sql.func.count(models.AsyncJob.id))
query = query.group_by(models.AsyncJob.type, models.AsyncJob.resource_id)
for job_type, resource_id, count in query:
_query = context.session.query(models.Job)
_query = context.session.query(models.AsyncJob)
_query = _query.filter_by(type=job_type, resource_id=resource_id)
_query = _query.order_by(sql.desc('timestamp'))
# when timestamps of job entries are the same, sort entries by status
# so "Fail" job is placed before "New" and "Success" jobs
# when timestamps of async job entries are the same, sort entries by
# status so "Fail" async job is placed before "New" and "Success"
# async jobs
_query = _query.order_by(sql.asc('status'))
latest_job = _query[0].to_dict()
if latest_job['status'] == constants.JS_Fail:
@ -362,7 +368,7 @@ def get_latest_failed_jobs(context):
def get_latest_timestamp(context, status, _type, resource_id):
jobs = core.query_resource(
context, models.Job,
context, models.AsyncJob,
[{'key': 'status', 'comparator': 'eq', 'value': status},
{'key': 'type', 'comparator': 'eq', 'value': _type},
{'key': 'resource_id', 'comparator': 'eq', 'value': resource_id}],
@ -375,7 +381,7 @@ def get_latest_timestamp(context, status, _type, resource_id):
def get_running_job(context, _type, resource_id):
jobs = core.query_resource(
context, models.Job,
context, models.AsyncJob,
[{'key': 'resource_id', 'comparator': 'eq', 'value': resource_id},
{'key': 'status', 'comparator': 'eq', 'value': constants.JS_Running},
{'key': 'type', 'comparator': 'eq', 'value': _type}], [])
@ -391,7 +397,8 @@ def finish_job(context, job_id, successful, timestamp):
job_dict = {'status': status,
'timestamp': timestamp,
'extra_id': uuidutils.generate_uuid()}
core.update_resource(context, models.Job, job_id, job_dict)
core.update_resource(context, models.AsyncJob,
job_id, job_dict)
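Taken together, new_job, register_job and finish_job now read and write models.AsyncJob. A hedged sketch of the intended lifecycle, assuming the helpers sit in tricircle.db.api, that new_job returns a dict carrying the generated id, and that the passed context holds a usable database session:

import datetime

from tricircle.db import api as db_api


def run_tracked_job(context, job_type, resource_id, func):
    # record the async job, run the work, then mark the AsyncJob row as
    # succeeded or failed
    job = db_api.new_job(context, job_type, resource_id)
    try:
        func()
        db_api.finish_job(context, job['id'], True,
                          datetime.datetime.utcnow())
    except Exception:
        db_api.finish_job(context, job['id'], False,
                          datetime.datetime.utcnow())
        raise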
def _is_user_context(context):

View File

@ -22,10 +22,10 @@ def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
cascaded_pods = sql.Table(
'cascaded_pods', meta,
pods = sql.Table(
'pods', meta,
sql.Column('pod_id', sql.String(length=36), primary_key=True),
sql.Column('pod_name', sql.String(length=255), unique=True,
sql.Column('region_name', sql.String(length=255), unique=True,
nullable=False),
sql.Column('pod_az_name', sql.String(length=255), nullable=True),
sql.Column('dc_name', sql.String(length=255), nullable=True),
@ -33,8 +33,8 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
cascaded_pod_service_configuration = sql.Table(
'cascaded_pod_service_configuration', meta,
cached_endpoints = sql.Table(
'cached_endpoints', meta,
sql.Column('service_id', sql.String(length=64), primary_key=True),
sql.Column('pod_id', sql.String(length=64), nullable=False),
sql.Column('service_type', sql.String(length=64), nullable=False),
@ -42,33 +42,12 @@ def upgrade(migrate_engine):
mysql_engine='InnoDB',
mysql_charset='utf8')
pod_binding = sql.Table(
'pod_binding', meta,
sql.Column('id', sql.String(36), primary_key=True),
sql.Column('tenant_id', sql.String(length=255), nullable=False),
sql.Column('pod_id', sql.String(length=255), nullable=False),
sql.Column('is_binding', sql.Boolean, nullable=False),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
migrate.UniqueConstraint(
'tenant_id', 'pod_id',
name='pod_binding0tenant_id0pod_id'),
mysql_engine='InnoDB',
mysql_charset='utf8')
tables = [cascaded_pods, cascaded_pod_service_configuration,
pod_binding]
tables = [pods, cached_endpoints]
for table in tables:
table.create()
fkey = {'columns': [cascaded_pod_service_configuration.c.pod_id],
'references': [cascaded_pods.c.pod_id]}
migrate.ForeignKeyConstraint(columns=fkey['columns'],
refcolumns=fkey['references'],
name=fkey.get('name')).create()
fkey = {'columns': [pod_binding.c.pod_id],
'references': [cascaded_pods.c.pod_id]}
fkey = {'columns': [cached_endpoints.c.pod_id],
'references': [pods.c.pod_id]}
migrate.ForeignKeyConstraint(columns=fkey['columns'],
refcolumns=fkey['references'],
name=fkey.get('name')).create()
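An optional sanity check (not part of the migration) that a deployer could run against the upgraded database to confirm the rename; it relies only on SQLAlchemy's inspector:

import sqlalchemy as sa


def renamed_tables_present(engine):
    # after this migration, 'pods' and 'cached_endpoints' should exist and
    # the legacy 'pod_binding' table should not
    names = set(sa.inspect(engine).get_table_names())
    return ({'pods', 'cached_endpoints'} <= names
            and 'pod_binding' not in names)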

View File

@ -27,96 +27,8 @@ def upgrade(migrate_engine):
meta = sql.MetaData()
meta.bind = migrate_engine
aggregates = sql.Table(
'aggregates', meta,
sql.Column('id', sql.Integer, primary_key=True),
sql.Column('name', sql.String(255), unique=True),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
mysql_engine='InnoDB',
mysql_charset='utf8')
aggregate_metadata = sql.Table(
'aggregate_metadata', meta,
sql.Column('id', sql.Integer, primary_key=True),
sql.Column('key', sql.String(255), nullable=False),
sql.Column('value', sql.String(255), nullable=False),
sql.Column('aggregate_id', sql.Integer, nullable=False),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
migrate.UniqueConstraint(
'aggregate_id', 'key',
name='uniq_aggregate_metadata0aggregate_id0key'),
mysql_engine='InnoDB',
mysql_charset='utf8')
instance_types = sql.Table(
'instance_types', meta,
sql.Column('id', sql.Integer, primary_key=True),
sql.Column('name', sql.String(255), unique=True),
sql.Column('memory_mb', sql.Integer, nullable=False),
sql.Column('vcpus', sql.Integer, nullable=False),
sql.Column('root_gb', sql.Integer),
sql.Column('ephemeral_gb', sql.Integer),
sql.Column('flavorid', sql.String(255), unique=True),
sql.Column('swap', sql.Integer, nullable=False, default=0),
sql.Column('rxtx_factor', sql.Float, default=1),
sql.Column('vcpu_weight', sql.Integer),
sql.Column('disabled', sql.Boolean, default=False),
sql.Column('is_public', sql.Boolean, default=True),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
mysql_engine='InnoDB',
mysql_charset='utf8')
instance_type_projects = sql.Table(
'instance_type_projects', meta,
sql.Column('id', sql.Integer, primary_key=True),
sql.Column('instance_type_id', sql.Integer, nullable=False),
sql.Column('project_id', sql.String(255)),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
migrate.UniqueConstraint(
'instance_type_id', 'project_id',
name='uniq_instance_type_projects0instance_type_id0project_id'),
mysql_engine='InnoDB',
mysql_charset='utf8')
instance_type_extra_specs = sql.Table(
'instance_type_extra_specs', meta,
sql.Column('id', sql.Integer, primary_key=True),
sql.Column('key', sql.String(255)),
sql.Column('value', sql.String(255)),
sql.Column('instance_type_id', sql.Integer, nullable=False),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
migrate.UniqueConstraint(
'instance_type_id', 'key',
name='uniq_instance_type_extra_specs0instance_type_id0key'),
mysql_engine='InnoDB',
mysql_charset='utf8')
enum = sql.Enum('ssh', 'x509', metadata=meta, name='keypair_types')
enum.create()
key_pairs = sql.Table(
'key_pairs', meta,
sql.Column('id', sql.Integer, primary_key=True, nullable=False),
sql.Column('name', sql.String(255), nullable=False),
sql.Column('user_id', sql.String(255)),
sql.Column('fingerprint', sql.String(255)),
sql.Column('public_key', MediumText()),
sql.Column('type', enum, nullable=False, server_default='ssh'),
sql.Column('created_at', sql.DateTime),
sql.Column('updated_at', sql.DateTime),
migrate.UniqueConstraint(
'user_id', 'name',
name='uniq_key_pairs0user_id0name'),
mysql_engine='InnoDB',
mysql_charset='utf8')
cascaded_pods_resource_routing = sql.Table(
'cascaded_pods_resource_routing', meta,
resource_routings = sql.Table(
'resource_routings', meta,
sql.Column('id', sql.BigInteger, primary_key=True),
sql.Column('top_id', sql.String(length=127), nullable=False),
sql.Column('bottom_id', sql.String(length=36)),
@ -127,13 +39,13 @@ def upgrade(migrate_engine):
sql.Column('updated_at', sql.DateTime),
migrate.UniqueConstraint(
'top_id', 'pod_id',
name='cascaded_pods_resource_routing0top_id0pod_id'
name='resource_routings0top_id0pod_id'
),
mysql_engine='InnoDB',
mysql_charset='utf8')
job = sql.Table(
'job', meta,
async_jobs = sql.Table(
'async_jobs', meta,
sql.Column('id', sql.String(length=36), primary_key=True),
sql.Column('type', sql.String(length=36)),
sql.Column('timestamp', sql.TIMESTAMP,
@ -143,26 +55,18 @@ def upgrade(migrate_engine):
sql.Column('extra_id', sql.String(length=36)),
migrate.UniqueConstraint(
'type', 'status', 'resource_id', 'extra_id',
name='job0type0status0resource_id0extra_id'),
name='async_jobs0type0status0resource_id0extra_id'),
mysql_engine='InnoDB',
mysql_charset='utf8')
tables = [aggregates, aggregate_metadata, instance_types,
instance_type_projects, instance_type_extra_specs, key_pairs,
job, cascaded_pods_resource_routing]
tables = [async_jobs, resource_routings]
for table in tables:
table.create()
cascaded_pods = sql.Table('cascaded_pods', meta, autoload=True)
pods = sql.Table('pods', meta, autoload=True)
fkeys = [{'columns': [instance_type_projects.c.instance_type_id],
'references': [instance_types.c.id]},
{'columns': [instance_type_extra_specs.c.instance_type_id],
'references': [instance_types.c.id]},
{'columns': [aggregate_metadata.c.aggregate_id],
'references': [aggregates.c.id]},
{'columns': [cascaded_pods_resource_routing.c.pod_id],
'references': [cascaded_pods.c.pod_id]}]
fkeys = [{'columns': [resource_routings.c.pod_id],
'references': [pods.c.pod_id]}]
for fkey in fkeys:
migrate.ForeignKeyConstraint(columns=fkey['columns'],
refcolumns=fkey['references'],

View File

@ -26,145 +26,28 @@ def MediumText():
return sql.Text().with_variant(mysql.MEDIUMTEXT(), 'mysql')
# Resource Model
class Aggregate(core.ModelBase, core.DictBase, models.TimestampMixin):
"""Represents a cluster of hosts that exists in this zone."""
__tablename__ = 'aggregates'
attributes = ['id', 'name', 'created_at', 'updated_at']
id = sql.Column(sql.Integer, primary_key=True)
name = sql.Column(sql.String(255), unique=True)
class AggregateMetadata(core.ModelBase, core.DictBase, models.TimestampMixin):
"""Represents a metadata key/value pair for an aggregate."""
__tablename__ = 'aggregate_metadata'
__table_args__ = (
sql.Index('aggregate_metadata_key_idx', 'key'),
schema.UniqueConstraint(
'aggregate_id', 'key',
name='uniq_aggregate_metadata0aggregate_id0key'),
)
attributes = ['id', 'key', 'value', 'aggregate_id',
'created_at', 'updated_at']
id = sql.Column(sql.Integer, primary_key=True)
key = sql.Column(sql.String(255), nullable=False)
value = sql.Column(sql.String(255), nullable=False)
aggregate_id = sql.Column(sql.Integer,
sql.ForeignKey('aggregates.id'), nullable=False)
class InstanceTypes(core.ModelBase, core.DictBase, models.TimestampMixin):
"""Represents possible flavors for instances.
Note: instance_type and flavor are synonyms and the term instance_type is
deprecated and in the process of being removed.
"""
__tablename__ = 'instance_types'
attributes = ['id', 'name', 'memory_mb', 'vcpus', 'root_gb',
'ephemeral_gb', 'flavorid', 'swap', 'rxtx_factor',
'vcpu_weight', 'disabled', 'is_public', 'created_at',
'updated_at']
# Internal only primary key/id
id = sql.Column(sql.Integer, primary_key=True)
name = sql.Column(sql.String(255), unique=True)
memory_mb = sql.Column(sql.Integer, nullable=False)
vcpus = sql.Column(sql.Integer, nullable=False)
root_gb = sql.Column(sql.Integer)
ephemeral_gb = sql.Column(sql.Integer)
# Public facing id will be renamed public_id
flavorid = sql.Column(sql.String(255), unique=True)
swap = sql.Column(sql.Integer, nullable=False, default=0)
rxtx_factor = sql.Column(sql.Float, default=1)
vcpu_weight = sql.Column(sql.Integer)
disabled = sql.Column(sql.Boolean, default=False)
is_public = sql.Column(sql.Boolean, default=True)
class InstanceTypeProjects(core.ModelBase, core.DictBase,
models.TimestampMixin):
"""Represent projects associated instance_types."""
__tablename__ = 'instance_type_projects'
__table_args__ = (schema.UniqueConstraint(
'instance_type_id', 'project_id',
name='uniq_instance_type_projects0instance_type_id0project_id'),
)
attributes = ['id', 'instance_type_id', 'project_id', 'created_at',
'updated_at']
id = sql.Column(sql.Integer, primary_key=True)
instance_type_id = sql.Column(sql.Integer,
sql.ForeignKey('instance_types.id'),
nullable=False)
project_id = sql.Column(sql.String(255))
class InstanceTypeExtraSpecs(core.ModelBase, core.DictBase,
models.TimestampMixin):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
__table_args__ = (
sql.Index('instance_type_extra_specs_instance_type_id_key_idx',
'instance_type_id', 'key'),
schema.UniqueConstraint(
'instance_type_id', 'key',
name='uniq_instance_type_extra_specs0instance_type_id0key'),
{'mysql_collate': 'utf8_bin'},
)
attributes = ['id', 'key', 'value', 'instance_type_id', 'created_at',
'updated_at']
id = sql.Column(sql.Integer, primary_key=True)
key = sql.Column(sql.String(255))
value = sql.Column(sql.String(255))
instance_type_id = sql.Column(sql.Integer,
sql.ForeignKey('instance_types.id'),
nullable=False)
class KeyPair(core.ModelBase, core.DictBase, models.TimestampMixin):
"""Represents a public key pair for ssh / WinRM."""
__tablename__ = 'key_pairs'
__table_args__ = (
schema.UniqueConstraint('user_id', 'name',
name='uniq_key_pairs0user_id0name'),
)
attributes = ['id', 'name', 'user_id', 'fingerprint', 'public_key', 'type',
'created_at', 'updated_at']
id = sql.Column(sql.Integer, primary_key=True, nullable=False)
name = sql.Column(sql.String(255), nullable=False)
user_id = sql.Column(sql.String(255))
fingerprint = sql.Column(sql.String(255))
public_key = sql.Column(MediumText())
type = sql.Column(sql.Enum('ssh', 'x509', name='keypair_types'),
nullable=False, server_default='ssh')
# Pod Model
class Pod(core.ModelBase, core.DictBase):
__tablename__ = 'cascaded_pods'
attributes = ['pod_id', 'pod_name', 'pod_az_name', 'dc_name', 'az_name']
__tablename__ = 'pods'
attributes = ['pod_id', 'region_name', 'pod_az_name', 'dc_name', 'az_name']
pod_id = sql.Column('pod_id', sql.String(length=36), primary_key=True)
pod_name = sql.Column('pod_name', sql.String(length=255), unique=True,
nullable=False)
region_name = sql.Column('region_name', sql.String(length=255),
unique=True, nullable=False)
pod_az_name = sql.Column('pod_az_name', sql.String(length=255),
nullable=True)
dc_name = sql.Column('dc_name', sql.String(length=255), nullable=True)
az_name = sql.Column('az_name', sql.String(length=255), nullable=False)
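With the model renamed, pod records are keyed by region_name instead of pod_name. A hypothetical record matching the new columns; the trailing comment mirrors how the db api layer persists rows via core.create_resource:

from oslo_utils import uuidutils

pod_dict = {
    'pod_id': uuidutils.generate_uuid(),
    'region_name': 'Pod1',   # formerly pod_name
    'pod_az_name': 'az1',
    'dc_name': 'dc1',
    'az_name': 'az1',
}
# with a Tricircle context t_ctx this would be stored as
#     core.create_resource(t_ctx, models.Pod, pod_dict)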
class PodServiceConfiguration(core.ModelBase, core.DictBase):
__tablename__ = 'cascaded_pod_service_configuration'
class CachedEndpoint(core.ModelBase, core.DictBase):
__tablename__ = 'cached_endpoints'
attributes = ['service_id', 'pod_id', 'service_type', 'service_url']
service_id = sql.Column('service_id', sql.String(length=64),
primary_key=True)
pod_id = sql.Column('pod_id', sql.String(length=64),
sql.ForeignKey('cascaded_pods.pod_id'),
sql.ForeignKey('pods.pod_id'),
nullable=False)
service_type = sql.Column('service_type', sql.String(length=64),
nullable=False)
@ -172,32 +55,13 @@ class PodServiceConfiguration(core.ModelBase, core.DictBase):
nullable=False)
# Tenant and pod binding model
class PodBinding(core.ModelBase, core.DictBase, models.TimestampMixin):
__tablename__ = 'pod_binding'
__table_args__ = (
schema.UniqueConstraint(
'tenant_id', 'pod_id',
name='pod_binding0tenant_id0pod_id'),
)
attributes = ['id', 'tenant_id', 'pod_id', 'is_binding',
'created_at', 'updated_at']
id = sql.Column(sql.String(36), primary_key=True)
tenant_id = sql.Column('tenant_id', sql.String(36), nullable=False)
pod_id = sql.Column('pod_id', sql.String(36),
sql.ForeignKey('cascaded_pods.pod_id'),
nullable=False)
is_binding = sql.Column('is_binding', sql.Boolean, nullable=False)
# Routing Model
class ResourceRouting(core.ModelBase, core.DictBase, models.TimestampMixin):
__tablename__ = 'cascaded_pods_resource_routing'
__tablename__ = 'resource_routings'
__table_args__ = (
schema.UniqueConstraint(
'top_id', 'pod_id',
name='cascaded_pods_resource_routing0top_id0pod_id'),
name='resource_routings0top_id0pod_id'),
)
attributes = ['id', 'top_id', 'bottom_id', 'pod_id', 'project_id',
'resource_type', 'created_at', 'updated_at']
@ -209,19 +73,19 @@ class ResourceRouting(core.ModelBase, core.DictBase, models.TimestampMixin):
top_id = sql.Column('top_id', sql.String(length=127), nullable=False)
bottom_id = sql.Column('bottom_id', sql.String(length=36))
pod_id = sql.Column('pod_id', sql.String(length=64),
sql.ForeignKey('cascaded_pods.pod_id'),
sql.ForeignKey('pods.pod_id'),
nullable=False)
project_id = sql.Column('project_id', sql.String(length=36))
resource_type = sql.Column('resource_type', sql.String(length=64),
nullable=False)
class Job(core.ModelBase, core.DictBase):
__tablename__ = 'job'
class AsyncJob(core.ModelBase, core.DictBase):
__tablename__ = 'async_jobs'
__table_args__ = (
schema.UniqueConstraint(
'type', 'status', 'resource_id', 'extra_id',
name='job0type0status0resource_id0extra_id'),
name='async_jobs0type0status0resource_id0extra_id'),
)
attributes = ['id', 'type', 'timestamp', 'status', 'resource_id',

View File

@ -42,7 +42,6 @@ import neutronclient.common.exceptions as q_cli_exceptions
from sqlalchemy import sql
from tricircle.common import az_ag
import tricircle.common.client as t_client
import tricircle.common.constants as t_constants
import tricircle.common.context as t_context
@ -79,6 +78,10 @@ tricircle_opts = [
default='',
help=_('Type of l3 bridge network, this type should be enabled '
'in tenant_network_types and is not local type.')),
cfg.StrOpt('default_region_for_external_network',
default='RegionOne',
help=_('Default region to which the external'
' network belongs.')),
cfg.BoolOpt('enable_api_gateway',
default=True,
help=_('Whether the Nova API gateway is enabled'))
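The new default_region_for_external_network option can be tried on its own; the sketch below registers the same StrOpt so it runs standalone, whereas a deployment would normally set the value in the [tricircle] section of the service configuration file:

from oslo_config import cfg

CONF = cfg.CONF
CONF.register_opts(
    [cfg.StrOpt('default_region_for_external_network', default='RegionOne')],
    group='tricircle')
print(CONF.tricircle.default_region_for_external_network)   # RegionOne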
@ -133,10 +136,10 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
def _setup_rpc(self):
self.endpoints = []
def _get_client(self, pod_name):
if pod_name not in self.clients:
self.clients[pod_name] = t_client.Client(pod_name)
return self.clients[pod_name]
def _get_client(self, region_name):
if region_name not in self.clients:
self.clients[region_name] = t_client.Client(region_name)
return self.clients[region_name]
@log_helpers.log_method_call
def start_rpc_listeners(self):
@ -155,15 +158,14 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
with context.session.begin():
pods = core.query_resource(t_ctx, models.Pod, [], [])
az_set = set(az_list)
if external:
known_az_set = set([pod['pod_name'] for pod in pods])
else:
known_az_set = set([pod['az_name'] for pod in pods])
if external:
known_az_set = (known_az_set |
set([pod['region_name'] for pod in pods]))
diff = az_set - known_az_set
if diff:
if external:
raise t_exceptions.PodNotFound(pod_name=diff.pop())
else:
raise az_ext.AvailabilityZoneNotFound(
availability_zone=diff.pop())
@ -183,21 +185,16 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
return False
if az_ext.AZ_HINTS in req_data and req_data[az_ext.AZ_HINTS]:
return True
t_ctx = t_context.get_context_from_neutron_context(context)
pod, pod_az = az_ag.get_pod_by_az_tenant(
t_ctx,
az_name='',
tenant_id=req_data['tenant_id'])
if pod:
req_data[az_ext.AZ_HINTS] = [pod['pod_name']]
# if no az_hints are specified, use the configured default region
req_data[az_ext.AZ_HINTS] = \
[cfg.CONF.tricircle.default_region_for_external_network]
return True
raise t_exceptions.ExternalNetPodNotSpecify()
def _create_bottom_external_network(self, context, net, top_id):
t_ctx = t_context.get_context_from_neutron_context(context)
# use the first pod
pod_name = net[az_ext.AZ_HINTS][0]
pod = db_api.get_pod_by_name(t_ctx, pod_name)
az_name = net[az_ext.AZ_HINTS][0]
pod = db_api.find_pod_by_az(t_ctx, az_name)
body = {
'network': {
'name': top_id,
@ -218,10 +215,10 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
def _create_bottom_external_subnet(self, context, subnet, net, top_id):
t_ctx = t_context.get_context_from_neutron_context(context)
pod_name = net[az_ext.AZ_HINTS][0]
pod = db_api.get_pod_by_name(t_ctx, pod_name)
b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, net['id'], pod_name, t_constants.RT_NETWORK)
region_name = net[az_ext.AZ_HINTS][0]
pod = db_api.get_pod_by_name(t_ctx, region_name)
b_net_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, net['id'], region_name, t_constants.RT_NETWORK)
body = {
'subnet': {
'name': top_id,
@ -278,9 +275,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, network_id, t_constants.RT_NETWORK)
for mapping in mappings:
pod_name = mapping[0]['pod_name']
region_name = mapping[0]['region_name']
bottom_network_id = mapping[1]
self._get_client(pod_name).delete_networks(
self._get_client(region_name).delete_networks(
t_ctx, bottom_network_id)
with t_ctx.session.begin():
core.delete_resources(
@ -390,12 +387,12 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, subnet_id, t_constants.RT_SUBNET)
for mapping in mappings:
pod_name = mapping[0]['pod_name']
region_name = mapping[0]['region_name']
bottom_subnet_id = mapping[1]
self._get_client(pod_name).delete_subnets(
self._get_client(region_name).delete_subnets(
t_ctx, bottom_subnet_id)
interface_name = t_constants.interface_port_name % (
mapping[0]['pod_name'], subnet_id)
mapping[0]['region_name'], subnet_id)
self._delete_pre_created_port(t_ctx, context, interface_name)
with t_ctx.session.begin():
core.delete_resources(
@ -457,8 +454,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
entries.append((sg_id, t_constants.RT_SG))
for resource_id, resource_type in entries:
if db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, resource_id, pod['pod_name'], resource_type):
if db_api.get_bottom_id_by_top_id_region_name(
t_ctx, resource_id, pod['region_name'], resource_type):
continue
db_api.create_resource_mapping(t_ctx, resource_id, resource_id,
pod['pod_id'], res['tenant_id'],
@ -518,9 +515,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
mappings = db_api.get_bottom_mappings_by_top_id(
t_ctx, port_id, t_constants.RT_PORT)
if mappings:
pod_name = mappings[0][0]['pod_name']
region_name = mappings[0][0]['region_name']
bottom_port_id = mappings[0][1]
port = self._get_client(pod_name).get_ports(
port = self._get_client(region_name).get_ports(
t_ctx, bottom_port_id)
# TODO(zhiyuan) handle the case that bottom port does not exist
port['id'] = port_id
@ -674,7 +671,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
t_ctx = t_context.get_context_from_neutron_context(context)
q_client = self._get_client(
current_pod['pod_name']).get_native_client('port', t_ctx)
current_pod['region_name']).get_native_client('port', t_ctx)
params = {'limit': number}
if filters:
_filters = dict(filters)
@ -844,7 +841,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
_filters.append({'key': key,
'comparator': 'eq',
'value': value})
client = self._get_client(pod['pod_name'])
client = self._get_client(pod['region_name'])
ret.extend(client.list_ports(t_ctx, filters=_filters))
ret = self._map_ports_from_bottom_to_top(ret, bottom_top_map)
ret.extend(self._get_ports_from_top(context, top_bottom_map,
@ -914,7 +911,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
mappings = db_api.get_bottom_mappings_by_top_id(t_ctx, _id,
t_constants.RT_ROUTER)
for pod, b_router_id in mappings:
b_client = self._get_client(pod['pod_name'])
b_client = self._get_client(pod['region_name'])
ew_port_name = t_constants.ew_bridge_port_name % (project_id,
b_router_id)
@ -922,8 +919,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
context, {'name': [ew_port_name]})
if ew_ports:
t_ew_port_id = ew_ports[0]['id']
b_ew_port_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, t_ew_port_id, pod['pod_name'], t_constants.RT_PORT)
b_ew_port_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_ew_port_id, pod['region_name'],
t_constants.RT_PORT)
if b_ew_port_id:
request_body = {'port_id': b_ew_port_id}
try:
@ -955,8 +953,9 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
context, {'name': [ns_subnet_name]})
if ns_subnets:
t_ns_subnet_id = ns_subnets[0]['id']
b_ns_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, t_ns_subnet_id, pod['pod_name'],
b_ns_subnet_id = \
db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_ns_subnet_id, pod['region_name'],
t_constants.RT_SUBNET)
if b_ns_subnet_id:
request_body = {'subnet_id': b_ns_subnet_id}
@ -1114,10 +1113,10 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# network ID from resource routing table.
if not network.get(az_ext.AZ_HINTS):
raise t_exceptions.ExternalNetPodNotSpecify()
pod_name = network[az_ext.AZ_HINTS][0]
pod = db_api.get_pod_by_name(t_ctx, pod_name)
b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, ext_net_id, pod_name, t_constants.RT_NETWORK)
region_name = network[az_ext.AZ_HINTS][0]
pod = db_api.get_pod_by_name(t_ctx, region_name)
b_net_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, ext_net_id, region_name, t_constants.RT_NETWORK)
# create corresponding bottom router in the pod where external network
# is located.
@ -1134,7 +1133,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# both router and external network in bottom pod are ready, attach
# external network to router in bottom pod.
b_client = self._get_client(pod_name)
b_client = self._get_client(region_name)
t_info = router_data[l3.EXTERNAL_GW_INFO]
b_info = {'network_id': b_net_id}
if 'enable_snat' in t_info:
@ -1143,8 +1142,8 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
fixed_ips = []
for ip in t_info['external_fixed_ips']:
t_subnet_id = ip['subnet_id']
b_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, t_subnet_id, pod_name,
b_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_subnet_id, region_name,
t_constants.RT_SUBNET)
fixed_ips.append({'subnet_id': b_subnet_id,
'ip_address': ip['ip_address']})
@ -1198,10 +1197,10 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
if not t_network[az_ext.AZ_HINTS]:
raise t_exceptions.ExternalNetPodNotSpecify()
pod_name = t_network[az_ext.AZ_HINTS][0]
b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, router_id, pod_name, t_constants.RT_ROUTER)
b_client = self._get_client(pod_name)
region_name = t_network[az_ext.AZ_HINTS][0]
b_router_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, router_id, region_name, t_constants.RT_ROUTER)
b_client = self._get_client(region_name)
b_client.action_routers(t_ctx, 'remove_gateway', b_router_id)
def update_router(self, context, router_id, router):
@ -1263,11 +1262,11 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
if not ext_nets:
need_ns_bridge = False
else:
ext_net_pod_names = set(
ext_net_region_names = set(
[ext_net[az_ext.AZ_HINTS][0] for ext_net in ext_nets])
need_ns_bridge = False
for b_pod in b_pods:
if b_pod['pod_name'] not in ext_net_pod_names:
if b_pod['region_name'] not in ext_net_region_names:
need_ns_bridge = True
break
if need_ns_bridge:
@ -1360,7 +1359,7 @@ class TricirclePlugin(db_base_plugin_v2.NeutronDbPluginV2,
# this is a rare case: we got an IpAddressInUseClient exception a
# second ago but now the floating ip is missing
raise t_network_exc.BottomPodOperationFailure(
resource='floating ip', pod_name=pod['pod_name'])
resource='floating ip', region_name=pod['region_name'])
associated_port_id = fips[0].get('port_id')
if associated_port_id == port_id:
pass

View File

@ -27,7 +27,8 @@ class DefaultGroupUpdateNotSupported(exceptions.InvalidInput):
class BottomPodOperationFailure(exceptions.NeutronException):
message = _('Operation for %(resource)s on bottom pod %(pod_name)s fails')
message = _(
'Operation for %(resource)s on bottom pod %(region_name)s fails')
class DhcpPortNotFound(exceptions.NotFound):

View File

@ -44,14 +44,14 @@ class NetworkHelper(object):
network_type_map = {t_constants.NT_SHARED_VLAN: TYPE_VLAN}
return network_type_map.get(network_type, network_type)
def _get_client(self, pod_name=None):
if not pod_name:
def _get_client(self, region_name=None):
if not region_name:
if t_constants.TOP not in self.clients:
self.clients[t_constants.TOP] = client.Client()
return self.clients[t_constants.TOP]
if pod_name not in self.clients:
self.clients[pod_name] = client.Client(pod_name)
return self.clients[pod_name]
if region_name not in self.clients:
self.clients[region_name] = client.Client(region_name)
return self.clients[region_name]
# operate top resource
def _prepare_top_element_by_call(self, t_ctx, q_ctx,
@ -173,7 +173,7 @@ class NetworkHelper(object):
created or already exists and id of the resource
"""
def list_resources(t_ctx_, q_ctx, pod_, ele_, _type_):
client = self._get_client(pod_['pod_name'])
client = self._get_client(pod_['region_name'])
if _type_ == t_constants.RT_NETWORK:
value = utils.get_bottom_network_name(ele_)
else:
@ -183,7 +183,7 @@ class NetworkHelper(object):
'value': value}])
def create_resources(t_ctx_, q_ctx, pod_, body_, _type_):
client = self._get_client(pod_['pod_name'])
client = self._get_client(pod_['region_name'])
return client.create_resources(_type_, t_ctx_, body_)
return t_lock.get_or_create_element(
@ -292,17 +292,17 @@ class NetworkHelper(object):
body['port']['security_groups'] = b_security_group_ids
return body
def get_create_interface_body(self, project_id, t_net_id, b_pod_name,
def get_create_interface_body(self, project_id, t_net_id, b_region_name,
t_subnet_id):
"""Get request body to create top interface
:param project_id: project id
:param t_net_id: top network id
:param b_pod_name: bottom pod name
:param b_region_name: region name of the bottom pod
:param t_subnet_id: top subnet id
:return: request body for creating the top interface port
"""
t_interface_name = t_constants.interface_port_name % (b_pod_name,
t_interface_name = t_constants.interface_port_name % (b_region_name,
t_subnet_id)
t_interface_body = {
'port': {
@ -354,10 +354,10 @@ class NetworkHelper(object):
for subnet in t_subnets:
# gateway
t_interface_name = t_constants.interface_port_name % (
pod['pod_name'], subnet['id'])
pod['region_name'], subnet['id'])
t_interface_body = self.get_create_interface_body(
project_id, t_net['id'], pod['pod_name'], subnet['id'])
project_id, t_net['id'], pod['region_name'], subnet['id'])
_, t_interface_id = self.prepare_top_element(
t_ctx, q_ctx, project_id, pod, {'id': t_interface_name},
@ -380,7 +380,7 @@ class NetworkHelper(object):
continue
self.prepare_dhcp_port(t_ctx, project_id, pod, t_net['id'],
t_subnet_id, b_net_id, b_subnet_id)
b_client = self._get_client(pod['pod_name'])
b_client = self._get_client(pod['region_name'])
b_client.update_subnets(t_ctx, b_subnet_id,
{'subnet': {'enable_dhcp': True}})
@ -548,7 +548,7 @@ class NetworkHelper(object):
# this is a rare case: we got an IpAddressInUseClient exception a
# second ago but now the floating ip is missing
raise t_network_exc.BottomPodOperationFailure(
resource='floating ip', pod_name=pod['pod_name'])
resource='floating ip', region_name=pod['region_name'])
associated_port_id = fips[0].get('port_id')
if associated_port_id == port_id:
# the internal port associated with the existing fip is what

View File

@ -65,7 +65,7 @@ class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
try:
for pod, b_sg_id in mappings:
client = self._get_client(pod['pod_name'])
client = self._get_client(pod['region_name'])
rule['security_group_id'] = b_sg_id
self._safe_create_security_group_rule(
t_context, client, {'security_group_rule': rule})
@ -73,7 +73,7 @@ class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
super(TricircleSecurityGroupMixin,
self).delete_security_group_rule(q_context, new_rule['id'])
raise n_exceptions.BottomPodOperationFailure(
resource='security group rule', pod_name=pod['pod_name'])
resource='security group rule', region_name=pod['region_name'])
return new_rule
def delete_security_group_rule(self, q_context, _id):
@ -91,7 +91,7 @@ class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
try:
for pod, b_sg_id in mappings:
client = self._get_client(pod['pod_name'])
client = self._get_client(pod['region_name'])
rule['security_group_id'] = b_sg_id
b_sg = client.get_security_groups(t_context, b_sg_id)
for b_rule in b_sg['security_group_rules']:
@ -102,7 +102,7 @@ class TricircleSecurityGroupMixin(securitygroups_db.SecurityGroupDbMixin):
break
except Exception:
raise n_exceptions.BottomPodOperationFailure(
resource='security group rule', pod_name=pod['pod_name'])
resource='security group rule', region_name=pod['region_name'])
super(TricircleSecurityGroupMixin,
self).delete_security_group_rule(q_context, _id)

View File

@ -30,12 +30,12 @@ echo $token
curl -X POST http://127.0.0.1:19999/v1.0/pods \
-H "Content-Type: application/json" \
-H "X-Auth-Token: $token" -d '{"pod": {"pod_name": "RegionOne"}}'
-H "X-Auth-Token: $token" -d '{"pod": {"region_name": "RegionOne"}}'
curl -X POST http://127.0.0.1:19999/v1.0/pods \
-H "Content-Type: application/json" \
-H "X-Auth-Token: $token" \
-d '{"pod": {"pod_name": "Pod1", "az_name": "az1"}}'
-d '{"pod": {"region_name": "Pod1", "az_name": "az1"}}'
# the usage of "nova flavor-create":
# nova flavor-create [--ephemeral <ephemeral>] [--swap <swap>]

View File

@ -22,10 +22,8 @@ from oslo_config import fixture as fixture_config
import oslo_db.exception as db_exc
from tricircle.api import app
from tricircle.common import az_ag
from tricircle.common import context
from tricircle.common import policy
from tricircle.common import utils
from tricircle.db import core
from tricircle.tests import base
@ -120,26 +118,6 @@ class TestPodController(API_FunctionalTest):
def fake_create_ag_az(context, ag_name, az_name):
raise db_exc.DBDuplicateEntry
@patch.object(context, 'extract_context_from_environ',
new=fake_admin_context)
@patch.object(az_ag, 'create_ag_az',
new=fake_create_ag_az)
def test_post_dup_db_exception(self):
pods = [
{
"pod":
{
"pod_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc1",
"az_name": "AZ1"
},
"expected_error": 409
},
]
self._test_and_check(pods)
def fake_create_ag_az_exp(context, ag_name, az_name):
raise Exception
@ -152,7 +130,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod1",
"region_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc1",
"az_name": "AZ1"
@ -205,7 +183,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "",
"region_name": "",
"pod_az_name": "az1",
"dc_name": "dc1"
},
@ -216,7 +194,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "",
"region_name": "",
"pod_az_name": "az1",
"dc_name": "dc1",
"az_name": ""
@ -228,7 +206,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "",
"region_name": "",
"pod_az_name": "az1",
"dc_name": "dc1",
"az_name": "az1"
@ -251,7 +229,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "TopRegion",
"region_name": "TopRegion",
"pod_az_name": "az1",
"dc_name": "dc1"
},
@ -261,7 +239,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "TopRegion2",
"region_name": "TopRegion2",
"pod_az_name": "",
"dc_name": "dc1"
},
@ -281,7 +259,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod1",
"region_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc1",
"az_name": "AZ1"
@ -292,7 +270,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod1",
"region_name": "Pod1",
"pod_az_name": "az2",
"dc_name": "dc2",
"az_name": "AZ1"
@ -314,7 +292,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "TopRegion",
"region_name": "TopRegion",
"pod_az_name": "az1",
"dc_name": "dc1"
},
@ -324,7 +302,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "TopRegion",
"region_name": "TopRegion",
"pod_az_name": "az2",
"dc_name": "dc2",
"az_name": "AZ1"
@ -357,7 +335,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "TopRegion",
"region_name": "TopRegion",
"pod_az_name": "",
"dc_name": "dc1",
"az_name": ""
@ -368,7 +346,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod1",
"region_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ1"
@ -379,7 +357,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod2",
"region_name": "Pod2",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ1"
@ -407,7 +385,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod1",
"region_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ1"
@ -418,7 +396,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod2",
"region_name": "Pod2",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ1"
@ -429,7 +407,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod3",
"region_name": "Pod3",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ2"
@ -459,8 +437,8 @@ class TestPodController(API_FunctionalTest):
self.assertEqual(get_one_pod['pod_id'],
ret_pod['pod_id'])
self.assertEqual(get_one_pod['pod_name'],
ret_pod['pod_name'])
self.assertEqual(get_one_pod['region_name'],
ret_pod['region_name'])
self.assertEqual(get_one_pod['pod_az_name'],
ret_pod['pod_az_name'])
@ -471,23 +449,6 @@ class TestPodController(API_FunctionalTest):
self.assertEqual(get_one_pod['az_name'],
ret_pod['az_name'])
_id = ret_pod['pod_id']
# check ag and az automatically added
ag_name = utils.get_ag_name(ret_pod['pod_name'])
ag = az_ag.get_ag_by_name(self.context, ag_name)
self.assertIsNotNone(ag)
self.assertEqual(ag['name'],
utils.get_ag_name(ret_pod['pod_name']))
self.assertEqual(ag['availability_zone'], ret_pod['az_name'])
single_ret = self.app.delete('/v1.0/pods/' + str(_id))
self.assertEqual(single_ret.status_int, 200)
# make sure ag is deleted
ag = az_ag.get_ag_by_name(self.context, ag_name)
self.assertIsNone(ag)
@patch.object(context, 'extract_context_from_environ',
new=fake_non_admin_context)
def test_non_admin_action(self):
@ -496,7 +457,7 @@ class TestPodController(API_FunctionalTest):
{
"pod":
{
"pod_name": "Pod1",
"region_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ1"
@ -517,219 +478,3 @@ class TestPodController(API_FunctionalTest):
response = self.app.delete('/v1.0/pods/1234567890',
expect_errors=True)
self.assertEqual(response.status_int, 401)
class TestBindingController(API_FunctionalTest):
"""Test version listing on root URI."""
@patch.object(context, 'extract_context_from_environ',
new=fake_admin_context)
def test_post_no_input(self):
pod_bindings = [
# missing pod_binding
{
"pod_xxx":
{
"tenant_id": "dddddd",
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643"
},
"expected_error": 400
}]
for test_pod in pod_bindings:
response = self.app.post_json(
'/v1.0/bindings',
dict(pod_xxx=test_pod['pod_xxx']),
expect_errors=True)
self.assertEqual(response.status_int,
test_pod['expected_error'])
@patch.object(context, 'extract_context_from_environ',
new=fake_admin_context)
def test_post_invalid_input(self):
pod_bindings = [
# missing tenant_id and or az_pod_map_id
{
"pod_binding":
{
"tenant_id": "dddddd",
"pod_id": ""
},
"expected_error": 422
},
{
"pod_binding":
{
"tenant_id": "",
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643"
},
"expected_error": 422
},
{
"pod_binding":
{
"tenant_id": "dddddd",
},
"expected_error": 422
},
{
"pod_binding":
{
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643"
},
"expected_error": 422
}
]
self._test_and_check(pod_bindings)
@patch.object(context, 'extract_context_from_environ',
new=fake_admin_context)
def test_bindings(self):
pods = [
{
"pod":
{
"pod_name": "Pod1",
"pod_az_name": "az1",
"dc_name": "dc2",
"az_name": "AZ1"
},
"expected_error": 200
}
]
pod_bindings = [
{
"pod_binding":
{
"tenant_id": "dddddd",
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643",
"is_binding": "True"
},
"expected_error": 200
},
{
"pod_binding":
{
"tenant_id": "aaaaa",
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643",
"is_binding": "True"
},
"expected_error": 200
},
{
"pod_binding":
{
"tenant_id": "dddddd",
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643"
},
"expected_error": 409
}
]
self._test_and_check_pod(pods)
_id = self._get_az_pod_id()
self._test_and_check(pod_bindings, _id)
# get all
response = self.app.get('/v1.0/bindings')
self.assertEqual(response.status_int, 200)
# get one
return_pod_bindings = response.json
for ret_pod in return_pod_bindings['pod_bindings']:
_id = ret_pod['id']
single_ret = self.app.get('/v1.0/bindings/' + str(_id))
self.assertEqual(single_ret.status_int, 200)
one_pot_ret = single_ret.json
get_one_pod = one_pot_ret['pod_binding']
self.assertEqual(get_one_pod['id'],
ret_pod['id'])
self.assertEqual(get_one_pod['tenant_id'],
ret_pod['tenant_id'])
self.assertEqual(get_one_pod['pod_id'],
ret_pod['pod_id'])
_id = ret_pod['id']
single_ret = self.app.delete('/v1.0/bindings/' + str(_id))
self.assertEqual(single_ret.status_int, 200)
def _get_az_pod_id(self):
response = self.app.get('/v1.0/pods')
self.assertEqual(response.status_int, 200)
return_pods = response.json
for ret_pod in return_pods['pods']:
_id = ret_pod['pod_id']
return _id
def _test_and_check(self, pod_bindings, _id=None):
for test_pod in pod_bindings:
if _id is not None:
test_pod['pod_binding']['pod_id'] = str(_id)
response = self.app.post_json(
'/v1.0/bindings',
dict(pod_binding=test_pod['pod_binding']),
expect_errors=True)
self.assertEqual(response.status_int,
test_pod['expected_error'])
def _test_and_check_pod(self, pods):
for test_pod in pods:
response = self.app.post_json(
'/v1.0/pods',
dict(pod=test_pod['pod']),
expect_errors=True)
self.assertEqual(response.status_int,
test_pod['expected_error'])
@patch.object(context, 'extract_context_from_environ',
new=fake_non_admin_context)
def test_non_admin_action(self):
pod_bindings = [
{
"pod_binding":
{
"tenant_id": "dddddd",
"pod_id": "0ace0db2-ef33-43a6-a150-42703ffda643"
},
"expected_error": 401
},
]
self._test_and_check(pod_bindings)
response = self.app.get('/v1.0/bindings/1234567890',
expect_errors=True)
self.assertEqual(response.status_int, 401)
response = self.app.get('/v1.0/bindings',
expect_errors=True)
self.assertEqual(response.status_int, 401)
response = self.app.delete('/v1.0/bindings/1234567890',
expect_errors=True)
self.assertEqual(response.status_int, 401)

View File

@ -22,7 +22,6 @@ import pecan
from tricircle.api.controllers import pod
from tricircle.common import context
from tricircle.common import policy
from tricircle.common import utils
from tricircle.db import core
from tricircle.db import models
@ -38,15 +37,15 @@ class PodsControllerTest(unittest.TestCase):
@patch.object(context, 'extract_context_from_environ')
def test_post_top_pod(self, mock_context):
mock_context.return_value = self.context
kw = {'pod': {'pod_name': 'TopPod', 'az_name': ''}}
kw = {'pod': {'region_name': 'TopPod', 'az_name': ''}}
pod_id = self.controller.post(**kw)['pod']['pod_id']
with self.context.session.begin():
pod = core.get_resource(self.context, models.Pod, pod_id)
self.assertEqual('TopPod', pod['pod_name'])
self.assertEqual('TopPod', pod['region_name'])
self.assertEqual('', pod['az_name'])
pods = core.query_resource(self.context, models.Pod,
[{'key': 'pod_name',
[{'key': 'region_name',
'comparator': 'eq',
'value': 'TopPod'}], [])
self.assertEqual(1, len(pods))
@ -54,53 +53,39 @@ class PodsControllerTest(unittest.TestCase):
@patch.object(context, 'extract_context_from_environ')
def test_post_bottom_pod(self, mock_context):
mock_context.return_value = self.context
kw = {'pod': {'pod_name': 'BottomPod', 'az_name': 'TopAZ'}}
kw = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
pod_id = self.controller.post(**kw)['pod']['pod_id']
with self.context.session.begin():
pod = core.get_resource(self.context, models.Pod, pod_id)
self.assertEqual('BottomPod', pod['pod_name'])
self.assertEqual('BottomPod', pod['region_name'])
self.assertEqual('TopAZ', pod['az_name'])
pods = core.query_resource(self.context, models.Pod,
[{'key': 'pod_name',
[{'key': 'region_name',
'comparator': 'eq',
'value': 'BottomPod'}], [])
self.assertEqual(1, len(pods))
ag_name = utils.get_ag_name('BottomPod')
aggregates = core.query_resource(self.context, models.Aggregate,
[{'key': 'name',
'comparator': 'eq',
'value': ag_name}], [])
self.assertEqual(1, len(aggregates))
metadatas = core.query_resource(
self.context, models.AggregateMetadata,
[{'key': 'key', 'comparator': 'eq',
'value': 'availability_zone'},
{'key': 'aggregate_id', 'comparator': 'eq',
'value': aggregates[0]['id']}], [])
self.assertEqual(1, len(metadatas))
self.assertEqual('TopAZ', metadatas[0]['value'])
@patch.object(context, 'extract_context_from_environ')
def test_get_one(self, mock_context):
mock_context.return_value = self.context
kw = {'pod': {'pod_name': 'TopPod', 'az_name': ''}}
kw = {'pod': {'region_name': 'TopPod', 'az_name': ''}}
pod_id = self.controller.post(**kw)['pod']['pod_id']
pod = self.controller.get_one(pod_id)
self.assertEqual('TopPod', pod['pod']['pod_name'])
self.assertEqual('TopPod', pod['pod']['region_name'])
self.assertEqual('', pod['pod']['az_name'])
@patch.object(context, 'extract_context_from_environ')
def test_get_all(self, mock_context):
mock_context.return_value = self.context
kw1 = {'pod': {'pod_name': 'TopPod', 'az_name': ''}}
kw2 = {'pod': {'pod_name': 'BottomPod', 'az_name': 'TopAZ'}}
kw1 = {'pod': {'region_name': 'TopPod', 'az_name': ''}}
kw2 = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
self.controller.post(**kw1)
self.controller.post(**kw2)
pods = self.controller.get_all()
actual = [(pod['pod_name'],
actual = [(pod['region_name'],
pod['az_name']) for pod in pods['pods']]
expect = [('TopPod', ''), ('BottomPod', 'TopAZ')]
self.assertItemsEqual(expect, actual)
@ -109,29 +94,16 @@ class PodsControllerTest(unittest.TestCase):
@patch.object(context, 'extract_context_from_environ')
def test_delete(self, mock_context):
mock_context.return_value = self.context
kw = {'pod': {'pod_name': 'BottomPod', 'az_name': 'TopAZ'}}
kw = {'pod': {'region_name': 'BottomPod', 'az_name': 'TopAZ'}}
pod_id = self.controller.post(**kw)['pod']['pod_id']
self.controller.delete(pod_id)
with self.context.session.begin():
pods = core.query_resource(self.context, models.Pod,
[{'key': 'pod_name',
[{'key': 'region_name',
'comparator': 'eq',
'value': 'BottomPod'}], [])
self.assertEqual(0, len(pods))
ag_name = utils.get_ag_name('BottomPod')
aggregates = core.query_resource(self.context, models.Aggregate,
[{'key': 'name',
'comparator': 'eq',
'value': ag_name}], [])
self.assertEqual(0, len(aggregates))
metadatas = core.query_resource(
self.context, models.AggregateMetadata,
[{'key': 'key', 'comparator': 'eq',
'value': 'availability_zone'},
{'key': 'value', 'comparator': 'eq',
'value': 'TopAZ'}], [])
self.assertEqual(0, len(metadatas))
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine())

View File

@ -12,6 +12,7 @@
import mock
from mock import patch
from oslo_utils import uuidutils
import unittest
import pecan
@ -47,13 +48,12 @@ class RoutingControllerTest(unittest.TestCase):
def test_post(self, mock_context):
mock_context.return_value = self.context
# prepare the foreign key: pod_id, project_id
kw_pod = {'pod': {'pod_name': 'pod1', 'az_name': 'az1'}}
# prepare the foreign key: pod_id
kw_pod = {'pod': {'region_name': 'pod1', 'az_name': 'az1'}}
pod_id = pod.PodsController().post(**kw_pod)['pod']['pod_id']
kw_binding = {'pod_binding': {'tenant_id': '01', 'pod_id': pod_id}}
project_id = pod.BindingsController().post(**kw_binding)[
'pod_binding']['tenant_id']
# a project id used later in this test
project_id = uuidutils.generate_uuid()
kw_routing = {'routing':
{'top_id': '09fd7cc9-d169-4b5a-88e8-436ecf4d0bfe',
@ -135,25 +135,6 @@ class RoutingControllerTest(unittest.TestCase):
res = self.controller.post(**kw_routing4)
self._validate_error_code(res, 400)
# failure case, the pod_id and the project_id should be bound
kw_pod2 = {'pod': {'pod_name': 'pod2', 'az_name': 'az1'}}
pod_id2 = pod.PodsController().post(**kw_pod2)['pod']['pod_id']
# the tenant_id binds with pod_id rather than pod_id2
kw_binding2 = {'pod_binding': {'tenant_id': '02', 'pod_id': pod_id}}
project_id2 = pod.BindingsController().post(**kw_binding2)[
'pod_binding']['tenant_id']
kw_routing5 = {'routing':
{'top_id': '09fd7cc9-d169-4b5a-88e8-436ecf4d0bfe',
'bottom_id': 'dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ef',
'pod_id': pod_id2,
'project_id': project_id2,
'resource_type': 'subnet'
}}
res = self.controller.post(**kw_routing5)
self._validate_error_code(res, 400)
# failure case, wrong resource type
kw_routing6 = {'routing':
{'top_id': '09fd7cc9-d169-4b5a-88e8-436ecf4d0b09',
@ -174,13 +155,12 @@ class RoutingControllerTest(unittest.TestCase):
def test_get_one(self, mock_context):
mock_context.return_value = self.context
# prepare the foreign key: pod_id, project_id
kw_pod = {'pod': {'pod_name': 'pod1', 'az_name': 'az1'}}
# prepare the foreign key: pod_id
kw_pod = {'pod': {'region_name': 'pod1', 'az_name': 'az1'}}
pod_id = pod.PodsController().post(**kw_pod)['pod']['pod_id']
kw_binding = {'pod_binding': {'tenant_id': '01', 'pod_id': pod_id}}
project_id = pod.BindingsController().post(**kw_binding)[
'pod_binding']['tenant_id']
# a project id used later in this test
project_id = uuidutils.generate_uuid()
kw_routing = {'routing':
{'top_id': '09fd7cc9-d169-4b5a-88e8-436ecf4d0bfe',
@ -198,10 +178,8 @@ class RoutingControllerTest(unittest.TestCase):
self.assertEqual('dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ef',
routing['routing']['bottom_id'])
self.assertEqual(pod_id, routing['routing']['pod_id'])
self.assertEqual(project_id,
routing['routing']['project_id'])
self.assertEqual('port',
routing['routing']['resource_type'])
self.assertEqual(project_id, routing['routing']['project_id'])
self.assertEqual('port', routing['routing']['resource_type'])
# failure case, only admin can get resource routing
self.context.is_admin = False
@ -219,35 +197,30 @@ class RoutingControllerTest(unittest.TestCase):
def test_get_all(self, mock_context):
mock_context.return_value = self.context
# prepare the foreign key: pod_id, project_id
kw_pod1 = {'pod': {'pod_name': 'pod1', 'az_name': 'az1'}}
# prepare the foreign key: pod_id
kw_pod1 = {'pod': {'region_name': 'pod1', 'az_name': 'az1'}}
pod_id1 = pod.PodsController().post(**kw_pod1)['pod']['pod_id']
kw_binding1 = {'pod_binding': {'tenant_id': '01', 'pod_id': pod_id1}}
project_id1 = pod.BindingsController().post(**kw_binding1)[
'pod_binding']['tenant_id']
# a project id used later in this test
project_id = uuidutils.generate_uuid()
kw_routing1 = {'routing':
{'top_id': 'c7f641c9-8462-4007-84b2-3035d8cfb7a3',
'bottom_id': 'dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ef',
'pod_id': pod_id1,
'project_id': project_id1,
'project_id': project_id,
'resource_type': 'subnet'
}}
# prepare the foreign key: pod_id, project_id
kw_pod2 = {'pod': {'pod_name': 'pod2', 'az_name': 'az1'}}
# prepare the foreign key: pod_id
kw_pod2 = {'pod': {'region_name': 'pod2', 'az_name': 'az1'}}
pod_id2 = pod.PodsController().post(**kw_pod2)['pod']['pod_id']
kw_binding2 = {'pod_binding': {'tenant_id': '02', 'pod_id': pod_id2}}
project_id2 = pod.BindingsController().post(**kw_binding2)[
'pod_binding']['tenant_id']
kw_routing2 = {'routing':
{'top_id': 'b669a2da-ca95-47db-a2a9-ba9e546d82ee',
'bottom_id': 'fd72c010-6e62-4866-b999-6dcb718dd7b4',
'pod_id': pod_id2,
'project_id': project_id2,
'project_id': project_id,
'resource_type': 'port'
}}
@ -301,13 +274,12 @@ class RoutingControllerTest(unittest.TestCase):
def test_delete(self, mock_context):
mock_context.return_value = self.context
# prepare the foreign key: pod_id, project_id
kw_pod = {'pod': {'pod_name': 'pod1', 'az_name': 'az1'}}
# prepare the foreign key: pod_id
kw_pod = {'pod': {'region_name': 'pod1', 'az_name': 'az1'}}
pod_id = pod.PodsController().post(**kw_pod)['pod']['pod_id']
kw_binding = {'pod_binding': {'tenant_id': '01', 'pod_id': pod_id}}
project_id = pod.BindingsController().post(**kw_binding)[
'pod_binding']['tenant_id']
# a project id used later in this test
project_id = uuidutils.generate_uuid()
kw_routing = {'routing':
{'top_id': '09fd7cc9-d169-4b5a-88e8-436ecf4d0bfe',
@ -351,19 +323,18 @@ class RoutingControllerTest(unittest.TestCase):
def test_put(self, mock_context):
mock_context.return_value = self.context
# prepare the foreign key: pod_id, project_id
kw_pod1 = {'pod': {'pod_name': 'pod1', 'az_name': 'az1'}}
# prepare the foreign key: pod_id
kw_pod1 = {'pod': {'region_name': 'pod1', 'az_name': 'az1'}}
pod_id1 = pod.PodsController().post(**kw_pod1)['pod']['pod_id']
kw_binding1 = {'pod_binding': {'tenant_id': '01', 'pod_id': pod_id1}}
project_id1 = pod.BindingsController().post(**kw_binding1)[
'pod_binding']['tenant_id']
# a project id used later in this test
project_id = uuidutils.generate_uuid()
body = {'routing':
{'top_id': 'c7f641c9-8462-4007-84b2-3035d8cfb7a3',
'bottom_id': 'dc80f9de-abb7-4ec6-ab7a-94f8fd1e20ef',
'pod_id': pod_id1,
'project_id': project_id1,
'project_id': project_id,
'resource_type': 'router'
}}
@ -419,55 +390,5 @@ class RoutingControllerTest(unittest.TestCase):
res = self.controller.put(-123, **body_update1)
self._validate_error_code(res, 404)
# failure case, the pod_id and the project_id should be bound
kw_pod2 = {'pod': {'pod_name': 'pod2', 'az_name': 'az1'}}
pod_id2 = pod.PodsController().post(**kw_pod2)['pod']['pod_id']
# only update pod_id, and the new pod id is pod_id2. pod_id2
# has not been bound to tenant whose tenant_id is project_id1
body_update6 = {'routing':
{'pod_id': pod_id2}}
res = self.controller.put(id, **body_update6)
self._validate_error_code(res, 400)
# failure case, the pod_id and the project_id should be bound
kw_binding2 = {'pod_binding': {'tenant_id': '02', 'pod_id': pod_id2}}
project_id2 = pod.BindingsController().post(**kw_binding2)[
'pod_binding']['tenant_id']
# only update project_id, and the new project id is project_id2,
# this tenant has not been bound to pod whose pod_id is pod_id1
body_update7 = {'routing':
{'project_id': project_id2}}
res = self.controller.put(id, **body_update7)
self._validate_error_code(res, 400)
# failure case, the pod_id and the project_id should be bound
# both the pod_id and project_id are specified, and the
# tenant1 bound to pod1, pod3, tenant2 bound to pod1, pod2
# original routing: pod=pod1, tenant=tenant1
# updated routing: pod=pod3, tenant=tenant2
# this case should be failed
# bind the tenant1 to pod3
kw_pod3 = {'pod': {'pod_name': 'pod3', 'az_name': 'az1'}}
pod_id3 = pod.PodsController().post(**kw_pod3)['pod']['pod_id']
kw_binding3 = {'pod_binding': {'tenant_id': '01', 'pod_id': pod_id3}}
pod_id3 = pod.BindingsController().post(**kw_binding3)[
'pod_binding']['pod_id']
# bind the tenant2 to pod1
kw_binding4 = {'pod_binding': {'tenant_id': '02', 'pod_id': pod_id1}}
project_id2 = pod.BindingsController().post(**kw_binding4)[
'pod_binding']['tenant_id']
body_update8 = {'routing':
{'pod_id': pod_id3,
'project_id': project_id2}}
res = self.controller.put(id, **body_update8)
self._validate_error_code(res, 400)
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine())

View File

@ -1,169 +0,0 @@
# Copyright 2015 Huawei Technologies Co., Ltd.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from tricircle.common import az_ag
from tricircle.common import context
from tricircle.db import api
from tricircle.db import core
from tricircle.db import models
FAKE_AZ = 'fake_az'
FAKE_SITE_ID = 'fake_pod_id'
FAKE_SITE_NAME = 'fake_pod_name'
FAKE_SERVICE_ID = 'fake_service_id'
FAKE_SITE_ID_2 = 'fake_pod_id_2'
FAKE_SITE_NAME_2 = 'fake_pod_name_2'
FAKE_SERVICE_ID_2 = 'fake_service_id_2'
FAKE_TOP_NAME = 'RegionOne'
FAKE_TOP_ID = 'fake_top_pod_id'
FAKE_TOP_SERVICE_ID = 'fake_top_service_id'
FAKE_TOP_ENDPOINT = 'http://127.0.0.1:8774/v2/$(tenant_id)s'
FAKE_TYPE = 'fake_type'
FAKE_URL = 'http://127.0.0.1:12345'
FAKE_URL_INVALID = 'http://127.0.0.1:23456'
FAKE_SERVICE_TYPE = 'cinder'
FAKE_SERVICE_ENDPOINT = 'http://127.0.0.1:8774/v2.1/$(tenant_id)s'
FAKE_SERVICE_ENDPOINT_2 = 'http://127.0.0.2:8774/v2.1/$(tenant_id)s'
FAKE_TENANT_ID = 'my tenant'
class FakeException(Exception):
pass
class AZAGTest(unittest.TestCase):
def setUp(self):
core.initialize()
core.ModelBase.metadata.create_all(core.get_engine())
# enforce foreign key constraint for sqlite
core.get_engine().execute('pragma foreign_keys=on')
self.context = context.Context()
top_pod = {
'pod_id': FAKE_TOP_ID,
'pod_name': FAKE_TOP_NAME,
'az_name': ''
}
config_dict_top = {
'service_id': FAKE_TOP_SERVICE_ID,
'pod_id': FAKE_TOP_ID,
'service_type': FAKE_SERVICE_TYPE,
'service_url': FAKE_TOP_ENDPOINT
}
pod_dict = {
'pod_id': FAKE_SITE_ID,
'pod_name': FAKE_SITE_NAME,
'az_name': FAKE_AZ
}
pod_dict2 = {
'pod_id': FAKE_SITE_ID_2,
'pod_name': FAKE_SITE_NAME_2,
'az_name': FAKE_AZ
}
config_dict = {
'service_id': FAKE_SERVICE_ID,
'pod_id': FAKE_SITE_ID,
'service_type': FAKE_SERVICE_TYPE,
'service_url': FAKE_SERVICE_ENDPOINT
}
config_dict2 = {
'service_id': FAKE_SERVICE_ID_2,
'pod_id': FAKE_SITE_ID_2,
'service_type': FAKE_SERVICE_TYPE,
'service_url': FAKE_SERVICE_ENDPOINT_2
}
api.create_pod(self.context, pod_dict)
api.create_pod(self.context, pod_dict2)
api.create_pod(self.context, top_pod)
api.create_pod_service_configuration(self.context, config_dict)
api.create_pod_service_configuration(self.context, config_dict2)
api.create_pod_service_configuration(self.context, config_dict_top)
def test_get_pod_by_az_tenant(self):
pod1, _ = az_ag.get_pod_by_az_tenant(self.context,
FAKE_AZ + FAKE_AZ,
FAKE_TENANT_ID)
self.assertIsNone(pod1)
pods = az_ag.list_pods_by_tenant(self.context, FAKE_TENANT_ID)
self.assertEqual(len(pods), 0)
# schedule one
pod2, _ = az_ag.get_pod_by_az_tenant(self.context,
FAKE_AZ,
FAKE_TENANT_ID)
pod_bindings = core.query_resource(self.context,
models.PodBinding,
[{'key': 'tenant_id',
'comparator': 'eq',
'value': FAKE_TENANT_ID}],
[])
self.assertIsNotNone(pod_bindings)
if pod_bindings[0]['pod_id'] == FAKE_SITE_ID:
self.assertEqual(pod2['pod_name'], FAKE_SITE_NAME)
self.assertEqual(pod2['pod_id'], FAKE_SITE_ID)
self.assertEqual(pod2['az_name'], FAKE_AZ)
else:
self.assertEqual(pod2['pod_name'], FAKE_SITE_NAME_2)
self.assertEqual(pod2['pod_id'], FAKE_SITE_ID_2)
self.assertEqual(pod2['az_name'], FAKE_AZ)
# scheduled one should always be bound
pod3, _ = az_ag.get_pod_by_az_tenant(self.context,
FAKE_AZ,
FAKE_TENANT_ID)
self.assertEqual(pod2['pod_name'], pod3['pod_name'])
self.assertEqual(pod2['pod_id'], pod3['pod_id'])
self.assertEqual(pod2['az_name'], pod3['az_name'])
def test_list_pods_by_tenant(self):
pod1, _ = az_ag.get_pod_by_az_tenant(self.context,
FAKE_AZ + FAKE_AZ,
FAKE_TENANT_ID)
pods = az_ag.list_pods_by_tenant(self.context, FAKE_TENANT_ID)
self.assertIsNone(pod1)
self.assertEqual(len(pods), 0)
# TODO(joehuang): tenant bound to multiple pods in one AZ
# schedule one
pod2, _ = az_ag.get_pod_by_az_tenant(self.context,
FAKE_AZ,
FAKE_TENANT_ID)
pods = az_ag.list_pods_by_tenant(self.context, FAKE_TENANT_ID)
self.assertDictEqual(pods[0], pod2)
def tearDown(self):
core.ModelBase.metadata.drop_all(core.get_engine())

View File

@ -32,7 +32,7 @@ from tricircle.db import core
FAKE_AZ = 'fake_az'
FAKE_RESOURCE = 'fake_res'
FAKE_SITE_ID = 'fake_pod_id'
FAKE_SITE_NAME = 'fake_pod_name'
FAKE_SITE_NAME = 'fake_region_name'
FAKE_SERVICE_ID = 'fake_service_id'
FAKE_TYPE = 'fake_type'
FAKE_URL = 'http://127.0.0.1:12345'
@ -129,7 +129,7 @@ class ClientTest(unittest.TestCase):
pod_dict = {
'pod_id': FAKE_SITE_ID,
'pod_name': FAKE_SITE_NAME,
'region_name': FAKE_SITE_NAME,
'az_name': FAKE_AZ
}
config_dict = {
@ -139,12 +139,12 @@ class ClientTest(unittest.TestCase):
'service_url': FAKE_URL
}
api.create_pod(self.context, pod_dict)
api.create_pod_service_configuration(self.context, config_dict)
api.create_cached_endpoints(self.context, config_dict)
global FAKE_RESOURCES
FAKE_RESOURCES = [{'name': 'res1'}, {'name': 'res2'}]
cfg.CONF.set_override(name='top_pod_name', override=FAKE_SITE_NAME,
cfg.CONF.set_override(name='top_region_name', override=FAKE_SITE_NAME,
group='client')
self.client = client.Client()
self.client.resource_service_map[FAKE_RESOURCE] = FAKE_TYPE
@ -189,7 +189,7 @@ class ClientTest(unittest.TestCase):
cfg.CONF.set_override(name='auto_refresh_endpoint', override=False,
group='client')
# delete the configuration so endpoint cannot be found
api.delete_pod_service_configuration(self.context, FAKE_SERVICE_ID)
api.delete_cached_endpoints(self.context, FAKE_SERVICE_ID)
# auto refresh set to False, directly raise exception
self.assertRaises(exceptions.EndpointNotFound,
self.client.list_resources,
@ -211,7 +211,7 @@ class ClientTest(unittest.TestCase):
cfg.CONF.set_override(name='auto_refresh_endpoint', override=True,
group='client')
# delete the configuration so endpoint cannot be found
api.delete_pod_service_configuration(self.context, FAKE_SERVICE_ID)
api.delete_cached_endpoints(self.context, FAKE_SERVICE_ID)
self.client._get_admin_token = mock.Mock()
self.client._get_endpoint_from_keystone = mock.Mock()
@ -228,7 +228,7 @@ class ClientTest(unittest.TestCase):
group='client')
update_dict = {'service_url': FAKE_URL_INVALID}
# update url to an invalid one
api.update_pod_service_configuration(self.context,
api.update_cached_endpoints(self.context,
FAKE_SERVICE_ID,
update_dict)
@ -242,7 +242,7 @@ class ClientTest(unittest.TestCase):
group='client')
update_dict = {'service_url': FAKE_URL_INVALID}
# update url to an invalid one
api.update_pod_service_configuration(self.context,
api.update_cached_endpoints(self.context,
FAKE_SERVICE_ID,
update_dict)
@ -257,8 +257,8 @@ class ClientTest(unittest.TestCase):
self.assertEqual(resources, [{'name': 'res1'}, {'name': 'res2'}])
@patch.object(uuid, 'uuid4')
@patch.object(api, 'create_pod_service_configuration')
@patch.object(api, 'update_pod_service_configuration')
@patch.object(api, 'create_cached_endpoints')
@patch.object(api, 'update_cached_endpoints')
def test_update_endpoint_from_keystone(self, update_mock, create_mock,
uuid_mock):
self.client._get_admin_token = mock.Mock()
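
For reference, a minimal sketch of how the renamed endpoint-cache helpers exercised above fit together. The call signatures follow the calls shown in this patch; the concrete ids, URLs and the standalone context are illustrative:

    from tricircle.common import context
    from tricircle.db import api
    from tricircle.db import core

    core.initialize()
    core.ModelBase.metadata.create_all(core.get_engine())
    ctx = context.Context()

    # a pod record must exist before an endpoint can be cached for it
    api.create_pod(ctx, {'pod_id': 'fake_pod_id',
                         'region_name': 'fake_region_name',
                         'az_name': 'fake_az'})
    config = {'service_id': 'fake_service_id',
              'pod_id': 'fake_pod_id',
              'service_type': 'cinder',
              'service_url': 'http://127.0.0.1:8774/v2.1/$(tenant_id)s'}
    api.create_cached_endpoints(ctx, config)
    api.update_cached_endpoints(ctx, 'fake_service_id',
                                {'service_url': 'http://127.0.0.2:8774/v2.1/$(tenant_id)s'})
    api.delete_cached_endpoints(ctx, 'fake_service_id')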

View File

@ -25,15 +25,15 @@ from tricircle.db import api
from tricircle.db import core
def fake_get_pod_service_endpoint(ctx, pod_name, st):
def fake_get_pod_service_endpoint(ctx, region_name, st):
pod = api.get_pod_by_name(ctx, pod_name)
pod = api.get_pod_by_name(ctx, region_name)
if pod:
f = [{'key': 'pod_id', 'comparator': 'eq',
'value': pod['pod_id']},
{'key': 'service_type', 'comparator': 'eq',
'value': st}]
pod_services = api.list_pod_service_configurations(
pod_services = api.list_cached_endpoints(
ctx,
filters=f,
sorts=[])
@ -129,7 +129,7 @@ class HttpClientTest(unittest.TestCase):
def test_get_pod_service_ctx(self):
pod_dict = {
'pod_id': 'fake_pod_id',
'pod_name': 'fake_pod_name',
'region_name': 'fake_region_name',
'az_name': 'fake_az'
}
@ -141,18 +141,18 @@ class HttpClientTest(unittest.TestCase):
}
t_url = 'http://127.0.0.1:8774/v2/my_tenant_id/volumes'
api.create_pod(self.context, pod_dict)
api.create_pod_service_configuration(self.context, config_dict)
api.create_cached_endpoints(self.context, config_dict)
b_url = 'http://127.0.0.1:8774/v2.1/my_tenant_id/volumes'
b_endpoint = hclient.get_pod_service_endpoint(self.context,
pod_dict['pod_name'],
pod_dict['region_name'],
cons.ST_CINDER)
self.assertEqual(b_endpoint, config_dict['service_url'])
b_ctx = hclient.get_pod_service_ctx(self.context,
t_url,
pod_dict['pod_name'],
pod_dict['region_name'],
cons.ST_CINDER)
self.assertEqual(b_ctx['t_ver'], 'v2')
self.assertEqual(b_ctx['t_url'], t_url)
@ -162,7 +162,7 @@ class HttpClientTest(unittest.TestCase):
# wrong pod name
b_ctx = hclient.get_pod_service_ctx(self.context,
t_url,
pod_dict['pod_name'] + '1',
pod_dict['region_name'] + '1',
cons.ST_CINDER)
self.assertEqual(b_ctx['t_ver'], 'v2')
self.assertEqual(b_ctx['t_url'], t_url)
@ -172,7 +172,7 @@ class HttpClientTest(unittest.TestCase):
# wrong service_type
b_ctx = hclient.get_pod_service_ctx(self.context,
t_url,
pod_dict['pod_name'],
pod_dict['region_name'],
cons.ST_CINDER + '1')
self.assertEqual(b_ctx['t_ver'], 'v2')
self.assertEqual(b_ctx['t_url'], t_url)
@ -184,17 +184,17 @@ class HttpClientTest(unittest.TestCase):
def test_get_pod_and_endpoint_by_name(self):
pod_dict = {
'pod_id': 'fake_pod_id',
'pod_name': 'fake_pod_name',
'region_name': 'fake_region_name',
'az_name': 'fake_az'
}
api.create_pod(self.context, pod_dict)
pod = api.get_pod_by_name(self.context, pod_dict['pod_name'] + '1')
pod = api.get_pod_by_name(self.context, pod_dict['region_name'] + '1')
self.assertIsNone(pod)
pod = api.get_pod_by_name(self.context, pod_dict['pod_name'])
pod = api.get_pod_by_name(self.context, pod_dict['region_name'])
self.assertEqual(pod['pod_id'], pod_dict['pod_id'])
self.assertEqual(pod['pod_name'], pod_dict['pod_name'])
self.assertEqual(pod['region_name'], pod_dict['region_name'])
self.assertEqual(pod['az_name'], pod_dict['az_name'])
config_dict = {
@ -203,11 +203,11 @@ class HttpClientTest(unittest.TestCase):
'service_type': cons.ST_CINDER,
'service_url': 'http://127.0.0.1:8774/v2.1/$(tenant_id)s'
}
api.create_pod_service_configuration(self.context, config_dict)
api.create_cached_endpoints(self.context, config_dict)
endpoint = hclient.get_pod_service_endpoint(
self.context,
pod_dict['pod_name'],
pod_dict['region_name'],
config_dict['service_type'])
self.assertEqual(endpoint, config_dict['service_url'])
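
The same lookup path, sketched outside the test class. Here api, hclient and cons refer to the modules this test file already imports (their import lines are not part of these hunks), and the region name and URL are placeholders:

    pod = api.get_pod_by_name(ctx, 'fake_region_name')  # None if the region is unknown
    if pod:
        endpoint = hclient.get_pod_service_endpoint(
            ctx, pod['region_name'], cons.ST_CINDER)
        b_ctx = hclient.get_pod_service_ctx(
            ctx, 'http://127.0.0.1:8774/v2/my_tenant_id/volumes',
            pod['region_name'], cons.ST_CINDER)
        # b_ctx carries the translated pieces, e.g. b_ctx['t_ver'] and b_ctx['t_url']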

View File

@ -31,7 +31,7 @@ class APITest(unittest.TestCase):
def test_get_bottom_mappings_by_top_id(self):
for i in xrange(3):
pod = {'pod_id': 'test_pod_uuid_%d' % i,
'pod_name': 'test_pod_%d' % i,
'region_name': 'test_pod_%d' % i,
'az_name': 'test_az_uuid_%d' % i}
api.create_pod(self.context, pod)
route1 = {
@ -61,7 +61,7 @@ class APITest(unittest.TestCase):
def test_get_bottom_mappings_by_tenant_pod(self):
for i in xrange(3):
pod = {'pod_id': 'test_pod_uuid_%d' % i,
'pod_name': 'test_pod_%d' % i,
'region_name': 'test_pod_%d' % i,
'az_name': 'test_az_uuid_%d' % i}
api.create_pod(self.context, pod)
routes = [
@ -161,7 +161,7 @@ class APITest(unittest.TestCase):
pods = []
for i in xrange(5):
pod = {'pod_id': 'test_pod_uuid_%d' % i,
'pod_name': 'test_pod_%d' % i,
'region_name': 'test_pod_%d' % i,
'pod_az_name': 'test_pod_az_name_%d' % i,
'dc_name': 'test_dc_name_%d' % i,
'az_name': 'test_az_uuid_%d' % i,

View File

@ -114,7 +114,7 @@ class ModelsTest(unittest.TestCase):
def test_obj_to_dict(self):
pod = {'pod_id': 'test_pod_uuid',
'pod_name': 'test_pod',
'region_name': 'test_pod',
'pod_az_name': 'test_pod_az_name',
'dc_name': 'test_dc_name',
'az_name': 'test_az_uuid'}
@ -124,7 +124,7 @@ class ModelsTest(unittest.TestCase):
def test_create(self):
pod = {'pod_id': 'test_pod_uuid',
'pod_name': 'test_pod',
'region_name': 'test_pod',
'pod_az_name': 'test_pod_az_name',
'dc_name': 'test_dc_name',
'az_name': 'test_az_uuid'}
@ -137,27 +137,27 @@ class ModelsTest(unittest.TestCase):
'service_type': 'nova',
'service_url': 'http://test_url'
}
config_ret = api.create_pod_service_configuration(self.context,
config_ret = api.create_cached_endpoints(self.context,
configuration)
self.assertEqual(config_ret, configuration)
def test_update(self):
pod = {'pod_id': 'test_pod_uuid',
'pod_name': 'test_pod',
'region_name': 'test_pod',
'az_name': 'test_az1_uuid'}
api.create_pod(self.context, pod)
update_dict = {'pod_id': 'fake_uuid',
'pod_name': 'test_pod2',
'region_name': 'test_pod2',
'az_name': 'test_az2_uuid'}
ret = api.update_pod(self.context, 'test_pod_uuid', update_dict)
# primary key value will not be updated
self.assertEqual(ret['pod_id'], 'test_pod_uuid')
self.assertEqual(ret['pod_name'], 'test_pod2')
self.assertEqual(ret['region_name'], 'test_pod2')
self.assertEqual(ret['az_name'], 'test_az2_uuid')
def test_delete(self):
pod = {'pod_id': 'test_pod_uuid',
'pod_name': 'test_pod',
'region_name': 'test_pod',
'az_name': 'test_az_uuid'}
api.create_pod(self.context, pod)
api.delete_pod(self.context, 'test_pod_uuid')
@ -166,24 +166,24 @@ class ModelsTest(unittest.TestCase):
def test_query(self):
pod1 = {'pod_id': 'test_pod1_uuid',
'pod_name': 'test_pod1',
'region_name': 'test_pod1',
'pod_az_name': 'test_pod_az_name1',
'dc_name': 'test_dc_name1',
'az_name': 'test_az1_uuid'}
pod2 = {'pod_id': 'test_pod2_uuid',
'pod_name': 'test_pod2',
'region_name': 'test_pod2',
'pod_az_name': 'test_pod_az_name2',
'dc_name': 'test_dc_name1',
'az_name': 'test_az2_uuid'}
api.create_pod(self.context, pod1)
api.create_pod(self.context, pod2)
filters = [{'key': 'pod_name',
filters = [{'key': 'region_name',
'comparator': 'eq',
'value': 'test_pod2'}]
pods = api.list_pods(self.context, filters)
self.assertEqual(len(pods), 1)
self.assertEqual(pods[0], pod2)
filters = [{'key': 'pod_name',
filters = [{'key': 'region_name',
'comparator': 'eq',
'value': 'test_pod3'}]
pods = api.list_pods(self.context, filters)
@ -191,17 +191,17 @@ class ModelsTest(unittest.TestCase):
def test_sort(self):
pod1 = {'pod_id': 'test_pod1_uuid',
'pod_name': 'test_pod1',
'region_name': 'test_pod1',
'pod_az_name': 'test_pod_az_name1',
'dc_name': 'test_dc_name1',
'az_name': 'test_az1_uuid'}
pod2 = {'pod_id': 'test_pod2_uuid',
'pod_name': 'test_pod2',
'region_name': 'test_pod2',
'pod_az_name': 'test_pod_az_name2',
'dc_name': 'test_dc_name1',
'az_name': 'test_az2_uuid'}
pod3 = {'pod_id': 'test_pod3_uuid',
'pod_name': 'test_pod3',
'region_name': 'test_pod3',
'pod_az_name': 'test_pod_az_name3',
'dc_name': 'test_dc_name1',
'az_name': 'test_az3_uuid'}
@ -231,7 +231,7 @@ class ModelsTest(unittest.TestCase):
def test_resource_routing_unique_key(self):
pod = {'pod_id': 'test_pod1_uuid',
'pod_name': 'test_pod1',
'region_name': 'test_pod1',
'az_name': 'test_az1_uuid'}
api.create_pod(self.context, pod)
routing = {'top_id': 'top_uuid',

View File

@ -52,7 +52,6 @@ from oslo_utils import uuidutils
from tricircle.common import client
from tricircle.common import constants
from tricircle.common import context
from tricircle.common import exceptions
import tricircle.db.api as db_api
from tricircle.db import core
from tricircle.db import models
@ -238,12 +237,12 @@ class FakeNeutronClient(object):
'pod_1': {'port': BOTTOM1_PORTS},
'pod_2': {'port': BOTTOM2_PORTS}}
def __init__(self, pod_name):
self.pod_name = pod_name
def __init__(self, region_name):
self.region_name = region_name
self.ports_path = ''
def _get(self, params=None):
port_list = self._res_map[self.pod_name]['port']
port_list = self._res_map[self.region_name]['port']
if not params:
return {'ports': port_list}
@ -267,11 +266,11 @@ class FakeNeutronClient(object):
return {'ports': port_list}
def get(self, path, params=None):
if self.pod_name in ['pod_1', 'pod_2', 'top']:
if self.region_name in ['pod_1', 'pod_2', 'top']:
res_list = self._get(params)['ports']
return_list = []
for res in res_list:
if self.pod_name != 'top':
if self.region_name != 'top':
res = copy.copy(res)
return_list.append(res)
return {'ports': return_list}
@ -295,12 +294,12 @@ class FakeClient(object):
'security_group': BOTTOM2_SGS,
'floatingip': BOTTOM2_FIPS}}
def __init__(self, pod_name):
if not pod_name:
self.pod_name = 'top'
def __init__(self, region_name):
if not region_name:
self.region_name = 'top'
else:
self.pod_name = pod_name
self.client = FakeNeutronClient(self.pod_name)
self.region_name = region_name
self.client = FakeNeutronClient(self.region_name)
def get_native_client(self, resource, ctx):
return self.client
@ -310,7 +309,7 @@ class FakeClient(object):
pass
def _allocate_ip(self, port_body):
subnet_list = self._res_map[self.pod_name]['subnet']
subnet_list = self._res_map[self.region_name]['subnet']
for subnet in subnet_list:
if subnet['network_id'] == port_body['port']['network_id']:
cidr = subnet['cidr']
@ -320,7 +319,7 @@ class FakeClient(object):
def create_resources(self, _type, ctx, body):
if _type == 'port':
res_list = self._res_map[self.pod_name][_type]
res_list = self._res_map[self.region_name][_type]
subnet_ips_map = {}
for res in res_list:
fixed_ips = res.get('fixed_ips', [])
@ -331,7 +330,7 @@ class FakeClient(object):
fixed_ip['ip_address'])
fixed_ips = body[_type].get('fixed_ips', [])
for fixed_ip in fixed_ips:
for subnet in self._res_map[self.pod_name]['subnet']:
for subnet in self._res_map[self.region_name]['subnet']:
ip_range = netaddr.IPNetwork(subnet['cidr'])
ip = netaddr.IPAddress(fixed_ip['ip_address'])
if ip in ip_range:
@ -350,16 +349,16 @@ class FakeClient(object):
body[_type]['gateway_ip'] = cidr[:cidr.rindex('.')] + '.1'
if 'id' not in body[_type]:
body[_type]['id'] = uuidutils.generate_uuid()
res_list = self._res_map[self.pod_name][_type]
res_list = self._res_map[self.region_name][_type]
res = dict(body[_type])
res_list.append(res)
return res
def list_resources(self, _type, ctx, filters=None):
if self.pod_name == 'top':
res_list = self._res_map[self.pod_name][_type + 's']
if self.region_name == 'top':
res_list = self._res_map[self.region_name][_type + 's']
else:
res_list = self._res_map[self.pod_name][_type]
res_list = self._res_map[self.region_name][_type]
ret_list = []
for res in res_list:
is_selected = True
@ -376,10 +375,10 @@ class FakeClient(object):
def delete_resources(self, _type, ctx, _id):
index = -1
if self.pod_name == 'top':
res_list = self._res_map[self.pod_name][_type + 's']
if self.region_name == 'top':
res_list = self._res_map[self.region_name][_type + 's']
else:
res_list = self._res_map[self.pod_name][_type]
res_list = self._res_map[self.region_name][_type]
for i, res in enumerate(res_list):
if res['id'] == _id:
index = i
@ -388,7 +387,7 @@ class FakeClient(object):
def list_networks(self, ctx, filters=None):
networks = self.list_resources('network', ctx, filters)
if self.pod_name != 'top':
if self.region_name != 'top':
return networks
ret_list = []
for network in networks:
@ -405,7 +404,7 @@ class FakeClient(object):
def update_networks(self, ctx, net_id, network):
net_data = network[neutron_attributes.NETWORK]
if self.pod_name == 'pod_1':
if self.region_name == 'pod_1':
bottom_nets = BOTTOM1_NETS
else:
bottom_nets = BOTTOM2_NETS
@ -465,7 +464,7 @@ class FakeClient(object):
router_id, body = args
if 'port_id' in body:
for port in self._res_map[self.pod_name]['port']:
for port in self._res_map[self.region_name]['port']:
if port['id'] == body['port_id']:
port['device_id'] = router_id
port['device_owner'] = 'network:router_interface'
@ -531,7 +530,7 @@ class FakeClient(object):
def create_security_group_rules(self, ctx, body):
sg_id = body['security_group_rule']['security_group_id']
res_list = self._res_map[self.pod_name]['security_group']
res_list = self._res_map[self.region_name]['security_group']
for sg in res_list:
if sg['id'] == sg_id:
target_sg = sg
@ -547,7 +546,7 @@ class FakeClient(object):
target_sg['security_group_rules'].append(body['security_group_rule'])
def delete_security_group_rules(self, ctx, rule_id):
res_list = self._res_map[self.pod_name]['security_group']
res_list = self._res_map[self.region_name]['security_group']
for sg in res_list:
for rule in sg['security_group_rules']:
if rule['id'] == rule_id:
@ -555,7 +554,7 @@ class FakeClient(object):
return
def get_security_groups(self, ctx, sg_id):
res_list = self._res_map[self.pod_name]['security_group']
res_list = self._res_map[self.region_name]['security_group']
for sg in res_list:
if sg['id'] == sg_id:
# need to do a deep copy because we will traverse the security
@ -897,8 +896,8 @@ class FakeBaseManager(xmanager.XManager):
constants.JT_PORT_DELETE: self.delete_server_port}
self.helper = FakeHelper(fake_plugin)
def _get_client(self, pod_name=None):
return FakeClient(pod_name)
def _get_client(self, region_name=None):
return FakeClient(region_name)
class FakeXManager(FakeBaseManager):
@ -939,8 +938,8 @@ class FakeExtension(object):
class FakeHelper(helper.NetworkHelper):
def _get_client(self, pod_name=None):
return FakeClient(pod_name)
def _get_client(self, region_name=None):
return FakeClient(region_name)
def _prepare_top_element_by_call(self, t_ctx, q_ctx,
project_id, pod, ele, _type, body):
@ -987,8 +986,8 @@ class FakePlugin(plugin.TricirclePlugin):
self.xjob_handler = FakeRPCAPI(self)
self.type_manager = FakeTypeManager()
def _get_client(self, pod_name):
return FakeClient(pod_name)
def _get_client(self, region_name):
return FakeClient(region_name)
def _make_network_dict(self, network, fields=None,
process_extensions=True, context=None):
@ -1033,8 +1032,8 @@ def fake_get_context_from_neutron_context(q_context):
return context.get_db_context()
def fake_get_client(self, pod_name):
return FakeClient(pod_name)
def fake_get_client(self, region_name):
return FakeClient(region_name)
def fake_make_network_dict(self, network, fields=None,
@ -1112,6 +1111,8 @@ class PluginTest(unittest.TestCase,
group='tricircle')
cfg.CONF.set_override('bridge_network_type', 'shared_vlan',
group='tricircle')
cfg.CONF.set_override('default_region_for_external_network',
'pod_1', group='tricircle')
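        # the override above maps to a [tricircle] option; a hypothetical
        # deployment-side tricircle.conf fragment (the value is illustrative)
        # would read:
        #     [tricircle]
        #     default_region_for_external_network = RegionOne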
for vlan in (vlan_min, vlan_max):
TOP_VLANALLOCATIONS.append(
DotDict({'physical_network': phynet,
@ -1119,13 +1120,13 @@ class PluginTest(unittest.TestCase,
def _basic_pod_route_setup(self):
pod1 = {'pod_id': 'pod_id_1',
'pod_name': 'pod_1',
'region_name': 'pod_1',
'az_name': 'az_name_1'}
pod2 = {'pod_id': 'pod_id_2',
'pod_name': 'pod_2',
'region_name': 'pod_2',
'az_name': 'az_name_2'}
pod3 = {'pod_id': 'pod_id_0',
'pod_name': 'top_pod',
'region_name': 'top_pod',
'az_name': ''}
for pod in (pod1, pod2, pod3):
db_api.create_pod(self.context, pod)
@ -1463,7 +1464,7 @@ class PluginTest(unittest.TestCase,
# test _prepare_bottom_element
_, b_port_id, _, _ = fake_plugin._get_bottom_bridge_elements(
q_ctx, 'project_id', b_pod, net, False, subnet, port)
b_port = fake_plugin._get_client(b_pod['pod_name']).get_ports(
b_port = fake_plugin._get_client(b_pod['region_name']).get_ports(
t_ctx, b_port_id)
bottom_entry_map = {}
@ -1483,7 +1484,7 @@ class PluginTest(unittest.TestCase,
self.assertEqual(bottom_entry_map['port']['bottom_id'], b_port_id)
@staticmethod
def _prepare_network_test(tenant_id, ctx, pod_name, index):
def _prepare_network_test(tenant_id, ctx, region_name, index):
t_net_id = uuidutils.generate_uuid()
t_subnet_id = uuidutils.generate_uuid()
b_net_id = uuidutils.generate_uuid()
@ -1542,14 +1543,14 @@ class PluginTest(unittest.TestCase,
'ipv6_ra_mode': '',
'tenant_id': tenant_id
}
if pod_name == 'pod_1':
if region_name == 'pod_1':
BOTTOM1_NETS.append(DotDict(b_net))
BOTTOM1_SUBNETS.append(DotDict(b_subnet))
else:
BOTTOM2_NETS.append(DotDict(b_net))
BOTTOM2_SUBNETS.append(DotDict(b_subnet))
pod_id = 'pod_id_1' if pod_name == 'pod_1' else 'pod_id_2'
pod_id = 'pod_id_1' if region_name == 'pod_1' else 'pod_id_2'
core.create_resource(ctx, models.ResourceRouting,
{'top_id': t_net_id,
'bottom_id': b_net_id,
@ -1564,9 +1565,9 @@ class PluginTest(unittest.TestCase,
'resource_type': constants.RT_SUBNET})
return t_net_id, t_subnet_id, b_net_id, b_subnet_id
def _prepare_router_test(self, tenant_id, ctx, pod_name, index):
def _prepare_router_test(self, tenant_id, ctx, region_name, index):
(t_net_id, t_subnet_id, b_net_id,
b_subnet_id) = self._prepare_network_test(tenant_id, ctx, pod_name,
b_subnet_id) = self._prepare_network_test(tenant_id, ctx, region_name,
index)
if len(TOP_ROUTERS) == 0:
@ -1733,7 +1734,7 @@ class PluginTest(unittest.TestCase,
fake_plugin.add_router_interface(
q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
b_router_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_router_id, 'pod_1', 'router')
mock_rpc.assert_called_once_with(t_ctx, t_router_id)
@ -1767,9 +1768,9 @@ class PluginTest(unittest.TestCase,
for subnet in TOP_SUBNETS:
if subnet['name'].startswith('ns_bridge'):
t_ns_bridge_subnet_id = subnet['id']
b_ns_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
b_ns_bridge_net_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_ns_bridge_net_id, 'pod_1', constants.RT_NETWORK)
b_ns_bridge_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
b_ns_bridge_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_ns_bridge_subnet_id, 'pod_1', constants.RT_SUBNET)
# the internal network and the external network are in different pods, so we
# need to create the N-S bridge network, set the gateway and add_router_interface
@ -1809,7 +1810,7 @@ class PluginTest(unittest.TestCase,
fake_plugin.add_router_interface(
q_ctx, t_router_id, {'subnet_id': t_subnet_id})['port_id']
b_router_id = db_api.get_bottom_id_by_top_id_pod_name(
b_router_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_router_id, 'pod_2', 'router')
bridge_port_name = constants.ew_bridge_port_name % (tenant_id,
b_router_id)
@ -2000,16 +2001,6 @@ class PluginTest(unittest.TestCase,
t_ctx = context.get_db_context()
mock_context.return_value = t_ctx
# create external network specifying az name
body = {
'network': {
'router:external': True,
'tenant_id': TEST_TENANT_ID,
'availability_zone_hints': ['az_name_1']
}
}
self.assertRaises(exceptions.PodNotFound,
fake_plugin.create_network, q_ctx, body)
body = {
'network': {
'name': 'ext-net',
@ -2092,15 +2083,15 @@ class PluginTest(unittest.TestCase,
'ip_address': '100.64.0.5'}]}}})
b_router_id = BOTTOM1_ROUTERS[0]['id']
b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
b_net_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_net_id, 'pod_1', constants.RT_NETWORK)
b_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
b_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_subnet_id, 'pod_1', constants.RT_SUBNET)
for subnet in TOP_SUBNETS:
if subnet['name'].startswith('ns_bridge_subnet'):
t_ns_bridge_subnet_id = subnet['id']
b_ns_bridge_subnet_id = db_api.get_bottom_id_by_top_id_pod_name(
b_ns_bridge_subnet_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_ns_bridge_subnet_id, 'pod_1', constants.RT_SUBNET)
body = {'network_id': b_net_id,
'enable_snat': False,
@ -2241,11 +2232,11 @@ class PluginTest(unittest.TestCase,
b_port = {
'id': b_port_id,
'name': t_port_id,
'network_id': db_api.get_bottom_id_by_top_id_pod_name(
'network_id': db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_net_id, 'pod_1', constants.RT_NETWORK),
'mac_address': 'fa:16:3e:96:41:03',
'fixed_ips': [
{'subnet_id': db_api.get_bottom_id_by_top_id_pod_name(
{'subnet_id': db_api.get_bottom_id_by_top_id_region_name(
t_ctx, t_subnet_id, 'pod_1', constants.RT_SUBNET),
'ip_address': '10.0.0.4'}]
}
@ -2287,14 +2278,14 @@ class PluginTest(unittest.TestCase,
fake_plugin.update_floatingip(q_ctx, fip['id'],
{'floatingip': fip_body})
b_ext_net_id = db_api.get_bottom_id_by_top_id_pod_name(
b_ext_net_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, e_net['id'], 'pod_2', constants.RT_NETWORK)
for port in BOTTOM2_PORTS:
if port['name'] == 'ns_bridge_port':
ns_bridge_port = port
for net in TOP_NETS:
if net['name'].startswith('ns_bridge'):
b_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
b_bridge_net_id = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, net['id'], 'pod_1', constants.RT_NETWORK)
calls = [mock.call(t_ctx,
{'floatingip': {
@ -2376,8 +2367,8 @@ class PluginTest(unittest.TestCase,
bridge_port_name = constants.ns_bridge_port_name % (
e_net['tenant_id'], None, b_port_id)
t_pod = db_api.get_top_pod(t_ctx)
mapping = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, bridge_port_name, t_pod['pod_name'], constants.RT_PORT)
mapping = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, bridge_port_name, t_pod['region_name'], constants.RT_PORT)
# check routing for bridge port in top pod exists
self.assertIsNotNone(mapping)
@ -2392,8 +2383,8 @@ class PluginTest(unittest.TestCase,
calls = [mock.call(t_ctx, fip_id1),
mock.call(t_ctx, fip_id2)]
mock_delete.assert_has_calls(calls)
mapping = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, bridge_port_name, t_pod['pod_name'], constants.RT_PORT)
mapping = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, bridge_port_name, t_pod['region_name'], constants.RT_PORT)
# check routing for bridge port in top pod is deleted
self.assertIsNone(mapping)
@ -2431,8 +2422,8 @@ class PluginTest(unittest.TestCase,
bridge_port_name = constants.ns_bridge_port_name % (
e_net['tenant_id'], None, b_port_id)
t_pod = db_api.get_top_pod(t_ctx)
mapping = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, bridge_port_name, t_pod['pod_name'], constants.RT_PORT)
mapping = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, bridge_port_name, t_pod['region_name'], constants.RT_PORT)
# check routing for bridge port in top pod exists
self.assertIsNotNone(mapping)
@ -2444,8 +2435,8 @@ class PluginTest(unittest.TestCase,
calls = [mock.call(t_ctx, fip_id1),
mock.call(t_ctx, fip_id2)]
mock_delete.assert_has_calls(calls)
mapping = db_api.get_bottom_id_by_top_id_pod_name(
t_ctx, bridge_port_name, t_pod['pod_name'], constants.RT_PORT)
mapping = db_api.get_bottom_id_by_top_id_region_name(
t_ctx, bridge_port_name, t_pod['region_name'], constants.RT_PORT)
# check routing for bridge port in top pod is deleted
self.assertIsNone(mapping)

View File

@ -69,16 +69,16 @@ class FakeXManager(xmanager.XManager):
class FakeClient(object):
def __init__(self, pod_name=None):
if pod_name:
self.pod_name = pod_name
def __init__(self, region_name=None):
if region_name:
self.region_name = region_name
else:
self.pod_name = 'top'
self.region_name = 'top'
def list_resources(self, resource, cxt, filters=None):
res_list = []
filters = filters or []
for res in RES_MAP[self.pod_name][resource]:
for res in RES_MAP[self.region_name][resource]:
is_selected = True
for _filter in filters:
if _filter['key'] not in res:
@ -139,7 +139,7 @@ class XManagerTest(unittest.TestCase):
top_router_id = 'router_id'
for i in xrange(1, 3):
pod_dict = {'pod_id': 'pod_id_%d' % i,
'pod_name': 'pod_%d' % i,
'region_name': 'pod_%d' % i,
'az_name': 'az_name_%d' % i}
db_api.create_pod(self.context, pod_dict)
@ -179,15 +179,15 @@ class XManagerTest(unittest.TestCase):
'fixed_ips': [{'subnet_id': bridge_subnet['id'],
'ip_address': bridge_subnet['gateway_ip']}]
}
pod_name = 'pod_%d' % i
RES_MAP[pod_name]['network'].append(network)
RES_MAP[pod_name]['network'].append(bridge_network)
RES_MAP[pod_name]['subnet'].append(subnet)
RES_MAP[pod_name]['subnet'].append(bridge_subnet)
RES_MAP[pod_name]['port'].append(port)
RES_MAP[pod_name]['port'].append(vm_port)
RES_MAP[pod_name]['port'].append(bridge_port)
RES_MAP[pod_name]['router'].append(router)
region_name = 'pod_%d' % i
RES_MAP[region_name]['network'].append(network)
RES_MAP[region_name]['network'].append(bridge_network)
RES_MAP[region_name]['subnet'].append(subnet)
RES_MAP[region_name]['subnet'].append(bridge_subnet)
RES_MAP[region_name]['port'].append(port)
RES_MAP[region_name]['port'].append(vm_port)
RES_MAP[region_name]['port'].append(bridge_port)
RES_MAP[region_name]['router'].append(router)
route = {'top_id': top_router_id, 'bottom_id': router['id'],
'pod_id': pod_dict['pod_id'], 'resource_type': 'router'}
@ -257,7 +257,7 @@ class XManagerTest(unittest.TestCase):
for i in xrange(1, 3):
pod_dict = {'pod_id': 'pod_id_%d' % i,
'pod_name': 'pod_%d' % i,
'region_name': 'pod_%d' % i,
'az_name': 'az_name_%d' % i}
db_api.create_pod(self.context, pod_dict)
@ -271,8 +271,8 @@ class XManagerTest(unittest.TestCase):
RES_MAP['top']['network'].append(network)
RES_MAP['top']['subnet'].append(subnet)
pod_name = 'pod_%d' % i
RES_MAP[pod_name]['security_group'].append(sg)
region_name = 'pod_%d' % i
RES_MAP[region_name]['security_group'].append(sg)
route = {'top_id': sg_id, 'bottom_id': sg_id,
'pod_id': pod_dict['pod_id'],
'resource_type': 'security_group'}
@ -314,7 +314,7 @@ class XManagerTest(unittest.TestCase):
payload = {'fake_resource': fake_id}
fake_handle(None, self.context, payload=payload)
jobs = core.query_resource(self.context, models.Job, [], [])
jobs = core.query_resource(self.context, models.AsyncJob, [], [])
expected_status = [constants.JS_New, constants.JS_Success]
job_status = [job['status'] for job in jobs]
self.assertItemsEqual(expected_status, job_status)
@ -333,7 +333,7 @@ class XManagerTest(unittest.TestCase):
payload = {'fake_resource': fake_id}
fake_handle(None, self.context, payload=payload)
jobs = core.query_resource(self.context, models.Job, [], [])
jobs = core.query_resource(self.context, models.AsyncJob, [], [])
expected_status = [constants.JS_New, constants.JS_Fail]
job_status = [job['status'] for job in jobs]
self.assertItemsEqual(expected_status, job_status)
@ -358,10 +358,10 @@ class XManagerTest(unittest.TestCase):
'resource_id': fake_id,
'extra_id': constants.SP_EXTRA_ID
}
core.create_resource(self.context, models.Job, expired_job)
core.create_resource(self.context, models.AsyncJob, expired_job)
fake_handle(None, self.context, payload=payload)
jobs = core.query_resource(self.context, models.Job, [], [])
jobs = core.query_resource(self.context, models.AsyncJob, [], [])
expected_status = ['New', 'Fail', 'Success']
job_status = [job['status'] for job in jobs]
self.assertItemsEqual(expected_status, job_status)
@ -411,11 +411,11 @@ class XManagerTest(unittest.TestCase):
for i, job_dict in enumerate(job_dict_list, 1):
job_dict['id'] = 'job_uuid%d' % (2 * i - 1)
job_dict['extra_id'] = 'extra_uuid%d' % (2 * i - 1)
core.create_resource(self.context, models.Job, job_dict)
core.create_resource(self.context, models.AsyncJob, job_dict)
job_dict['id'] = 'job_uuid%d' % (2 * i)
job_dict['extra_id'] = 'extra_uuid%d' % (2 * i)
job_dict['status'] = constants.JS_New
core.create_resource(self.context, models.Job, job_dict)
core.create_resource(self.context, models.AsyncJob, job_dict)
# for res3 + uuid3, the latest job's status is "Success", so it is not returned
expected_ids = ['job_uuid3', 'job_uuid5']
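
For orientation, a small sketch of querying the renamed job table with the same filter format used throughout these tests; models.AsyncJob replaces the old models.Job, self.context stands for the test context used above, and the status constant is the one referenced in these hunks:

    new_jobs = core.query_resource(
        self.context, models.AsyncJob,
        [{'key': 'status', 'comparator': 'eq', 'value': constants.JS_New}],
        [])
    new_job_ids = [job['id'] for job in new_jobs]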

View File

@ -155,12 +155,12 @@ class XManager(PeriodicTasks):
self.xjob_handler = xrpcapi.XJobAPI()
super(XManager, self).__init__()
def _get_client(self, pod_name=None):
if not pod_name:
def _get_client(self, region_name=None):
if not region_name:
return self.clients[constants.TOP]
if pod_name not in self.clients:
self.clients[pod_name] = client.Client(pod_name)
return self.clients[pod_name]
if region_name not in self.clients:
self.clients[region_name] = client.Client(region_name)
return self.clients[region_name]
def periodic_tasks(self, context, raise_on_error=False):
"""Tasks to be run at a periodic interval."""
@ -279,7 +279,7 @@ class XManager(PeriodicTasks):
# this is a rare case: we got an IpAddressInUseClient exception
# a second ago but now the floating ip is missing
raise t_network_exc.BottomPodOperationFailure(
resource='floating ip', pod_name=pod['pod_name'])
resource='floating ip', region_name=pod['region_name'])
associated_port_id = fips[0].get('port_id')
if associated_port_id == port_id:
# if the internal port associated with the existing fip is what
@ -296,7 +296,7 @@ class XManager(PeriodicTasks):
def _setup_router_one_pod(self, ctx, t_pod, b_pod, t_client, t_net,
t_router, t_ew_bridge_net, t_ew_bridge_subnet,
need_ns_bridge):
b_client = self._get_client(b_pod['pod_name'])
b_client = self._get_client(b_pod['region_name'])
router_body = {'router': {'name': t_router['id'],
'distributed': False}}
@ -364,8 +364,8 @@ class XManager(PeriodicTasks):
# attach internal port to bottom router
t_ports = self._get_router_interfaces(t_client, ctx, t_router['id'],
t_net['id'])
b_net_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_net['id'], b_pod['pod_name'], constants.RT_NETWORK)
b_net_id = db_api.get_bottom_id_by_top_id_region_name(
ctx, t_net['id'], b_pod['region_name'], constants.RT_NETWORK)
if b_net_id:
b_ports = self._get_router_interfaces(b_client, ctx, b_router_id,
b_net_id)
@ -415,7 +415,7 @@ class XManager(PeriodicTasks):
constants.RT_NETWORK)
# bottom external network should exist
b_ext_pod, b_ext_net_id = mappings[0]
b_ext_client = self._get_client(b_ext_pod['pod_name'])
b_ext_client = self._get_client(b_ext_pod['region_name'])
b_fips = b_ext_client.list_floatingips(
ctx, [{'key': 'floating_network_id', 'comparator': 'eq',
'value': b_ext_net_id}])
@ -427,8 +427,8 @@ class XManager(PeriodicTasks):
for add_fip in add_fips:
fip = t_ip_fip_map[add_fip]
t_int_port_id = fip['port_id']
b_int_port_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_int_port_id, b_pod['pod_name'], constants.RT_PORT)
b_int_port_id = db_api.get_bottom_id_by_top_id_region_name(
ctx, t_int_port_id, b_pod['region_name'], constants.RT_PORT)
if not b_int_port_id:
LOG.warning(_LW('Port %(port_id)s associated with floating ip '
'%(fip)s is not mapped to bottom pod'),
@ -444,8 +444,9 @@ class XManager(PeriodicTasks):
ctx, q_ctx, project_id, t_pod, t_ns_bridge_net['id'], None,
b_int_port_id, False)
t_ns_bridge_port = t_client.get_ports(ctx, t_ns_bridge_port_id)
b_ext_bridge_net_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_ns_bridge_net['id'], b_ext_pod['pod_name'],
b_ext_bridge_net_id = \
db_api.get_bottom_id_by_top_id_region_name(
ctx, t_ns_bridge_net['id'], b_ext_pod['region_name'],
constants.RT_NETWORK)
port_body = {
'port': {
@ -580,12 +581,12 @@ class XManager(PeriodicTasks):
filters=[{'key': 'router:external',
'comparator': 'eq',
'value': True}])
ext_net_pod_names = set(
ext_net_region_names = set(
[ext_net[AZ_HINTS][0] for ext_net in ext_nets])
if not ext_net_pod_names:
if not ext_net_region_names:
need_ns_bridge = False
elif b_pod['pod_name'] in ext_net_pod_names:
elif b_pod['region_name'] in ext_net_region_names:
need_ns_bridge = False
else:
need_ns_bridge = True
@ -609,7 +610,7 @@ class XManager(PeriodicTasks):
router_bridge_ip_map = {}
router_ips_map = {}
for i, b_pod in enumerate(b_pods):
bottom_client = self._get_client(pod_name=b_pod['pod_name'])
bottom_client = self._get_client(region_name=b_pod['region_name'])
b_interfaces = bottom_client.list_ports(
ctx, filters=[{'key': 'device_id',
'comparator': 'eq',
@ -641,7 +642,8 @@ class XManager(PeriodicTasks):
router_ips_map[b_router_ids[i]][b_subnet['cidr']] = ips
for i, b_router_id in enumerate(b_router_ids):
bottom_client = self._get_client(pod_name=b_pods[i]['pod_name'])
bottom_client = self._get_client(
region_name=b_pods[i]['region_name'])
extra_routes = []
if not router_ips_map[b_router_id]:
bottom_client.update_routers(
@ -664,7 +666,7 @@ class XManager(PeriodicTasks):
def delete_server_port(self, ctx, payload):
b_pod_id, b_port_id = payload[constants.JT_PORT_DELETE].split('#')
b_pod = db_api.get_pod(ctx, b_pod_id)
self._get_client(b_pod['pod_name']).delete_ports(ctx, b_port_id)
self._get_client(b_pod['region_name']).delete_ports(ctx, b_port_id)
@staticmethod
def _safe_create_security_group_rule(context, client, body):
@ -742,7 +744,7 @@ class XManager(PeriodicTasks):
mappings = db_api.get_bottom_mappings_by_top_id(
ctx, top_sg['id'], constants.RT_SG)
for pod, b_sg_id in mappings:
client = self._get_client(pod['pod_name'])
client = self._get_client(pod['region_name'])
b_sg = client.get_security_groups(ctx, b_sg_id)
add_rules = []
del_rules = []
@ -800,10 +802,10 @@ class XManager(PeriodicTasks):
if not t_network:
return
b_pod = db_api.get_pod(ctx, b_pod_id)
b_pod_name = b_pod['pod_name']
b_client = self._get_client(pod_name=b_pod_name)
b_network_id = db_api.get_bottom_id_by_top_id_pod_name(
ctx, t_network_id, b_pod_name, constants.RT_NETWORK)
b_region_name = b_pod['region_name']
b_client = self._get_client(region_name=b_region_name)
b_network_id = db_api.get_bottom_id_by_top_id_region_name(
ctx, t_network_id, b_region_name, constants.RT_NETWORK)
# name is not allowed to be updated, because it is used by
# lock_handle to retrieve bottom/local resources that have been
# created but not registered in the resource routing table
@ -820,4 +822,4 @@ class XManager(PeriodicTasks):
except q_cli_exceptions.NotFound:
LOG.error(_LE('network: %(net_id)s not found, '
              'pod name: %(name)s'),
{'net_id': b_network_id, 'name': b_pod_name})
{'net_id': b_network_id, 'name': b_region_name})
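
Putting the renames together, a condensed sketch of the lookup pattern the xmanager hunks above now follow; the pod id and top-level uuid are placeholders and error handling is omitted:

    import tricircle.db.api as db_api
    from tricircle.common import client
    from tricircle.common import constants
    from tricircle.common import context

    ctx = context.get_db_context()
    b_pod = db_api.get_pod(ctx, 'pod_id_1')        # hypothetical pod id
    b_region_name = b_pod['region_name']           # was pod['pod_name'] before this change
    b_client = client.Client(b_region_name)
    b_port_id = db_api.get_bottom_id_by_top_id_region_name(
        ctx, 'top_port_uuid', b_region_name, constants.RT_PORT)
    if b_port_id:
        b_client.delete_ports(ctx, b_port_id)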