Rename instance to server and instance type to flavor
Change-Id: I35a7fb0b4f3fa9e09f2fc2c739c0a9b39a8576ab
parent 49b26986f6
commit 7d0f8ab360
@ -6,9 +6,9 @@
Show and manage server flavors.

Flavors are a way to describe the basic dimensions of a instance to be
Flavors are a way to describe the basic dimensions of a server to be
created including how much ``cpu``, ``ram``, and ``disk space`` are
allocated to an instance built with this flavor.
allocated to a server built with this flavor.

List Flavors
============
@ -189,7 +189,7 @@ Delete Flavor
Deletes a flavor.

This is typically an admin only action. Deleting a flavor that is in use by
existing instances is not recommended as it can cause incorrect data to
existing servers is not recommended as it can cause incorrect data to
be returned to the user under some operations.

Normal response codes: 204
@ -7,9 +7,9 @@ Baremetal Compute API V1 (CURRENT)
.. rest_expand_all::

.. include:: urls.inc
.. include:: instances.inc
.. include:: instance_states.inc
.. include:: instance_networks.inc
.. include:: servers.inc
.. include:: server_states.inc
.. include:: server_networks.inc
.. include:: flavors.inc
.. include:: flavor_access.inc
.. include:: availability_zones.inc
@ -1,35 +0,0 @@
.. -*- rst -*-

========================
Instance Serial Console
========================

Instances Serial Console can be managed through serial_console sub-resource.

Instance Serial Console Summary
===============================

.. rest_method:: GET /v1/instances/{instance_uuid}/serial_console

Get the console url info of the Instance.

Normal response code: 200

Request
-------

.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident

Response
--------

.. rest_parameters:: parameters.yaml

- console: console_info

**Example instance network:**

.. literalinclude:: samples/instance_console/instance-serial-console-get.json
@ -1,140 +0,0 @@
.. -*- rst -*-

=================
Instance States
=================

Instances States can be managed through states sub-resource.

A Instance can be rebooted, turned on, or turned off by requesting a change to
its power state.

Instance State Summary
======================

.. rest_method:: GET /v1/instances/{instance_uuid}/states

Get a summary of the Instance's current states.

Normal response code: 200

Request
-------

.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident

Response
--------

.. rest_parameters:: parameters.yaml

- power_state: power_state
- locked: lock_state
- status: instance_status

**Example instance state:**

.. literalinclude:: samples/instance_states/instance-get-state-response.json

Change Instance Power State
===========================

.. rest_method:: PUT /v1/instances/{instance_uuid}/states/power

Request a change to the Instance's power state.

Normal response code: 202

Error codes:
- 409 (ClientError)
- 400 (InvalidState)
- 406 (NotAcceptable)

Request
-------

.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- target: power_state_target

**Example request to power off a Instance:**

.. literalinclude:: samples/instance_states/instance-set-power-off.json

Response
--------

If successful, this method does not return content in the response body.

Change Instance Lock State
===========================

.. rest_method:: PUT /v1/instances/{instance_uuid}/states/lock

Request a change to the Instance's lockstate.

Normal response code: 202

Error codes:
- 409 (ClientError)
- 400 (BadRequest)
- 403 (Forbidden)

Request
-------

.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- target: lock_state

**Example request to lock an Instance:**

.. literalinclude:: samples/instance_states/lock-instance.json

**Example request to unlock an Instance:**

.. literalinclude:: samples/instance_states/unlock-instance.json

Response
--------

If successful, this method does not return content in the response body.

Change Instance Provision State
===============================

.. rest_method:: PUT /v1/instances/{instance_uuid}/states/provision

Request a change to the Instance's provision state.

Normal response code: 202

Error codes:
- 409 (ClientError)
- 400 (BadRequest)
- 403 (Forbidden)

Request
-------

.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- target: provision_state

**Example request to rebuild an Instance:**

.. literalinclude:: samples/instance_states/rebuild-instance.json

Response
--------

If successful, this method does not return content in the response body.
@ -27,9 +27,9 @@ flavor_uuid_path:
in: path
required: true
type: string
instance_ident:
server_ident:
description: |
The UUID of the instance.
The UUID of the server.
in: path
required: true
type: string
@ -49,7 +49,7 @@ tenant_id_path:
# variables in query
all_tenants:
description: |
Specify the ``all_tenants=1`` query parameter to list all instances
Specify the ``all_tenants=1`` query parameter to list all servers
for all projects. By default this is only allowed by admin users.
in: query
required: false
@ -59,11 +59,11 @@ fields:
One or more fields to be returned in the response.

For example, the following request returns only the ``uuid``
and ``name`` fields for each instance:
and ``name`` fields for each server:

::

GET /v1/instances?fields=uuid,name
GET /v1/servers?fields=uuid,name
in: query
required: false
type: array
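A minimal sketch of how the ``fields`` query parameter might be used from a ``requests``-based Python client; the service URL and token below are placeholders, not values defined by this change::

    import requests

    MOGAN_URL = "http://mycompute.pvt/mogan/v1"  # placeholder service URL
    TOKEN = "placeholder-keystone-token"         # placeholder auth token

    # Return only the uuid and name of each server.
    resp = requests.get(MOGAN_URL + "/servers",
                        params={"fields": "uuid,name"},
                        headers={"X-Auth-Token": TOKEN})
    print(resp.json())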
@ -84,11 +84,11 @@ address:
type: string
availability_zone:
description: |
The availability zone from which to launch the instance. When you provision resources,
you specify from which availability zone you want your instance to be built. Typically,
The availability zone from which to launch the server. When you provision resources,
you specify from which availability zone you want your server to be built. Typically,
you use availability zones to arrange bare metal nodes into logical groups.
An availability zone provides a form of physical isolation and redundancy from
other availability zones. For instance, if some racks in your data center are
other availability zones. For example, if some racks in your data center are
on a separate power source, you can put servers in those racks in their own availability
zone. Availability zones can also help separate different classes of hardware. By
segregating resources into availability zones, you can ensure that your application
@ -183,7 +183,7 @@ flavor_uuid_not_required:
type: string
flavorRef:
description: |
The flavor reference, as a UUID for the flavor for your server instance.
The flavor reference, as a UUID for the flavor for your server.
in: body
required: true
type: string
@ -195,39 +195,7 @@ flavors:
type: array
imageRef:
description: |
The UUID of the image to use for your instance.
in: body
required: true
type: string
instance_description:
description: |
A free form description of the instance. Limited to 255 characters
in length.
in: body
required: false
type: string
instance_name:
description: |
The instance name.
in: body
required: true
type: string
instance_power_state:
description: |
The current power state of this instance. Usually, “power on” or “power off”, but may be “None”
if Mogan is unable to determine the power state (eg, due to hardware failure)
in: body
required: true
type: string
instance_status:
description: |
The status of this instance. Usually, "building", "active", "error", or "None".
in: body
required: true
type: string
instance_uuid:
description: |
The UUID of the instance
The UUID of the image to use for your server.
in: body
required: true
type: string
@ -304,7 +272,7 @@ keypairs:
Array of Keypair objects
launched_at:
description: |
The date and time when the instance was launched. The date and time
The date and time when the server was launched. The date and time
stamp format is `ISO 8601 <https://en.wikipedia.org/wiki/ISO_8601>`_

::
@ -325,23 +293,23 @@ links:
type: array
lock_state:
description: |
The request to lock/unlock instances.
The request to lock/unlock servers.
in: body
required: true
type: boolean
max_count_body:
description: |
The max number of instances to be created. Defaults to the value of ``min_count``.
The max number of servers to be created. Defaults to the value of ``min_count``.
in: body
required: false
type: integer
min_count_body:
description: |
The min number of instances to be created. Defaults to 1.
The min number of servers to be created. Defaults to 1.
in: body
required: false
type: integer
multi_instacne_name_body:
multi_server_name_body:
description: |
A base name for creating unique names during multiple create. A unique
string will be appended to the end of this base name for every instacne
@ -351,30 +319,30 @@ multi_instacne_name_body:
type: string
network_info:
description: |
The port info in the requested network for the instance, with fixed_ip, mac_address, and
The port info in the requested network for the server, with fixed_ip, mac_address, and
network uuid
in: body
required: true
type: dict
network_port_type:
description: |
To provision the server instance with a specified type of NIC(like 1GE or 10 GE) for a
To provision the server with a specified type of NIC(like 1GE or 10 GE) for a
network, specify the type of the NIC in the ``port_type`` key in a dict in ``networks`` list.
in: body
required: false
type: string
network_uuid:
description: |
To provision the server instance with a NIC for a network, specify the UUID of
To provision the server with a NIC for a network, specify the UUID of
the network in the ``uuid`` key in a dict in ``networks`` list.
in: body
required: true
type: string
networks:
description: |
A list of networks of the tenant. Optionally, you can create one or more NICs on the instance.
To provision the server instance with a NIC for a network, specify the UUID of the network
in the ``uuid`` key in a dict in ``networks`` list. To provision the server instance with a
A list of networks of the tenant. Optionally, you can create one or more NICs on the server.
To provision the server with a NIC for a network, specify the UUID of the network
in the ``uuid`` key in a dict in ``networks`` list. To provision the server with a
specified type of NIC, specify the port-type key in a dict in a ``networks`` list.
in: body
required: true
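A hypothetical fragment of a create request body showing the ``networks`` list described above, with the optional ``port_type`` key; the network UUID is reused from the sample files in this change::

    # Each entry names a network by uuid; port_type is optional.
    networks = [
        {"uuid": "12cffc4a-b845-409e-b589-7c84be4b10d9", "port_type": "10GE"},
    ]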
@ -389,7 +357,7 @@ personality:
type: string
power_state:
description: |
The current power state of this Instance. Usually, "power on" or
The current power state of this Server. Usually, "power on" or
"power off", but may be "None" if Mogan is unable to determine the power
state (eg, due to hardware failure).
in: body
@ -414,6 +382,38 @@ provision_state:
in: body
required: true
type: string
server_description:
description: |
A free form description of the server. Limited to 255 characters
in length.
in: body
required: false
type: string
server_name:
description: |
The server name.
in: body
required: true
type: string
server_power_state:
description: |
The current power state of this server. Usually, “power on” or “power off”, but may be “None”
if Mogan is unable to determine the power state (eg, due to hardware failure)
in: body
required: true
type: string
server_status:
description: |
The status of this server. Usually, "building", "active", "error", or "None".
in: body
required: true
type: string
server_uuid:
description: |
The UUID of the server
in: body
required: true
type: string
tenant_id_body:
description: |
The UUID of the tenant in a multi-tenancy cloud.
@ -443,7 +443,7 @@ user_data:
type: string
user_id_body:
description: |
The user ID of the user who owns the instance.
The user ID of the user who owns the server.
in: body
required: true
type: string
@ -1,7 +1,7 @@
{
"name": "test_instance",
"description": "this is a test instance",
"instance_type_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"name": "test_server",
"description": "this is a test server",
"flavor_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
"networks": [
{
@ -1,7 +1,7 @@
{
"name": "test_instance",
"description": "this is a test instance",
"instance_type_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"name": "test_server",
"description": "this is a test server",
"flavor_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
"availability_zone": "mogan",
"networks": [
@ -1,18 +1,18 @@
{
"name": "test_instance",
"description": "this is a test instance",
"instance_type_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"name": "test_server",
"description": "this is a test server",
"flavor_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
"availability_zone" : "Beijing-01",
"status": "active",
"power_state": "on",
"links": [
{
"href": "http://10.3.150.17:6688/v1/instances/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"href": "http://10.3.150.17:6688/v1/servers/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"rel": "self"
},
{
"href": "http://10.3.150.17:6688/instances/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"href": "http://10.3.150.17:6688/servers/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"rel": "bookmark"
}
],
@ -1,20 +1,20 @@
{
"availability_zone": null,
"created_at": "2016-10-17T04:12:41+00:00",
"description": "this is a test instance",
"description": "this is a test server",
"image_uuid": "ac3b2291-b9ef-45f6-8eeb-21ac568a64a5",
"instance_type_uuid": "28708dff-283c-449e-9bfa-a48c93480c86",
"flavor_uuid": "28708dff-283c-449e-9bfa-a48c93480c86",
"links": [
{
"href": "http://localhost:6688/v1/instances/f978ef48-d4af-4dad-beec-e6174309bc71",
"href": "http://localhost:6688/v1/servers/f978ef48-d4af-4dad-beec-e6174309bc71",
"rel": "self"
},
{
"href": "http://localhost:6688/instances/f978ef48-d4af-4dad-beec-e6174309bc71",
"href": "http://localhost:6688/servers/f978ef48-d4af-4dad-beec-e6174309bc71",
"rel": "bookmark"
}
],
"name": "test_instance",
"name": "test_server",
"network_info": {
"12cffc4a-b845-409e-b589-7c84be4b10d9": {
"fixed_ips": [
@ -1,22 +1,22 @@
{
"instances": [
"servers": [
{
"availability_zone": null,
"created_at": "2016-10-17T04:12:41+00:00",
"description": "this is a test instance",
"description": "this is a test server",
"image_uuid": "ac3b2291-b9ef-45f6-8eeb-21ac568a64a5",
"instance_type_uuid": "28708dff-283c-449e-9bfa-a48c93480c86",
"flavor_uuid": "28708dff-283c-449e-9bfa-a48c93480c86",
"links": [
{
"href": "http://localhost:6688/v1/instances/f978ef48-d4af-4dad-beec-e6174309bc71",
"href": "http://localhost:6688/v1/servers/f978ef48-d4af-4dad-beec-e6174309bc71",
"rel": "self"
},
{
"href": "http://localhost:6688/instances/f978ef48-d4af-4dad-beec-e6174309bc71",
"href": "http://localhost:6688/servers/f978ef48-d4af-4dad-beec-e6174309bc71",
"rel": "bookmark"
}
],
"name": "test_instance",
"name": "test_server",
"network_info": {
"12cffc4a-b845-409e-b589-7c84be4b10d9": {
"fixed_ips": [
@ -1,18 +1,18 @@
{
"instances": [
"servers": [
{
"description": "this is a test instance",
"description": "this is a test server",
"links": [
{
"href": "http://localhost:6688/v1/instances/f978ef48-d4af-4dad-beec-e6174309bc71",
"href": "http://localhost:6688/v1/servers/f978ef48-d4af-4dad-beec-e6174309bc71",
"rel": "self"
},
{
"href": "http://localhost:6688/instances/f978ef48-d4af-4dad-beec-e6174309bc71",
"href": "http://localhost:6688/servers/f978ef48-d4af-4dad-beec-e6174309bc71",
"rel": "bookmark"
}
],
"name": "test_instance",
"name": "test_server",
"status": "building",
"power_state": "power on",
"uuid": "f978ef48-d4af-4dad-beec-e6174309bc71"
@ -1,18 +1,18 @@
{
"name": "test_instance",
"description": "this is a test instance",
"instance_type_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"name": "test_server",
"description": "this is a test server",
"flavor_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
"image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
"availability_zone": "Beijing-01",
"status": "active",
"power_state": "on",
"links": [
{
"href": "http://10.3.150.17:6688/v1/instances/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"href": "http://10.3.150.17:6688/v1/servers/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"rel": "self"
},
{
"href": "http://10.3.150.17:6688/instances/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"href": "http://10.3.150.17:6688/servers/7de2859d-ec6d-42c7-bb86-9d630ba5ac94",
"rel": "bookmark"
}
],
@ -1,21 +1,21 @@
.. -*- rst -*-

===================
Instance Networks
Server Networks
===================

Instances Networks can be managed through networks sub-resource.
Servers Networks can be managed through networks sub-resource.

A Instance can be associated or dissociated with a floating IP by requesting
A Server can be associated or dissociated with a floating IP by requesting
the floatingip sub-resource.

Instance Network Summary
Server Network Summary
========================

.. rest_method:: GET /v1/instances/{instance_uuid}/networks
.. rest_method:: GET /v1/servers/{server_uuid}/networks

Get a summary of the Instance's networks.
Get a summary of the Server's networks.

Normal response code: 200
@ -24,7 +24,7 @@ Request
.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- server_uuid: server_ident

Response
--------
@ -33,20 +33,20 @@ Response
- ports: network_info

**Example instance network:**
**Example server network:**

.. literalinclude:: samples/instance_networks/instance-get-network-response.json
.. literalinclude:: samples/server_networks/server-get-network-response.json

Add (Associate) Floating IP
===========================

.. rest_method:: POST /v1/instances/{instance_uuid}/networks/floatingips
.. rest_method:: POST /v1/servers/{server_uuid}/networks/floatingips

Adds a floating IP address to an instance, which associates
that address with the instance.
Adds a floating IP address to a server, which associates
that address with the server.

If an instance is connected to multiple networks, you can associate a
If a server is connected to multiple networks, you can associate a
floating IP address with a specific fixed IP address by using the
optional ``fixed_address`` parameter.
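A sketch of how the association call might be issued from Python, assuming a ``requests`` client; the JSON keys follow the ``address`` and ``fixed_address`` parameters documented for this request, while the URL, token and IP addresses are placeholders::

    import requests

    MOGAN_URL = "http://mycompute.pvt/mogan/v1"  # placeholder service URL
    TOKEN = "placeholder-keystone-token"
    server_uuid = "7de2859d-ec6d-42c7-bb86-9d630ba5ac94"

    resp = requests.post(
        MOGAN_URL + "/servers/" + server_uuid + "/networks/floatingips",
        json={"address": "172.24.4.10", "fixed_address": "10.0.0.5"},
        headers={"X-Auth-Token": TOKEN})
    print(resp.status_code)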
@ -59,13 +59,13 @@ Request
.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- server_uuid: server_ident
- address: address
- fixed_address: fixed_address

**Example request to Add (Associate) Floating IP to an instance:**
**Example request to Add (Associate) Floating IP to a server:**

.. literalinclude:: samples/instance_networks/instance-associate-fip-req.json
.. literalinclude:: samples/server_networks/server-associate-fip-req.json

Response
--------
@ -76,9 +76,9 @@ If successful, this method does not return content in the response body.
Remove (Disassociate) Floating IP
=================================

.. rest_method:: DELETE /v1/instances/{instance_uuid}/networks/floatingips/{fip_address}
.. rest_method:: DELETE /v1/servers/{server_uuid}/networks/floatingips/{fip_address}

Removes, or disassociates, a floating IP address from an instance.
Removes, or disassociates, a floating IP address from a server.

Normal response codes: 204
@ -90,7 +90,7 @@ Request
.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- server_uuid: server_ident
- fip_address: address_path

Response
api-ref/source/v1/server_serial_console.inc (new file, 35 lines)
@ -0,0 +1,35 @@
.. -*- rst -*-

========================
Server Serial Console
========================

Servers Serial Console can be managed through serial_console sub-resource.

Server Serial Console Summary
===============================

.. rest_method:: GET /v1/servers/{server_uuid}/serial_console

Get the console url info of the Server.

Normal response code: 200

Request
-------

.. rest_parameters:: parameters.yaml

- server_uuid: server_ident

Response
--------

.. rest_parameters:: parameters.yaml

- console: console_info

**Example server serial console:**

.. literalinclude:: samples/server_console/server-serial-console-get.json
api-ref/source/v1/server_states.inc (new file, 140 lines)
@ -0,0 +1,140 @@
.. -*- rst -*-

=================
Server States
=================

Servers States can be managed through states sub-resource.

A Server can be rebooted, turned on, or turned off by requesting a change to
its power state.

Server State Summary
======================

.. rest_method:: GET /v1/servers/{server_uuid}/states

Get a summary of the Server's current states.

Normal response code: 200

Request
-------

.. rest_parameters:: parameters.yaml

- server_uuid: server_ident

Response
--------

.. rest_parameters:: parameters.yaml

- power_state: power_state
- locked: lock_state
- status: server_status

**Example server state:**

.. literalinclude:: samples/server_states/server-get-state-response.json

Change Server Power State
===========================

.. rest_method:: PUT /v1/servers/{server_uuid}/states/power

Request a change to the Server's power state.

Normal response code: 202

Error codes:
- 409 (ClientError)
- 400 (InvalidState)
- 406 (NotAcceptable)

Request
-------

.. rest_parameters:: parameters.yaml

- server_uuid: server_ident
- target: power_state_target

**Example request to power off a Server:**

.. literalinclude:: samples/server_states/server-set-power-off.json

Response
--------

If successful, this method does not return content in the response body.

Change Server Lock State
===========================

.. rest_method:: PUT /v1/servers/{server_uuid}/states/lock

Request a change to the Server's lockstate.

Normal response code: 202

Error codes:
- 409 (ClientError)
- 400 (BadRequest)
- 403 (Forbidden)

Request
-------

.. rest_parameters:: parameters.yaml

- server_uuid: server_ident
- target: lock_state

**Example request to lock a Server:**

.. literalinclude:: samples/server_states/lock-server.json

**Example request to unlock a Server:**

.. literalinclude:: samples/server_states/unlock-server.json

Response
--------

If successful, this method does not return content in the response body.

Change Server Provision State
===============================

.. rest_method:: PUT /v1/servers/{server_uuid}/states/provision

Request a change to the Server's provision state.

Normal response code: 202

Error codes:
- 409 (ClientError)
- 400 (BadRequest)
- 403 (Forbidden)

Request
-------

.. rest_parameters:: parameters.yaml

- server_uuid: server_ident
- target: provision_state

**Example request to rebuild a Server:**

.. literalinclude:: samples/server_states/rebuild-server.json

Response
--------

If successful, this method does not return content in the response body.
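A sketch of the power state call documented in this file, assuming a ``requests`` client and that the referenced sample body is simply ``{"target": "off"}``; the URL and token are placeholders::

    import requests

    MOGAN_URL = "http://mycompute.pvt/mogan/v1"  # placeholder service URL
    TOKEN = "placeholder-keystone-token"
    server_uuid = "7de2859d-ec6d-42c7-bb86-9d630ba5ac94"

    resp = requests.put(MOGAN_URL + "/servers/" + server_uuid + "/states/power",
                        json={"target": "off"},
                        headers={"X-Auth-Token": TOKEN})
    # 202: accepted, the state change is applied asynchronously.
    print(resp.status_code)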
@ -1,24 +1,24 @@
.. -*- rst -*-

===========
Instances
Servers
===========

Lists, creates, shows details for, updates, and deletes instances.
Lists, creates, shows details for, updates, and deletes servers.

Create Instance
Create Server
===============

.. rest_method:: POST /instances
.. rest_method:: POST /servers

Creates an instance.
Creates a server.

The progress of this operation depends on the location of the
requested image, network I/O, selected type, and other factors.

The ``Location`` header returns the full URL to the newly created
instance and is available as a ``self`` and ``bookmark`` link in the
instance representation.
server and is available as a ``self`` and ``bookmark`` link in the
server representation.

Normal response codes: 201
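A sketch of the create call from Python, assuming a ``requests`` client; the body mirrors the ``server-create-req.json`` sample referenced below, while the URL and token are placeholders::

    import requests

    MOGAN_URL = "http://mycompute.pvt/mogan/v1"  # placeholder service URL
    TOKEN = "placeholder-keystone-token"

    body = {
        "name": "test_server",
        "description": "this is a test server",
        "flavor_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
        "image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
        "networks": [{"uuid": "12cffc4a-b845-409e-b589-7c84be4b10d9"}],
    }
    resp = requests.post(MOGAN_URL + "/servers", json=body,
                         headers={"X-Auth-Token": TOKEN})
    # The Location header points at the newly created server.
    print(resp.status_code, resp.headers.get("Location"))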
@ -30,9 +30,9 @@ Request
.. rest_parameters:: parameters.yaml

- name: instance_name
- description: instance_description
- instance_type_uuid: flavorRef
- name: server_name
- description: server_description
- flavor_uuid: flavorRef
- image_uuid: imageRef
- availability_zone: availability_zone
- networks: networks
@ -42,9 +42,9 @@ Request
- personality: personality
- key_name: key_name

**Example Create Instance: JSON request**
**Example Create Server: JSON request**

.. literalinclude:: samples/instances/instance-create-req.json
.. literalinclude:: samples/servers/server-create-req.json
:language: javascript

Response
@ -52,40 +52,40 @@ Response
.. rest_parameters:: parameters.yaml

- name: instance_name
- description: instance_description
- instance_type_uuid: flavorRef
- name: server_name
- description: server_description
- flavor_uuid: flavorRef
- image_uuid: imageRef
- availability_zone: availability_zone
- network_info: network_info
- links: links
- uuid: instance_uuid
- status: instance_status
- power_state: instance_power_state
- uuid: server_uuid
- status: server_status
- power_state: server_power_state
- project_id: project_id_body
- user_id: user_id_body
- updated_at: updated_at
- created_at: created_at
- extra: extra

**Example Create Instance: JSON response**
**Example Create Server: JSON response**

.. literalinclude:: samples/instances/instance-create-resp.json
.. literalinclude:: samples/servers/server-create-resp.json
:language: javascript

Create Multiple Instances
Create Multiple Servers
=========================

.. rest_method:: POST /instances
.. rest_method:: POST /servers

Create Multiple Instances.
Create Multiple Servers.

There is a second kind of create call which can create multiple instances
There is a second kind of create call which can create multiple servers
at once. This supports all the same parameters as create with a few additional
attributes specific to multiple create.

Error handling for multiple create is not as consistent as for single instance
create, and there is no guarantee that all the instances will be created
Error handling for multiple create is not as consistent as for single server
create, and there is no guarantee that all the servers will be created
successfully.

Normal response codes: 201
@ -100,37 +100,37 @@ These are the parameters beyond single create that are supported.
.. rest_parameters:: parameters.yaml

- name: multi_instacne_name_body
- name: multi_server_name_body
- min_count: min_count_body
- max_count: max_count_body

**Example Create Multiple Instance: JSON request**
**Example Create Multiple Server: JSON request**

.. literalinclude:: samples/instances/multi-instance-create-req.json
.. literalinclude:: samples/servers/multi-server-create-req.json
:language: javascript

Response
--------

The first instance will be returned. The returned paramaters is same to creating
a single instance's.
The first server will be returned. The returned parameters are the same as for
creating a single server.

**Example Create Multiple Instance: JSON response**
**Example Create Multiple Server: JSON response**

.. literalinclude:: samples/instances/instance-create-resp.json
.. literalinclude:: samples/servers/server-create-resp.json
:language: javascript

List Instances
List Servers
===============

.. rest_method:: GET /instances
.. rest_method:: GET /servers

Return a list of bare metal Instances, with some useful information about each
Instance.
Return a list of bare metal Servers, with some useful information about each
Server.

By default, this query will return the name, instance uuid, instance status
and description for each Instance.
By default, this query will return the name, server uuid, server status
and description for each Server.

Normal response codes: 200
@ -150,25 +150,25 @@ Response
.. rest_parameters:: parameters.yaml

- name: instance_name
- description: instance_description
- uuid: instance_uuid
- status: instance_status
- power_state: instance_power_state
- name: server_name
- description: server_description
- uuid: server_uuid
- status: server_status
- power_state: server_power_state
- links: links

**Example List of Instances: JSON response**
**Example List of Servers: JSON response**

.. literalinclude:: samples/instances/instance-list-resp.json
.. literalinclude:: samples/servers/server-list-resp.json
:language: javascript

List Instances Detailed
List Servers Detailed
=======================

.. rest_method:: GET /instances/detail
.. rest_method:: GET /servers/detail

Return a list of bare metal Instances with complete details.
Return a list of bare metal Servers with complete details.

Normal response codes: 200
@ -185,16 +185,16 @@ Response
.. rest_parameters:: parameters.yaml

- name: instance_name
- description: instance_description
- instance_type_uuid: flavorRef
- name: server_name
- description: server_description
- flavor_uuid: flavorRef
- image_uuid: imageRef
- availability_zone: availability_zone
- network_info: network_info
- links: links
- uuid: instance_uuid
- status: instance_status
- power_state: instance_power_state
- uuid: server_uuid
- status: server_status
- power_state: server_power_state
- project_id: project_id_body
- user_id: user_id_body
- updated_at: updated_at
@ -202,18 +202,18 @@ Response
- launched_at: launched_at
- extra: extra

**Example Detailed list of Instances: JSON response**
**Example Detailed list of Servers: JSON response**

.. literalinclude:: samples/instances/instance-list-detail-resp.json
.. literalinclude:: samples/servers/server-list-detail-resp.json
:language: javascript

Show Instance Details
Show Server Details
=====================

.. rest_method:: GET /instances/{instance_uuid}
.. rest_method:: GET /servers/{server_uuid}

Shows details of an instance. By default, this will return the full
Shows details of a server. By default, this will return the full
representation of the resource; an optional fields parameter can be supplied to
return only the specified set.
@ -227,7 +227,7 @@ Request
.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- server_uuid: server_ident
- fields: fields

Response
@ -235,16 +235,16 @@ Response
.. rest_parameters:: parameters.yaml

- name: instance_name
- description: instance_description
- instance_type_uuid: flavorRef
- name: server_name
- description: server_description
- flavor_uuid: flavorRef
- image_uuid: imageRef
- availability_zone: availability_zone
- network_info: network_info
- links: links
- uuid: instance_uuid
- status: instance_status
- power_state: instance_power_state
- uuid: server_uuid
- status: server_status
- power_state: server_power_state
- project_id: project_id_body
- user_id: user_id_body
- updated_at: updated_at
@ -252,18 +252,18 @@ Response
- launched_at: launched_at
- extra: extra

**Example Instance Details: JSON response**
**Example Server Details: JSON response**

.. literalinclude:: samples/instances/instance-detail-resp.json
.. literalinclude:: samples/servers/server-detail-resp.json
:language: javascript

Update Instance
Update Server
===============

.. rest_method:: PATCH /instances/{instance_uuid}
.. rest_method:: PATCH /servers/{server_uuid}

Updates the infromation stored about an instance.
Updates the information stored about a server.

Normal response codes: 200
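A sketch of such an update from Python, assuming a ``requests`` client; the body is an RFC 6902 JSON PATCH document, and the URL, token and new description are placeholders::

    import requests

    MOGAN_URL = "http://mycompute.pvt/mogan/v1"  # placeholder service URL
    TOKEN = "placeholder-keystone-token"
    server_uuid = "f978ef48-d4af-4dad-beec-e6174309bc71"

    patch = [{"op": "replace", "path": "/description",
              "value": "an updated description"}]
    resp = requests.patch(MOGAN_URL + "/servers/" + server_uuid, json=patch,
                          headers={"X-Auth-Token": TOKEN})
    print(resp.status_code)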
@ -278,11 +278,11 @@ The BODY of the PATCH request must be a JSON PATCH document, adhering to
.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- server_uuid: server_ident

**Example Update Instance: JSON request**
**Example Update Server: JSON request**

.. literalinclude:: samples/instances/instance-update-req.json
.. literalinclude:: samples/servers/server-update-req.json
:language: javascript

Response
@ -290,38 +290,38 @@ Response
.. rest_parameters:: parameters.yaml

- name: instance_name
- description: instance_description
- instance_type_uuid: flavorRef
- name: server_name
- description: server_description
- flavor_uuid: flavorRef
- image_uuid: imageRef
- availability_zone: availability_zone
- network_info: network_info
- links: links
- uuid: instance_uuid
- status: instance_status
- power_state: instance_power_state
- uuid: server_uuid
- status: server_status
- power_state: server_power_state
- project_id: project_id_body
- user_id: user_id_body
- updated_at: updated_at
- created_at: created_at
- extra: extra

**Example Update Instance: JSON response**
**Example Update Server: JSON response**

.. literalinclude:: samples/instances/instance-update-resp.json
.. literalinclude:: samples/servers/server-update-resp.json
:language: javascript

Delete Instance
Delete Server
===============

.. rest_method:: DELETE /instances/{instance_uuid}
.. rest_method:: DELETE /servers/{server_uuid}

Deletes an instance.
Deletes a server.

Preconditions

- The instance must exist.
- The server must exist.

Normal response codes: 204
@ -332,7 +332,7 @@ Request
.. rest_parameters:: parameters.yaml

- instance_uuid: instance_ident
- server_uuid: server_ident

Response
--------
@ -12,7 +12,7 @@ will be added to build a full path.
For instance, if the ``service url`` is
``http://mycompute.pvt/mogan/v1`` then the full API call for
``/instances`` is ``http://mycompute.pvt/mogan/v1/instances``.
``/servers`` is ``http://mycompute.pvt/mogan/v1/servers``.

Depending on the deployment the baremetal compute service url might
be http or https, a custom port, a custom path, and include your
@ -23,5 +23,5 @@ to work at a single site. It should always be discovered from the
Identity token.

As such, for the rest of this document we will be using short hand
where ``GET /instances`` really means ``GET
{your_service_url}/instances``.
where ``GET /servers`` really means ``GET
{your_service_url}/servers``.
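A short illustration of the short hand, reusing the example service url above::

    # The catalog-discovered service url is prepended to every documented path.
    service_url = "http://mycompute.pvt/mogan/v1"  # example / placeholder
    full_call = service_url + "/servers"           # i.e. GET /servers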
@ -193,7 +193,7 @@ if is_service_enabled mogan; then
echo_summary "Initializing mogan"
init_mogan
start_mogan
echo_summary "Creating instance type"
echo_summary "Creating flavor"
create_flavor
echo_summary "Updating ironic node properties"
update_ironic_node_type
@ -1,11 +1,11 @@
{
"event_type": "instance.create.start",
"event_type": "server.create.start",
"payload": {
"mogan_object.name": "InstanceActionPayload",
"mogan_object.name": "ServerActionPayload",
"mogan_object.namespace": "mogan",
"mogan_object.version": "1.0",
"mogan_object.data": {
"instance_type_uuid": "6ce9904f-c61f-4ee8-afbe-c852c05258f6",
"flavor_uuid": "6ce9904f-c61f-4ee8-afbe-c852c05258f6",
"status": "building",
"user_id": "dfc14a6e939646d1929362de1758d7b2",
"uuid": "e1a7b5b7-c76c-4459-8328-10deda95819f",
@ -350,14 +350,14 @@ Run stack.sh::
./stack.sh

Source credentials, and spawn an instance as the ``demo`` user::
Source credentials, and spawn a server as the ``demo`` user::

source ~/devstack/openrc

# query the image id of the default cirros image
image=$(openstack image show $DEFAULT_IMAGE_NAME -f value -c id)

# spawn instance
# spawn server
As our moganclient is not ready now, will add this soon...

Building developer documentation
@ -27,8 +27,8 @@ from mogan.api.controllers import base
from mogan.api.controllers import link
from mogan.api.controllers.v1 import availability_zone
from mogan.api.controllers.v1 import flavors
from mogan.api.controllers.v1 import instances
from mogan.api.controllers.v1 import keypairs
from mogan.api.controllers.v1 import servers
from mogan.api import expose
@ -38,11 +38,11 @@ class V1(base.APIBase):
id = wtypes.text
"""The ID of the version, also acts as the release number"""

instances = [link.Link]
"""Links to the instances resource"""
servers = [link.Link]
"""Links to the servers resource"""

flavors = [link.Link]
"""Links to the instance types resource"""
"""Links to the server types resource"""

availability_zones = [link.Link]
"""Links to the availability zones resource"""
@ -54,13 +54,13 @@ class V1(base.APIBase):
def convert():
v1 = V1()
v1.id = "v1"
v1.instances = [link.Link.make_link('self', pecan.request.public_url,
'instances', ''),
link.Link.make_link('bookmark',
pecan.request.public_url,
'instances', '',
bookmark=True)
]
v1.servers = [link.Link.make_link('self', pecan.request.public_url,
'servers', ''),
link.Link.make_link('bookmark',
pecan.request.public_url,
'servers', '',
bookmark=True)
]
v1.flavors = [link.Link.make_link('self', pecan.request.public_url,
'flavors', ''),
link.Link.make_link('bookmark',
@ -91,7 +91,7 @@ class Controller(rest.RestController):
"""Version 1 API controller root."""

flavors = flavors.FlavorsController()
instances = instances.InstanceController()
servers = servers.ServerController()
availability_zones = availability_zone.AvailabilityZoneController()
keypairs = keypairs.KeyPairController()
@ -65,7 +65,7 @@ class Flavor(base.APIBase):
def __init__(self, **kwargs):
self.fields = []
for field in objects.InstanceType.fields:
for field in objects.Flavor.fields:
# Skip fields we do not expose.
if not hasattr(self, field):
continue
@ -109,8 +109,7 @@ class FlavorExtraSpecsController(rest.RestController):
def get_all(self, flavor_uuid):
"""Retrieve a list of extra specs of the queried flavor."""

flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
flavor = objects.Flavor.get(pecan.request.context, flavor_uuid)
return dict(extra_specs=flavor.extra_specs)

@expose.expose(types.jsontype, types.uuid, body=types.jsontype,
@ -118,8 +117,7 @@ class FlavorExtraSpecsController(rest.RestController):
def patch(self, flavor_uuid, extra_spec):
"""Create/update extra specs for the given flavor."""

flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
flavor = objects.Flavor.get(pecan.request.context, flavor_uuid)
flavor.extra_specs = dict(flavor.extra_specs, **extra_spec)
flavor.save()
return dict(extra_specs=flavor.extra_specs)
@ -129,8 +127,7 @@ class FlavorExtraSpecsController(rest.RestController):
def delete(self, flavor_uuid, spec_name):
"""Delete an extra specs for the given flavor."""

flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
flavor = objects.Flavor.get(pecan.request.context, flavor_uuid)
del flavor.extra_specs[spec_name]
flavor.save()
@ -142,8 +139,8 @@ class FlavorAccessController(rest.RestController):
def get_all(self, flavor_uuid):
"""Retrieve a list of extra specs of the queried flavor."""

flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
flavor = objects.Flavor.get(pecan.request.context,
flavor_uuid)

# public flavor to all projects
if flavor.is_public:
@ -160,8 +157,8 @@ class FlavorAccessController(rest.RestController):
"""Add flavor access for the given tenant."""
validation.check_schema(tenant, flavor_access.add_tenant_access)

flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
flavor = objects.Flavor.get(pecan.request.context,
flavor_uuid)
if flavor.is_public:
msg = _("Can not add access to a public flavor.")
raise wsme.exc.ClientSideError(
@ -182,8 +179,8 @@ class FlavorAccessController(rest.RestController):
def delete(self, flavor_uuid, tenant_id):
"""Remove flavor access for the given tenant."""

flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
flavor = objects.Flavor.get(pecan.request.context,
flavor_uuid)
try:
# TODO(zhenguo): this should be synchronized.
if tenant_id in flavor.projects:
@ -208,7 +205,7 @@ class FlavorsController(rest.RestController):
def get_all(self):
"""Retrieve a list of flavor."""

flavors = objects.InstanceType.list(pecan.request.context)
flavors = objects.Flavor.list(pecan.request.context)
return FlavorCollection.convert_with_links(flavors)

@expose.expose(Flavor, types.uuid)
@ -217,8 +214,7 @@ class FlavorsController(rest.RestController):
:param flavor_uuid: UUID of a flavor.
"""
rpc_flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
rpc_flavor = objects.Flavor.get(pecan.request.context, flavor_uuid)
return Flavor.convert_with_links(rpc_flavor)

@expose.expose(Flavor, body=Flavor,
@ -228,8 +224,8 @@ class FlavorsController(rest.RestController):
:param flavor: a flavor within the request body.
"""
new_flavor = objects.InstanceType(pecan.request.context,
**flavor.as_dict())
new_flavor = objects.Flavor(pecan.request.context,
**flavor.as_dict())
new_flavor.create()
# Set the HTTP Location Header
pecan.response.location = link.build_url('flavors',
@ -244,10 +240,10 @@ class FlavorsController(rest.RestController):
:param flavor: a flavor within the request body.
"""
try:
flavor_in_db = objects.InstanceType.get(
flavor_in_db = objects.Flavor.get(
pecan.request.context, flavor_uuid)
except exception.FlavorTypeNotFound:
msg = (_("InstanceType %s could not be found") %
msg = (_("Flavor %s could not be found") %
flavor_uuid)
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)
@ -270,6 +266,5 @@ class FlavorsController(rest.RestController):
:param flavor_uuid: UUID of a flavor.
"""
rpc_flavor = objects.InstanceType.get(pecan.request.context,
flavor_uuid)
rpc_flavor = objects.Flavor.get(pecan.request.context, flavor_uuid)
rpc_flavor.destroy()
@ -93,7 +93,7 @@ class KeyPairCollection(base.APIBase):
"""API representation of a collection of keypairs."""

keypairs = [KeyPair]
"""A list containing Instance Type objects"""
"""A list containing Flavor objects"""

@staticmethod
def convert_with_links(keypairs, url=None, **kwargs):
@ -17,14 +17,14 @@
from mogan.api.validation import parameter_types

create_instance = {
create_server = {
"type": "object",
"properties": {
'name': parameter_types.name,
'description': parameter_types.description,
'availability_zone': parameter_types.availability_zone,
'image_uuid': parameter_types.image_id,
'instance_type_uuid': parameter_types.instance_type_id,
'flavor_uuid': parameter_types.flavor_id,
'networks': {
'type': 'array', 'minItems': 1,
'items': {
@ -44,6 +44,6 @@ create_instance = {
'max_count': {'type': 'integer', 'minimum': 1},
'extra': parameter_types.extra,
},
'required': ['name', 'image_uuid', 'instance_type_uuid', 'networks'],
'required': ['name', 'image_uuid', 'flavor_uuid', 'networks'],
'additionalProperties': False,
}
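A sketch of how such a schema can be exercised with the ``jsonschema`` package; the schema below is a simplified stand-in for the real ``create_server`` definition (which builds on ``parameter_types``), and the sample body reuses values from the API samples in this change::

    import jsonschema

    create_server_sketch = {
        "type": "object",
        "properties": {
            "name": {"type": "string"},
            "image_uuid": {"type": "string"},
            "flavor_uuid": {"type": "string"},
            "networks": {"type": "array", "minItems": 1},
        },
        "required": ["name", "image_uuid", "flavor_uuid", "networks"],
        "additionalProperties": False,
    }

    jsonschema.validate(
        {"name": "test_server",
         "image_uuid": "efe0a06f-ca95-4808-b41e-9f55b9c5eb98",
         "flavor_uuid": "0607b5f3-6111-424d-ba46-f5de39a6fa69",
         "networks": [{"uuid": "12cffc4a-b845-409e-b589-7c84be4b10d9"}]},
        create_server_sketch)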
@ -26,7 +26,7 @@ from wsme import types as wtypes
from mogan.api.controllers import base
from mogan.api.controllers import link
from mogan.api.controllers.v1.schemas import floating_ips as fip_schemas
from mogan.api.controllers.v1.schemas import instances as inst_schemas
from mogan.api.controllers.v1.schemas import servers as server_schemas
from mogan.api.controllers.v1 import types
from mogan.api.controllers.v1 import utils as api_utils
from mogan.api import expose
@ -38,23 +38,23 @@ from mogan.common import states
from mogan import network
from mogan import objects

_DEFAULT_INSTANCE_RETURN_FIELDS = ('uuid', 'name', 'description',
'status', 'power_state')
_DEFAULT_SERVER_RETURN_FIELDS = ('uuid', 'name', 'description',
'status', 'power_state')

LOG = log.getLogger(__name__)

class InstanceStates(base.APIBase):
"""API representation of the states of a instance."""
class ServerStates(base.APIBase):
"""API representation of the states of a server."""

power_state = wtypes.text
"""Represent the current power state of the instance"""
"""Represent the current power state of the server"""

status = wtypes.text
"""Represent the current status of the instance"""
"""Represent the current status of the server"""

locked = types.boolean
"""Represent the current lock state of the instance"""
"""Represent the current lock state of the server"""

@classmethod
def sample(cls):
@ -63,16 +63,16 @@ class InstanceStates(base.APIBase):
return sample

class InstanceControllerBase(rest.RestController):
class ServerControllerBase(rest.RestController):
_resource = None

# This _resource is used for authorization.
def _get_resource(self, uuid, *args, **kwargs):
self._resource = objects.Instance.get(pecan.request.context, uuid)
self._resource = objects.Server.get(pecan.request.context, uuid)
return self._resource

class InstanceStatesController(InstanceControllerBase):
class ServerStatesController(ServerControllerBase):

_custom_actions = {
'power': ['PUT'],
@ -80,148 +80,148 @@ class InstanceStatesController(InstanceControllerBase):
|
||||
'provision': ['PUT'],
|
||||
}
|
||||
|
||||
@policy.authorize_wsgi("mogan:instance", "get_states")
|
||||
@expose.expose(InstanceStates, types.uuid)
|
||||
def get(self, instance_uuid):
|
||||
"""List the states of the instance, just support power state at present.
|
||||
@policy.authorize_wsgi("mogan:server", "get_states")
|
||||
@expose.expose(ServerStates, types.uuid)
|
||||
def get(self, server_uuid):
|
||||
"""List the states of the server, just support power state at present.
|
||||
|
||||
:param instance_uuid: the UUID of a instance.
|
||||
:param server_uuid: the UUID of a server.
|
||||
"""
|
||||
rpc_instance = self._resource or self._get_resource(instance_uuid)
|
||||
rpc_server = self._resource or self._get_resource(server_uuid)
|
||||
|
||||
return InstanceStates(power_state=rpc_instance.power_state,
|
||||
status=rpc_instance.status,
|
||||
locked=rpc_instance.locked)
|
||||
return ServerStates(power_state=rpc_server.power_state,
|
||||
status=rpc_server.status,
|
||||
locked=rpc_server.locked)

@policy.authorize_wsgi("mogan:instance", "set_power_state")
@policy.authorize_wsgi("mogan:server", "set_power_state")
@expose.expose(None, types.uuid, wtypes.text,
status_code=http_client.ACCEPTED)
def power(self, instance_uuid, target):
"""Set the power state of the instance.
def power(self, server_uuid, target):
"""Set the power state of the server.

:param instance_uuid: the UUID of a instance.
:param server_uuid: the UUID of a server.
:param target: the desired target to change power state,
on, off or reboot.
:raises Conflict (HTTP 409): if a power operation is
already in progress.
:raises BadRequest (HTTP 400): if the requested target
state is not valid or if the instance is in CLEANING state.
state is not valid or if the server is in CLEANING state.

"""
if target not in ["on", "off", "reboot", "soft_off", "soft_reboot"]:
# ironic will throw InvalidStateRequested
raise exception.InvalidActionParameterValue(
value=target, action="power",
instance=instance_uuid)
server=server_uuid)

rpc_instance = self._resource or self._get_resource(instance_uuid)
rpc_server = self._resource or self._get_resource(server_uuid)
pecan.request.engine_api.power(
pecan.request.context, rpc_instance, target)
pecan.request.context, rpc_server, target)
# At present we do not catch the Exception from ironicclient.
# Such as Conflict and BadRequest.
# varify provision_state, if instance is being cleaned,
# varify provision_state, if server is being cleaned,
# don't change power state?

# Set the HTTP Location Header, user can get the power_state
# by locaton.
url_args = '/'.join([instance_uuid, 'states'])
pecan.response.location = link.build_url('instances', url_args)
url_args = '/'.join([server_uuid, 'states'])
pecan.response.location = link.build_url('servers', url_args)

@policy.authorize_wsgi("mogan:instance", "set_lock_state")
@policy.authorize_wsgi("mogan:server", "set_lock_state")
@expose.expose(None, types.uuid, types.boolean,
status_code=http_client.ACCEPTED)
def lock(self, instance_uuid, target):
"""Set the lock state of the instance.
def lock(self, server_uuid, target):
"""Set the lock state of the server.

:param instance_uuid: the UUID of a instance.
:param server_uuid: the UUID of a server.
:param target: the desired target to change lock state,
true or false
"""
rpc_instance = self._resource or self._get_resource(instance_uuid)
rpc_server = self._resource or self._get_resource(server_uuid)
context = pecan.request.context

# Target is True, means lock an instance
# Target is True, means lock a server
if target:
pecan.request.engine_api.lock(context, rpc_instance)
pecan.request.engine_api.lock(context, rpc_server)

# Else, unlock the instance
# Else, unlock the server
else:
# Try to unlock an instance with non-admin or non-owner
# Try to unlock a server with non-admin or non-owner
if not pecan.request.engine_api.is_expected_locked_by(
context, rpc_instance):
context, rpc_server):
raise exception.Forbidden()
pecan.request.engine_api.unlock(context, rpc_instance)
pecan.request.engine_api.unlock(context, rpc_server)

@policy.authorize_wsgi("mogan:instance", "set_provision_state")
@policy.authorize_wsgi("mogan:server", "set_provision_state")
@expose.expose(None, types.uuid, wtypes.text,
status_code=http_client.ACCEPTED)
def provision(self, instance_uuid, target):
"""Asynchronous trigger the provisioning of the instance.
def provision(self, server_uuid, target):
"""Asynchronous trigger the provisioning of the server.

This will set the target provision state of the instance, and
This will set the target provision state of the server, and
a background task will begin which actually applies the state
change. This call will return a 202 (Accepted) indicating the
request was accepted and is in progress; the client should
continue to GET the status of this instance to observe the
continue to GET the status of this server to observe the
status of the requested action.

:param instance_uuid: UUID of an instance.
:param target: The desired provision state of the instance or verb.
:param server_uuid: UUID of a server.
:param target: The desired provision state of the server or verb.
"""

# Currently we only support rebuild target
if target not in (states.REBUILD,):
raise exception.InvalidActionParameterValue(
value=target, action="provision",
instance=instance_uuid)
server=server_uuid)

rpc_instance = self._resource or self._get_resource(instance_uuid)
rpc_server = self._resource or self._get_resource(server_uuid)
if target == states.REBUILD:
try:
pecan.request.engine_api.rebuild(pecan.request.context,
rpc_instance)
except exception.InstanceNotFound:
msg = (_("Instance %s could not be found") %
instance_uuid)
rpc_server)
except exception.ServerNotFound:
msg = (_("Server %s could not be found") %
server_uuid)
raise wsme.exc.ClientSideError(
msg, status_code=http_client.NOT_FOUND)

# Set the HTTP Location Header
url_args = '/'.join([instance_uuid, 'states'])
pecan.response.location = link.build_url('instances', url_args)
url_args = '/'.join([server_uuid, 'states'])
pecan.response.location = link.build_url('servers', url_args)


class FloatingIPController(InstanceControllerBase):
"""REST controller for Instance floatingips."""
class FloatingIPController(ServerControllerBase):
"""REST controller for Server floatingips."""

def __init__(self, *args, **kwargs):
super(FloatingIPController, self).__init__(*args, **kwargs)
self.network_api = network.API()

@policy.authorize_wsgi("mogan:instance", "associate_floatingip", False)
@policy.authorize_wsgi("mogan:server", "associate_floatingip", False)
@expose.expose(None, types.uuid, body=types.jsontype,
status_code=http_client.NO_CONTENT)
def post(self, instance_uuid, floatingip):
def post(self, server_uuid, floatingip):
"""Add(Associate) Floating Ip.

:param instance_uuid: UUID of a instance.
:param server_uuid: UUID of a server.
:param floatingip: The floating IP within the request body.
"""
validation.check_schema(floatingip, fip_schemas.add_floating_ip)

instance = self._resource or self._get_resource(instance_uuid)
server = self._resource or self._get_resource(server_uuid)
address = floatingip['address']
instance_nics = instance.nics
server_nics = server.nics

if not instance_nics:
msg = _('No ports associated to instance')
if not server_nics:
msg = _('No ports associated to server')
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)

fixed_address = None
if 'fixed_address' in floatingip:
fixed_address = floatingip['fixed_address']
for nic in instance_nics:
for nic in server_nics:
for port_address in nic.fixed_ips:
if port_address['ip_address'] == fixed_address:
break
@@ -229,12 +229,12 @@ class FloatingIPController(InstanceControllerBase):
continue
break
else:
msg = _('Specified fixed address not assigned to instance')
msg = _('Specified fixed address not assigned to server')
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)

if not fixed_address:
for nic in instance_nics:
for nic in server_nics:
for port_address in nic.fixed_ips:
if netutils.is_valid_ipv4(port_address['ip_address']):
fixed_address = port_address['ip_address']
@@ -244,13 +244,13 @@ class FloatingIPController(InstanceControllerBase):
break
else:
msg = _('Unable to associate floating IP %(address)s '
'to any fixed IPs for instance %(id)s. '
'Instance has no fixed IPv4 addresses to '
'to any fixed IPs for server %(id)s. '
'Server has no fixed IPv4 addresses to '
'associate.') % ({'address': address,
'id': instance.uuid})
'id': server.uuid})
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)
if len(instance_nics) > 1:
if len(server_nics) > 1:
LOG.warning('multiple ports exist, using the first '
'IPv4 fixed_ip: %s', fixed_address)

@@ -266,21 +266,21 @@ class FloatingIPController(InstanceControllerBase):
e.message, status_code=http_client.FORBIDDEN)
except Exception as e:
msg = _('Unable to associate floating IP %(address)s to '
'fixed IP %(fixed_address)s for instance %(id)s. '
'fixed IP %(fixed_address)s for server %(id)s. '
'Error: %(error)s') % ({'address': address,
'fixed_address': fixed_address,
'id': instance.uuid, 'error': e})
'id': server.uuid, 'error': e})
LOG.exception(msg)
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)

@policy.authorize_wsgi("mogan:instance", "disassociate_floatingip")
@policy.authorize_wsgi("mogan:server", "disassociate_floatingip")
@expose.expose(None, types.uuid, wtypes.text,
status_code=http_client.NO_CONTENT)
def delete(self, instance_uuid, address):
"""Dissociate floating_ip from an instance.
def delete(self, server_uuid, address):
"""Dissociate floating_ip from a server.

:param instance_uuid: UUID of a instance.
:param server_uuid: UUID of a server.
:param floatingip: The floating IP within the request body.
"""
if not netutils.is_valid_ipv4(address):
@@ -296,10 +296,10 @@ class FloatingIPController(InstanceControllerBase):
raise wsme.exc.ClientSideError(
msg, status_code=http_client.NOT_FOUND)

# get the associated instance object (if any)
# get the associated server object (if any)
try:
instance_id =\
self.network_api.get_instance_id_by_floating_address(
server_id =\
self.network_api.get_server_id_by_floating_address(
pecan.request.context, address)
except exception.FloatingIpNotFoundForAddress as e:
raise wsme.exc.ClientSideError(
@@ -309,7 +309,7 @@ class FloatingIPController(InstanceControllerBase):
e.message, status_code=http_client.CONFLICT)

# disassociate if associated
if (floating_ip.get('port_id') and instance_id == instance_uuid):
if (floating_ip.get('port_id') and server_id == server_uuid):
try:
self.network_api.disassociate_floating_ip(
pecan.request.context, address)
@@ -325,91 +325,91 @@ class FloatingIPController(InstanceControllerBase):
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)
else:
msg = _("Floating IP %(address)s is not associated with instance "
"%(id)s.") % {'address': address, 'id': instance_uuid}
msg = _("Floating IP %(address)s is not associated with server "
"%(id)s.") % {'address': address, 'id': server_uuid}
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)


class InstanceNetworks(base.APIBase):
"""API representation of the networks of an instance."""
class ServerNetworks(base.APIBase):
"""API representation of the networks of a server."""

ports = {wtypes.text: types.jsontype}
"""The network information of the instance"""
"""The network information of the server"""


class InstanceNetworksController(InstanceControllerBase):
"""REST controller for Instance networks."""
class ServerNetworksController(ServerControllerBase):
"""REST controller for Server networks."""

floatingips = FloatingIPController()
"""Expose floatingip as a sub-element of networks"""

@policy.authorize_wsgi("mogan:instance", "get_networks")
@expose.expose(InstanceNetworks, types.uuid)
def get(self, instance_uuid):
"""List the networks info of the instance.
@policy.authorize_wsgi("mogan:server", "get_networks")
@expose.expose(ServerNetworks, types.uuid)
def get(self, server_uuid):
"""List the networks info of the server.

:param instance_uuid: the UUID of a instance.
:param server_uuid: the UUID of a server.
"""
rpc_instance = self._resource or self._get_resource(instance_uuid)
rpc_server = self._resource or self._get_resource(server_uuid)

return InstanceNetworks(
ports=rpc_instance.instance_nics.to_legacy_dict())
return ServerNetworks(
ports=rpc_server.server_nics.to_legacy_dict())


class Instance(base.APIBase):
"""API representation of a instance.
class Server(base.APIBase):
"""API representation of a server.

This class enforces type checking and value constraints, and converts
between the internal object model and the API representation of
a instance.
a server.
"""
uuid = types.uuid
"""The UUID of the instance"""
"""The UUID of the server"""

name = wsme.wsattr(wtypes.text, mandatory=True)
"""The name of the instance"""
"""The name of the server"""

description = wtypes.text
"""The description of the instance"""
"""The description of the server"""

project_id = types.uuid
"""The project UUID of the instance"""
"""The project UUID of the server"""

user_id = types.uuid
"""The user UUID of the instance"""
"""The user UUID of the server"""

status = wtypes.text
"""The status of the instance"""
"""The status of the server"""

power_state = wtypes.text
"""The power state of the instance"""
"""The power state of the server"""

availability_zone = wtypes.text
"""The availability zone of the instance"""
"""The availability zone of the server"""

instance_type_uuid = types.uuid
"""The instance type UUID of the instance"""
flavor_uuid = types.uuid
"""The server type UUID of the server"""

image_uuid = types.uuid
"""The image UUID of the instance"""
"""The image UUID of the server"""

network_info = {wtypes.text: types.jsontype}
"""The network information of the instance"""
"""The network information of the server"""

links = wsme.wsattr([link.Link], readonly=True)
"""A list containing a self link"""

launched_at = datetime.datetime
"""The UTC date and time of the instance launched"""
"""The UTC date and time of the server launched"""

extra = {wtypes.text: types.jsontype}
"""The meta data of the instance"""
"""The meta data of the server"""

def __init__(self, **kwargs):
super(Instance, self).__init__(**kwargs)
super(Server, self).__init__(**kwargs)
self.fields = []
for field in objects.Instance.fields:
for field in objects.Server.fields:
# TODO(liusheng) workaround to keep the output of API request same
# as before
if field == 'nics':
@@ -426,105 +426,105 @@ class Instance(base.APIBase):
setattr(self, field, kwargs.get(field, wtypes.Unset))

@classmethod
def convert_with_links(cls, instance_data, fields=None):
instance = Instance(**instance_data)
instance_uuid = instance.uuid
def convert_with_links(cls, server_data, fields=None):
server = Server(**server_data)
server_uuid = server.uuid
if fields is not None:
instance.unset_fields_except(fields)
server.unset_fields_except(fields)
url = pecan.request.public_url
instance.links = [link.Link.make_link('self',
url,
'instances', instance_uuid),
link.Link.make_link('bookmark',
url,
'instances', instance_uuid,
bookmark=True)
]
return instance
server.links = [link.Link.make_link('self',
url,
'servers', server_uuid),
link.Link.make_link('bookmark',
url,
'servers', server_uuid,
bookmark=True)
]
return server


class InstancePatchType(types.JsonPatchType):
class ServerPatchType(types.JsonPatchType):

_api_base = Instance
_api_base = Server

@staticmethod
def internal_attrs():
defaults = types.JsonPatchType.internal_attrs()
return defaults + ['/project_id', '/user_id', '/status',
'/power_state', '/availability_zone',
'/instance_type_uuid', 'image_uuid',
'/flavor_uuid', 'image_uuid',
'/isntance_nics', '/launched_at']


class InstanceCollection(base.APIBase):
"""API representation of a collection of instance."""
class ServerCollection(base.APIBase):
"""API representation of a collection of server."""

instances = [Instance]
"""A list containing instance objects"""
servers = [Server]
"""A list containing server objects"""

@staticmethod
def convert_with_links(instances_data, fields=None):
collection = InstanceCollection()
collection.instances = [Instance.convert_with_links(inst, fields)
for inst in instances_data]
def convert_with_links(servers_data, fields=None):
collection = ServerCollection()
collection.servers = [Server.convert_with_links(server, fields)
for server in servers_data]
return collection


class InstanceConsole(base.APIBase):
"""API representation of the console of an instance."""
class ServerConsole(base.APIBase):
"""API representation of the console of a server."""

console = {wtypes.text: types.jsontype}
"""The console information of the instance"""
"""The console information of the server"""


class InstanceSerialConsoleController(InstanceControllerBase):
"""REST controller for Instance."""
class ServerSerialConsoleController(ServerControllerBase):
"""REST controller for Server."""

@policy.authorize_wsgi("mogan:instance", "get_serial_console")
@expose.expose(InstanceConsole, types.uuid)
def get(self, instance_uuid):
"""Get the serial console info of the instance.
@policy.authorize_wsgi("mogan:server", "get_serial_console")
@expose.expose(ServerConsole, types.uuid)
def get(self, server_uuid):
"""Get the serial console info of the server.

:param instance_uuid: the UUID of a instance.
:param server_uuid: the UUID of a server.
"""
instance_obj = self._resource or self._get_resource(instance_uuid)
server_obj = self._resource or self._get_resource(server_uuid)
console = pecan.request.engine_api.get_serial_console(
pecan.request.context, instance_obj)
return InstanceConsole(console=console)
pecan.request.context, server_obj)
return ServerConsole(console=console)


class InstanceController(InstanceControllerBase):
"""REST controller for Instance."""
class ServerController(ServerControllerBase):
"""REST controller for Server."""

states = InstanceStatesController()
"""Expose the state controller action as a sub-element of instances"""
states = ServerStatesController()
"""Expose the state controller action as a sub-element of servers"""

networks = InstanceNetworksController()
"""Expose the network controller action as a sub-element of instances"""
networks = ServerNetworksController()
"""Expose the network controller action as a sub-element of servers"""

serial_console = InstanceSerialConsoleController()
"""Expose the console controller of instances"""
serial_console = ServerSerialConsoleController()
"""Expose the console controller of servers"""

_custom_actions = {
'detail': ['GET']
}

def _get_instance_collection(self, fields=None, all_tenants=False):
def _get_server_collection(self, fields=None, all_tenants=False):
context = pecan.request.context
project_only = True
if context.is_admin and all_tenants:
project_only = False

instances = objects.Instance.list(pecan.request.context,
project_only=project_only)
instances_data = [instance.as_dict() for instance in instances]
servers = objects.Server.list(pecan.request.context,
project_only=project_only)
servers_data = [server.as_dict() for server in servers]

return InstanceCollection.convert_with_links(instances_data,
fields=fields)
return ServerCollection.convert_with_links(servers_data,
fields=fields)

@expose.expose(InstanceCollection, types.listtype, types.boolean)
@expose.expose(ServerCollection, types.listtype, types.boolean)
def get_all(self, fields=None, all_tenants=None):
"""Retrieve a list of instance.
"""Retrieve a list of server.

:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
@@ -534,57 +534,57 @@ class InstanceController(InstanceControllerBase):
included in the response.
"""
if fields is None:
fields = _DEFAULT_INSTANCE_RETURN_FIELDS
return self._get_instance_collection(fields=fields,
all_tenants=all_tenants)
fields = _DEFAULT_SERVER_RETURN_FIELDS
return self._get_server_collection(fields=fields,
all_tenants=all_tenants)

@policy.authorize_wsgi("mogan:instance", "get")
@expose.expose(Instance, types.uuid, types.listtype)
def get_one(self, instance_uuid, fields=None):
"""Retrieve information about the given instance.
@policy.authorize_wsgi("mogan:server", "get")
@expose.expose(Server, types.uuid, types.listtype)
def get_one(self, server_uuid, fields=None):
"""Retrieve information about the given server.

:param instance_uuid: UUID of a instance.
:param server_uuid: UUID of a server.
:param fields: Optional, a list with a specified set of fields
of the resource to be returned.
"""
rpc_instance = self._resource or self._get_resource(instance_uuid)
instance_data = rpc_instance.as_dict()
rpc_server = self._resource or self._get_resource(server_uuid)
server_data = rpc_server.as_dict()

return Instance.convert_with_links(instance_data, fields=fields)
return Server.convert_with_links(server_data, fields=fields)
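A hedged example of exercising the fields filter above from a client (the endpoint, token handling and the comma-separated list form are assumptions; only the parameter name comes from get_one()/get_all()):

    import requests

    resp = requests.get(
        'http://mogan-api:6688/v1/servers/SERVER-UUID',       # assumed endpoint
        params={'fields': 'uuid,name,status'},                # assumed list format
        headers={'X-Auth-Token': 'TOKEN'})
    print(resp.json())   # only the requested attributes plus links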

@expose.expose(InstanceCollection, types.boolean)
@expose.expose(ServerCollection, types.boolean)
def detail(self, all_tenants=None):
"""Retrieve detail of a list of instances."""
"""Retrieve detail of a list of servers."""
# /detail should only work against collections
parent = pecan.request.path.split('/')[:-1][-1]
if parent != "instances":
if parent != "servers":
raise exception.NotFound()
return self._get_instance_collection(all_tenants=all_tenants)
return self._get_server_collection(all_tenants=all_tenants)

@policy.authorize_wsgi("mogan:instance", "create", False)
@expose.expose(Instance, body=types.jsontype,
@policy.authorize_wsgi("mogan:server", "create", False)
@expose.expose(Server, body=types.jsontype,
status_code=http_client.CREATED)
def post(self, instance):
"""Create a new instance.
def post(self, server):
"""Create a new server.

:param instance: a instance within the request body.
:param server: a server within the request body.
"""
validation.check_schema(instance, inst_schemas.create_instance)
validation.check_schema(server, server_schemas.create_server)

min_count = instance.get('min_count', 1)
max_count = instance.get('max_count', min_count)
min_count = server.get('min_count', 1)
max_count = server.get('max_count', min_count)

if min_count > max_count:
msg = _('min_count must be <= max_count')
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)

requested_networks = instance.pop('networks', None)
instance_type_uuid = instance.get('instance_type_uuid')
image_uuid = instance.get('image_uuid')
user_data = instance.get('user_data')
key_name = instance.get('key_name')
personality = instance.pop('personality', None)
requested_networks = server.pop('networks', None)
flavor_uuid = server.get('flavor_uuid')
image_uuid = server.get('image_uuid')
user_data = server.get('user_data')
key_name = server.get('key_name')
personality = server.pop('personality', None)

injected_files = []
if personality:
@@ -592,17 +592,16 @@ class InstanceController(InstanceControllerBase):
injected_files.append((item['path'], item['contents']))

try:
instance_type = objects.InstanceType.get(pecan.request.context,
instance_type_uuid)
flavor = objects.Flavor.get(pecan.request.context, flavor_uuid)

instances = pecan.request.engine_api.create(
servers = pecan.request.engine_api.create(
pecan.request.context,
instance_type,
flavor,
image_uuid=image_uuid,
name=instance.get('name'),
description=instance.get('description'),
availability_zone=instance.get('availability_zone'),
extra=instance.get('extra'),
name=server.get('name'),
description=server.get('description'),
availability_zone=server.get('availability_zone'),
extra=server.get('extra'),
requested_networks=requested_networks,
user_data=user_data,
injected_files=injected_files,
@@ -611,7 +610,7 @@ class InstanceController(InstanceControllerBase):
max_count=max_count)
except exception.FlavorNotFound:
msg = (_("Flavor %s could not be found") %
instance_type_uuid)
flavor_uuid)
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)
except exception.ImageNotFound:
@@ -630,57 +629,57 @@ class InstanceController(InstanceControllerBase):
raise wsme.exc.ClientSideError(
msg, status_code=http_client.BAD_REQUEST)
except (exception.GlanceConnectionFailed,
exception.InstanceUserDataMalformed,
exception.InstanceUserDataTooLarge,
exception.ServerUserDataMalformed,
exception.ServerUserDataTooLarge,
exception.Base64Exception,
exception.NetworkRequiresSubnet,
exception.NetworkNotFound) as e:
raise wsme.exc.ClientSideError(
e.message, status_code=http_client.BAD_REQUEST)

# Set the HTTP Location Header for the first instance.
pecan.response.location = link.build_url('instance', instances[0].uuid)
return Instance.convert_with_links(instances[0])
# Set the HTTP Location Header for the first server.
pecan.response.location = link.build_url('server', servers[0].uuid)
return Server.convert_with_links(servers[0])
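To illustrate the renamed request keys handled by post() above, a create request body might look like the following sketch (all values are placeholders; the key names come from the code above, while the exact shape of each networks entry is defined by the create_server schema and is assumed here):

    {
        "name": "web-server-1",
        "description": "bare metal web node",
        "flavor_uuid": "FLAVOR-UUID",
        "image_uuid": "IMAGE-UUID",
        "availability_zone": "AZ-NAME",
        "networks": [{"net_id": "NETWORK-UUID"}],
        "min_count": 1,
        "max_count": 2
    }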

@policy.authorize_wsgi("mogan:instance", "update")
@wsme.validate(types.uuid, [InstancePatchType])
@expose.expose(Instance, types.uuid, body=[InstancePatchType])
def patch(self, instance_uuid, patch):
"""Update an instance.
@policy.authorize_wsgi("mogan:server", "update")
@wsme.validate(types.uuid, [ServerPatchType])
@expose.expose(Server, types.uuid, body=[ServerPatchType])
def patch(self, server_uuid, patch):
"""Update a server.

:param instance_uuid: UUID of an instance.
:param patch: a json PATCH document to apply to this instance.
:param server_uuid: UUID of a server.
:param patch: a json PATCH document to apply to this server.
"""
rpc_instance = self._resource or self._get_resource(instance_uuid)
rpc_server = self._resource or self._get_resource(server_uuid)
try:
instance = Instance(
**api_utils.apply_jsonpatch(rpc_instance.as_dict(), patch))
server = Server(
**api_utils.apply_jsonpatch(rpc_server.as_dict(), patch))

except api_utils.JSONPATCH_EXCEPTIONS as e:
raise exception.PatchError(patch=patch, reason=e)

# Update only the fields that have changed
for field in objects.Instance.fields:
for field in objects.Server.fields:
try:
patch_val = getattr(instance, field)
patch_val = getattr(server, field)
except AttributeError:
# Ignore fields that aren't exposed in the API
continue
if patch_val == wtypes.Unset:
patch_val = None
if rpc_instance[field] != patch_val:
rpc_instance[field] = patch_val
if rpc_server[field] != patch_val:
rpc_server[field] = patch_val

rpc_instance.save()
rpc_server.save()

return Instance.convert_with_links(rpc_instance)
return Server.convert_with_links(rpc_server)
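For reference, a minimal sketch of the PATCH body a client could send to this endpoint (illustrative values; standard RFC 6902 JSON Patch, as applied by api_utils.apply_jsonpatch above):

    [
        {"op": "replace", "path": "/name", "value": "db-node-01"},
        {"op": "replace", "path": "/description", "value": "renamed after rebuild"}
    ]

Paths listed in ServerPatchType.internal_attrs() (for example /power_state or /flavor_uuid) are rejected, so only user-facing attributes can be patched this way.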

@policy.authorize_wsgi("mogan:instance", "delete")
@policy.authorize_wsgi("mogan:server", "delete")
@expose.expose(None, types.uuid, status_code=http_client.NO_CONTENT)
def delete(self, instance_uuid):
"""Delete a instance.
def delete(self, server_uuid):
"""Delete a server.

:param instance_uuid: UUID of a instance.
:param server_uuid: UUID of a server.
"""
rpc_instance = self._resource or self._get_resource(instance_uuid)
pecan.request.engine_api.delete(pecan.request.context, rpc_instance)
rpc_server = self._resource or self._get_resource(server_uuid)
pecan.request.engine_api.delete(pecan.request.context, rpc_server)

@@ -35,7 +35,7 @@ class DBHook(hooks.PecanHook):
"""Attach the dbapi object to the request so controllers can get to it."""

def before(self, state):
state.request.dbapi = dbapi.get_instance()
state.request.dbapi = dbapi.get_server()


class EngineAPIHook(hooks.PecanHook):

@@ -53,7 +53,7 @@ port_type = {
}


instance_type_id = {
flavor_id = {
'type': 'string', 'format': 'uuid'
}


@@ -156,12 +156,12 @@ class FlavorNotFound(NotFound):
_msg_fmt = _("Flavor %(type_id)s could not be found.")


class InstanceAlreadyExists(MoganException):
_msg_fmt = _("Instance with name %(name)s already exists.")
class ServerAlreadyExists(MoganException):
_msg_fmt = _("Server with name %(name)s already exists.")


class InstanceNotFound(NotFound):
_msg_fmt = _("Instance %(instance)s could not be found.")
class ServerNotFound(NotFound):
_msg_fmt = _("Server %(server)s could not be found.")


class FlavorAccessExists(MoganException):
@@ -199,21 +199,21 @@ class ComputeDiskNotFound(NotFound):


class NodeNotFound(NotFound):
_msg_fmt = _("Node associated with instance %(instance)s "
_msg_fmt = _("Node associated with server %(server)s "
"could not be found.")


class InvalidActionParameterValue(Invalid):
_msg_fmt = _("The Parameter value: %(value)s for %(action) action of "
"instance %(instance)s is invalid.")
"server %(server)s is invalid.")


class InstanceDeployFailure(Invalid):
_msg_fmt = _("Failed to deploy instance: %(reason)s")
class ServerDeployFailure(Invalid):
_msg_fmt = _("Failed to deploy server: %(reason)s")


class InstanceDeployAborted(Invalid):
_msg_fmt = _("Instance deployment is aborted: %(reason)s")
class ServerDeployAborted(Invalid):
_msg_fmt = _("Server deployment is aborted: %(reason)s")


class NoFreeEngineWorker(TemporaryFailure):
@@ -223,7 +223,7 @@ class NoFreeEngineWorker(TemporaryFailure):


class DuplicateName(Conflict):
_msg_fmt = _("A instance with name %(name)s already exists.")
_msg_fmt = _("A server with name %(name)s already exists.")


class KeystoneUnauthorized(MoganException):
@@ -325,15 +325,15 @@ class NetworkNotFound(NotFound):

class NetworkRequiresSubnet(Invalid):
_msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
" instances on.")
" servers on.")


class InstanceIsLocked(Invalid):
_msg_fmt = _("Instance %(instance_uuid)s is locked")
class ServerIsLocked(Invalid):
_msg_fmt = _("Server %(server_uuid)s is locked")


class InstanceInMaintenance(Invalid):
_msg_fmt = _("Instance %(instance_uuid)s is in maintenance mode")
class ServerInMaintenance(Invalid):
_msg_fmt = _("Server %(server_uuid)s is in maintenance mode")


class InvalidReservationExpiration(Invalid):
@@ -396,13 +396,13 @@ class ConfigDriveUnknownFormat(MoganException):
"iso9660 or vfat.")


class InstanceUserDataTooLarge(MoganException):
class ServerUserDataTooLarge(MoganException):
_msg_fmt = _("User data too large. User data must be no larger than "
"%(maxsize)s bytes once base64 encoded. Your data is "
"%(length)d bytes")


class InstanceUserDataMalformed(MoganException):
class ServerUserDataMalformed(MoganException):
_msg_fmt = _("User data needs to be valid base 64.")


@@ -110,7 +110,7 @@ class IronicClientWrapper(object):
:param retry_on_conflict: Boolean value. Whether the request should be
retried in case of a conflict error
(HTTP 409) or not. If retry_on_conflict is
False the cached instance of the client
False the cached server of the client
won't be used. Defaults to True.
"""
retry_on_conflict = kwargs.pop('retry_on_conflict', True)

@@ -42,10 +42,10 @@ default_policies = [
policy.RuleDefault('public_api',
'is_public_api:True',
description='Internal flag for public API routes'),
# Generic default to hide instance secrets
policy.RuleDefault('show_instance_secrets',
# Generic default to hide server secrets
policy.RuleDefault('show_server_secrets',
'!',
description='Show or mask secrets within instance information in API responses'), # noqa
description='Show or mask secrets within server information in API responses'), # noqa
# The policy check "@" will always accept an access. The empty list
# (``[]``) or the empty string (``""``) is equivalent to the "@"
policy.RuleDefault('allow',
@@ -74,43 +74,43 @@ default_policies = [
# All of these may be overridden by configuration, but we can
# depend on their existence throughout the code.

instance_policies = [
policy.RuleDefault('mogan:instance:get',
server_policies = [
policy.RuleDefault('mogan:server:get',
'rule:default',
description='Retrieve Instance records'),
policy.RuleDefault('mogan:instance:get_states',
description='Retrieve Server records'),
policy.RuleDefault('mogan:server:get_states',
'rule:default',
description='View Instance power and provision state'),
policy.RuleDefault('mogan:instance:create',
description='View Server power and provision state'),
policy.RuleDefault('mogan:server:create',
'rule:allow',
description='Create Instance records'),
policy.RuleDefault('mogan:instance:delete',
description='Create Server records'),
policy.RuleDefault('mogan:server:delete',
'rule:default',
description='Delete Instance records'),
policy.RuleDefault('mogan:instance:update',
description='Delete Server records'),
policy.RuleDefault('mogan:server:update',
'rule:default',
description='Update Instance records'),
policy.RuleDefault('mogan:instance:set_power_state',
description='Update Server records'),
policy.RuleDefault('mogan:server:set_power_state',
'rule:default',
description='Perform the power action on an instance'),
policy.RuleDefault('mogan:instance:get_networks',
description='Perform the power action on a server'),
policy.RuleDefault('mogan:server:get_networks',
'rule:default',
description='Get Instance network information'),
policy.RuleDefault('mogan:instance:associate_floatingip',
description='Get Server network information'),
policy.RuleDefault('mogan:server:associate_floatingip',
'rule:default',
description='Associate a floating ip with an instance'),
policy.RuleDefault('mogan:instance:disassociate_floatingip',
description='Associate a floating ip with a server'),
policy.RuleDefault('mogan:server:disassociate_floatingip',
'rule:default',
description='Disassociate a floating ip'),
policy.RuleDefault('mogan:instance:set_lock_state',
policy.RuleDefault('mogan:server:set_lock_state',
'rule:default',
description='Lock/UnLock an instance'),
policy.RuleDefault('mogan:instance:set_provision_state',
description='Lock/UnLock a server'),
policy.RuleDefault('mogan:server:set_provision_state',
'rule:default',
description='Set the provision state of an instance'),
policy.RuleDefault('mogan:instance:get_serial_console',
description='Set the provision state of a server'),
policy.RuleDefault('mogan:server:get_serial_console',
'rule:default',
description='Get serial console for an instance'),
description='Get serial console for a server'),
policy.RuleDefault('mogan:availability_zone:get_all',
'rule:default',
description='Get the availability zone list'),
@@ -119,7 +119,7 @@ instance_policies = [

def list_policies():
policies = (default_policies
+ instance_policies)
+ server_policies)
return policies
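Operators who override these renamed rules do so through the usual oslo.policy mechanism; a minimal sketch of an override file such as /etc/mogan/policy.json (the file location and the admin-only check string are assumptions, only the rule names come from this change):

    {
        "mogan:server:set_power_state": "role:admin",
        "mogan:server:set_provision_state": "role:admin"
    }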


@@ -155,7 +155,7 @@ def init_enforcer(policy_file=None, rules=None,


def get_enforcer():
"""Provides access to the single instance of Policy enforcer."""
"""Provides access to the single server of Policy enforcer."""

if not _ENFORCER:
init_enforcer()
@@ -196,9 +196,9 @@ def authorize_wsgi(api_name, act=None, need_target=True):
when create some resource , maybe target is not needed.
example:
from mogan.common import policy
class InstancesController(rest.RestController):
class ServersController(rest.RestController):
....
@policy.authorize_wsgi("mogan:instance", "delete")
@policy.authorize_wsgi("mogan:server", "delete")
@wsme_pecan.wsexpose(None, types.uuid_or_name, status_code=204)
def delete(self, bay_ident):
...

@@ -14,11 +14,11 @@
# under the License.

"""
Mapping of bare metal instance states.
Mapping of bare metal server states.

Setting the instance `power_state` is handled by the engine's power
Setting the server `power_state` is handled by the engine's power
synchronization thread. Based on the power state retrieved from the
hypervisor for the instance.
hypervisor for the server.
"""

from oslo_log import log as logging
@@ -32,10 +32,10 @@ LOG = logging.getLogger(__name__)
##############

POWER_ON = 'power on'
""" Instance is powered on. """
""" Server is powered on. """

POWER_OFF = 'power off'
""" Instance is powered off. """
""" Server is powered off. """

NOSTATE = None
""" No state information """
@@ -61,7 +61,7 @@ provision_state via the REST API.


#################
# Instance states
# Server states
#################

""" Mapping of state-changing events that are PUT to the REST API

@@ -99,7 +99,7 @@ def validate_and_normalize_mac(address):
def make_pretty_name(method):
"""Makes a pretty name for a function/method."""
meth_pieces = [method.__name__]
# If its an instance method attempt to tack on the class name
# If its a server method attempt to tack on the class name
if hasattr(method, '__self__') and method.__self__ is not None:
try:
meth_pieces.insert(0, method.__self__.__class__.__name__)
@@ -122,10 +122,10 @@ def get_state_machine(start_state=None, target_state=None):
return fsm


def process_event(fsm, instance, event=None):
def process_event(fsm, server, event=None):
fsm.process_event(event)
instance.status = fsm.current_state
instance.save()
server.status = fsm.current_state
server.save()
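A minimal sketch of how this helper drives a status change (the state machine, the server object and the 'done' event name are all stand-ins for illustration; the helper body is copied from the new code above):

    def process_event(fsm, server, event=None):
        fsm.process_event(event)
        server.status = fsm.current_state
        server.save()

    class FakeFSM(object):
        current_state = 'building'
        def process_event(self, event):
            # pretend the event advanced the machine to a new state
            self.current_state = 'active'

    class FakeServer(object):
        status = 'building'
        def save(self):
            print('saved with status %s' % self.status)

    process_event(FakeFSM(), FakeServer(), event='done')   # prints: saved with status active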


def get_wrapped_function(function):
@@ -196,8 +196,8 @@ def safe_truncate(value, length):
return u_value


def add_instance_fault_from_exc(context, instance, fault, exc_info=None,
fault_message=None):
def add_server_fault_from_exc(context, server, fault, exc_info=None,
fault_message=None):
"""Adds the specified fault to the database."""
code = 500
if hasattr(fault, "kwargs"):
@@ -216,8 +216,8 @@ def add_instance_fault_from_exc(context, instance, fault, exc_info=None,
fault_dict = dict(exception=fault)
fault_dict["message"] = message
fault_dict["code"] = code
fault_obj = objects.InstanceFault(context=context)
fault_obj.instance_uuid = instance.uuid
fault_obj = objects.ServerFault(context=context)
fault_obj.server_uuid = server.uuid
fault_obj.update(fault_dict)
code = fault_obj.code
fault_obj.detail = _get_fault_detail(exc_info, code)

@@ -48,13 +48,13 @@ opts = [
"the service, this option should be False; note, you "
"will want to change public API endpoint to represent "
"SSL termination URL with 'public_endpoint' option.")),
cfg.StrOpt('multi_instance_name_template',
cfg.StrOpt('multi_server_name_template',
default='%(name)s-%(count)d',
help='When creating multiple instances with a single request '
'this template will be used to build the instance name '
'for each instance. The benefit is that the instances '
help='When creating multiple servers with a single request '
'this template will be used to build the server name '
'for each server. The benefit is that the servers '
'end up with different names. To restore legacy '
'behavior of every instance having the same name, set '
'behavior of every server having the same name, set '
'this option to "%(name)s". Valid keys for the '
'template are: name, uuid, count.'),
]
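To make the renamed option concrete, a tiny sketch of how the default template expands when two servers are created in one request (plain Python string formatting; the base name is a placeholder):

    template = '%(name)s-%(count)d'   # default of multi_server_name_template
    for count in (1, 2):
        print(template % {'name': 'web', 'count': count})
    # -> web-1
    # -> web-2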

@@ -22,7 +22,7 @@ opts = [
default='iso9660',
choices=('iso9660', 'vfat'),
help=_('Configuration drive format that will contain '
'metadata attached to the instance when it boots.')),
'metadata attached to the server when it boots.')),
cfg.StrOpt('mkisofs_cmd',
default='genisoimage',
help=_('Name or path of the tool used for ISO image '

@@ -33,7 +33,7 @@ opts = [
cfg.ListOpt('scheduler_default_filters',
default=[
'AvailabilityZoneFilter',
'InstanceTypeFilter',
'FlavorFilter',
'CapabilitiesFilter',
'PortsFilter'
],

@@ -29,7 +29,7 @@ The IP address which is used by the ``mogan-shellinaboxproxy`` service to
listen for incoming requests.

The ``mogan-shellinaboxproxy`` service listens on this IP address for incoming
connection requests to instances which expose shellinabox serial console.
connection requests to servers which expose shellinabox serial console.

Possible values:

@@ -55,7 +55,7 @@ The port number which is used by the ``mogan-shellinaboxproxy`` service to
listen for incoming requests.

The ``mogan-shellinaboxproxy`` service listens on this port number for incoming
connection requests to instances which expose shellinabox serial console.
connection requests to servers which expose shellinabox serial console.

Possible values:

@@ -78,7 +78,7 @@ The URL an end user would use to connect to the ``mogan-shellinaboxproxy``
service.

The ``mogan-shellinaboxproxy`` service is called with this token enriched URL
and establishes the connection to the proper instance.
and establishes the connection to the proper server.

Possible values:


@@ -42,7 +42,7 @@ class ConsoleAuthManager(object):
self.topic = topic
self._started = False
self._cache = None
self._cache_instance = None
self._cache_server = None
self.engine_rpcapi = rpcapi.EngineAPI()

def init_host(self):
@@ -74,25 +74,25 @@ class ConsoleAuthManager(object):
return self._cache

@property
def cache_instance(self):
"""Init a permanent cache region for instance token storage."""
if self._cache_instance is None:
def cache_server(self):
"""Init a permanent cache region for server token storage."""
if self._cache_server is None:
cache_ttl = CONF.cache.expiration_time
try:
CONF.set_override('expiration_time', None, 'cache')
cache_region = oslo_cache.create_region()
self._cache_instance = oslo_cache.configure_cache_region(
self._cache_server = oslo_cache.configure_cache_region(
CONF, cache_region)
finally:
CONF.set_override('expiration_time', cache_ttl, 'cache')
return self._cache_instance
return self._cache_server

def reset(self):
LOG.info('Reloading Mogan engine RPC API')
self.engine_rpcapi = rpcapi.EngineAPI()

def _get_tokens_for_instance(self, instance_uuid):
tokens_str = self.cache_instance.get(instance_uuid.encode('UTF-8'))
def _get_tokens_for_server(self, server_uuid):
tokens_str = self.cache_server.get(server_uuid.encode('UTF-8'))
if not tokens_str:
tokens = []
else:
@@ -100,11 +100,11 @@ class ConsoleAuthManager(object):
return tokens

def authorize_console(self, context, token, console_type, host, port,
internal_access_path, instance_uuid,
internal_access_path, server_uuid,
access_url=None):

token_dict = {'token': token,
'instance_uuid': instance_uuid,
'server_uuid': server_uuid,
'console_type': console_type,
'host': host,
'port': port,
@@ -114,7 +114,7 @@ class ConsoleAuthManager(object):
data = jsonutils.dumps(token_dict)

self.cache.set(token.encode('UTF-8'), data)
tokens = self._get_tokens_for_instance(instance_uuid)
tokens = self._get_tokens_for_server(server_uuid)

# Remove the expired tokens from cache.
token_values = self.cache.get_multi(
@@ -123,20 +123,20 @@ class ConsoleAuthManager(object):
if value]
tokens.append(token)

self.cache_instance.set(instance_uuid.encode('UTF-8'),
jsonutils.dumps(tokens))
self.cache_server.set(server_uuid.encode('UTF-8'),
jsonutils.dumps(tokens))

LOG.info("Received Token: %(token)s, %(token_dict)s",
{'token': token, 'token_dict': token_dict})

def _validate_token(self, context, token):
instance_uuid = token['instance_uuid']
if instance_uuid is None:
server_uuid = token['server_uuid']
if server_uuid is None:
return False
return True
# TODO(need to validate the console port)
# return self.compute_rpcapi.validate_console_port(
# context, instance, token['port'], token['console_type'])
# context, server, token['port'], token['console_type'])

def check_token(self, context, token):
token_str = self.cache.get(token.encode('UTF-8'))
@@ -148,8 +148,8 @@ class ConsoleAuthManager(object):
if self._validate_token(context, token):
return token

def delete_tokens_for_instance(self, context, instance_uuid):
tokens = self._get_tokens_for_instance(instance_uuid)
def delete_tokens_for_server(self, context, server_uuid):
tokens = self._get_tokens_for_server(server_uuid)
self.cache.delete_multi(
[tok.encode('UTF-8') for tok in tokens])
self.cache_instance.delete(instance_uuid.encode('UTF-8'))
self.cache_server.delete(server_uuid.encode('UTF-8'))
||||
self.cache_server.delete(server_uuid.encode('UTF-8'))
|
||||
|
@ -51,14 +51,14 @@ class ConsoleAuthAPI(object):
|
||||
serializer=serializer)
|
||||
|
||||
def authorize_console(self, ctxt, token, console_type, host, port,
|
||||
internal_access_path, instance_uuid,
|
||||
internal_access_path, server_uuid,
|
||||
access_url):
|
||||
# The remote side doesn't return anything, but we want to block
|
||||
# until it completes.'
|
||||
msg_args = dict(token=token, console_type=console_type,
|
||||
host=host, port=port,
|
||||
internal_access_path=internal_access_path,
|
||||
instance_uuid=instance_uuid,
|
||||
server_uuid=server_uuid,
|
||||
access_url=access_url)
|
||||
|
||||
cctxt = self.client.prepare()
|
||||
@ -68,8 +68,8 @@ class ConsoleAuthAPI(object):
|
||||
cctxt = self.client.prepare()
|
||||
return cctxt.call(ctxt, 'check_token', token=token)
|
||||
|
||||
def delete_tokens_for_instance(self, ctxt, instance_uuid):
|
||||
def delete_tokens_for_server(self, ctxt, server_uuid):
|
||||
cctxt = self.client.prepare()
|
||||
return cctxt.cast(ctxt,
|
||||
'delete_tokens_for_instance',
|
||||
instance_uuid=instance_uuid)
|
||||
'delete_tokens_for_server',
|
||||
server_uuid=server_uuid)
|
||||
|
@ -28,8 +28,8 @@ IMPL = db_api.DBAPI.from_config(cfg.CONF, backend_mapping=_BACKEND_MAPPING,
|
||||
lazy=True)
|
||||
|
||||
|
||||
def get_instance():
|
||||
"""Return a DB API instance."""
|
||||
def get_server():
|
||||
"""Return a DB API server."""
|
||||
return IMPL
|
||||
|
||||
|
||||
@ -41,46 +41,46 @@ class Connection(object):
|
||||
def __init__(self):
|
||||
"""Constructor."""
|
||||
|
||||
# Instance Types
|
||||
# Flavors
|
||||
@abc.abstractmethod
|
||||
def instance_type_create(self, context, values):
|
||||
"""Create a new instance type."""
|
||||
def flavor_create(self, context, values):
|
||||
"""Create a new server type."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_type_get(self, context, instance_type_uuid):
|
||||
"""Get instance type by uuid."""
|
||||
def flavor_get(self, context, flavor_uuid):
|
||||
"""Get server type by uuid."""
|
||||
|
||||
def instance_type_update(self, context, instance_type_id, values):
|
||||
"""Update an instance type."""
|
||||
def flavor_update(self, context, flavor_id, values):
|
||||
"""Update a server type."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_type_get_all(self, context):
|
||||
"""Get all instance types."""
|
||||
def flavor_get_all(self, context):
|
||||
"""Get all server types."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_type_destroy(self, context, instance_type_uuid):
|
||||
"""Delete an instance type."""
|
||||
def flavor_destroy(self, context, flavor_uuid):
|
||||
"""Delete a server type."""
|
||||
|
||||
# Instances
|
||||
# Servers
|
||||
@abc.abstractmethod
|
||||
def instance_create(self, context, values):
|
||||
"""Create a new instance."""
|
||||
def server_create(self, context, values):
|
||||
"""Create a new server."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_get(self, context, instance_id):
|
||||
"""Get instance by name."""
|
||||
def server_get(self, context, server_id):
|
||||
"""Get server by name."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_get_all(self, context, project_only):
|
||||
"""Get all instances."""
|
||||
def server_get_all(self, context, project_only):
|
||||
"""Get all servers."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_destroy(self, context, instance_id):
|
||||
"""Delete an instance."""
|
||||
def server_destroy(self, context, server_id):
|
||||
"""Delete a server."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_update(self, context, instance_id, values):
|
||||
"""Update an instance."""
|
||||
def server_update(self, context, server_id, values):
|
||||
"""Update a server."""
|
||||
|
||||
# Compute nodes
|
||||
@abc.abstractmethod
|
||||
@ -157,23 +157,23 @@ class Connection(object):
|
||||
def compute_disk_update(self, context, disk_uuid, values):
|
||||
"""Update a compute disk."""
|
||||
|
||||
# Instance Type extra specs
|
||||
# Flavor extra specs
|
||||
@abc.abstractmethod
|
||||
def extra_specs_update_or_create(self, context,
|
||||
instance_type_uuid, extra_specs):
|
||||
"""Create or update instance type extra specs.
|
||||
flavor_uuid, extra_specs):
|
||||
"""Create or update server type extra specs.
|
||||
|
||||
This adds or modifies the key/value pairs specified in the
|
||||
extra specs dict argument
|
||||
"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_type_extra_specs_get(self, context, type_id):
|
||||
"""Get instance type extra specs"""
|
||||
def flavor_extra_specs_get(self, context, type_id):
|
||||
"""Get server type extra specs"""
|
||||
|
||||
@abc.abstractmethod
|
||||
def type_extra_specs_delete(self, context, instance_type_uuid, key):
|
||||
"""Delete instance type extra specs.
|
||||
def type_extra_specs_delete(self, context, flavor_uuid, key):
|
||||
"""Delete server type extra specs.
|
||||
|
||||
This deletes the key/value pairs specified in the
|
||||
extra specs dict argument
|
||||
@ -193,25 +193,25 @@ class Connection(object):
|
||||
"""Remove flavor access for project."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_nics_get_by_instance_uuid(self, context, instance_uuid):
|
||||
"""Get the Nics info of an instnace.
|
||||
def server_nics_get_by_server_uuid(self, context, server_uuid):
|
||||
"""Get the Nics info of a server.
|
||||
|
||||
This query the Nics info of the specified instance.
|
||||
This query the Nics info of the specified server.
|
||||
"""
|
||||
|
||||
def instance_nic_update_or_create(self, context, port_id, values):
|
||||
def server_nic_update_or_create(self, context, port_id, values):
|
||||
"""Update/Create a nic db entry.
|
||||
|
||||
This creates or updates a nic db entry.
|
||||
"""
|
||||
# Instances Faults
|
||||
# Servers Faults
|
||||
@abc.abstractmethod
|
||||
def instance_fault_create(self, context, values):
|
||||
"""Create a new Instance Fault."""
|
||||
def server_fault_create(self, context, values):
|
||||
"""Create a new Server Fault."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def instance_fault_get_by_instance_uuids(self, context, instance_uuids):
|
||||
"""Get all instance faults for the provided instance_uuids."""
|
||||
def server_fault_get_by_server_uuids(self, context, server_uuids):
|
||||
"""Get all server faults for the provided server_uuids."""
|
||||
|
||||
@abc.abstractmethod
|
||||
def quota_get(self, context, project_id, resource_name):
|
||||
|
@ -29,7 +29,7 @@ import sqlalchemy as sa

def upgrade():
op.create_table(
'instance_types',
'flavors',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('uuid', sa.String(length=36), nullable=False),
@ -41,34 +41,34 @@ def upgrade():
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'instance_type_extra_specs',
'flavor_extra_specs',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('instance_type_uuid', sa.String(length=36), nullable=False),
sa.Column('flavor_uuid', sa.String(length=36), nullable=False),
sa.Column('key', sa.String(length=255), nullable=False),
sa.Column('value', sa.String(length=255), nullable=False),
sa.ForeignKeyConstraint(['instance_type_uuid'],
['instance_types.uuid']),
sa.ForeignKeyConstraint(['flavor_uuid'],
['flavors.uuid']),
sa.PrimaryKeyConstraint('id'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'instance_type_projects',
'flavor_projects',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('instance_type_uuid', sa.String(length=36), nullable=True),
sa.Column('flavor_uuid', sa.String(length=36), nullable=True),
sa.Column('project_id', sa.String(length=36), nullable=True),
sa.ForeignKeyConstraint(['instance_type_uuid'],
['instance_types.uuid']),
sa.ForeignKeyConstraint(['flavor_uuid'],
['flavors.uuid']),
sa.PrimaryKeyConstraint('id'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'instances',
'servers',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('uuid', sa.String(length=36), nullable=True),
@ -79,7 +79,7 @@ def upgrade():
sa.Column('description', sa.String(length=255), nullable=True),
sa.Column('status', sa.String(length=255), nullable=True),
sa.Column('power_state', sa.String(length=15), nullable=True),
sa.Column('instance_type_uuid', sa.String(length=36), nullable=True),
sa.Column('flavor_uuid', sa.String(length=36), nullable=True),
sa.Column('image_uuid', sa.String(length=36), nullable=True),
sa.Column('launched_at', sa.DateTime(), nullable=True),
sa.Column('availability_zone', sa.String(length=255), nullable=True),
@ -88,7 +88,7 @@ def upgrade():
sa.Column('locked', sa.Boolean(), nullable=True),
sa.Column('locked_by', sa.Enum('admin', 'owner'), nullable=True),
sa.PrimaryKeyConstraint('id'),
sa.UniqueConstraint('uuid', name='uniq_instances0uuid'),
sa.UniqueConstraint('uuid', name='uniq_servers0uuid'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
@ -143,31 +143,31 @@ def upgrade():
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'instance_nics',
'server_nics',
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('instance_uuid', sa.String(length=36), nullable=False),
sa.Column('server_uuid', sa.String(length=36), nullable=False),
sa.Column('port_id', sa.String(length=36), nullable=False),
sa.Column('mac_address', sa.String(length=36), nullable=True),
sa.Column('network_id', sa.String(length=36), nullable=True),
sa.Column('port_type', sa.String(length=64), nullable=True),
sa.Column('floating_ip', sa.String(length=64), nullable=True),
sa.Column('fixed_ips', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['instance_uuid'], ['instances.uuid'], ),
sa.ForeignKeyConstraint(['server_uuid'], ['servers.uuid'], ),
sa.PrimaryKeyConstraint('port_id'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'
)
op.create_table(
'instance_faults',
'server_faults',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('instance_uuid', sa.String(length=36), nullable=True),
sa.Column('server_uuid', sa.String(length=36), nullable=True),
sa.Column('code', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('message', sa.String(length=255), nullable=True),
sa.Column('detail', sa.Text(), nullable=True),
sa.ForeignKeyConstraint(['instance_uuid'], ['instances.uuid']),
sa.ForeignKeyConstraint(['server_uuid'], ['servers.uuid']),
sa.PrimaryKeyConstraint('id'),
mysql_ENGINE='InnoDB',
mysql_DEFAULT_CHARSET='UTF8'

@ -94,164 +94,164 @@ def add_identity_filter(query, value):
raise exception.InvalidParameterValue(identity=value)


def _dict_with_extra_specs(inst_type_query):
"""Takes an instance type query and returns it as a dictionary."""
inst_type_dict = dict(inst_type_query)
def _dict_with_extra_specs(flavor_query):
"""Takes a server type query and returns it as a dictionary."""
flavor_dict = dict(flavor_query)
extra_specs = {x['key']: x['value']
for x in inst_type_query['extra_specs']}
inst_type_dict['extra_specs'] = extra_specs
return inst_type_dict
for x in flavor_query['extra_specs']}
flavor_dict['extra_specs'] = extra_specs
return flavor_dict


class Connection(api.Connection):
"""SqlAlchemy connection."""

def __init__(self):
self.QUOTA_SYNC_FUNCTIONS = {'_sync_instances': self._sync_instances}
self.QUOTA_SYNC_FUNCTIONS = {'_sync_servers': self._sync_servers}
pass

def instance_type_create(self, context, values):
def flavor_create(self, context, values):
if not values.get('uuid'):
values['uuid'] = uuidutils.generate_uuid()

if not values.get('description'):
values['description'] = ""

instance_type = models.InstanceTypes()
instance_type.update(values)
flavor = models.Flavors()
flavor.update(values)

with _session_for_write() as session:
try:
session.add(instance_type)
session.add(flavor)
session.flush()
except db_exc.DBDuplicateEntry:
raise exception.FlavorAlreadyExists(uuid=values['uuid'])
return _dict_with_extra_specs(instance_type)
return _dict_with_extra_specs(flavor)

def instance_type_get(self, context, instance_type_uuid):
|
||||
query = model_query(context, models.InstanceTypes).filter_by(
|
||||
uuid=instance_type_uuid).options(joinedload('extra_specs'))
|
||||
def flavor_get(self, context, flavor_uuid):
|
||||
query = model_query(context, models.Flavors).filter_by(
|
||||
uuid=flavor_uuid).options(joinedload('extra_specs'))
|
||||
try:
|
||||
return _dict_with_extra_specs(query.one())
|
||||
except NoResultFound:
|
||||
raise exception.FlavorNotFound(
|
||||
type_id=instance_type_uuid)
|
||||
type_id=flavor_uuid)
|
||||
|
||||
def instance_type_update(self, context, instance_type_id, values):
|
||||
def flavor_update(self, context, flavor_id, values):
|
||||
with _session_for_write():
|
||||
query = model_query(context, models.InstanceTypes)
|
||||
query = add_identity_filter(query, instance_type_id)
|
||||
query = model_query(context, models.Flavors)
|
||||
query = add_identity_filter(query, flavor_id)
|
||||
try:
|
||||
ref = query.with_lockmode('update').one()
|
||||
except NoResultFound:
|
||||
raise exception.FlavorNotFound(
|
||||
type_id=instance_type_id)
|
||||
type_id=flavor_id)
|
||||
|
||||
ref.update(values)
|
||||
return ref
|
||||
|
||||
def instance_type_get_all(self, context):
|
||||
results = model_query(context, models.InstanceTypes)
|
||||
def flavor_get_all(self, context):
|
||||
results = model_query(context, models.Flavors)
|
||||
return [_dict_with_extra_specs(i) for i in results]
|
||||
|
||||
def instance_type_destroy(self, context, instance_type_uuid):
|
||||
def flavor_destroy(self, context, flavor_uuid):
|
||||
with _session_for_write():
|
||||
# First clean up all extra specs related to this type
|
||||
type_id = _type_get_id_from_type(context, instance_type_uuid)
|
||||
type_id = _type_get_id_from_type(context, flavor_uuid)
|
||||
extra_query = model_query(
|
||||
context,
|
||||
models.InstanceTypeExtraSpecs).filter_by(
|
||||
instance_type_uuid=type_id)
|
||||
models.FlavorExtraSpecs).filter_by(
|
||||
flavor_uuid=type_id)
|
||||
extra_query.delete()
|
||||
|
||||
# Clean up all access related to this flavor
|
||||
project_query = model_query(
|
||||
context,
|
||||
models.InstanceTypeProjects).filter_by(
|
||||
instance_type_uuid=type_id)
|
||||
models.FlavorProjects).filter_by(
|
||||
flavor_uuid=type_id)
|
||||
project_query.delete()
|
||||
|
||||
# Then delete the type record
|
||||
query = model_query(context, models.InstanceTypes)
|
||||
query = add_identity_filter(query, instance_type_uuid)
|
||||
query = model_query(context, models.Flavors)
|
||||
query = add_identity_filter(query, flavor_uuid)
|
||||
|
||||
count = query.delete()
|
||||
if count != 1:
|
||||
raise exception.FlavorNotFound(
|
||||
type_id=instance_type_uuid)
|
||||
type_id=flavor_uuid)
|
||||
|
||||
def instance_create(self, context, values):
|
||||
def server_create(self, context, values):
|
||||
if not values.get('uuid'):
|
||||
values['uuid'] = uuidutils.generate_uuid()
|
||||
|
||||
instance_nics = values.pop('nics', [])
|
||||
instance = models.Instance()
|
||||
instance.update(values)
|
||||
server_nics = values.pop('nics', [])
|
||||
server = models.Server()
|
||||
server.update(values)
|
||||
nic_refs = []
|
||||
for nic in instance_nics:
|
||||
nic_ref = models.InstanceNic()
|
||||
for nic in server_nics:
|
||||
nic_ref = models.ServerNic()
|
||||
nic_ref.update(nic)
|
||||
nic_refs.append(nic_ref)
|
||||
with _session_for_write() as session:
|
||||
try:
|
||||
session.add(instance)
|
||||
session.add(server)
|
||||
for nic_r in nic_refs:
|
||||
session.add(nic_r)
|
||||
session.flush()
|
||||
except db_exc.DBDuplicateEntry:
|
||||
raise exception.InstanceAlreadyExists(name=values['name'])
|
||||
return instance
|
||||
raise exception.ServerAlreadyExists(name=values['name'])
|
||||
return server
|
||||
|
||||
def instance_get(self, context, instance_id):
|
||||
def server_get(self, context, server_id):
|
||||
query = model_query(
|
||||
context,
|
||||
models.Instance,
|
||||
instance=True).filter_by(uuid=instance_id)
|
||||
models.Server,
|
||||
server=True).filter_by(uuid=server_id)
|
||||
try:
|
||||
return query.one()
|
||||
except NoResultFound:
|
||||
raise exception.InstanceNotFound(instance=instance_id)
|
||||
raise exception.ServerNotFound(server=server_id)
|
||||
|
||||
def instance_get_all(self, context, project_only):
|
||||
return model_query(context, models.Instance,
|
||||
instance=True, project_only=project_only)
|
||||
def server_get_all(self, context, project_only):
|
||||
return model_query(context, models.Server,
|
||||
server=True, project_only=project_only)
|
||||
|
||||
def instance_destroy(self, context, instance_id):
|
||||
def server_destroy(self, context, server_id):
|
||||
with _session_for_write():
|
||||
query = model_query(context, models.Instance)
|
||||
query = add_identity_filter(query, instance_id)
|
||||
query = model_query(context, models.Server)
|
||||
query = add_identity_filter(query, server_id)
|
||||
|
||||
nics_query = model_query(context, models.InstanceNic).filter_by(
|
||||
instance_uuid=instance_id)
|
||||
nics_query = model_query(context, models.ServerNic).filter_by(
|
||||
server_uuid=server_id)
|
||||
nics_query.delete()
|
||||
|
||||
faults_query = model_query(
|
||||
context,
|
||||
models.InstanceFault).filter_by(instance_uuid=instance_id)
|
||||
models.ServerFault).filter_by(server_uuid=server_id)
|
||||
faults_query.delete()
|
||||
count = query.delete()
|
||||
if count != 1:
|
||||
raise exception.InstanceNotFound(instance=instance_id)
|
||||
raise exception.ServerNotFound(server=server_id)
|
||||
|
||||
def instance_update(self, context, instance_id, values):
|
||||
def server_update(self, context, server_id, values):
|
||||
if 'uuid' in values:
|
||||
msg = _("Cannot overwrite UUID for an existing Instance.")
|
||||
msg = _("Cannot overwrite UUID for an existing Server.")
|
||||
raise exception.InvalidParameterValue(err=msg)
|
||||
|
||||
try:
|
||||
return self._do_update_instance(context, instance_id, values)
|
||||
return self._do_update_server(context, server_id, values)
|
||||
except db_exc.DBDuplicateEntry as e:
|
||||
if 'name' in e.columns:
|
||||
raise exception.DuplicateName(name=values['name'])
|
||||
|
||||
def _do_update_instance(self, context, instance_id, values):
|
||||
def _do_update_server(self, context, server_id, values):
|
||||
with _session_for_write():
|
||||
query = model_query(context, models.Instance, instance=True)
|
||||
query = add_identity_filter(query, instance_id)
|
||||
query = model_query(context, models.Server, server=True)
|
||||
query = add_identity_filter(query, server_id)
|
||||
try:
|
||||
ref = query.with_lockmode('update').one()
|
||||
except NoResultFound:
|
||||
raise exception.InstanceNotFound(instance=instance_id)
|
||||
raise exception.ServerNotFound(server=server_id)
|
||||
|
||||
ref.update(values)
|
||||
return ref
|
||||
@ -423,9 +423,9 @@ class Connection(api.Connection):
|
||||
return ref
|
||||
|
||||
def extra_specs_update_or_create(self, context,
|
||||
instance_type_uuid, specs,
|
||||
flavor_uuid, specs,
|
||||
max_retries=10):
|
||||
"""Create or update instance type extra specs.
|
||||
"""Create or update server type extra specs.
|
||||
|
||||
This adds or modifies the key/value pairs specified in the
|
||||
extra specs dict argument
|
||||
@ -434,9 +434,9 @@ class Connection(api.Connection):
|
||||
with _session_for_write() as session:
|
||||
try:
|
||||
spec_refs = model_query(
|
||||
context, models.InstanceTypeExtraSpecs). \
|
||||
filter_by(instance_type_uuid=instance_type_uuid). \
|
||||
filter(models.InstanceTypeExtraSpecs.key.in_(
|
||||
context, models.FlavorExtraSpecs). \
|
||||
filter_by(flavor_uuid=flavor_uuid). \
|
||||
filter(models.FlavorExtraSpecs.key.in_(
|
||||
specs.keys())).with_lockmode('update').all()
|
||||
|
||||
existing_keys = set()
|
||||
@ -448,10 +448,10 @@ class Connection(api.Connection):
|
||||
for key, value in specs.items():
|
||||
if key in existing_keys:
|
||||
continue
|
||||
spec_ref = models.InstanceTypeExtraSpecs()
|
||||
spec_ref = models.FlavorExtraSpecs()
|
||||
spec_ref.update(
|
||||
{"key": key, "value": value,
|
||||
"instance_type_uuid": instance_type_uuid})
|
||||
"flavor_uuid": flavor_uuid})
|
||||
|
||||
session.add(spec_ref)
|
||||
session.flush()
|
||||
@ -462,15 +462,15 @@ class Connection(api.Connection):
|
||||
# try again unless this was the last attempt
|
||||
if attempt == max_retries - 1:
|
||||
raise exception.FlavorExtraSpecUpdateCreateFailed(
|
||||
id=instance_type_uuid, retries=max_retries)
|
||||
id=flavor_uuid, retries=max_retries)
|
||||
|
||||
def instance_type_extra_specs_get(self, context, type_id):
|
||||
def flavor_extra_specs_get(self, context, type_id):
|
||||
rows = _type_extra_specs_get_query(context, type_id).all()
|
||||
return {row['key']: row['value'] for row in rows}
|
||||
|
||||
def type_extra_specs_delete(self, context, type_id, key):
|
||||
result = _type_extra_specs_get_query(context, type_id). \
|
||||
filter(models.InstanceTypeExtraSpecs.key == key). \
|
||||
filter(models.FlavorExtraSpecs.key == key). \
|
||||
delete(synchronize_session=False)
|
||||
# did not find the extra spec
|
||||
if result == 0:
|
||||
@ -481,8 +481,8 @@ class Connection(api.Connection):
|
||||
return _flavor_access_query(context, flavor_id)
|
||||
|
||||
def flavor_access_add(self, context, flavor_id, project_id):
|
||||
access_ref = models.InstanceTypeProjects()
|
||||
access_ref.update({"instance_type_uuid": flavor_id,
|
||||
access_ref = models.FlavorProjects()
|
||||
access_ref.update({"flavor_uuid": flavor_id,
|
||||
"project_id": project_id})
|
||||
with _session_for_write() as session:
|
||||
try:
|
||||
@ -502,27 +502,27 @@ class Connection(api.Connection):
|
||||
raise exception.FlavorAccessNotFound(flavor_id=flavor_id,
|
||||
project_id=project_id)
|
||||
|
||||
def instance_nic_update_or_create(self, context, port_id, values):
|
||||
def server_nic_update_or_create(self, context, port_id, values):
|
||||
with _session_for_write() as session:
|
||||
query = model_query(context, models.InstanceNic).filter_by(
|
||||
query = model_query(context, models.ServerNic).filter_by(
|
||||
port_id=port_id)
|
||||
nic = query.first()
|
||||
if not nic:
|
||||
nic = models.InstanceNic()
|
||||
nic = models.ServerNic()
|
||||
values.update(port_id=port_id)
|
||||
nic.update(values)
|
||||
session.add(nic)
|
||||
session.flush()
|
||||
return nic
|
||||
|
||||
def instance_nics_get_by_instance_uuid(self, context, instance_uuid):
|
||||
return model_query(context, models.InstanceNic).filter_by(
|
||||
instance_uuid=instance_uuid).all()
|
||||
def server_nics_get_by_server_uuid(self, context, server_uuid):
|
||||
return model_query(context, models.ServerNic).filter_by(
|
||||
server_uuid=server_uuid).all()
|
||||
|
||||
def instance_fault_create(self, context, values):
|
||||
"""Create a new InstanceFault."""
|
||||
def server_fault_create(self, context, values):
|
||||
"""Create a new ServerFault."""
|
||||
|
||||
fault = models.InstanceFault()
|
||||
fault = models.ServerFault()
|
||||
fault.update(values)
|
||||
|
||||
with _session_for_write() as session:
|
||||
@ -530,22 +530,22 @@ class Connection(api.Connection):
|
||||
session.flush()
|
||||
return fault
|
||||
|
||||
def instance_fault_get_by_instance_uuids(self, context, instance_uuids):
|
||||
"""Get all instance faults for the provided instance_uuids."""
|
||||
if not instance_uuids:
|
||||
def server_fault_get_by_server_uuids(self, context, server_uuids):
|
||||
"""Get all server faults for the provided server_uuids."""
|
||||
if not server_uuids:
|
||||
return {}
|
||||
|
||||
rows = model_query(context, models.InstanceFault).\
|
||||
filter(models.InstanceFault.instance_uuid.in_(instance_uuids)).\
|
||||
rows = model_query(context, models.ServerFault).\
|
||||
filter(models.ServerFault.server_uuid.in_(server_uuids)).\
|
||||
order_by(desc("created_at"), desc("id")).all()
|
||||
|
||||
output = {}
|
||||
for instance_uuid in instance_uuids:
|
||||
output[instance_uuid] = []
|
||||
for server_uuid in server_uuids:
|
||||
output[server_uuid] = []
|
||||
|
||||
for row in rows:
|
||||
data = dict(row)
|
||||
output[row['instance_uuid']].append(data)
|
||||
output[row['server_uuid']].append(data)
|
||||
|
||||
return output
|
||||
|
||||
@ -679,10 +679,10 @@ class Connection(api.Connection):
|
||||
project_id=project_id)
|
||||
return reservation_ref
|
||||
|
||||
def _sync_instances(self, context, project_id):
|
||||
query = model_query(context, models.Instance, instance=True).\
|
||||
def _sync_servers(self, context, project_id):
|
||||
query = model_query(context, models.Server, server=True).\
|
||||
filter_by(project_id=project_id).all()
|
||||
return {'instances': len(query) or 0}
|
||||
return {'servers': len(query) or 0}
|
||||
|
||||
def quota_reserve(self, context, resources, quotas, deltas, expire,
|
||||
until_refresh, max_age, project_id,
|
||||
@ -902,7 +902,7 @@ class Connection(api.Connection):
|
||||
|
||||
|
||||
def _type_get_id_from_type_query(context, type_id):
|
||||
return model_query(context, models.InstanceTypes). \
|
||||
return model_query(context, models.Flavors). \
|
||||
filter_by(uuid=type_id)
|
||||
|
||||
|
||||
@ -914,10 +914,10 @@ def _type_get_id_from_type(context, type_id):
|
||||
|
||||
|
||||
def _type_extra_specs_get_query(context, type_id):
|
||||
return model_query(context, models.InstanceTypeExtraSpecs). \
|
||||
filter_by(instance_type_uuid=type_id)
|
||||
return model_query(context, models.FlavorExtraSpecs). \
|
||||
filter_by(flavor_uuid=type_id)
|
||||
|
||||
|
||||
def _flavor_access_query(context, flavor_id):
|
||||
return model_query(context, models.InstanceTypeProjects). \
|
||||
filter_by(instance_type_uuid=flavor_id)
|
||||
return model_query(context, models.FlavorProjects). \
|
||||
filter_by(flavor_uuid=flavor_id)
|
||||
|
@ -63,12 +63,12 @@ class MoganBase(models.TimestampMixin,
|
||||
Base = declarative_base(cls=MoganBase)
|
||||
|
||||
|
||||
class Instance(Base):
|
||||
"""Represents possible types for instances."""
|
||||
class Server(Base):
|
||||
"""Represents possible types for servers."""
|
||||
|
||||
__tablename__ = 'instances'
|
||||
__tablename__ = 'servers'
|
||||
__table_args__ = (
|
||||
schema.UniqueConstraint('uuid', name='uniq_instances0uuid'),
|
||||
schema.UniqueConstraint('uuid', name='uniq_servers0uuid'),
|
||||
table_args()
|
||||
)
|
||||
id = Column(Integer, primary_key=True)
|
||||
@ -79,7 +79,7 @@ class Instance(Base):
|
||||
user_id = Column(String(36), nullable=True)
|
||||
status = Column(String(255), nullable=True)
|
||||
power_state = Column(String(15), nullable=True)
|
||||
instance_type_uuid = Column(String(36), nullable=True)
|
||||
flavor_uuid = Column(String(36), nullable=True)
|
||||
availability_zone = Column(String(255), nullable=True)
|
||||
image_uuid = Column(String(36), nullable=True)
|
||||
node_uuid = Column(String(36), nullable=True)
|
||||
@ -153,100 +153,99 @@ class ComputeDisk(Base):
|
||||
primaryjoin='ComputeNode.node_uuid == ComputeDisk.node_uuid')
|
||||
|
||||
|
||||
class InstanceNic(Base):
|
||||
"""Represents the NIC info for instances."""
|
||||
class ServerNic(Base):
|
||||
"""Represents the NIC info for servers."""
|
||||
|
||||
__tablename__ = 'instance_nics'
|
||||
instance_uuid = Column(String(36), nullable=True)
|
||||
__tablename__ = 'server_nics'
|
||||
server_uuid = Column(String(36), nullable=True)
|
||||
port_id = Column(String(36), primary_key=True)
|
||||
mac_address = Column(String(32), nullable=False)
|
||||
network_id = Column(String(36), nullable=True)
|
||||
fixed_ips = Column(db_types.JsonEncodedList)
|
||||
port_type = Column(String(64), nullable=True)
|
||||
floating_ip = Column(String(64), nullable=True)
|
||||
_instance = orm.relationship(
|
||||
Instance,
|
||||
backref=orm.backref('instance_nics', uselist=False),
|
||||
foreign_keys=instance_uuid,
|
||||
primaryjoin='Instance.uuid == InstanceNic.instance_uuid')
|
||||
_server = orm.relationship(
|
||||
Server,
|
||||
backref=orm.backref('server_nics', uselist=False),
|
||||
foreign_keys=server_uuid,
|
||||
primaryjoin='Server.uuid == ServerNic.server_uuid')
|
||||
|
||||
|
||||
class InstanceTypes(Base):
|
||||
"""Represents possible types for instances."""
|
||||
class Flavors(Base):
|
||||
"""Represents possible types for servers."""
|
||||
|
||||
__tablename__ = 'instance_types'
|
||||
__tablename__ = 'flavors'
|
||||
uuid = Column(String(36), primary_key=True)
|
||||
name = Column(String(255), nullable=False)
|
||||
description = Column(String(255), nullable=True)
|
||||
is_public = Column(Boolean, default=True)
|
||||
instances = orm.relationship(
|
||||
Instance,
|
||||
backref=orm.backref('instance_type', uselist=False),
|
||||
servers = orm.relationship(
|
||||
Server,
|
||||
backref=orm.backref('flavor', uselist=False),
|
||||
foreign_keys=uuid,
|
||||
primaryjoin='Instance.instance_type_uuid == InstanceTypes.uuid')
|
||||
primaryjoin='Server.flavor_uuid == Flavors.uuid')
|
||||
|
||||
|
||||
class InstanceTypeProjects(Base):
"""Represents projects associated instance_types."""
class FlavorProjects(Base):
"""Represents projects associated with flavors."""

__tablename__ = 'instance_type_projects'
|
||||
__tablename__ = 'flavor_projects'
|
||||
__table_args__ = (
|
||||
schema.UniqueConstraint(
|
||||
'instance_type_uuid', 'project_id',
|
||||
name='uniq_instance_type_projects0instance_type_uuid0project_id'
|
||||
'flavor_uuid', 'project_id',
|
||||
name='uniq_flavor_projects0flavor_uuid0project_id'
|
||||
),
|
||||
table_args()
|
||||
)
|
||||
id = Column(Integer, primary_key=True)
|
||||
instance_type_uuid = Column(Integer, nullable=True)
|
||||
flavor_uuid = Column(Integer, nullable=True)
|
||||
project_id = Column(String(36), nullable=True)
|
||||
instances = orm.relationship(
|
||||
InstanceTypes,
|
||||
servers = orm.relationship(
|
||||
Flavors,
|
||||
backref=orm.backref('projects', uselist=False),
|
||||
foreign_keys=instance_type_uuid,
|
||||
primaryjoin='InstanceTypeProjects.instance_type_uuid'
|
||||
' == InstanceTypes.uuid')
|
||||
foreign_keys=flavor_uuid,
|
||||
primaryjoin='FlavorProjects.flavor_uuid'
|
||||
' == Flavors.uuid')
|
||||
|
||||
|
||||
class InstanceTypeExtraSpecs(Base):
"""Represents additional specs as key/value pairs for an instance_type."""
__tablename__ = 'instance_type_extra_specs'
class FlavorExtraSpecs(Base):
"""Represents additional specs as key/value pairs for a flavor."""
__tablename__ = 'flavor_extra_specs'
__table_args__ = (
|
||||
schema.UniqueConstraint(
|
||||
"instance_type_uuid", "key",
|
||||
name=("uniq_instance_type_extra_specs0"
|
||||
"instance_type_uuid")
|
||||
"flavor_uuid", "key",
|
||||
name=("uniq_flavor_extra_specs0"
|
||||
"flavor_uuid")
|
||||
),
|
||||
{'mysql_collate': 'utf8_bin'},
|
||||
)
|
||||
id = Column(Integer, primary_key=True)
|
||||
key = Column(String(255))
|
||||
value = Column(String(255))
|
||||
instance_type_uuid = Column(String(36), ForeignKey('instance_types.uuid'),
|
||||
nullable=False)
|
||||
instance_type = orm.relationship(
|
||||
InstanceTypes, backref="extra_specs",
|
||||
foreign_keys=instance_type_uuid,
|
||||
primaryjoin='InstanceTypeExtraSpecs.instance_type_uuid '
|
||||
'== InstanceTypes.uuid')
|
||||
flavor_uuid = Column(String(36), ForeignKey('flavors.uuid'),
|
||||
nullable=False)
|
||||
flavor = orm.relationship(
|
||||
Flavors, backref="extra_specs",
|
||||
foreign_keys=flavor_uuid,
|
||||
primaryjoin='FlavorExtraSpecs.flavor_uuid '
|
||||
'== Flavors.uuid')
|
||||
|
||||
|
||||
class InstanceFault(Base):
|
||||
"""Represents fault info for instance"""
|
||||
class ServerFault(Base):
|
||||
"""Represents fault info for server"""
|
||||
|
||||
__tablename__ = "instance_faults"
|
||||
__tablename__ = "server_faults"
|
||||
|
||||
id = Column(Integer, primary_key=True, nullable=False)
|
||||
instance_uuid = Column(String(36),
|
||||
ForeignKey('instances.uuid'))
|
||||
server_uuid = Column(String(36), ForeignKey('servers.uuid'))
|
||||
code = Column(Integer(), nullable=False)
|
||||
message = Column(String(255))
|
||||
detail = Column(MediumText())
|
||||
instance = orm.relationship(
|
||||
Instance,
|
||||
backref=orm.backref('instance_faults', uselist=False),
|
||||
foreign_keys=instance_uuid,
|
||||
primaryjoin='Instance.uuid == InstanceFault.instance_uuid')
|
||||
server = orm.relationship(
|
||||
Server,
|
||||
backref=orm.backref('server_faults', uselist=False),
|
||||
foreign_keys=server_uuid,
|
||||
primaryjoin='Server.uuid == ServerFault.server_uuid')
|
||||
|
||||
|
||||
class Quota(Base):
|
||||
|
@ -42,21 +42,21 @@ LOG = log.getLogger(__name__)
|
||||
MAX_USERDATA_SIZE = 65535
|
||||
|
||||
|
||||
def check_instance_lock(function):
|
||||
def check_server_lock(function):
|
||||
@six.wraps(function)
|
||||
def inner(self, context, instance, *args, **kwargs):
|
||||
if instance.locked and not context.is_admin:
|
||||
raise exception.InstanceIsLocked(instance_uuid=instance.uuid)
|
||||
return function(self, context, instance, *args, **kwargs)
|
||||
def inner(self, context, server, *args, **kwargs):
|
||||
if server.locked and not context.is_admin:
|
||||
raise exception.ServerIsLocked(server_uuid=server.uuid)
|
||||
return function(self, context, server, *args, **kwargs)
|
||||
return inner
|
||||
|
||||
|
||||
def check_instance_maintenance(function):
|
||||
def check_server_maintenance(function):
|
||||
@six.wraps(function)
|
||||
def inner(self, context, instance, *args, **kwargs):
|
||||
if instance.status == states.MAINTENANCE:
|
||||
raise exception.InstanceInMaintenance(instance_uuid=instance.uuid)
|
||||
return function(self, context, instance, *args, **kwargs)
|
||||
def inner(self, context, server, *args, **kwargs):
|
||||
if server.status == states.MAINTENANCE:
|
||||
raise exception.ServerInMaintenance(server_uuid=server.uuid)
|
||||
return function(self, context, server, *args, **kwargs)
|
||||
return inner
|
||||
|
||||
|
||||
@ -69,13 +69,13 @@ class API(object):
|
||||
self.engine_rpcapi = rpcapi.EngineAPI()
|
||||
self.network_api = network.API()
|
||||
self.quota = quota.Quota()
|
||||
self.quota.register_resource(objects.quota.InstanceResource())
|
||||
self.quota.register_resource(objects.quota.ServerResource())
|
||||
self.consoleauth_rpcapi = consoleauth_rpcapi.ConsoleAuthAPI()
|
||||
|
||||
def _get_image(self, context, image_uuid):
|
||||
return self.image_api.get(context, image_uuid)
|
||||
|
||||
def _validate_and_build_base_options(self, context, instance_type,
|
||||
def _validate_and_build_base_options(self, context, flavor,
|
||||
image_uuid, name, description,
|
||||
availability_zone, extra,
|
||||
requested_networks, user_data,
|
||||
@ -85,16 +85,16 @@ class API(object):
|
||||
if user_data:
|
||||
l = len(user_data)
|
||||
if l > MAX_USERDATA_SIZE:
|
||||
raise exception.InstanceUserDataTooLarge(
|
||||
raise exception.ServerUserDataTooLarge(
|
||||
length=l, maxsize=MAX_USERDATA_SIZE)
|
||||
|
||||
try:
|
||||
base64utils.decode_as_bytes(user_data)
|
||||
except TypeError:
|
||||
raise exception.InstanceUserDataMalformed()
|
||||
raise exception.ServerUserDataMalformed()
|
||||
|
||||
# Note: max_count is the number of instances requested by the user,
|
||||
# max_network_count is the maximum number of instances taking into
|
||||
# Note: max_count is the number of servers requested by the user,
|
||||
# max_network_count is the maximum number of servers taking into
|
||||
# account any network quotas
|
||||
max_network_count = self._check_requested_networks(context,
|
||||
requested_networks,
|
||||
@ -113,7 +113,7 @@ class API(object):
|
||||
'user_id': context.user,
|
||||
'project_id': context.tenant,
|
||||
'power_state': states.NOSTATE,
|
||||
'instance_type_uuid': instance_type['uuid'],
|
||||
'flavor_uuid': flavor['uuid'],
|
||||
'name': name,
|
||||
'description': description,
|
||||
'locked': False,
|
||||
@ -123,15 +123,15 @@ class API(object):
|
||||
# return the validated options
|
||||
return base_options, max_network_count, key_pair
|
||||
|
||||
def _new_instance_name_from_template(self, uuid, name, index):
|
||||
"""Apply the template to instance name.
|
||||
def _new_server_name_from_template(self, uuid, name, index):
|
||||
"""Apply the template to server name.
|
||||
|
||||
Apply name template for multi-instance scenario.
|
||||
Apply name template for multi-server scenario.
|
||||
|
||||
:param uuid: The uuid of instance.
|
||||
:param name: The name of instance.
|
||||
:param index: The index of instance.
|
||||
:return: The new name of instance.
|
||||
:param uuid: The uuid of server.
|
||||
:param name: The name of server.
|
||||
:param index: The index of server.
|
||||
:return: The new name of server.
|
||||
"""
|
||||
params = {
|
||||
'uuid': uuid,
|
||||
@ -139,46 +139,46 @@ class API(object):
|
||||
'count': index + 1,
|
||||
}
|
||||
try:
|
||||
new_name = (CONF.api.multi_instance_name_template %
|
||||
new_name = (CONF.api.multi_server_name_template %
|
||||
params)
|
||||
except (KeyError, TypeError):
|
||||
LOG.exception('Failed to set instance name using '
|
||||
'multi_instance_name_template.')
|
||||
LOG.exception('Failed to set server name using '
|
||||
'multi_server_name_template.')
|
||||
new_name = name
|
||||
return new_name
|
||||
|
||||
def _populate_instance_names(self, instance, num_instances, index):
|
||||
"""Rename the instance name in multi-instance scenario.
|
||||
def _populate_server_names(self, server, num_servers, index):
|
||||
"""Rename the server name in multi-server scenario.
|
||||
|
||||
This is for rename instance in multi-instance scenario.
|
||||
This is for rename server in multi-server scenario.
|
||||
|
||||
:param instance: The instance object.
|
||||
:param num_instances: The number of instances
|
||||
:param index: the index of the instance
|
||||
:return: The instance object
|
||||
:param server: The server object.
|
||||
:param num_servers: The number of servers
|
||||
:param index: the index of the server
|
||||
:return: The server object
|
||||
"""
|
||||
if num_instances > 1:
|
||||
instance.name = self._new_instance_name_from_template(
|
||||
instance.uuid, instance.name, index)
|
||||
if num_servers > 1:
|
||||
server.name = self._new_server_name_from_template(
|
||||
server.uuid, server.name, index)
|
||||
|
||||
return instance
|
||||
return server
|
||||
|
||||
def _check_num_instances_quota(self, context, min_count, max_count):
|
||||
ins_resource = self.quota.resources['instances']
|
||||
def _check_num_servers_quota(self, context, min_count, max_count):
|
||||
ins_resource = self.quota.resources['servers']
|
||||
quotas = self.quota.get_quota_limit_and_usage(context,
|
||||
{'instances':
|
||||
{'servers':
|
||||
ins_resource},
|
||||
context.tenant)
|
||||
limit = quotas['instances']['limit']
|
||||
in_use = quotas['instances']['in_use']
|
||||
reserved = quotas['instances']['reserved']
|
||||
limit = quotas['servers']['limit']
|
||||
in_use = quotas['servers']['in_use']
|
||||
reserved = quotas['servers']['reserved']
|
||||
available_quota = limit - in_use - reserved
|
||||
if max_count <= available_quota:
|
||||
return max_count
|
||||
elif min_count <= available_quota and max_count > available_quota:
|
||||
return available_quota
|
||||
else:
|
||||
raise exception.OverQuota(overs='instances')
|
||||
raise exception.OverQuota(overs='servers')
|
||||
|
||||
def _decode_files(self, injected_files):
|
||||
"""Base64 decode the list of files to inject."""
|
||||
@ -196,45 +196,45 @@ class API(object):
|
||||
|
||||
return [_decode(f) for f in injected_files]
|
||||
|
||||
def _provision_instances(self, context, base_options,
|
||||
min_count, max_count):
|
||||
# Return num_instances according quota
|
||||
num_instances = self._check_num_instances_quota(
|
||||
def _provision_servers(self, context, base_options,
|
||||
min_count, max_count):
|
||||
# Return num_servers according quota
|
||||
num_servers = self._check_num_servers_quota(
|
||||
context, min_count, max_count)
|
||||
|
||||
# Create the instances reservations
|
||||
reserve_opts = {'instances': num_instances}
|
||||
# Create the servers reservations
|
||||
reserve_opts = {'servers': num_servers}
|
||||
reservations = self.quota.reserve(context, **reserve_opts)
|
||||
|
||||
LOG.debug("Going to run %s instances...", num_instances)
|
||||
LOG.debug("Going to run %s servers...", num_servers)
|
||||
|
||||
instances = []
|
||||
servers = []
|
||||
try:
|
||||
for num in range(num_instances):
|
||||
instance = objects.Instance(context=context)
|
||||
instance.update(base_options)
|
||||
instance.uuid = uuidutils.generate_uuid()
|
||||
# Refactor name of the instance.
|
||||
self._populate_instance_names(instance, num_instances, num)
|
||||
for num in range(num_servers):
|
||||
server = objects.Server(context=context)
|
||||
server.update(base_options)
|
||||
server.uuid = uuidutils.generate_uuid()
|
||||
# Refactor name of the server.
|
||||
self._populate_server_names(server, num_servers, num)
|
||||
|
||||
instance.create()
|
||||
instances.append(instance)
|
||||
server.create()
|
||||
servers.append(server)
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
try:
|
||||
for instance in instances:
|
||||
for server in servers:
|
||||
try:
|
||||
instance.destroy()
|
||||
server.destroy()
|
||||
except exception.ObjectActionError:
|
||||
pass
|
||||
finally:
|
||||
self.quota.rollback(context, reservations)
|
||||
|
||||
# Commit instances reservations
|
||||
# Commit servers reservations
|
||||
if reservations:
|
||||
self.quota.commit(context, reservations)
|
||||
|
||||
return instances
|
||||
return servers
|
||||
|
||||
def _check_requested_networks(self, context, requested_networks,
|
||||
max_count):
|
||||
@ -246,10 +246,10 @@ class API(object):
|
||||
return self.network_api.validate_networks(context, requested_networks,
|
||||
max_count)
|
||||
|
||||
def _create_instance(self, context, instance_type, image_uuid,
|
||||
name, description, availability_zone, extra,
|
||||
requested_networks, user_data, injected_files,
|
||||
key_name, min_count, max_count):
|
||||
def _create_server(self, context, flavor, image_uuid,
|
||||
name, description, availability_zone, extra,
|
||||
requested_networks, user_data, injected_files,
|
||||
key_name, min_count, max_count):
|
||||
"""Verify all the input parameters"""
|
||||
|
||||
# Verify the specified image exists
|
||||
@ -258,11 +258,11 @@ class API(object):
|
||||
|
||||
base_options, max_net_count, key_pair = \
|
||||
self._validate_and_build_base_options(
|
||||
context, instance_type, image_uuid, name, description,
|
||||
context, flavor, image_uuid, name, description,
|
||||
availability_zone, extra, requested_networks, user_data,
|
||||
key_name, max_count)
|
||||
|
||||
# max_net_count is the maximum number of instances requested by the
|
||||
# max_net_count is the maximum number of servers requested by the
|
||||
# user adjusted for any network quota constraints, including
|
||||
# consideration of connections to each requested network
|
||||
if max_net_count < min_count:
|
||||
@ -278,43 +278,43 @@ class API(object):
|
||||
# b64 decode the files to inject:
|
||||
decoded_files = self._decode_files(injected_files)
|
||||
|
||||
instances = self._provision_instances(context, base_options,
|
||||
min_count, max_count)
|
||||
servers = self._provision_servers(context, base_options,
|
||||
min_count, max_count)
|
||||
|
||||
if not availability_zone:
|
||||
availability_zone = CONF.engine.default_schedule_zone
|
||||
request_spec = {
|
||||
'instance_id': instances[0].uuid,
|
||||
'instance_properties': {
|
||||
'instance_type_uuid': instances[0].instance_type_uuid,
|
||||
'server_id': servers[0].uuid,
|
||||
'server_properties': {
|
||||
'flavor_uuid': servers[0].flavor_uuid,
|
||||
'networks': requested_networks,
|
||||
},
|
||||
'instance_type': dict(instance_type),
|
||||
'flavor': dict(flavor),
|
||||
'availability_zone': availability_zone,
|
||||
}
|
||||
|
||||
for instance in instances:
|
||||
self.engine_rpcapi.create_instance(context, instance,
|
||||
requested_networks,
|
||||
user_data,
|
||||
decoded_files,
|
||||
key_pair,
|
||||
request_spec,
|
||||
filter_properties=None)
|
||||
for server in servers:
|
||||
self.engine_rpcapi.create_server(context, server,
|
||||
requested_networks,
|
||||
user_data,
|
||||
decoded_files,
|
||||
key_pair,
|
||||
request_spec,
|
||||
filter_properties=None)
|
||||
|
||||
return instances
|
||||
return servers
|
||||
|
||||
def create(self, context, instance_type, image_uuid,
|
||||
def create(self, context, flavor, image_uuid,
|
||||
name=None, description=None, availability_zone=None,
|
||||
extra=None, requested_networks=None, user_data=None,
|
||||
injected_files=None, key_name=None, min_count=None,
|
||||
max_count=None):
|
||||
"""Provision instances
|
||||
"""Provision servers
|
||||
|
||||
Sending instance information to the engine and will handle
|
||||
Sending server information to the engine and will handle
|
||||
creating the DB entries.
|
||||
|
||||
Returns an instance object
|
||||
Returns a server object
|
||||
"""
|
||||
|
||||
# check availability zone
|
||||
@ -323,65 +323,65 @@ class API(object):
|
||||
if availability_zone not in azs['availability_zones']:
|
||||
raise exception.AZNotFound
|
||||
|
||||
return self._create_instance(context, instance_type,
|
||||
image_uuid, name, description,
|
||||
availability_zone, extra,
|
||||
requested_networks, user_data,
|
||||
injected_files, key_name,
|
||||
min_count, max_count)
|
||||
return self._create_server(context, flavor,
|
||||
image_uuid, name, description,
|
||||
availability_zone, extra,
|
||||
requested_networks, user_data,
|
||||
injected_files, key_name,
|
||||
min_count, max_count)
|
||||
|
||||
def _delete_instance(self, context, instance):
|
||||
def _delete_server(self, context, server):
|
||||
|
||||
fsm = utils.get_state_machine(start_state=instance.status)
|
||||
fsm = utils.get_state_machine(start_state=server.status)
|
||||
|
||||
try:
|
||||
utils.process_event(fsm, instance, event='delete')
|
||||
except exception.InstanceNotFound:
|
||||
LOG.debug("Instance is not found while deleting",
|
||||
instance=instance)
|
||||
utils.process_event(fsm, server, event='delete')
|
||||
except exception.ServerNotFound:
|
||||
LOG.debug("Server is not found while deleting",
|
||||
server=server)
|
||||
return
|
||||
reserve_opts = {'instances': -1}
|
||||
reserve_opts = {'servers': -1}
|
||||
reservations = self.quota.reserve(context, **reserve_opts)
|
||||
if reservations:
|
||||
self.quota.commit(context, reservations)
|
||||
self.engine_rpcapi.delete_instance(context, instance)
|
||||
self.engine_rpcapi.delete_server(context, server)
|
||||
|
||||
@check_instance_lock
|
||||
def delete(self, context, instance):
|
||||
"""Delete an instance."""
|
||||
LOG.debug("Going to try to delete instance %s", instance.uuid)
|
||||
self._delete_instance(context, instance)
|
||||
@check_server_lock
|
||||
def delete(self, context, server):
|
||||
"""Delete a server."""
|
||||
LOG.debug("Going to try to delete server %s", server.uuid)
|
||||
self._delete_server(context, server)
|
||||
|
||||
@check_instance_lock
|
||||
@check_instance_maintenance
|
||||
def power(self, context, instance, target):
|
||||
"""Set power state of an instance."""
|
||||
LOG.debug("Going to try to set instance power state to %s",
|
||||
target, instance=instance)
|
||||
fsm = utils.get_state_machine(start_state=instance.status)
|
||||
@check_server_lock
|
||||
@check_server_maintenance
|
||||
def power(self, context, server, target):
|
||||
"""Set power state of a server."""
|
||||
LOG.debug("Going to try to set server power state to %s",
|
||||
target, server=server)
|
||||
fsm = utils.get_state_machine(start_state=server.status)
|
||||
try:
|
||||
utils.process_event(fsm, instance,
|
||||
utils.process_event(fsm, server,
|
||||
event=states.POWER_ACTION_MAP[target])
|
||||
except exception.InstanceNotFound:
|
||||
LOG.debug("Instance is not found while setting power state",
|
||||
instance=instance)
|
||||
except exception.ServerNotFound:
|
||||
LOG.debug("Server is not found while setting power state",
|
||||
server=server)
|
||||
return
|
||||
|
||||
self.engine_rpcapi.set_power_state(context, instance, target)
|
||||
self.engine_rpcapi.set_power_state(context, server, target)
|
||||
|
||||
@check_instance_lock
|
||||
@check_instance_maintenance
|
||||
def rebuild(self, context, instance):
|
||||
"""Rebuild an instance."""
|
||||
fsm = utils.get_state_machine(start_state=instance.status)
|
||||
@check_server_lock
|
||||
@check_server_maintenance
|
||||
def rebuild(self, context, server):
|
||||
"""Rebuild a server."""
|
||||
fsm = utils.get_state_machine(start_state=server.status)
|
||||
try:
|
||||
utils.process_event(fsm, instance, event='rebuild')
|
||||
except exception.InstanceNotFound:
|
||||
LOG.debug("Instance is not found while rebuilding",
|
||||
instance=instance)
|
||||
utils.process_event(fsm, server, event='rebuild')
|
||||
except exception.ServerNotFound:
|
||||
LOG.debug("Server is not found while rebuilding",
|
||||
server=server)
|
||||
return
|
||||
|
||||
self.engine_rpcapi.rebuild_instance(context, instance)
|
||||
self.engine_rpcapi.rebuild_server(context, server)
|
||||
|
||||
def list_availability_zones(self, context):
|
||||
"""Get availability zone list."""
|
||||
@ -396,43 +396,43 @@ class API(object):
|
||||
|
||||
return {'availability_zones': list(azs)}
|
||||
|
||||
def lock(self, context, instance):
|
||||
"""Lock the given instance."""
|
||||
def lock(self, context, server):
|
||||
"""Lock the given server."""
|
||||
|
||||
is_owner = instance.project_id == context.project_id
|
||||
if instance.locked and is_owner:
|
||||
is_owner = server.project_id == context.project_id
|
||||
if server.locked and is_owner:
|
||||
return
|
||||
|
||||
LOG.debug('Locking', instance=instance)
|
||||
instance.locked = True
|
||||
instance.locked_by = 'owner' if is_owner else 'admin'
|
||||
instance.save()
|
||||
LOG.debug('Locking', server=server)
|
||||
server.locked = True
|
||||
server.locked_by = 'owner' if is_owner else 'admin'
|
||||
server.save()
|
||||
|
||||
def unlock(self, context, instance):
|
||||
"""Unlock the given instance."""
|
||||
def unlock(self, context, server):
|
||||
"""Unlock the given server."""
|
||||
|
||||
LOG.debug('Unlocking', instance=instance)
|
||||
instance.locked = False
|
||||
instance.locked_by = None
|
||||
instance.save()
|
||||
LOG.debug('Unlocking', server=server)
|
||||
server.locked = False
|
||||
server.locked_by = None
|
||||
server.save()
|
||||
|
||||
def is_expected_locked_by(self, context, instance):
|
||||
is_owner = instance.project_id == context.project_id
|
||||
def is_expected_locked_by(self, context, server):
|
||||
is_owner = server.project_id == context.project_id
|
||||
expect_locked_by = 'owner' if is_owner else 'admin'
|
||||
locked_by = instance.locked_by
|
||||
locked_by = server.locked_by
|
||||
if locked_by and locked_by != expect_locked_by:
|
||||
return False
|
||||
return True
|
||||
|
||||
def get_serial_console(self, context, instance):
|
||||
"""Get a url to an instance Console."""
|
||||
def get_serial_console(self, context, server):
|
||||
"""Get a url to a server Console."""
|
||||
connect_info = self.engine_rpcapi.get_serial_console(
|
||||
context, instance=instance)
|
||||
context, server=server)
|
||||
self.consoleauth_rpcapi.authorize_console(
|
||||
context,
|
||||
connect_info['token'], 'serial',
|
||||
connect_info['host'], connect_info['port'],
|
||||
connect_info['internal_access_path'], instance.uuid,
|
||||
connect_info['internal_access_path'], server.uuid,
|
||||
access_url=connect_info['access_url'])
|
||||
|
||||
return {'url': connect_info['access_url']}
|
||||
|
@ -52,10 +52,10 @@ class BaseEngineDriver(object):
"""
raise NotImplementedError()

def get_power_state(self, context, instance_uuid):
"""Return a node's power state by passing instance uuid.
def get_power_state(self, context, server_uuid):
"""Return a node's power state by passing server uuid.

:param instance_uuid: mogan instance uuid to get power state.
:param server_uuid: mogan server uuid to get power state.
"""
raise NotImplementedError()

@ -84,40 +84,40 @@ class BaseEngineDriver(object):
"""
raise NotImplementedError()

def unplug_vifs(self, context, instance):
def unplug_vifs(self, context, server):
"""Unplug network interfaces.

:param instance: the instance object.
:param server: the server object.
"""
raise NotImplementedError()

def spawn(self, context, instance, configdrive_value):
"""Create a new instance on the provision platform.
def spawn(self, context, server, configdrive_value):
"""Create a new server on the provision platform.

:param context: security context
:param instance: mogan instance object.
:param server: mogan server object.
:param configdrive_value: The configdrive value to be injected.
"""
raise NotImplementedError()

def destroy(self, context, instance):
def destroy(self, context, server):
"""Trigger node destroy process.

:param instance: the instance to destory.
:param server: the server to destroy.
"""
raise NotImplementedError()

def rebuild(self, context, instance):
def rebuild(self, context, server):
"""Trigger node deploy process.

:param instance: instance to rebuild.
:param server: server to rebuild.
"""
raise NotImplementedError()

def get_serial_console_by_instance(self, context, instance):
"""Get console info by instance.
def get_serial_console_by_server(self, context, server):
"""Get console info by server.

:param instance: instance to get its console info.
:param server: server to get its console info.
"""
raise NotImplementedError()

@ -130,7 +130,7 @@ def load_engine_driver(engine_driver):
argument.

:param engine_driver: a engine driver name to override the config opt
:returns: a EngineDriver instance
:returns: a EngineDriver server
"""

if not engine_driver:

@ -53,7 +53,7 @@ def map_power_state(state):
|
||||
return states.NOSTATE
|
||||
|
||||
|
||||
def _log_ironic_polling(what, node, instance):
|
||||
def _log_ironic_polling(what, node, server):
|
||||
power_state = (None if node.power_state is None else
|
||||
'"%s"' % node.power_state)
|
||||
tgt_power_state = (None if node.target_power_state is None else
|
||||
@ -73,7 +73,7 @@ def _log_ironic_polling(what, node, instance):
|
||||
tgt_power_state=tgt_power_state,
|
||||
prov_state=prov_state,
|
||||
tgt_prov_state=tgt_prov_state),
|
||||
instance=instance)
|
||||
server=server)
|
||||
|
||||
|
||||
class IronicDriver(base_driver.BaseEngineDriver):
|
||||
@ -87,17 +87,17 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
return self.ironicclient.call('node.get', node_uuid,
|
||||
fields=_NODE_FIELDS)
|
||||
|
||||
def _validate_instance_and_node(self, instance):
|
||||
"""Get the node associated with the instance.
|
||||
def _validate_server_and_node(self, server):
|
||||
"""Get the node associated with the server.
|
||||
|
||||
Check with the Ironic service that this instance is associated with a
|
||||
Check with the Ironic service that this server is associated with a
|
||||
node, and return the node.
|
||||
"""
|
||||
try:
|
||||
return self.ironicclient.call('node.get_by_instance_uuid',
|
||||
instance.uuid, fields=_NODE_FIELDS)
|
||||
server.uuid, fields=_NODE_FIELDS)
|
||||
except ironic_exc.NotFound:
|
||||
raise exception.InstanceNotFound(instance=instance.uuid)
|
||||
raise exception.ServerNotFound(server=server.uuid)
|
||||
|
||||
def _parse_node_properties(self, node):
|
||||
"""Helper method to parse the node's properties."""
|
||||
@ -168,15 +168,15 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
}
|
||||
return dic
|
||||
|
||||
def _add_instance_info_to_node(self, node, instance):
|
||||
def _add_server_info_to_node(self, node, server):
|
||||
|
||||
patch = list()
|
||||
# Associate the node with an instance
|
||||
# Associate the node with a server
|
||||
patch.append({'path': '/instance_uuid', 'op': 'add',
|
||||
'value': instance.uuid})
|
||||
'value': server.uuid})
|
||||
# Add the required fields to deploy a node.
|
||||
patch.append({'path': '/instance_info/image_source', 'op': 'add',
|
||||
'value': instance.image_uuid})
|
||||
'value': server.image_uuid})
|
||||
# TODO(zhenguo) Add partition support
|
||||
patch.append({'path': '/instance_info/root_gb', 'op': 'add',
|
||||
'value': str(node.properties.get('local_gb', 0))})
|
||||
@ -185,68 +185,68 @@ class IronicDriver(base_driver.BaseEngineDriver):
|
||||
# FIXME(lucasagomes): The "retry_on_conflict" parameter was added
|
||||
# to basically causes the deployment to fail faster in case the
|
||||
# node picked by the scheduler is already associated with another
|
||||
# instance due bug #1341420.
|
||||
# server due bug #1341420.
|
||||
self.ironicclient.call('node.update', node.uuid, patch,
|
||||
retry_on_conflict=False)
|
||||
except ironic_exc.BadRequest:
|
||||
msg = (_("Failed to add deploy parameters on node %(node)s "
|
||||
"when provisioning the instance %(instance)s")
|
||||
% {'node': node.uuid, 'instance': instance.uuid})
|
||||
"when provisioning the server %(server)s")
|
||||
% {'node': node.uuid, 'server': server.uuid})
|
||||
LOG.error(msg)
|
||||
raise exception.InstanceDeployFailure(msg)
|
||||
raise exception.ServerDeployFailure(msg)
|
||||
|
||||
def _remove_instance_info_from_node(self, node, instance):
|
||||
def _remove_server_info_from_node(self, node, server):
|
||||
patch = [{'path': '/instance_info', 'op': 'remove'},
|
||||
{'path': '/instance_uuid', 'op': 'remove'}]
|
||||
try:
|
||||
self.ironicclient.call('node.update', node.uuid, patch)
|
||||
except ironic_exc.BadRequest as e:
|
||||
LOG.warning("Failed to remove deploy parameters from node "
|
||||
"%(node)s when unprovisioning the instance "
|
||||
"%(instance)s: %(reason)s",
|
||||
{'node': node.uuid, 'instance': instance.uuid,
|
||||
"%(node)s when unprovisioning the server "
|
||||
"%(server)s: %(reason)s",
|
||||
{'node': node.uuid, 'server': server.uuid,
|
||||
'reason': six.text_type(e)})
|
||||
|
||||
def _wait_for_active(self, instance):
|
||||
def _wait_for_active(self, server):
|
||||
"""Wait for the node to be marked as ACTIVE in Ironic."""
|
||||
instance.refresh()
|
||||
if instance.status in (states.DELETING, states.ERROR, states.DELETED):
|
||||
raise exception.InstanceDeployAborted(
|
||||
_("Instance %s provisioning was aborted") % instance.uuid)
|
||||
server.refresh()
|
||||
if server.status in (states.DELETING, states.ERROR, states.DELETED):
|
||||
raise exception.ServerDeployAborted(
|
||||
_("Server %s provisioning was aborted") % server.uuid)
|
||||
|
||||
node = self._validate_instance_and_node(instance)
|
||||
node = self._validate_server_and_node(server)
|
||||
if node.provision_state == ironic_states.ACTIVE:
|
||||
# job is done
|
||||
LOG.debug("Ironic node %(node)s is now ACTIVE",
|
||||
dict(node=node.uuid), instance=instance)
|
||||
dict(node=node.uuid), server=server)
|
||||
raise loopingcall.LoopingCallDone()
|
||||
|
||||
if node.target_provision_state in (ironic_states.DELETED,
|
||||
ironic_states.AVAILABLE):
# ironic is trying to delete it now
raise exception.InstanceNotFound(instance=instance.uuid)
raise exception.ServerNotFound(server=server.uuid)

if node.provision_state in (ironic_states.NOSTATE,
ironic_states.AVAILABLE):
# ironic already deleted it
raise exception.InstanceNotFound(instance=instance.uuid)
raise exception.ServerNotFound(server=server.uuid)

if node.provision_state == ironic_states.DEPLOYFAIL:
# ironic failed to deploy
msg = (_("Failed to provision instance %(inst)s: %(reason)s")
% {'inst': instance.uuid, 'reason': node.last_error})
raise exception.InstanceDeployFailure(msg)
msg = (_("Failed to provision server %(server)s: %(reason)s")
% {'server': server.uuid, 'reason': node.last_error})
raise exception.ServerDeployFailure(msg)

_log_ironic_polling('become ACTIVE', node, instance)
_log_ironic_polling('become ACTIVE', node, server)

def _wait_for_power_state(self, instance, message):
def _wait_for_power_state(self, server, message):
"""Wait for the node to complete a power state change."""
node = self._validate_instance_and_node(instance)
node = self._validate_server_and_node(server)

if node.target_power_state == ironic_states.NOSTATE:
raise loopingcall.LoopingCallDone()

_log_ironic_polling(message, node, instance)
_log_ironic_polling(message, node, server)

def _get_hypervisor_type(self):
"""Get hypervisor type."""
@ -266,14 +266,14 @@ class IronicDriver(base_driver.BaseEngineDriver):
'value': port_id}]
self.ironicclient.call("port.update", ironic_port_id, patch)

def unplug_vifs(self, context, instance):
LOG.debug("unplug: instance_uuid=%(uuid)s vif=%(instance_nics)s",
{'uuid': instance.uuid,
'instance_nics': str(instance.nics)})
def unplug_vifs(self, context, server):
LOG.debug("unplug: server_uuid=%(uuid)s vif=%(server_nics)s",
{'uuid': server.uuid,
'server_nics': str(server.nics)})
patch = [{'op': 'remove',
'path': '/extra/vif_port_id'}]

ports = self.get_ports_from_node(instance.node_uuid)
ports = self.get_ports_from_node(server.node_uuid)

for port in ports:
try:
@ -283,42 +283,42 @@ class IronicDriver(base_driver.BaseEngineDriver):
except client_e.BadRequest:
pass

def _cleanup_deploy(self, context, node, instance):
def _cleanup_deploy(self, context, node, server):
# NOTE(liusheng): here we may need to stop firewall if we have
# implemented in ironic like what Nova dose.
self.unplug_vifs(context, instance)
self.unplug_vifs(context, server)

def spawn(self, context, instance, configdrive_value):
"""Deploy an instance.
def spawn(self, context, server, configdrive_value):
"""Deploy a server.

:param context: The security context.
:param instance: The instance object.
:param server: The server object.
:param configdrive_value: The configdrive value to be injected.
"""
LOG.debug('Spawn called for instance', instance=instance)
LOG.debug('Spawn called for server', server=server)

# The engine manager is meant to know the node uuid, so missing uuid
# is a significant issue. It may mean we've been passed the wrong data.
node_uuid = instance.node_uuid
node_uuid = server.node_uuid
if not node_uuid:
raise ironic_exc.BadRequest(
_("Ironic node uuid not supplied to "
"driver for instance %s.") % instance.uuid)
"driver for server %s.") % server.uuid)

# add instance info to node
# add server info to node
node = self._get_node(node_uuid)
self._add_instance_info_to_node(node, instance)
self._add_server_info_to_node(node, server)

# validate we are ready to do the deploy
validate_chk = self.ironicclient.call("node.validate", node_uuid)
if (not validate_chk.deploy.get('result')
or not validate_chk.power.get('result')):
# something is wrong. undo what we have done
self._cleanup_deploy(context, node, instance)
self._cleanup_deploy(context, node, server)
raise exception.ValidationError(_(
"Ironic node: %(id)s failed to validate."
" (deploy: %(deploy)s, power: %(power)s)")
% {'id': instance.node_uuid,
% {'id': server.node_uuid,
'deploy': validate_chk.deploy,
'power': validate_chk.power})

@ -329,26 +329,26 @@ class IronicDriver(base_driver.BaseEngineDriver):
configdrive=configdrive_value)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = ("Failed to request Ironic to provision instance "
"%(inst)s: %(reason)s",
{'inst': instance.uuid,
msg = ("Failed to request Ironic to provision server "
"%(server)s: %(reason)s",
{'server': server.uuid,
'reason': six.text_type(e)})
LOG.error(msg)

timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance)
server)
try:
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info('Successfully provisioned Ironic node %s',
node.uuid, instance=instance)
node.uuid, server=server)
except Exception:
with excutils.save_and_reraise_exception():
LOG.error("Error deploying instance %(instance)s on "
LOG.error("Error deploying server %(server)s on "
"baremetal node %(node)s.",
{'instance': instance.uuid,
{'server': server.uuid,
'node': node_uuid})

def _unprovision(self, instance, node):
def _unprovision(self, server, node):
"""This method is called from destroy() to unprovision
already provisioned node after required checks.
"""
@ -361,7 +361,7 @@ class IronicDriver(base_driver.BaseEngineDriver):
# TODO(deva): This exception should be added to
# python-ironicclient and matched directly,
# rather than via __name__.
if getattr(e, '__name__', None) != 'InstanceDeployFailure':
if getattr(e, '__name__', None) != 'ServerDeployFailure':
raise

# using a dict because this is modified in the local method
@ -369,10 +369,10 @@ class IronicDriver(base_driver.BaseEngineDriver):

def _wait_for_provision_state():
try:
node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.debug("Instance already removed from Ironic",
instance=instance)
node = self._validate_server_and_node(server)
except exception.ServerNotFound:
LOG.debug("Server already removed from Ironic",
server=server)
raise loopingcall.LoopingCallDone()
if node.provision_state in (ironic_states.NOSTATE,
ironic_states.CLEANING,
@ -381,15 +381,15 @@ class IronicDriver(base_driver.BaseEngineDriver):
ironic_states.AVAILABLE):
# From a user standpoint, the node is unprovisioned. If a node
# gets into CLEANFAIL state, it must be fixed in Ironic, but we
# can consider the instance unprovisioned.
# can consider the server unprovisioned.
LOG.debug("Ironic node %(node)s is in state %(state)s, "
"instance is now unprovisioned.",
"server is now unprovisioned.",
dict(node=node.uuid, state=node.provision_state),
instance=instance)
server=server)
raise loopingcall.LoopingCallDone()

if data['tries'] >= CONF.ironic.api_max_retries + 1:
msg = (_("Error destroying the instance on node %(node)s. "
msg = (_("Error destroying the server on node %(node)s. "
"Provision state still '%(state)s'.")
% {'state': node.provision_state,
'node': node.uuid})
@ -398,36 +398,36 @@ class IronicDriver(base_driver.BaseEngineDriver):
else:
data['tries'] += 1

_log_ironic_polling('unprovision', node, instance)
_log_ironic_polling('unprovision', node, server)

# wait for the state transition to finish
timer = loopingcall.FixedIntervalLoopingCall(_wait_for_provision_state)
timer.start(interval=CONF.ironic.api_retry_interval).wait()

def destroy(self, context, instance):
"""Destroy the specified instance, if it can be found.
def destroy(self, context, server):
"""Destroy the specified server, if it can be found.

:param context: The security context.
:param instance: The instance object.
:param server: The server object.
"""
LOG.debug('Destroy called for instance', instance=instance)
LOG.debug('Destroy called for server', server=server)
try:
node = self._validate_instance_and_node(instance)
except exception.InstanceNotFound:
LOG.warning("Destroy called on non-existing instance %s.",
instance.uuid)
node = self._validate_server_and_node(server)
except exception.ServerNotFound:
LOG.warning("Destroy called on non-existing server %s.",
server.uuid)
return

if node.provision_state in _UNPROVISION_STATES:
self._unprovision(instance, node)
self._unprovision(server, node)
else:
# NOTE(hshiina): if spawn() fails before ironic starts
# provisioning, instance information should be
# provisioning, server information should be
# removed from ironic node.
self._remove_instance_info_from_node(node, instance)
self._remove_server_info_from_node(node, server)

LOG.info('Successfully unprovisioned Ironic node %s',
node.uuid, instance=instance)
node.uuid, server=server)

def get_available_resources(self):
"""Helper function to return the list of resources.
@ -518,22 +518,22 @@ class IronicDriver(base_driver.BaseEngineDriver):
node_list = []
return node_list

def get_power_state(self, context, instance_uuid):
def get_power_state(self, context, server_uuid):
try:
node = self.ironicclient.call('node.get_by_instance_uuid',
instance_uuid,
server_uuid,
fields=('power_state',))
return map_power_state(node.power_state)
except client_e.NotFound:
return map_power_state(ironic_states.NOSTATE)

def set_power_state(self, context, instance, state):
"""Set power state on the specified instance.
def set_power_state(self, context, server, state):
"""Set power state on the specified server.

:param context: The security context.
:param instance: The instance object.
:param server: The server object.
"""
node = self._validate_instance_and_node(instance)
node = self._validate_server_and_node(server)
if state == "soft_off":
self.ironicclient.call("node.set_power_state",
node.uuid, "off", soft=True)
@ -544,38 +544,38 @@ class IronicDriver(base_driver.BaseEngineDriver):
self.ironicclient.call("node.set_power_state",
node.uuid, state)
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_power_state, instance, state)
self._wait_for_power_state, server, state)
timer.start(interval=CONF.ironic.api_retry_interval).wait()

def rebuild(self, context, instance):
"""Rebuild/redeploy an instance.
def rebuild(self, context, server):
"""Rebuild/redeploy a server.

:param context: The security context.
:param instance: The instance object.
:param server: The server object.
"""
LOG.debug('Rebuild called for instance', instance=instance)
LOG.debug('Rebuild called for server', server=server)

# trigger the node rebuild
try:
self.ironicclient.call("node.set_provision_state",
instance.node_uuid,
server.node_uuid,
ironic_states.REBUILD)
except (ironic_exc.InternalServerError,
ironic_exc.BadRequest) as e:
msg = (_("Failed to request Ironic to rebuild instance "
"%(inst)s: %(reason)s") % {'inst': instance.uuid,
'reason': six.text_type(e)})
raise exception.InstanceDeployFailure(msg)
msg = (_("Failed to request Ironic to rebuild server "
"%(server)s: %(reason)s") % {'server': server.uuid,
'reason': six.text_type(e)})
raise exception.ServerDeployFailure(msg)

# Although the target provision state is REBUILD, it will actually go
# to ACTIVE once the redeploy is finished.
timer = loopingcall.FixedIntervalLoopingCall(self._wait_for_active,
instance)
server)
timer.start(interval=CONF.ironic.api_retry_interval).wait()
LOG.info('Instance was successfully rebuilt', instance=instance)
LOG.info('Server was successfully rebuilt', server=server)

def get_serial_console_by_instance(self, context, instance):
node = self._validate_instance_and_node(instance)
def get_serial_console_by_server(self, context, server):
node = self._validate_server_and_node(server)
node_uuid = node.uuid

def _get_console():
@ -585,8 +585,8 @@ class IronicDriver(base_driver.BaseEngineDriver):
except (ironic_exc.InternalServerError,
ironic_exc.BadRequest) as e:
LOG.error('Failed to acquire console information for '
'node %(inst)s: %(reason)s',
{'inst': node_uuid,
'node %(server)s: %(reason)s',
{'server': node_uuid,
'reason': e})
raise exception.ConsoleNotAvailable()

@ -596,7 +596,7 @@ class IronicDriver(base_driver.BaseEngineDriver):
if console['console_enabled'] == state:
raise loopingcall.LoopingCallDone(retvalue=console)

_log_ironic_polling('set console mode', node, instance)
_log_ironic_polling('set console mode', node, server)

# Return False to start backing off
return False
@ -58,7 +58,7 @@ This state is replacing the NOSTATE state used prior to Kilo.
"""

ACTIVE = 'active'
""" Node is successfully deployed and associated with an instance. """
""" Node is successfully deployed and associated with a server. """

DEPLOYWAIT = 'wait call-back'
""" Node is waiting to be deployed.
@ -54,7 +54,7 @@ class BaseEngineManager(periodic_task.PeriodicTasks):
raise RuntimeError(_('Attempt to start an already running '
'engine manager'))

self.dbapi = dbapi.get_instance()
self.dbapi = dbapi.get_server()

self._worker_pool = greenpool.GreenPool(
size=CONF.engine.workers_pool_size)
@ -35,12 +35,12 @@ CONFIGDRIVESIZE_BYTES = 64 * units.Mi
class ConfigDriveBuilder(object):
"""Build config drives, optionally as a context manager."""

def __init__(self, instance_md=None):
def __init__(self, server_md=None):
self.imagefile = None
self.mdfiles = []

if instance_md is not None:
self.add_instance_metadata(instance_md)
if server_md is not None:
self.add_server_metadata(server_md)

def __enter__(self):
return self
@ -64,8 +64,8 @@ class ConfigDriveBuilder(object):
data = data.encode('utf-8')
f.write(data)

def add_instance_metadata(self, instance_md):
for (path, data) in instance_md.metadata_for_config_drive():
def add_server_metadata(self, server_md):
for (path, data) in server_md.metadata_for_config_drive():
self.mdfiles.append((path, data))

def _write_md_files(self, basedir):
@ -32,36 +32,36 @@ from mogan.common import flow_utils
from mogan.common.i18n import _
from mogan.common import utils
from mogan.engine import configdrive
from mogan.engine import metadata as instance_metadata
from mogan.engine import metadata as server_metadata
from mogan import objects


LOG = logging.getLogger(__name__)

ACTION = 'instance:create'
ACTION = 'server:create'
CONF = cfg.CONF


class OnFailureRescheduleTask(flow_utils.MoganTask):
"""Triggers a rescheduling request to be sent when reverting occurs.

If rescheduling doesn't occur this task errors out the instance.
If rescheduling doesn't occur this task errors out the server.
"""

def __init__(self, engine_rpcapi):
requires = ['filter_properties', 'request_spec', 'instance',
requires = ['filter_properties', 'request_spec', 'server',
'requested_networks', 'user_data', 'injected_files',
'key_pair', 'context']
super(OnFailureRescheduleTask, self).__init__(addons=[ACTION],
requires=requires)
self.engine_rpcapi = engine_rpcapi
# These exception types will trigger the instance to be set into error
# These exception types will trigger the server to be set into error
# status rather than being rescheduled.
self.no_reschedule_exc_types = [
# The instance has been removed from the database, that can not
# The server has been removed from the database, that can not
# be fixed by rescheduling.
exception.InstanceNotFound,
exception.InstanceDeployAborted,
exception.ServerNotFound,
exception.ServerDeployAborted,
exception.NetworkError,
]

@ -69,11 +69,11 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):
pass

def _reschedule(self, context, cause, request_spec, filter_properties,
instance, requested_networks, user_data, injected_files,
server, requested_networks, user_data, injected_files,
key_pair):
"""Actions that happen during the rescheduling attempt occur here."""

create_instance = self.engine_rpcapi.create_instance
create_server = self.engine_rpcapi.create_server
if not filter_properties:
filter_properties = {}
if 'retry' not in filter_properties:
@ -82,10 +82,10 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):
retry_info = filter_properties['retry']
num_attempts = retry_info.get('num_attempts', 0)

LOG.debug("Instance %(instance_id)s: re-scheduling %(method)s "
LOG.debug("Server %(server_id)s: re-scheduling %(method)s "
"attempt %(num)d due to %(reason)s",
{'instance_id': instance.uuid,
'method': utils.make_pretty_name(create_instance),
{'server_id': server.uuid,
'method': utils.make_pretty_name(create_server),
'num': num_attempts,
'reason': cause.exception_str})

@ -93,55 +93,55 @@ class OnFailureRescheduleTask(flow_utils.MoganTask):
# Stringify to avoid circular ref problem in json serialization
retry_info['exc'] = traceback.format_exception(*cause.exc_info)

return create_instance(context, instance, requested_networks,
user_data=user_data,
injected_files=injected_files,
key_pair=key_pair,
request_spec=request_spec,
filter_properties=filter_properties)
return create_server(context, server, requested_networks,
user_data=user_data,
injected_files=injected_files,
key_pair=key_pair,
request_spec=request_spec,
filter_properties=filter_properties)

def revert(self, context, result, flow_failures, instance, **kwargs):
# Cleanup associated instance node uuid
if instance.node_uuid:
def revert(self, context, result, flow_failures, server, **kwargs):
# Cleanup associated server node uuid
if server.node_uuid:
# If the compute node is still in DB, release it.
try:
cn = objects.ComputeNode.get(context, instance.node_uuid)
cn = objects.ComputeNode.get(context, server.node_uuid)
except exception.ComputeNodeNotFound:
pass
else:
cn.destroy()
instance.node_uuid = None
instance.save()
server.node_uuid = None
server.save()

# Check if we have a cause which can tell us not to reschedule and
# set the instance's status to error.
# set the server's status to error.
for failure in flow_failures.values():
if failure.check(*self.no_reschedule_exc_types):
LOG.error("Instance %s: create failed and no reschedule.",
instance.uuid)
LOG.error("Server %s: create failed and no reschedule.",
server.uuid)
return False

cause = list(flow_failures.values())[0]
try:
self._reschedule(context, cause, instance=instance, **kwargs)
self._reschedule(context, cause, server=server, **kwargs)
return True
except exception.MoganException:
LOG.exception("Instance %s: rescheduling failed",
instance.uuid)
LOG.exception("Server %s: rescheduling failed",
server.uuid)

return False


class BuildNetworkTask(flow_utils.MoganTask):
"""Build network for the instance."""
"""Build network for the server."""

def __init__(self, manager):
requires = ['instance', 'requested_networks', 'ports', 'context']
requires = ['server', 'requested_networks', 'ports', 'context']
super(BuildNetworkTask, self).__init__(addons=[ACTION],
requires=requires)
self.manager = manager

def _build_networks(self, context, instance, requested_networks, ports):
def _build_networks(self, context, server, requested_networks, ports):

# TODO(zhenguo): This seems not needed as our scheduler has already
# guaranteed this.
@ -150,18 +150,18 @@ class BuildNetworkTask(flow_utils.MoganTask):
"Ironic node: %(id)s virtual to physical interface count"
" mismatch"
" (Vif count: %(vif_count)d, Pif count: %(pif_count)d)")
% {'id': instance.node_uuid,
% {'id': server.node_uuid,
'vif_count': len(requested_networks),
'pif_count': len(ports)})

nics_obj = objects.InstanceNics(context)
nics_obj = objects.ServerNics(context)
for vif in requested_networks:
for pif in ports:
# Match the specified port type with physical interface type
if vif.get('port_type', 'None') == pif.port_type:
try:
port = self.manager.network_api.create_port(
context, vif['net_id'], pif.address, instance.uuid)
context, vif['net_id'], pif.address, server.uuid)
port_dict = port['port']

self.manager.driver.plug_vif(pif.port_uuid,
@ -171,61 +171,61 @@ class BuildNetworkTask(flow_utils.MoganTask):
'mac_address': port_dict['mac_address'],
'fixed_ips': port_dict['fixed_ips'],
'port_type': vif.get('port_type'),
'instance_uuid': instance.uuid}
nics_obj.objects.append(objects.InstanceNic(
'server_uuid': server.uuid}
nics_obj.objects.append(objects.ServerNic(
context, **nic_dict))

except Exception:
# Set nics here, so we can clean up the
# created networks during reverting.
instance.nics = nics_obj
LOG.error("Instance %s: create network failed",
instance.uuid)
server.nics = nics_obj
LOG.error("Server %s: create network failed",
server.uuid)
raise exception.NetworkError(_(
"Build network for instance failed."))
"Build network for server failed."))
return nics_obj

def execute(self, context, instance, requested_networks, ports):
instance_nics = self._build_networks(
def execute(self, context, server, requested_networks, ports):
server_nics = self._build_networks(
context,
instance,
server,
requested_networks,
ports)

instance.nics = instance_nics
instance.save()
server.nics = server_nics
server.save()

def revert(self, context, result, flow_failures, instance, **kwargs):
def revert(self, context, result, flow_failures, server, **kwargs):
# Check if we need to clean up networks.
if instance.nics:
LOG.debug("Instance %s: cleaning up node networks",
instance.uuid)
self.manager.destroy_networks(context, instance)
if server.nics:
LOG.debug("Server %s: cleaning up node networks",
server.uuid)
self.manager.destroy_networks(context, server)
# Unset nics here as we have destroyed it.
instance.nics = None
server.nics = None
return True

return False


class GenerateConfigDriveTask(flow_utils.MoganTask):
"""Generate ConfigDrive value the instance."""
"""Generate ConfigDrive value the server."""

def __init__(self):
requires = ['instance', 'user_data', 'injected_files', 'key_pair',
requires = ['server', 'user_data', 'injected_files', 'key_pair',
'configdrive', 'context']
super(GenerateConfigDriveTask, self).__init__(addons=[ACTION],
requires=requires)

def _generate_configdrive(self, context, instance, user_data=None,
def _generate_configdrive(self, context, server, user_data=None,
files=None, key_pair=None):
"""Generate a config drive."""

i_meta = instance_metadata.InstanceMetadata(
instance, content=files, user_data=user_data, key_pair=key_pair)
i_meta = server_metadata.ServerMetadata(
server, content=files, user_data=user_data, key_pair=key_pair)

with tempfile.NamedTemporaryFile() as uncompressed:
with configdrive.ConfigDriveBuilder(instance_md=i_meta) as cdb:
with configdrive.ConfigDriveBuilder(server_md=i_meta) as cdb:
cdb.make_drive(uncompressed.name)

with tempfile.NamedTemporaryFile() as compressed:
@ -238,55 +238,55 @@ class GenerateConfigDriveTask(flow_utils.MoganTask):
compressed.seek(0)
return base64.b64encode(compressed.read())

def execute(self, context, instance, user_data, injected_files, key_pair,
def execute(self, context, server, user_data, injected_files, key_pair,
configdrive):

try:
configdrive['value'] = self._generate_configdrive(
context, instance, user_data=user_data, files=injected_files,
context, server, user_data=user_data, files=injected_files,
key_pair=key_pair)
except Exception as e:
with excutils.save_and_reraise_exception():
msg = ("Failed to build configdrive: %s" %
six.text_type(e))
LOG.error(msg, instance=instance)
LOG.error(msg, server=server)

LOG.info("Config drive for instance %(instance)s created.",
{'instance': instance.uuid})
LOG.info("Config drive for server %(server)s created.",
{'server': server.uuid})


class CreateInstanceTask(flow_utils.MoganTask):
"""Build and deploy the instance."""
class CreateServerTask(flow_utils.MoganTask):
"""Build and deploy the server."""

def __init__(self, driver):
requires = ['instance', 'configdrive', 'context']
super(CreateInstanceTask, self).__init__(addons=[ACTION],
requires=requires)
requires = ['server', 'configdrive', 'context']
super(CreateServerTask, self).__init__(addons=[ACTION],
requires=requires)
self.driver = driver
# These exception types will trigger the instance to be cleaned.
self.instance_cleaned_exc_types = [
exception.InstanceDeployFailure,
# These exception types will trigger the server to be cleaned.
self.server_cleaned_exc_types = [
exception.ServerDeployFailure,
loopingcall.LoopingCallTimeOut,
]

def execute(self, context, instance, configdrive):
def execute(self, context, server, configdrive):
configdrive_value = configdrive.get('value')
self.driver.spawn(context, instance, configdrive_value)
self.driver.spawn(context, server, configdrive_value)
LOG.info('Successfully provisioned Ironic node %s',
instance.node_uuid)
server.node_uuid)

def revert(self, context, result, flow_failures, instance, **kwargs):
# Check if we have a cause which need to clean up instance.
def revert(self, context, result, flow_failures, server, **kwargs):
# Check if we have a cause which need to clean up server.
for failure in flow_failures.values():
if failure.check(*self.instance_cleaned_exc_types):
LOG.debug("Instance %s: destroy ironic node", instance.uuid)
self.driver.destroy(context, instance)
if failure.check(*self.server_cleaned_exc_types):
LOG.debug("Server %s: destroy ironic node", server.uuid)
self.driver.destroy(context, server)
return True

return False


def get_flow(context, manager, instance, requested_networks, user_data,
def get_flow(context, manager, server, requested_networks, user_data,
injected_files, key_pair, ports, request_spec,
filter_properties):

@ -294,14 +294,14 @@ def get_flow(context, manager, instance, requested_networks, user_data,

This flow will do the following:

1. Build networks for the instance and set port id back to baremetal port
2. Generate configdrive value for instance.
1. Build networks for the server and set port id back to baremetal port
2. Generate configdrive value for server.
3. Do node deploy and handle errors.
4. Reschedule if the tasks are on failure.
"""

flow_name = ACTION.replace(":", "_") + "_manager"
instance_flow = linear_flow.Flow(flow_name)
server_flow = linear_flow.Flow(flow_name)

# This injects the initial starting flow values into the workflow so that
# the dependency order of the tasks provides/requires can be correctly
@ -310,7 +310,7 @@ def get_flow(context, manager, instance, requested_networks, user_data,
'context': context,
'filter_properties': filter_properties,
'request_spec': request_spec,
'instance': instance,
'server': server,
'requested_networks': requested_networks,
'user_data': user_data,
'injected_files': injected_files,
@ -319,10 +319,10 @@ def get_flow(context, manager, instance, requested_networks, user_data,
'configdrive': {}
}

instance_flow.add(OnFailureRescheduleTask(manager.engine_rpcapi),
BuildNetworkTask(manager),
GenerateConfigDriveTask(),
CreateInstanceTask(manager.driver))
server_flow.add(OnFailureRescheduleTask(manager.engine_rpcapi),
BuildNetworkTask(manager),
GenerateConfigDriveTask(),
CreateServerTask(manager.driver))

# Now load (but do not run) the flow using the provided initial data.
return taskflow.engines.load(instance_flow, store=create_what)
return taskflow.engines.load(server_flow, store=create_what)
@ -31,7 +31,7 @@ from mogan.common import states
from mogan.common import utils
from mogan.conf import CONF
from mogan.engine import base_manager
from mogan.engine.flows import create_instance
from mogan.engine.flows import create_server
from mogan.notifications import base as notifications
from mogan import objects
from mogan.objects import fields
@ -40,27 +40,27 @@ from mogan.objects import quota
LOG = log.getLogger(__name__)


@utils.expects_func_args('instance')
def wrap_instance_fault(function):
"""Wraps a method to catch exceptions related to instances.
@utils.expects_func_args('server')
def wrap_server_fault(function):
"""Wraps a method to catch exceptions related to servers.

This decorator wraps a method to catch any exceptions having to do with
an instance that may get thrown. It then logs an instance fault in the db.
a server that may get thrown. It then logs a server fault in the db.
"""

@functools.wraps(function)
def decorated_function(self, context, *args, **kwargs):
try:
return function(self, context, *args, **kwargs)
except exception.InstanceNotFound:
except exception.ServerNotFound:
raise
except Exception as e:
kwargs.update(dict(zip(function.__code__.co_varnames[2:], args)))

with excutils.save_and_reraise_exception():
utils.add_instance_fault_from_exc(context,
kwargs['instance'],
e, sys.exc_info())
utils.add_server_fault_from_exc(context,
kwargs['server'],
e, sys.exc_info())

return decorated_function

@ -75,7 +75,7 @@ class EngineManager(base_manager.BaseEngineManager):
def __init__(self, *args, **kwargs):
super(EngineManager, self).__init__(*args, **kwargs)
self.quota = quota.Quota()
self.quota.register_resource(objects.quota.InstanceResource())
self.quota.register_resource(objects.quota.ServerResource())

def _get_compute_port(self, context, port_uuid):
"""Gets compute port by the uuid."""
@ -189,93 +189,93 @@ class EngineManager(base_manager.BaseEngineManager):
if node.target_power_state is None}

if not node_dict:
LOG.warning("While synchronizing instance power states, "
"found none instance with stable power state "
LOG.warning("While synchronizing server power states, "
"found none server with stable power state "
"on the hypervisor.")
return

def _sync(db_instance, node_power_state):
def _sync(db_server, node_power_state):
# This must be synchronized as we query state from two separate
# sources, the driver (ironic) and the database. They are set
# (in stop_instance) and read, in sync.
@utils.synchronized(db_instance.uuid)
def sync_instance_power_state():
self._sync_instance_power_state(context, db_instance,
node_power_state)
# (in stop_server) and read, in sync.
@utils.synchronized(db_server.uuid)
def sync_server_power_state():
self._sync_server_power_state(context, db_server,
node_power_state)

try:
sync_instance_power_state()
sync_server_power_state()
except Exception:
LOG.exception("Periodic sync_power_state task had an "
"error while processing an instance.",
instance=db_instance)
"error while processing a server.",
server=db_server)

self._syncs_in_progress.pop(db_instance.uuid)
self._syncs_in_progress.pop(db_server.uuid)

db_instances = objects.Instance.list(context)
for db_instance in db_instances:
# process syncs asynchronously - don't want instance locking to
db_servers = objects.Server.list(context)
for db_server in db_servers:
# process syncs asynchronously - don't want server locking to
# block entire periodic task thread
uuid = db_instance.uuid
uuid = db_server.uuid
if uuid in self._syncs_in_progress:
LOG.debug('Sync power state already in progress for %s', uuid)
continue

if db_instance.status not in (states.ACTIVE, states.STOPPED):
if db_instance.status in states.UNSTABLE_STATES:
LOG.info("During sync_power_state the instance has a "
if db_server.status not in (states.ACTIVE, states.STOPPED):
if db_server.status in states.UNSTABLE_STATES:
LOG.info("During sync_power_state the server has a "
"pending task (%(task)s). Skip.",
{'task': db_instance.status},
instance=db_instance)
{'task': db_server.status},
server=db_server)
continue

if uuid not in node_dict:
continue

node_power_state = node_dict[uuid].power_state
if db_instance.power_state != node_power_state:
if db_server.power_state != node_power_state:
LOG.debug('Triggering sync for uuid %s', uuid)
self._syncs_in_progress[uuid] = True
self._sync_power_pool.spawn_n(_sync, db_instance,
self._sync_power_pool.spawn_n(_sync, db_server,
node_power_state)

def _sync_instance_power_state(self, context, db_instance,
node_power_state):
"""Align instance power state between the database and hypervisor.
def _sync_server_power_state(self, context, db_server,
node_power_state):
"""Align server power state between the database and hypervisor.

If the instance is not found on the hypervisor, but is in the database,
then a stop() API will be called on the instance.
If the server is not found on the hypervisor, but is in the database,
then a stop() API will be called on the server.
"""

# We re-query the DB to get the latest instance info to minimize
# We re-query the DB to get the latest server info to minimize
# (not eliminate) race condition.
db_instance.refresh()
db_power_state = db_instance.power_state
db_server.refresh()
db_power_state = db_server.power_state

if db_instance.status not in (states.ACTIVE, states.STOPPED):
if db_server.status not in (states.ACTIVE, states.STOPPED):
# on the receiving end of mogan-engine, it could happen
# that the DB instance already report the new resident
# that the DB server already report the new resident
# but the actual BM has not showed up on the hypervisor
# yet. In this case, let's allow the loop to continue
# and run the state sync in a later round
LOG.info("During sync_power_state the instance has a "
LOG.info("During sync_power_state the server has a "
"pending task (%(task)s). Skip.",
{'task': db_instance.task_state},
instance=db_instance)
{'task': db_server.task_state},
server=db_server)
return

if node_power_state != db_power_state:
LOG.info('During _sync_instance_power_state the DB '
LOG.info('During _sync_server_power_state the DB '
'power_state (%(db_power_state)s) does not match '
'the node_power_state from the hypervisor '
'(%(node_power_state)s). Updating power_state in the '
'DB to match the hypervisor.',
{'db_power_state': db_power_state,
'node_power_state': node_power_state},
instance=db_instance)
server=db_server)
# power_state is always updated from hypervisor to db
db_instance.power_state = node_power_state
db_instance.save()
db_server.power_state = node_power_state
db_server.save()

@periodic_task.periodic_task(spacing=CONF.engine.sync_maintenance_interval,
run_immediately=True)
@ -291,26 +291,26 @@ class EngineManager(base_manager.BaseEngineManager):
# Just retrun if we fail to get nodes maintenance state.
return

node_dict = {node.instance_uuid: node for node in nodes}
node_dict = {node.server_uuid: node for node in nodes}

if not node_dict:
LOG.warning("While synchronizing instance maintenance states, "
"found none node with instance associated on the "
LOG.warning("While synchronizing server maintenance states, "
"found none node with server associated on the "
"hypervisor.")
return

db_instances = objects.Instance.list(context)
for instance in db_instances:
uuid = instance.uuid
db_servers = objects.Server.list(context)
for server in db_servers:
uuid = server.uuid

# If instance in unstable states and the node goes to maintenance,
# If server in unstable states and the node goes to maintenance,
# just skip the syncing process as the pending task should be goes
# to error state instead.
if instance.status in states.UNSTABLE_STATES:
LOG.info("During sync_maintenance_state the instance "
if server.status in states.UNSTABLE_STATES:
LOG.info("During sync_maintenance_state the server "
"has a pending task (%(task)s). Skip.",
{'task': instance.status},
instance=instance)
{'task': server.status},
server=server)
continue

if uuid not in node_dict:
@ -318,40 +318,40 @@ class EngineManager(base_manager.BaseEngineManager):

node_maintenance = node_dict[uuid].maintenance

if instance.status == states.MAINTENANCE and not node_maintenance:
if server.status == states.MAINTENANCE and not node_maintenance:
# TODO(zhenguo): need to check whether we need states machine
# transition here, and currently we just move to ACTIVE state
# regardless of it's real power state which may need sync power
# state periodic task to correct it.
instance.status = states.ACTIVE
instance.save()
elif node_maintenance and instance.status != states.MAINTENANCE:
instance.status = states.MAINTENANCE
instance.save()
server.status = states.ACTIVE
server.save()
elif node_maintenance and server.status != states.MAINTENANCE:
server.status = states.MAINTENANCE
server.save()

def destroy_networks(self, context, instance):
ports = instance.nics.get_port_ids()
def destroy_networks(self, context, server):
ports = server.nics.get_port_ids()
for port in ports:
self.network_api.delete_port(context, port, instance.uuid)
self.network_api.delete_port(context, port, server.uuid)

def _rollback_instances_quota(self, context, number):
reserve_opts = {'instances': number}
def _rollback_servers_quota(self, context, number):
reserve_opts = {'servers': number}
reservations = self.quota.reserve(context, **reserve_opts)
if reservations:
self.quota.commit(context, reservations)

@wrap_instance_fault
def create_instance(self, context, instance, requested_networks,
user_data, injected_files, key_pair, request_spec=None,
filter_properties=None):
@wrap_server_fault
def create_server(self, context, server, requested_networks,
user_data, injected_files, key_pair, request_spec=None,
filter_properties=None):
"""Perform a deployment."""
LOG.debug("Starting instance...", instance=instance)
notifications.notify_about_instance_action(
context, instance, self.host,
LOG.debug("Starting server...", server=server)
notifications.notify_about_server_action(
context, server, self.host,
action=fields.NotificationAction.CREATE,
phase=fields.NotificationPhase.START)

fsm = utils.get_state_machine(start_state=instance.status,
fsm = utils.get_state_machine(start_state=server.status,
target_state=states.ACTIVE)

if filter_properties is None:
@ -372,24 +372,24 @@ class EngineManager(base_manager.BaseEngineManager):
try:
node = self.scheduler_rpcapi.select_destinations(
context, request_spec, filter_properties)
instance.node_uuid = node['node_uuid']
instance.save()
server.node_uuid = node['node_uuid']
server.save()
# Add a retry entry for the selected node
nodes = retry['nodes']
nodes.append(node['node_uuid'])
except Exception as e:
with excutils.save_and_reraise_exception():
utils.process_event(fsm, instance, event='error')
LOG.error("Created instance %(uuid)s failed. "
utils.process_event(fsm, server, event='error')
LOG.error("Created server %(uuid)s failed. "
"Exception: %(exception)s",
{"uuid": instance.uuid,
{"uuid": server.uuid,
"exception": e})

try:
flow_engine = create_instance.get_flow(
flow_engine = create_server.get_flow(
context,
self,
instance,
server,
requested_networks,
user_data,
injected_files,
@ -400,13 +400,13 @@ class EngineManager(base_manager.BaseEngineManager):
)
except Exception:
with excutils.save_and_reraise_exception():
utils.process_event(fsm, instance, event='error')
self._rollback_instances_quota(context, -1)
msg = _("Create manager instance flow failed.")
utils.process_event(fsm, server, event='error')
self._rollback_servers_quota(context, -1)
msg = _("Create manager server flow failed.")
LOG.exception(msg)

def _run_flow():
# This code executes create instance flow. If something goes wrong,
# This code executes create server flow. If something goes wrong,
# flow reverts all job that was done and reraises an exception.
# Otherwise, all data that was generated by flow becomes available
# in flow engine's storage.
@ -417,130 +417,130 @@ class EngineManager(base_manager.BaseEngineManager):
_run_flow()
except Exception as e:
with excutils.save_and_reraise_exception():
instance.power_state = states.NOSTATE
utils.process_event(fsm, instance, event='error')
self._rollback_instances_quota(context, -1)
LOG.error("Created instance %(uuid)s failed."
server.power_state = states.NOSTATE
utils.process_event(fsm, server, event='error')
self._rollback_servers_quota(context, -1)
LOG.error("Created server %(uuid)s failed."
"Exception: %(exception)s",
{"uuid": instance.uuid,
{"uuid": server.uuid,
"exception": e})
else:
# Advance the state model for the given event. Note that this
# doesn't alter the instance in any way. This may raise
# doesn't alter the server in any way. This may raise
# InvalidState, if this event is not allowed in the current state.
instance.power_state = self.driver.get_power_state(context,
instance.uuid)
instance.launched_at = timeutils.utcnow()
utils.process_event(fsm, instance, event='done')
LOG.info("Created instance %s successfully.", instance.uuid)
server.power_state = self.driver.get_power_state(context,
server.uuid)
server.launched_at = timeutils.utcnow()
utils.process_event(fsm, server, event='done')
LOG.info("Created server %s successfully.", server.uuid)

def _delete_instance(self, context, instance):
"""Delete an instance
def _delete_server(self, context, server):
"""Delete a server

:param context: mogan request context
:param instance: instance object
:param server: server object
"""
# TODO(zhenguo): Add delete notification

try:
self.destroy_networks(context, instance)
self.destroy_networks(context, server)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Destroy networks for instance %(uuid)s failed. "
LOG.error("Destroy networks for server %(uuid)s failed. "
"Exception: %(exception)s",
{"uuid": instance.uuid, "exception": e})
{"uuid": server.uuid, "exception": e})

self.driver.unplug_vifs(context, instance)
self.driver.destroy(context, instance)
self.driver.unplug_vifs(context, server)
self.driver.destroy(context, server)

@wrap_instance_fault
def delete_instance(self, context, instance):
"""Delete an instance."""
LOG.debug("Deleting instance...")
@wrap_server_fault
def delete_server(self, context, server):
"""Delete a server."""
LOG.debug("Deleting server...")

fsm = utils.get_state_machine(start_state=instance.status,
fsm = utils.get_state_machine(start_state=server.status,
target_state=states.DELETED)

@utils.synchronized(instance.uuid)
def do_delete_instance(instance):
@utils.synchronized(server.uuid)
def do_delete_server(server):
try:
self._delete_instance(context, instance)
except exception.InstanceNotFound:
LOG.info("Instance disappeared during terminate",
instance=instance)
self._delete_server(context, server)
except exception.ServerNotFound:
LOG.info("Server disappeared during terminate",
server=server)
except Exception:
# As we're trying to delete always go to Error if something
# goes wrong that _delete_instance can't handle.
# goes wrong that _delete_server can't handle.
with excutils.save_and_reraise_exception():
LOG.exception('Setting instance status to ERROR',
instance=instance)
instance.power_state = states.NOSTATE
utils.process_event(fsm, instance, event='error')
self._rollback_instances_quota(context, 1)
LOG.exception('Setting server status to ERROR',
server=server)
server.power_state = states.NOSTATE
utils.process_event(fsm, server, event='error')
self._rollback_servers_quota(context, 1)

# Issue delete request to driver only if instance is associated with
# Issue delete request to driver only if server is associated with
# a underlying node.
if instance.node_uuid:
do_delete_instance(instance)
if server.node_uuid:
do_delete_server(server)

instance.power_state = states.NOSTATE
utils.process_event(fsm, instance, event='done')
instance.destroy()
server.power_state = states.NOSTATE
utils.process_event(fsm, server, event='done')
server.destroy()

def set_power_state(self, context, instance, state):
"""Set power state for the specified instance."""
def set_power_state(self, context, server, state):
"""Set power state for the specified server."""

fsm = utils.get_state_machine(start_state=instance.status)
fsm = utils.get_state_machine(start_state=server.status)

@utils.synchronized(instance.uuid)
@utils.synchronized(server.uuid)
def do_set_power_state():
LOG.debug('Power %(state)s called for instance %(instance)s',
LOG.debug('Power %(state)s called for server %(server)s',
{'state': state,
'instance': instance})
self.driver.set_power_state(context, instance, state)
'server': server})
self.driver.set_power_state(context, server, state)

do_set_power_state()
instance.power_state = self.driver.get_power_state(context,
instance.uuid)
utils.process_event(fsm, instance, event='done')
server.power_state = self.driver.get_power_state(context,
server.uuid)
utils.process_event(fsm, server, event='done')
LOG.info('Successfully set node power state: %s',
state, instance=instance)
state, server=server)

def _rebuild_instance(self, context, instance):
"""Perform rebuild action on the specified instance."""
def _rebuild_server(self, context, server):
"""Perform rebuild action on the specified server."""

# TODO(zhenguo): Add delete notification

self.driver.rebuild(context, instance)
self.driver.rebuild(context, server)

@wrap_instance_fault
def rebuild_instance(self, context, instance):
"""Destroy and re-make this instance.
@wrap_server_fault
def rebuild_server(self, context, server):
"""Destroy and re-make this server.

:param context: mogan request context
:param instance: instance object
:param server: server object
"""

LOG.debug('Rebuilding instance', instance=instance)
LOG.debug('Rebuilding server', server=server)

fsm = utils.get_state_machine(start_state=instance.status)
fsm = utils.get_state_machine(start_state=server.status)

try:
self._rebuild_instance(context, instance)
self._rebuild_server(context, server)
except Exception as e:
with excutils.save_and_reraise_exception():
utils.process_event(fsm, instance, event='error')
LOG.error("Rebuild instance %(uuid)s failed."
utils.process_event(fsm, server, event='error')
LOG.error("Rebuild server %(uuid)s failed."
"Exception: %(exception)s",
{"uuid": instance.uuid,
{"uuid": server.uuid,
"exception": e})

utils.process_event(fsm, instance, event='done')
LOG.info('Instance was successfully rebuilt', instance=instance)
utils.process_event(fsm, server, event='done')
LOG.info('Server was successfully rebuilt', server=server)

def get_serial_console(self, context, instance):
node_console_info = self.driver.get_serial_console_by_instance(
context, instance)
def get_serial_console(self, context, server):
node_console_info = self.driver.get_serial_console_by_server(
context, server)
token = uuidutils.generate_uuid()
access_url = '%s?token=%s' % (
CONF.shellinabox_console.shellinabox_base_url, token)
@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.

"""Instance Metadata information."""
"""Server Metadata information."""

import posixpath

@ -47,39 +47,39 @@ class InvalidMetadataPath(Exception):
pass


class InstanceMetadata(object):
"""Instance metadata."""
class ServerMetadata(object):
"""Server metadata."""

def __init__(self, instance, content=None, user_data=None,
def __init__(self, server, content=None, user_data=None,
key_pair=None, extra_md=None):
"""Creation of this object should basically cover all time consuming
collection. Methods after that should not cause time delays due to
network operations or lengthy cpu operations.

The user should then get a single instance and make multiple method
The user should then get a single server and make multiple method
calls on it.
"""
if not content:
content = []

self.instance = instance
self.server = server
self.extra_md = extra_md
self.availability_zone = instance.availability_zone
self.availability_zone = server.availability_zone

if user_data is not None:
self.userdata_raw = base64.decode_as_bytes(user_data)
else:
self.userdata_raw = None

# TODO(zhenguo): Add hostname to instance object
self.hostname = instance.name
self.uuid = instance.uuid
# TODO(zhenguo): Add hostname to server object
self.hostname = server.name
self.uuid = server.uuid
self.content = {}
self.files = []
self.keypair = key_pair

# 'content' is passed in from the configdrive code in
# mogan/engine/flows/create_instance.py. That's how we get the
# mogan/engine/flows/create_server.py. That's how we get the
# injected files (personalities) in.
for (path, contents) in content:
key = "%04i" % len(self.content)
@ -125,7 +125,7 @@ class InstanceMetadata(object):
]

metadata['hostname'] = self.hostname
metadata['name'] = self.instance.name
metadata['name'] = self.server.name
metadata['availability_zone'] = self.availability_zone

return jsonutils.dump_as_bytes(metadata)
@ -170,7 +170,7 @@ class InstanceMetadata(object):
if OPENSTACK_VERSIONS != versions:
LOG.debug("future versions %s hidden in version list",
[v for v in OPENSTACK_VERSIONS
if v not in versions], instance=self.instance)
if v not in versions], server=self.server)
versions += ["latest"]
return versions

@ -49,12 +49,12 @@ class EngineAPI(object):
version_cap=self.RPC_API_VERSION,
serializer=serializer)

def create_instance(self, context, instance, requested_networks,
user_data, injected_files, key_pair, request_spec,
filter_properties):
def create_server(self, context, server, requested_networks,
user_data, injected_files, key_pair, request_spec,
filter_properties):
"""Signal to engine service to perform a deployment."""
cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
cctxt.cast(context, 'create_instance', instance=instance,
cctxt.cast(context, 'create_server', server=server,
requested_networks=requested_networks,
user_data=user_data,
injected_files=injected_files,
@ -62,23 +62,23 @@ class EngineAPI(object):
request_spec=request_spec,
filter_properties=filter_properties)

def delete_instance(self, context, instance):
"""Signal to engine service to delete an instance."""
def delete_server(self, context, server):
"""Signal to engine service to delete a server."""
cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
cctxt.cast(context, 'delete_instance', instance=instance)
cctxt.cast(context, 'delete_server', server=server)

def set_power_state(self, context, instance, state):
"""Signal to engine service to perform power action on instance."""
def set_power_state(self, context, server, state):
"""Signal to engine service to perform power action on server."""
cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
return cctxt.cast(context, 'set_power_state',
instance=instance, state=state)
server=server, state=state)

def rebuild_instance(self, context, instance):
"""Signal to engine service to rebuild an instance."""
def rebuild_server(self, context, server):
"""Signal to engine service to rebuild a server."""
cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
return cctxt.cast(context, 'rebuild_instance', instance=instance)
return cctxt.cast(context, 'rebuild_server', server=server)

def get_serial_console(self, context, instance):
def get_serial_console(self, context, server):
cctxt = self.client.prepare(topic=self.topic, server=CONF.host)
return cctxt.call(context, 'get_serial_console',
instance=instance)
server=server)
@ -62,7 +62,7 @@ def get_client(token=None):
class API(object):
"""API for interacting with the neutron 2.x API."""

def create_port(self, context, network_uuid, mac, instance_uuid):
def create_port(self, context, network_uuid, mac, server_uuid):
"""Create neutron port."""

client = get_client(context.auth_token)
@ -70,7 +70,7 @@ class API(object):
'port': {
'network_id': network_uuid,
'mac_address': mac,
'device_id': instance_uuid,
'device_id': server_uuid,
}
}

@ -78,8 +78,8 @@ class API(object):
port = client.create_port(body)
except neutron_exceptions.NeutronClientException as e:
msg = (_("Could not create neutron port on network %(net)s for "
"instance %(instance)s. %(exc)s"),
{'net': network_uuid, 'instance': instance_uuid, 'exc': e})
"server %(server)s. %(exc)s"),
{'net': network_uuid, 'server': server_uuid, 'exc': e})
LOG.exception(msg)
raise exception.NetworkError(msg)
return port
@ -99,7 +99,7 @@ class API(object):
{'port_id': port_id, 'reason': e})
raise exception.NetworkError(msg)

def delete_port(self, context, port_id, instance_uuid):
def delete_port(self, context, port_id, server_uuid):
"""Delete neutron port."""

client = get_client(context.auth_token)
@ -110,7 +110,7 @@ class API(object):
LOG.warning("Port %s does not exist", port_id)
else:
LOG.warning(
"Failed to delete port %s for instance.",
"Failed to delete port %s for server.",
port_id, exc_info=True)
raise e

@ -149,8 +149,8 @@ class API(object):
fip = self._get_floating_ip_by_address(client, address)
return fip

def get_instance_id_by_floating_address(self, context, address):
"""Return the instance id a floating IP's fixed IP is allocated to."""
def get_server_id_by_floating_address(self, context, address):
"""Return the server id a floating IP's fixed IP is allocated to."""
client = get_client(context.auth_token)
fip = self._get_floating_ip_by_address(client, address)
if not fip['port_id']:
@ -161,9 +161,9 @@ class API(object):
except exception.PortNotFound:
# NOTE: Here is a potential race condition between _show_port() and
# _get_floating_ip_by_address(). fip['port_id'] shows a port which
# is the server instance's. At _get_floating_ip_by_address(),
# Neutron returns the list which includes the instance. Just after
# that, the deletion of the instance happens and Neutron returns
# is the server server's. At _get_floating_ip_by_address(),
# Neutron returns the list which includes the server. Just after
# that, the deletion of the server happens and Neutron returns
# 404 on _show_port().
LOG.debug('The port(%s) is not found', fip['port_id'])
return None
@ -181,7 +181,7 @@ class API(object):
client.update_floatingip(fip['id'], {'floatingip': param})

def disassociate_floating_ip(self, context, address):
"""Disassociate a floating IP from the instance."""
"""Disassociate a floating IP from the server."""

client = get_client(context.auth_token)
fip = self._get_floating_ip_by_address(client, address)
@ -202,12 +202,12 @@ class API(object):

return nets

def _ports_needed_per_instance(self, context, client, requested_networks):
def _ports_needed_per_server(self, context, client, requested_networks):

ports_needed_per_instance = 0
ports_needed_per_server = 0
net_ids_requested = []
for request in requested_networks:
ports_needed_per_instance += 1
ports_needed_per_server += 1
net_ids_requested.append(request['net_id'])

# Now check to see if all requested networks exist
@ -231,27 +231,27 @@ class API(object):
id_str = id_str and id_str + ', ' + _id or _id
raise exception.NetworkNotFound(network_id=id_str)

return ports_needed_per_instance
return ports_needed_per_server

def validate_networks(self, context, requested_networks, num_instances):
def validate_networks(self, context, requested_networks, num_servers):
"""Validate that the tenant can use the requested networks.

Return the number of instances than can be successfully allocated
Return the number of servers than can be successfully allocated
with the requested network configuration.
"""
LOG.debug('validate_networks() for %s', requested_networks)

client = get_client(context.auth_token)
ports_needed_per_instance = self._ports_needed_per_instance(
ports_needed_per_server = self._ports_needed_per_server(
context, client, requested_networks)

# Check the quota and return how many of the requested number of
# instances can be created
if ports_needed_per_instance:
# servers can be created
if ports_needed_per_server:
quotas = client.show_quota(context.project_id)['quota']
if quotas.get('port', -1) == -1:
# Unlimited Port Quota
return num_instances
return num_servers

# We only need the port count so only ask for ids back.
params = dict(tenant_id=context.project_id, fields=['id'])
@ -263,13 +263,13 @@ class API(object):
{'ports': len(ports),
'quota': quotas.get('port')})
raise exception.PortLimitExceeded(msg)
ports_needed = ports_needed_per_instance * num_instances
ports_needed = ports_needed_per_server * num_servers
if free_ports >= ports_needed:
return num_instances
return num_servers
else:
return free_ports // ports_needed_per_instance
return free_ports // ports_needed_per_server

return num_instances
return num_servers

def _ensure_requested_network_ordering(accessor, unordered, preferred):
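A small, self-contained sketch of the quota arithmetic used by validate_networks() above: given the per-server port count, how many of the requested servers still fit in the remaining Neutron port quota. The function and variable names are illustrative, and the PortLimitExceeded path for an already-exhausted quota is left out.

def servers_allowed(ports_needed_per_server, num_servers, port_quota, ports_in_use):
    # Mirrors the branches above: nothing to allocate, unlimited quota,
    # enough free ports, or however many whole servers still fit.
    if not ports_needed_per_server:
        return num_servers
    if port_quota == -1:
        return num_servers
    free_ports = port_quota - ports_in_use
    ports_needed = ports_needed_per_server * num_servers
    if free_ports >= ports_needed:
        return num_servers
    return free_ports // ports_needed_per_server

print(servers_allowed(2, 10, port_quota=15, ports_in_use=3))  # -> 6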
@ -18,7 +18,7 @@ the system.

from mogan.notifications.objects import base as notification_base
from mogan.notifications.objects import exception as notification_exception
from mogan.notifications.objects import instance as instance_notification
from mogan.notifications.objects import server as server_notification
from mogan.objects import fields

@ -34,10 +34,10 @@ def _get_fault_and_priority_from_exc(exception):
return fault, priority

def notify_about_instance_action(context, instance, host, action, phase=None,
binary='mogan-engine', exception=None):
"""Send versioned notification about the action made on the instance
:param instance: the instance which the action performed on
def notify_about_server_action(context, server, host, action, phase=None,
binary='mogan-engine', exception=None):
"""Send versioned notification about the action made on the server
:param server: the server which the action performed on
:param host: the host emitting the notification
:param action: the name of the action
:param phase: the phase of the action
@ -47,16 +47,16 @@ def notify_about_instance_action(context, instance, host, action, phase=None,

fault, priority = _get_fault_and_priority_from_exc(exception)

payload = instance_notification.InstanceActionPayload(
instance=instance,
payload = server_notification.ServerActionPayload(
server=server,
fault=fault)
notification = instance_notification.InstanceActionNotification(
notification = server_notification.ServerActionNotification(
context=context,
priority=priority,
publisher=notification_base.NotificationPublisher(
context=context, host=host, binary=binary),
event_type=notification_base.EventType(
object='instance',
object='server',
action=action,
phase=phase),
payload=payload)
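A stand-in showing how the renamed notification payload is assembled from the server object: each payload field is copied from the attribute named in the SCHEMA, then wrapped in the action notification before being emitted. Plain dictionaries replace the real versioned-object classes and the field list is a shortened subset.

SCHEMA = {
    'name': ('server', 'name'),
    'uuid': ('server', 'uuid'),
    'flavor_uuid': ('server', 'flavor_uuid'),
    'status': ('server', 'status'),
}

class FakeServer(object):
    name = 'test_server_0'
    uuid = '59f1b681-6ca4-4a17-b784-297a7285004e'
    flavor_uuid = 'ff28b5a2-73e5-431c-b4b7-1b96b74bca7b'
    status = 'building'

def populate_schema(schema, **sources):
    # Each payload field is copied from the named attribute of its source.
    return {field: getattr(sources[src], attr)
            for field, (src, attr) in schema.items()}

print(populate_schema(SCHEMA, server=FakeServer()))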
@ -16,21 +16,21 @@ from mogan.objects import fields

@mogan_base.MoganObjectRegistry.register_notification
class InstancePayload(base.NotificationPayloadBase):
class ServerPayload(base.NotificationPayloadBase):
SCHEMA = {
'name': ('instance', 'name'),
'uuid': ('instance', 'uuid'),
'user_id': ('instance', 'user_id'),
'project_id': ('instance', 'project_id'),
'availability_zone': ('instance', 'availability_zone'),
'image_uuid': ('instance', 'image_uuid'),
'created_at': ('instance', 'created_at'),
'launched_at': ('instance', 'launched_at'),
'updated_at': ('instance', 'updated_at'),
'status': ('instance', 'status'),
'power_state': ('instance', 'power_state'),
'instance_type_uuid': ('instance', 'instance_type_uuid'),
'description': ('instance', 'description')
'name': ('server', 'name'),
'uuid': ('server', 'uuid'),
'user_id': ('server', 'user_id'),
'project_id': ('server', 'project_id'),
'availability_zone': ('server', 'availability_zone'),
'image_uuid': ('server', 'image_uuid'),
'created_at': ('server', 'created_at'),
'launched_at': ('server', 'launched_at'),
'updated_at': ('server', 'updated_at'),
'status': ('server', 'status'),
'power_state': ('server', 'power_state'),
'flavor_uuid': ('server', 'flavor_uuid'),
'description': ('server', 'description')
}
# Version 1.0: Initial version
VERSION = '1.0'
@ -40,7 +40,7 @@ class InstancePayload(base.NotificationPayloadBase):
'user_id': fields.StringField(nullable=True),
'project_id': fields.StringField(nullable=True),
'description': fields.StringField(nullable=True),
'instance_type_uuid': fields.UUIDField(nullable=False),
'flavor_uuid': fields.UUIDField(nullable=False),
'image_uuid': fields.UUIDField(nullable=True),
'availability_zone': fields.StringField(nullable=True),
'power_state': fields.StringField(nullable=True),
@ -52,13 +52,13 @@ class InstancePayload(base.NotificationPayloadBase):
# 'extra'
}

def __init__(self, instance, **kwargs):
super(InstancePayload, self).__init__(**kwargs)
self.populate_schema(instance=instance)
def __init__(self, server, **kwargs):
super(ServerPayload, self).__init__(**kwargs)
self.populate_schema(server=server)

@mogan_base.MoganObjectRegistry.register_notification
class InstanceActionPayload(InstancePayload):
class ServerActionPayload(ServerPayload):
# No SCHEMA as all the additional fields are calculated

VERSION = '1.0'
@ -66,18 +66,18 @@ class InstanceActionPayload(InstancePayload):
'fault': fields.ObjectField('ExceptionPayload', nullable=True),
}

def __init__(self, instance, fault, **kwargs):
super(InstanceActionPayload, self).__init__(
instance=instance,
def __init__(self, server, fault, **kwargs):
super(ServerActionPayload, self).__init__(
server=server,
fault=fault,
**kwargs)

@mogan_base.MoganObjectRegistry.register_notification
class InstanceActionNotification(base.NotificationBase):
class ServerActionNotification(base.NotificationBase):
# Version 1.0: Initial version
VERSION = '1.0'

fields = {
'payload': fields.ObjectField('InstanceActionPayload')
'payload': fields.ObjectField('ServerActionPayload')
}

@ -25,10 +25,10 @@ def register_all():
# NOTE(danms): You must make sure your object gets imported in this
# function in order for it to be registered by services that may
# need to receive it via RPC.
__import__('mogan.objects.instance_type')
__import__('mogan.objects.instance')
__import__('mogan.objects.instance_nics')
__import__('mogan.objects.instance_fault')
__import__('mogan.objects.flavor')
__import__('mogan.objects.server')
__import__('mogan.objects.server_nics')
__import__('mogan.objects.server_fault')
__import__('mogan.objects.compute_node')
__import__('mogan.objects.compute_port')
__import__('mogan.objects.compute_disk')

@ -26,7 +26,7 @@ class ComputeDisk(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': object_fields.IntegerField(read_only=True),
@ -81,7 +81,7 @@ class ComputeDiskList(object_base.ObjectListBase, base.MoganObject,

VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'objects': object_fields.ListOfObjectsField('ComputeDisk')

@ -27,7 +27,7 @@ class ComputeNode(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': object_fields.IntegerField(read_only=True),
@ -100,7 +100,7 @@ class ComputeNodeList(object_base.ObjectListBase, base.MoganObject,

VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'objects': object_fields.ListOfObjectsField('ComputeNode')

@ -26,7 +26,7 @@ class ComputePort(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': object_fields.IntegerField(read_only=True),
@ -88,7 +88,7 @@ class ComputePortList(object_base.ObjectListBase, base.MoganObject,

VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'objects': object_fields.ListOfObjectsField('ComputePort')

@ -25,11 +25,11 @@ OPTIONAL_FIELDS = ['extra_specs', 'projects']

@base.MoganObjectRegistry.register
class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
class Flavor(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'uuid': object_fields.UUIDField(nullable=True),
@ -41,7 +41,7 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
}

def __init__(self, *args, **kwargs):
super(InstanceType, self).__init__(*args, **kwargs)
super(Flavor, self).__init__(*args, **kwargs)
self._orig_extra_specs = {}
self._orig_projects = {}

@ -73,8 +73,8 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
self.obj_reset_changes(['projects'])

def obj_reset_changes(self, fields=None, recursive=False):
super(InstanceType, self).obj_reset_changes(fields=fields,
recursive=recursive)
super(Flavor, self).obj_reset_changes(fields=fields,
recursive=recursive)
if fields is None or 'extra_specs' in fields:
self._orig_extra_specs = (dict(self.extra_specs)
if self.obj_attr_is_set('extra_specs')
@ -85,7 +85,7 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
else [])

def obj_what_changed(self):
changes = super(InstanceType, self).obj_what_changed()
changes = super(Flavor, self).obj_what_changed()
if ('extra_specs' in self and
self.extra_specs != self._orig_extra_specs):
changes.add('extra_specs')
@ -96,37 +96,35 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
@staticmethod
def _from_db_object_list(db_objects, cls, context):
"""Converts a list of database entities to a list of formal objects."""
return [InstanceType._from_db_object(context, cls(context), obj,
expected_attrs=['extra_specs'])
return [Flavor._from_db_object(context, cls(context), obj,
expected_attrs=['extra_specs'])
for obj in db_objects]

@classmethod
def list(cls, context):
"""Return a list of Instance Type objects."""
db_instance_types = cls.dbapi.instance_type_get_all(context)
return InstanceType._from_db_object_list(db_instance_types, cls,
context)
"""Return a list of Flavor objects."""
db_flavors = cls.dbapi.flavor_get_all(context)
return Flavor._from_db_object_list(db_flavors, cls, context)

@classmethod
def get(cls, context, instance_type_uuid):
"""Find a Instance Type and return a Instance Type object."""
db_instance_type = cls.dbapi.instance_type_get(context,
instance_type_uuid)
instance_type = InstanceType._from_db_object(
context, cls(context), db_instance_type,
def get(cls, context, flavor_uuid):
"""Find a Flavor and return a Flavor object."""
db_flavor = cls.dbapi.flavor_get(context, flavor_uuid)
flavor = Flavor._from_db_object(
context, cls(context), db_flavor,
expected_attrs=['extra_specs', 'projects'])
return instance_type
return flavor

def create(self, context=None):
"""Create a Instance Type record in the DB."""
"""Create a Flavor record in the DB."""
values = self.obj_get_changes()
db_instance_type = self.dbapi.instance_type_create(context, values)
self._from_db_object(context, self, db_instance_type,
db_flavor = self.dbapi.flavor_create(context, values)
self._from_db_object(context, self, db_flavor,
expected_attrs=['extra_specs'])

def destroy(self, context=None):
"""Delete the Instance Type from the DB."""
self.dbapi.instance_type_destroy(context, self.uuid)
"""Delete the Flavor from the DB."""
self.dbapi.flavor_destroy(context, self.uuid)
self.obj_reset_changes()

def save(self, context=None):
@ -152,7 +150,7 @@ class InstanceType(base.MoganObject, object_base.VersionedObjectDictCompat):
if added_projects or deleted_projects:
self.save_projects(context, added_projects, deleted_projects)

self.dbapi.instance_type_update(context, self.uuid, updates)
self.dbapi.flavor_update(context, self.uuid, updates)

def save_extra_specs(self, context, to_add=None, to_delete=None):
"""Add or delete extra_specs.

@ -30,7 +30,7 @@ class KeyPair(base.MoganObject):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': fields.IntegerField(),
@ -85,7 +85,7 @@ class KeyPairList(object_base.ObjectListBase, base.MoganObject):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'objects': fields.ListOfObjectsField('KeyPair'),

@ -14,7 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.

"""Quotas for instances."""
"""Quotas for servers."""

import datetime

@ -38,7 +38,7 @@ class Quota(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': object_fields.IntegerField(),
@ -164,7 +164,7 @@ class DbQuotaDriver(object):
The default driver utilizes the local database.
"""

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

def get_project_quotas(self, context, resources, project_id, usages=True):
"""Retrieve quotas for a project.
@ -185,11 +185,11 @@ class DbQuotaDriver(object):
for p_quota in res:
project_quotas[p_quota.resource_name] = p_quota.hard_limit
if project_quotas == {}:
self.dbapi.quota_create(context, {'resource_name': 'instances',
self.dbapi.quota_create(context, {'resource_name': 'servers',
'project_id': project_id,
'hard_limit': 10,
'allocated': 0})
project_quotas['instances'] = 10
project_quotas['servers'] = 10
allocated_quotas = None
if usages:
project_usages = self.dbapi.quota_usage_get_all_by_project(
@ -374,7 +374,7 @@ class BaseResource(object):
def __init__(self, name, sync, count=None):
"""Initializes a Resource.

:param name: The name of the resource, i.e., "instances".
:param name: The name of the resource, i.e., "servers".
:param sync: A dbapi methods name which returns a dictionary
to resynchronize the in_use count for one or more
resources, as described above.
@ -407,12 +407,12 @@ class BaseResource(object):
return -1


class InstanceResource(BaseResource):
"""ReservableResource for a specific instance."""
class ServerResource(BaseResource):
"""ReservableResource for a specific server."""

def __init__(self, name='instances'):
"""Initializes a InstanceResource.
def __init__(self, name='servers'):
"""Initializes a ServerResource.

:param name: The kind of resource, i.e., "instances".
:param name: The kind of resource, i.e., "servers".
"""
super(InstanceResource, self).__init__(name, "_sync_%s" % name)
super(ServerResource, self).__init__(name, "_sync_%s" % name)
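A minimal sketch of the naming convention behind ServerResource above: the resource name ('servers') doubles as the suffix of the dbapi sync hook ('_sync_servers'), which is why renaming the quota resource also renames the sync method it points at. The classes here are trimmed stand-ins, not the real quota driver.

class BaseResource(object):
    def __init__(self, name, sync):
        self.name = name
        self.sync = sync      # dbapi method name used to resync in_use counts

class ServerResource(BaseResource):
    def __init__(self, name='servers'):
        super(ServerResource, self).__init__(name, '_sync_%s' % name)

res = ServerResource()
print('%s %s' % (res.name, res.sync))   # servers _sync_servers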
@ -29,11 +29,11 @@ LOG = logging.getLogger(__name__)

@base.MoganObjectRegistry.register
class Instance(base.MoganObject, object_base.VersionedObjectDictCompat):
class Server(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': object_fields.IntegerField(),
@ -44,10 +44,10 @@ class Instance(base.MoganObject, object_base.VersionedObjectDictCompat):
'user_id': object_fields.UUIDField(nullable=True),
'status': object_fields.StringField(nullable=True),
'power_state': object_fields.StringField(nullable=True),
'instance_type_uuid': object_fields.UUIDField(nullable=True),
'flavor_uuid': object_fields.UUIDField(nullable=True),
'availability_zone': object_fields.StringField(nullable=True),
'image_uuid': object_fields.UUIDField(nullable=True),
'nics': object_fields.ObjectField('InstanceNics', nullable=True),
'nics': object_fields.ObjectField('ServerNics', nullable=True),
'node_uuid': object_fields.UUIDField(nullable=True),
'launched_at': object_fields.DateTimeField(nullable=True),
'extra': object_fields.FlexibleDictField(nullable=True),
@ -56,110 +56,108 @@ class Instance(base.MoganObject, object_base.VersionedObjectDictCompat):
}

def __init__(self, context=None, **kwargs):
instance_nics = kwargs.pop('nics', None)
if instance_nics and isinstance(instance_nics, list):
nics_obj = objects.InstanceNics(context)
for nic in instance_nics:
nic_obj = objects.InstanceNic(
context, instance_uuid=kwargs['uuid'], **nic)
server_nics = kwargs.pop('nics', None)
if server_nics and isinstance(server_nics, list):
nics_obj = objects.ServerNics(context)
for nic in server_nics:
nic_obj = objects.ServerNic(
context, server_uuid=kwargs['uuid'], **nic)
nics_obj.objects.append(nic_obj)
kwargs['nics'] = nics_obj
super(Instance, self).__init__(context=context, **kwargs)
super(Server, self).__init__(context=context, **kwargs)

@staticmethod
def _from_db_object(instance, db_inst, expected_attrs=None):
def _from_db_object(server, db_server, expected_attrs=None):
"""Method to help with migration to objects.

Converts a database entity to a formal object.

:param instance: An object of the Instance class.
:param db_inst: A DB Instance model of the object
:param server: An object of the Server class.
:param db_server: A DB Server model of the object
:return: The object of the class with the database entity added
"""
for field in set(instance.fields) - set(OPTIONAL_ATTRS):
instance[field] = db_inst[field]
for field in set(server.fields) - set(OPTIONAL_ATTRS):
server[field] = db_server[field]

if expected_attrs is None:
expected_attrs = []
if 'nics' in expected_attrs:
instance._load_instance_nics(instance._context, instance.uuid)
server._load_server_nics(server._context, server.uuid)
else:
instance.nics = None
server.nics = None
if 'fault' in expected_attrs:
instance._load_fault(instance._context, instance.uuid)
server._load_fault(server._context, server.uuid)

instance.obj_reset_changes()
return instance
server.obj_reset_changes()
return server

def _load_instance_nics(self, context, instance_uuid):
self.nics = objects.InstanceNics.get_by_instance_uuid(
context=context, instance_uuid=instance_uuid)
def _load_server_nics(self, context, server_uuid):
self.nics = objects.ServerNics.get_by_server_uuid(
context=context, server_uuid=server_uuid)

@staticmethod
def _from_db_object_list(db_objects, cls, context):
"""Converts a list of database entities to a list of formal objects."""
instances = []
servers = []
for obj in db_objects:
expected_attrs = ['nics']
if obj["status"] == "error":
expected_attrs.append("fault")
instances.append(Instance._from_db_object(cls(context),
obj,
expected_attrs))
return instances
servers.append(Server._from_db_object(cls(context), obj,
expected_attrs))
return servers

def _load_fault(self, context, instance_uuid):
self.fault = objects.InstanceFault.get_latest_for_instance(
context=context, instance_uuid=instance_uuid)
def _load_fault(self, context, server_uuid):
self.fault = objects.ServerFault.get_latest_for_server(
context=context, server_uuid=server_uuid)

def _save_nics(self, context):
for nic_obj in self.nics or []:
nic_obj.save(context)

def as_dict(self):
data = super(Instance, self).as_dict()
data = super(Server, self).as_dict()
if 'nics' in data:
data.update(network_info=data['nics'].to_legacy_dict())
return data

@classmethod
def list(cls, context, project_only=False):
"""Return a list of Instance objects."""
db_instances = cls.dbapi.instance_get_all(context,
project_only=project_only)
return Instance._from_db_object_list(db_instances, cls, context)
"""Return a list of Server objects."""
db_servers = cls.dbapi.server_get_all(context,
project_only=project_only)
return Server._from_db_object_list(db_servers, cls, context)

@classmethod
def get(cls, context, uuid):
"""Find a instance and return a Instance object."""
"""Find a server and return a Server object."""
expected_attrs = ['nics']
db_instance = cls.dbapi.instance_get(context, uuid)
if db_instance["status"] == "error":
db_server = cls.dbapi.server_get(context, uuid)
if db_server["status"] == "error":
expected_attrs.append("fault")
instance = Instance._from_db_object(cls(context),
db_instance,
expected_attrs)
return instance
server = Server._from_db_object(cls(context), db_server,
expected_attrs)
return server

def create(self, context=None):
"""Create a Instance record in the DB."""
"""Create a Server record in the DB."""
values = self.obj_get_changes()
instance_nics = values.pop('nics', None)
if instance_nics:
values['nics'] = instance_nics.as_list_of_dict()
db_instance = self.dbapi.instance_create(context, values)
server_nics = values.pop('nics', None)
if server_nics:
values['nics'] = server_nics.as_list_of_dict()
db_server = self.dbapi.server_create(context, values)
expected_attrs = None
if instance_nics:
if server_nics:
expected_attrs = ['nics']
self._from_db_object(self, db_instance, expected_attrs)
self._from_db_object(self, db_server, expected_attrs)

def destroy(self, context=None):
"""Delete the Instance from the DB."""
self.dbapi.instance_destroy(context, self.uuid)
"""Delete the Server from the DB."""
self.dbapi.server_destroy(context, self.uuid)
self.obj_reset_changes()

def save(self, context=None):
"""Save updates to this Instance."""
"""Save updates to this Server."""
updates = self.obj_get_changes()
for field in list(updates):
if (self.obj_attr_is_set(field) and
@ -169,13 +167,13 @@ class Instance(base.MoganObject, object_base.VersionedObjectDictCompat):
getattr(self, '_save_%s' % field)(context)
except AttributeError:
LOG.exception('No save handler for %s', field,
instance=self)
server=self)
except db_exc.DBReferenceError as exp:
if exp.key != 'instance_uuid':
if exp.key != 'server_uuid':
raise
updates.pop(field)

self.dbapi.instance_update(context, self.uuid, updates)
self.dbapi.server_update(context, self.uuid, updates)
self.obj_reset_changes()

def refresh(self, context=None):
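A simplified, dictionary-based sketch of the _from_db_object() flow shown above: copy every non-optional field from the DB row, then attach 'nics' and 'fault' only when they are listed in expected_attrs. The field names are a shortened subset and the loader calls are inlined, so this is only an illustration of the shape of the logic.

OPTIONAL_ATTRS = ('nics', 'fault')
FIELDS = ('uuid', 'name', 'status', 'flavor_uuid', 'nics', 'fault')

def from_db_object(db_server, expected_attrs=()):
    server = {f: db_server[f] for f in FIELDS if f not in OPTIONAL_ATTRS}
    # Optional attributes are only attached when explicitly requested.
    server['nics'] = db_server.get('nics') if 'nics' in expected_attrs else None
    if 'fault' in expected_attrs:
        server['fault'] = db_server.get('fault')
    return server

row = {'uuid': '59f1b681-6ca4-4a17-b784-297a7285004e', 'name': 'test_server_0',
       'status': 'error', 'flavor_uuid': 'ff28b5a2-73e5-431c-b4b7-1b96b74bca7b',
       'nics': [], 'fault': {'code': 500}}
print(from_db_object(row, expected_attrs=('nics', 'fault')))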
@ -26,15 +26,15 @@ from mogan.objects import fields as object_fields

@base.MoganObjectRegistry.register
class InstanceFault(base.MoganObject, object_base.VersionedObjectDictCompat):
class ServerFault(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'id': object_fields.IntegerField(),
'instance_uuid': object_fields.UUIDField(),
'server_uuid': object_fields.UUIDField(),
'code': object_fields.IntegerField(),
'message': object_fields.StringField(nullable=True),
'detail': object_fields.StringField(nullable=True),
@ -49,46 +49,45 @@ class InstanceFault(base.MoganObject, object_base.VersionedObjectDictCompat):
return fault

@classmethod
def get_latest_for_instance(cls, context, instance_uuid):
db_faults = cls.dbapi.instance_fault_get_by_instance_uuids(
context, [instance_uuid])
if instance_uuid in db_faults and db_faults[instance_uuid]:
def get_latest_for_server(cls, context, server_uuid):
db_faults = cls.dbapi.server_fault_get_by_server_uuids(
context, [server_uuid])
if server_uuid in db_faults and db_faults[server_uuid]:
return cls._from_db_object(context, cls(),
db_faults[instance_uuid][0])
db_faults[server_uuid][0])

def create(self):
if self.obj_attr_is_set('id'):
raise exception.ObjectActionError(action='create',
reason='already created')
values = {
'instance_uuid': self.instance_uuid,
'server_uuid': self.server_uuid,
'code': self.code,
'message': self.message,
'detail': self.detail,
}
db_fault = self.dbapi.instance_fault_create(self._context, values)
db_fault = self.dbapi.server_fault_create(self._context, values)
self._from_db_object(self._context, self, db_fault)
self.obj_reset_changes()


@base.MoganObjectRegistry.register
class InstanceFaultList(base.MoganObject,
object_base.VersionedObjectDictCompat):
class ServerFaultList(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version

VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'objects': object_fields.ListOfObjectsField('InstanceFault')
'objects': object_fields.ListOfObjectsField('ServerFault')
}

@classmethod
def get_by_instance_uuids(cls, context, instance_uuids):
db_faultdict = cls.dbapi.instance_fault_get_by_instance_uuids(
context, instance_uuids)
def get_by_server_uuids(cls, context, server_uuids):
db_faultdict = cls.dbapi.server_fault_get_by_server_uuids(
context, server_uuids)
db_faultlist = itertools.chain(*db_faultdict.values())
return object_base.obj_make_list(context, cls(context),
objects.InstanceFault,
objects.ServerFault,
db_faultlist)

@ -23,15 +23,15 @@ from mogan.objects import fields as object_fields

@base.MoganObjectRegistry.register
class InstanceNic(base.MoganObject, object_base.VersionedObjectDictCompat):
class ServerNic(base.MoganObject, object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'port_id': object_fields.UUIDField(nullable=False),
'instance_uuid': object_fields.UUIDField(nullable=True),
'server_uuid': object_fields.UUIDField(nullable=True),
'mac_address': object_fields.MACAddressField(nullable=True),
'network_id': object_fields.UUIDField(nullable=True),
'fixed_ips': object_fields.ListOfDictOfNullableStringsField(
@ -41,51 +41,51 @@ class InstanceNic(base.MoganObject, object_base.VersionedObjectDictCompat):
}

@staticmethod
def _from_db_object(context, obj, db_object, instance_uuid=None):
if instance_uuid:
def _from_db_object(context, obj, db_object, server_uuid=None):
if server_uuid:
db_object = copy.deepcopy(db_object)
db_object.update(instance_uuid=instance_uuid)
db_object.update(server_uuid=server_uuid)
if not isinstance(db_object, dict):
db_object_dict = db_object.as_dict()
else:
db_object_dict = db_object
obj = InstanceNic(context)
obj = ServerNic(context)
obj.update(db_object_dict)
obj.obj_reset_changes()
return obj

def save(self, context):
updates = self.obj_get_changes()
self.dbapi.instance_nic_update_or_create(
self.dbapi.server_nic_update_or_create(
context, self.port_id, updates)

def create(self, context):
values = self.obj_to_primitive()['mogan_object.data']
self.dbapi.instance_nic_update_or_create(
self.dbapi.server_nic_update_or_create(
context, self.port_id, values)


@base.MoganObjectRegistry.register
class InstanceNics(object_base.ObjectListBase, base.MoganObject,
object_base.VersionedObjectDictCompat):
class ServerNics(object_base.ObjectListBase, base.MoganObject,
object_base.VersionedObjectDictCompat):
# Version 1.0: Initial version

VERSION = '1.0'

dbapi = dbapi.get_instance()
dbapi = dbapi.get_server()

fields = {
'objects': object_fields.ListOfObjectsField('InstanceNic')}
'objects': object_fields.ListOfObjectsField('ServerNic')}

def __init__(self, context=None, **kwargs):

super(InstanceNics, self).__init__(context=context, **kwargs)
super(ServerNics, self).__init__(context=context, **kwargs)

@classmethod
def get_by_instance_uuid(cls, context, instance_uuid):
nics = cls.dbapi.instance_nics_get_by_instance_uuid(
context, instance_uuid)
return object_base.obj_make_list(context, cls(context), InstanceNic,
def get_by_server_uuid(cls, context, server_uuid):
nics = cls.dbapi.server_nics_get_by_server_uuid(
context, server_uuid)
return object_base.obj_make_list(context, cls(context), ServerNic,
nics)

def create(self, context):

@ -45,11 +45,11 @@ class BaseFilter(object):
yield obj

# Set to true in a subclass if a filter only needs to be run once
# for each request rather than for each instance
# for each request rather than for each server
run_filter_once_per_request = False

def run_filter_for_index(self, index):
"""Return True if the filter needs to be run for n-th instances.
"""Return True if the filter needs to be run for n-th servers.

Only need to override this if a filter needs anything other than
"first only" or "all" behaviour.
@ -67,19 +67,19 @@ class BaseFilterHandler(base_handler.BaseHandler):
part_filter_results, filter_properties):
# Log the filtration history
rspec = filter_properties.get("request_spec", {})
msg_dict = {"inst_id": rspec.get("instance_id", ""),
msg_dict = {"server_id": rspec.get("server_id", ""),
"str_results": six.text_type(full_filter_results),
}
full_msg = ("Filtering removed all nodes for the request with "
"instance ID "
"'%(inst_id)s'. Filter results: %(str_results)s"
"server ID "
"'%(server_id)s'. Filter results: %(str_results)s"
) % msg_dict
msg_dict["str_results"] = ', '.join(
("%(cls_name)s: (start: %(start)s, end: %(end)s)") %
{"cls_name": value[0], "start": value[1], "end": value[2]}
for value in part_filter_results)
part_msg = ("Filtering removed all nodes for the request with "
"instance ID '%(inst_id)s'. "
"server ID '%(server_id)s'. "
"Filter results: %(str_results)s") % msg_dict
LOG.debug(full_msg)
LOG.info(part_msg)

@ -10,7 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.

"""The FilterScheduler is for creating instances.
"""The FilterScheduler is for creating servers.

You can customize this scheduler by specifying your own node Filters and
Weighing Functions.
@ -46,9 +46,9 @@ class FilterScheduler(driver.Scheduler):

Can be overridden in a subclass to add more data.
"""
instance = request_spec['instance_properties']
server = request_spec['server_properties']
filter_properties['availability_zone'] = \
instance.get('availability_zone')
server.get('availability_zone')

def _max_attempts(self):
max_attempts = CONF.scheduler.scheduler_max_attempts
@ -58,9 +58,9 @@ class FilterScheduler(driver.Scheduler):
"must be >=1"))
return max_attempts

def _log_instance_error(self, instance_id, retry):
"""Log requests with exceptions from previous instance operations."""
exc = retry.pop('exc', None)  # string-ified exception from instance
def _log_server_error(self, server_id, retry):
"""Log requests with exceptions from previous server operations."""
exc = retry.pop('exc', None)  # string-ified exception from server
if not exc:
return  # no exception info from a previous attempt, skip

@ -69,9 +69,9 @@ class FilterScheduler(driver.Scheduler):
return  # no previously attempted nodes, skip

last_node = nodes[-1]
LOG.error("Error scheduling %(instance_id)s from last node: "
LOG.error("Error scheduling %(server_id)s from last node: "
"%(last_node)s : %(exc)s",
{'instance_id': instance_id,
{'server_id': server_id,
'last_node': last_node,
'exc': exc})

@ -87,15 +87,15 @@ class FilterScheduler(driver.Scheduler):
# re-scheduling is disabled.
return

instance_id = request_spec.get('instance_id')
self._log_instance_error(instance_id, retry)
server_id = request_spec.get('server_id')
self._log_server_error(server_id, retry)

if retry['num_attempts'] > max_attempts:
raise exception.NoValidNode(
_("Exceeded max scheduling attempts %(max_attempts)d "
"for instance %(instance_id)s") %
"for server %(server_id)s") %
{'max_attempts': max_attempts,
'instance_id': instance_id})
'server_id': server_id})

def _get_weighted_candidates(self, context, request_spec,
filter_properties=None):
@ -104,9 +104,9 @@ class FilterScheduler(driver.Scheduler):
Returned list is ordered by their fitness.
"""
# Since Mogan is using mixed filters from Oslo and it's own, which
# takes 'resource_XX' and 'instance_XX' as input respectively, copying
# 'instance_type' to 'resource_type' will make both filters happy.
instance_type = resource_type = request_spec.get("instance_type")
# takes 'resource_XX' and 'server_XX' as input respectively, copying
# 'flavor' to 'resource_type' will make both filters happy.
flavor = resource_type = request_spec.get("flavor")

config_options = self._get_configuration_options()

@ -118,7 +118,7 @@ class FilterScheduler(driver.Scheduler):

filter_properties.update({'request_spec': request_spec_dict,
'config_options': config_options,
'instance_type': instance_type,
'flavor': flavor,
'resource_type': resource_type})

self.populate_filter_properties(request_spec,
@ -158,9 +158,9 @@ class FilterScheduler(driver.Scheduler):
weighed_nodes = self._get_weighted_candidates(
context, request_spec, filter_properties)
if not weighed_nodes:
LOG.warning('No weighed nodes found for instance '
LOG.warning('No weighed nodes found for server '
'with properties: %s',
request_spec.get('instance_type'))
request_spec.get('flavor'))
raise exception.NoValidNode(_("No weighed nodes available"))

node = self._choose_top_node(weighed_nodes, request_spec)
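A compact sketch of the re-scheduling guard shown above: once the retry counter passes the configured maximum, scheduling for that server stops with an error. The config lookup and exception class are simplified to plain Python stand-ins.

MAX_ATTEMPTS = 3   # stand-in for CONF.scheduler.scheduler_max_attempts

def check_retry(filter_properties, request_spec):
    retry = filter_properties.get('retry')
    if not retry:
        return                      # re-scheduling disabled
    server_id = request_spec.get('server_id')
    if retry['num_attempts'] > MAX_ATTEMPTS:
        raise RuntimeError('Exceeded max scheduling attempts %d for server %s'
                           % (MAX_ATTEMPTS, server_id))

check_retry({'retry': {'num_attempts': 2}}, {'server_id': 'some-server-uuid'})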
@ -22,7 +22,7 @@ LOG = logging.getLogger(__name__)

class CapabilitiesFilter(filters.BaseNodeFilter):
"""NodeFilter to work with resource instance type records."""
"""NodeFilter to work with resource server type records."""

def _satisfies_extra_specs(self, capabilities, resource_type):
"""Check if capabilities satisfy resource type requirements.
@ -68,7 +68,7 @@ class CapabilitiesFilter(filters.BaseNodeFilter):
break
else:
# Nothing matched, so bail out
LOG.debug('Instance type extra spec requirement '
LOG.debug('Flavor extra spec requirement '
'"%(key)s=%(req)s" does not match reported '
'capability "%(cap)s"',
{'key': key, 'req': req, 'cap': cap})

@ -16,17 +16,17 @@
from mogan.scheduler import filters

class InstanceTypeFilter(filters.BaseNodeFilter):
"""Filters Nodes by instance type."""
class FlavorFilter(filters.BaseNodeFilter):
"""Filters Nodes by server type."""

# Instance types do not change within a request
# Flavors do not change within a request
run_filter_once_per_request = True

def node_passes(self, node_state, filter_properties):
spec = filter_properties.get('request_spec', {})
instance_type = spec.get('instance_type', {})
type_name = instance_type.get('name')
flavor = spec.get('flavor', {})
type_name = flavor.get('name')

if type_name:
return type_name == node_state.instance_type
return type_name == node_state.flavor
return True
@ -21,7 +21,7 @@ LOG = logging.getLogger(__name__)

class PortsFilter(filters.BaseNodeFilter):
"""NodeFilter to work with resource instance type records."""
"""NodeFilter to work with resource server type records."""

def _find_port_type(self, ports, port_type):
"""Check if ports has the specified port type."""
@ -55,7 +55,7 @@ class PortsFilter(filters.BaseNodeFilter):
def node_passes(self, node_state, filter_properties):
"""Return a list of nodes that can create resource_type."""
spec = filter_properties.get('request_spec', {})
props = spec.get('instance_properties', {})
props = spec.get('server_properties', {})
networks = props.get('networks')
if not self._satisfies_networks(node_state.ports, networks):
LOG.debug("%(node_state)s fails network ports "

@ -38,7 +38,7 @@ class NodeState(object):
self.capabilities = node.extra_specs
self.availability_zone = node.availability_zone \
or CONF.engine.default_availability_zone
self.instance_type = node.node_type
self.flavor = node.node_type
self.ports = node.ports

def consume_from_request(self, context):

@ -47,7 +47,7 @@ def _get_fake_image(**kwargs):

def _get_fake_node(**kwargs):
fake_node = {
u'instance_uuid': u'dc18e1a6-4177-4b64-8a00-1974696dd049',
u'server_uuid': u'dc18e1a6-4177-4b64-8a00-1974696dd049',
u'power_state': u'power on',
u'links': [
{
@ -65,7 +65,7 @@ def _get_fake_node(**kwargs):
return fake_node


class TestInstances(v1_test.APITestV1):
class TestServers(v1_test.APITestV1):
INSTANCE_TYPE_UUID = 'ff28b5a2-73e5-431c-b4b7-1b96b74bca7b'

INSTANCE_UUIDS = ['59f1b681-6ca4-4a17-b784-297a7285004e',
@ -89,60 +89,60 @@ class TestInstances(v1_test.APITestV1):
return_value=self.network_api))
self.image_api.get.return_value = _get_fake_image()
self.network_api.validate_networks.return_value = 100
super(TestInstances, self).setUp()
self._prepare_instance_type()
self.addCleanup(self._clean_instances)
super(TestServers, self).setUp()
self._prepare_flavor()
self.addCleanup(self._clean_servers)
self.addCleanup(self._clean_type)

def _clean_instances(self):
for instance_uuid in self.INSTANCE_UUIDS:
def _clean_servers(self):
for server_uuid in self.INSTANCE_UUIDS:
# TODO(liusheng) should catch the NotFound exception
self.delete('/instances/' + instance_uuid, status=204,
self.delete('/servers/' + server_uuid, status=204,
expect_errors=True)

def _clean_type(self):
self.delete('/flavors/' + self.INSTANCE_TYPE_UUID, status=204)

def _make_app(self):
return super(TestInstances, self)._make_app()
return super(TestServers, self)._make_app()

@mock.patch('oslo_utils.uuidutils.generate_uuid')
def _prepare_instance_type(self, mocked):
def _prepare_flavor(self, mocked):
mocked.side_effect = [self.INSTANCE_TYPE_UUID]
body = {"name": "type_for_instance_testing",
"description": "type for instance testing"}
body = {"name": "type_for_server_testing",
"description": "type for server testing"}
self.post_json('/flavors', body, status=201)

@mock.patch('oslo_utils.uuidutils.generate_uuid')
def _prepare_instance(self, amount, mocked):
def _prepare_server(self, amount, mocked):
# NOTE(wanghao): Since we added quota reserve in creation option,
# there is one more generate_uuid out of provision_instances, so
# there is one more generate_uuid out of provision_servers, so
# amount should *2 here.
mocked.side_effect = self.INSTANCE_UUIDS[:(amount * 2)]
responses = []
headers = self.gen_headers(self.context)
for i in six.moves.xrange(amount):
test_body = {
"name": "test_instance_" + str(i),
"description": "just test instance " + str(i),
'instance_type_uuid': 'ff28b5a2-73e5-431c-b4b7-1b96b74bca7b',
"name": "test_server_" + str(i),
"description": "just test server " + str(i),
'flavor_uuid': 'ff28b5a2-73e5-431c-b4b7-1b96b74bca7b',
'image_uuid': 'b8f82429-3a13-4ffe-9398-4d1abdc256a8',
'networks': [
{'net_id': 'c1940655-8b8e-4370-b8f9-03ba1daeca31'}],
'extra': {'fake_key': 'fake_value'}
}
responses.append(
self.post_json('/instances', test_body, headers=headers,
self.post_json('/servers', test_body, headers=headers,
status=201))
return responses

def test_instance_post(self):
resp = self._prepare_instance(1)[0].json
self.assertEqual('test_instance_0', resp['name'])
def test_server_post(self):
resp = self._prepare_server(1)[0].json
self.assertEqual('test_server_0', resp['name'])
self.assertEqual('building', resp['status'])
self.assertEqual(self.INSTANCE_UUIDS[1], resp['uuid'])
self.assertEqual('just test instance 0', resp['description'])
self.assertEqual(self.INSTANCE_TYPE_UUID, resp['instance_type_uuid'])
self.assertEqual('just test server 0', resp['description'])
self.assertEqual(self.INSTANCE_TYPE_UUID, resp['flavor_uuid'])
self.assertEqual('b8f82429-3a13-4ffe-9398-4d1abdc256a8',
resp['image_uuid'])
self.assertEqual(None, resp['availability_zone'])
@ -155,16 +155,16 @@ class TestInstances(v1_test.APITestV1):
self.assertIn('project_id', resp)
self.assertIn('launched_at', resp)

def test_instance_show(self):
self._prepare_instance(1)
def test_server_show(self):
self._prepare_server(1)
headers = self.gen_headers(self.context)
resp = self.get_json('/instances/%s' % self.INSTANCE_UUIDS[1],
resp = self.get_json('/servers/%s' % self.INSTANCE_UUIDS[1],
headers=headers)
self.assertEqual('test_instance_0', resp['name'])
self.assertEqual('test_server_0', resp['name'])
self.assertEqual('building', resp['status'])
self.assertEqual(self.INSTANCE_UUIDS[1], resp['uuid'])
self.assertEqual('just test instance 0', resp['description'])
self.assertEqual(self.INSTANCE_TYPE_UUID, resp['instance_type_uuid'])
self.assertEqual('just test server 0', resp['description'])
self.assertEqual(self.INSTANCE_TYPE_UUID, resp['flavor_uuid'])
self.assertEqual('b8f82429-3a13-4ffe-9398-4d1abdc256a8',
resp['image_uuid'])
self.assertEqual(None, resp['availability_zone'])
@ -177,35 +177,35 @@ class TestInstances(v1_test.APITestV1):
self.assertIn('project_id', resp)
self.assertIn('launched_at', resp)

def test_instance_list(self):
self._prepare_instance(4)
def test_server_list(self):
self._prepare_server(4)
headers = self.gen_headers(self.context)
resps = self.get_json('/instances', headers=headers)['instances']
resps = self.get_json('/servers', headers=headers)['servers']
self.assertEqual(4, len(resps))
self.assertEqual('test_instance_0', resps[0]['name'])
self.assertEqual('just test instance 0', resps[0]['description'])
self.assertEqual('test_server_0', resps[0]['name'])
self.assertEqual('just test server 0', resps[0]['description'])
self.assertEqual('building', resps[0]['status'])

def test_instance_list_with_details(self):
self._prepare_instance(4)
def test_server_list_with_details(self):
self._prepare_server(4)
headers = self.gen_headers(self.context)
resps = self.get_json('/instances/detail',
headers=headers)['instances']
resps = self.get_json('/servers/detail',
headers=headers)['servers']
self.assertEqual(4, len(resps))
self.assertEqual(16, len(resps[0].keys()))
self.assertEqual('test_instance_0', resps[0]['name'])
self.assertEqual('just test instance 0', resps[0]['description'])
self.assertEqual('test_server_0', resps[0]['name'])
self.assertEqual('just test server 0', resps[0]['description'])
self.assertEqual('building', resps[0]['status'])
self.assertEqual('ff28b5a2-73e5-431c-b4b7-1b96b74bca7b',
resps[0]['instance_type_uuid'])
resps[0]['flavor_uuid'])
self.assertEqual('b8f82429-3a13-4ffe-9398-4d1abdc256a8',
resps[0]['image_uuid'])

def test_instance_delete(self):
self._prepare_instance(4)
def test_server_delete(self):
self._prepare_server(4)
headers = self.gen_headers(self.context)
self.delete('/instances/' + self.INSTANCE_UUIDS[1], headers=headers,
self.delete('/servers/' + self.INSTANCE_UUIDS[1], headers=headers,
status=204)
resp = self.get_json('/instances/%s' % self.INSTANCE_UUIDS[1],
resp = self.get_json('/servers/%s' % self.INSTANCE_UUIDS[1],
headers=headers)
self.assertEqual('deleting', resp['status'])

@ -71,50 +71,50 @@ class BaseBaremetalComputeTest(tempest.test.BaseTestCase):
def resource_setup(cls):
super(BaseBaremetalComputeTest, cls).resource_setup()
cls.flavor_ids = []
cls.instance_ids = []
cls.server_ids = []
cls.small_flavor = cls._get_small_flavor()
cls.image_id = CONF.compute.image_ref
cls.net_id = cls._get_net_id()

@classmethod
def create_instance(cls, wait_until_active=True):
body = {'name': data_utils.rand_name('mogan_instance'),
'description': "mogan tempest instance",
'instance_type_uuid': cls.small_flavor,
def create_server(cls, wait_until_active=True):
body = {'name': data_utils.rand_name('mogan_server'),
'description': "mogan tempest server",
'flavor_uuid': cls.small_flavor,
'image_uuid': cls.image_id,
"networks": [{"net_id": cls.net_id}]
}
resp = cls.baremetal_compute_client.create_instance(**body)
cls.instance_ids.append(resp['uuid'])
resp = cls.baremetal_compute_client.create_server(**body)
cls.server_ids.append(resp['uuid'])
if wait_until_active:
cls._wait_for_instances_status(resp['uuid'], 'active', 15, 900)
cls._wait_for_servers_status(resp['uuid'], 'active', 15, 900)
return resp

@classmethod
def _wait_for_instances_status(cls, inst_id, status,
wait_interval, wait_timeout):
"""Waits for a Instance to reach a given status."""
inst_status = None
def _wait_for_servers_status(cls, server_id, status,
wait_interval, wait_timeout):
"""Waits for a Server to reach a given status."""
server_status = None
start = int(time.time())

while inst_status != status:
while server_status != status:
time.sleep(wait_interval)
try:
body = cls.baremetal_compute_client.show_instance(inst_id)
inst_status = body['status']
body = cls.baremetal_compute_client.show_server(server_id)
server_status = body['status']
except lib_exc.NotFound:
if status == 'deleted':
break
else:
raise
if inst_status == 'error' and status != 'error':
msg = ('Failed to provision instance %s' % inst_id)
raise exception.InstanceDeployFailure(msg)
if server_status == 'error' and status != 'error':
msg = ('Failed to provision server %s' % server_id)
raise exception.ServerDeployFailure(msg)

if int(time.time()) - start >= wait_timeout:
message = ('Instance %s failed to reach %s status '
message = ('Server %s failed to reach %s status '
'(current %s) within the required time (%s s).' %
(inst_id, status, inst_status,
(server_id, status, server_status,
wait_timeout))
raise lib_exc.TimeoutException(message)

@ -130,6 +130,6 @@ class BaseBaremetalComputeTest(tempest.test.BaseTestCase):
def resource_cleanup(cls):
cls.cleanup_resources(
cls.baremetal_compute_client.delete_flavor, cls.flavor_ids)
cls.cleanup_resources(cls.baremetal_compute_client.delete_instance,
cls.instance_ids)
cls.cleanup_resources(cls.baremetal_compute_client.delete_server,
cls.server_ids)
super(BaseBaremetalComputeTest, cls).resource_cleanup()
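A standalone version of the polling pattern behind _wait_for_servers_status() above: poll a show() callable until the target status, an error state, or a timeout is reached. The client is replaced by any callable returning a dict with a 'status' key, and plain exceptions stand in for the tempest ones.

import time

def wait_for_status(show, target, wait_interval=1, wait_timeout=10):
    start = time.time()
    while True:
        status = show()['status']
        if status == target:
            return status
        if status == 'error' and target != 'error':
            raise RuntimeError('resource went to error while waiting for %s' % target)
        if time.time() - start >= wait_timeout:
            raise RuntimeError('did not reach %s (last status: %s)' % (target, status))
        time.sleep(wait_interval)

print(wait_for_status(lambda: {'status': 'active'}, 'active'))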
@ -35,10 +35,10 @@ class BaremetalComputeAPITest(base.BaseBaremetalComputeTest):
|
||||
|
||||
@decorators.idempotent_id('4b256d35-47a9-4195-8f7e-56ceb4ce4737')
|
||||
def test_flavor_list(self):
|
||||
# List instance types
|
||||
# List flavors
|
||||
flavor_list = self.baremetal_compute_client.list_flavors()
|
||||
|
||||
# Verify created instance type in the list
|
||||
# Verify the created flavors are in the list
|
||||
fetched_ids = [f['uuid'] for f in flavor_list]
|
||||
missing_flavors = [a for a in self.flavor_ids if a not in fetched_ids]
|
||||
self.assertEqual(0, len(missing_flavors),
|
||||
|
@ -15,19 +15,19 @@
|
||||
from mogan.tests.tempest.api import base
|
||||
|
||||
|
||||
class BaremetalComputeAPIInstancesTest(base.BaseBaremetalComputeTest):
|
||||
def test_instance_all_cases(self):
|
||||
# NOTE(liusheng) Since the mogan server deployment is a
|
||||
class BaremetalComputeAPIServersTest(base.BaseBaremetalComputeTest):
|
||||
def test_server_all_cases(self):
|
||||
# NOTE(liusheng) Since the mogan server deployment is a
|
||||
# time-consuming operation and the ironic resource cleanup
|
||||
# will be performed after a server is deleted, we'd better
|
||||
# put all the test cases in a single test
|
||||
|
||||
# Test post
|
||||
resp = self.create_instance()
|
||||
self.assertEqual(self.instance_ids[0], resp['uuid'])
|
||||
resp = self.create_server()
|
||||
self.assertEqual(self.server_ids[0], resp['uuid'])
|
||||
self.assertEqual('building', resp['status'])
|
||||
self.assertEqual(self.small_flavor, resp['instance_type_uuid'])
|
||||
self.assertEqual('mogan tempest instance', resp['description'])
|
||||
self.assertEqual(self.small_flavor, resp['flavor_uuid'])
|
||||
self.assertEqual('mogan tempest server', resp['description'])
|
||||
self.assertEqual(self.image_id, resp['image_uuid'])
|
||||
self.assertIn('launched_at', resp)
|
||||
self.assertIn('updated_at', resp)
|
||||
@ -40,11 +40,11 @@ class BaremetalComputeAPIInstancesTest(base.BaseBaremetalComputeTest):
|
||||
self.assertIn('name', resp)
|
||||
|
||||
# Test show
|
||||
resp = self.baremetal_compute_client.show_instance(
|
||||
self.instance_ids[0])
|
||||
resp = self.baremetal_compute_client.show_server(
|
||||
self.server_ids[0])
|
||||
self.assertEqual('active', resp['status'])
|
||||
self.assertEqual(self.small_flavor, resp['instance_type_uuid'])
|
||||
self.assertEqual('mogan tempest instance', resp['description'])
|
||||
self.assertEqual(self.small_flavor, resp['flavor_uuid'])
|
||||
self.assertEqual('mogan tempest server', resp['description'])
|
||||
self.assertEqual(self.image_id, resp['image_uuid'])
|
||||
self.assertEqual('power on', resp['power_state'])
|
||||
self.assertIn('launched_at', resp)
|
||||
@ -58,17 +58,16 @@ class BaremetalComputeAPIInstancesTest(base.BaseBaremetalComputeTest):
|
||||
self.assertIn('name', resp)
|
||||
|
||||
# Test list
|
||||
resp = self.baremetal_compute_client.list_instances()
|
||||
resp = self.baremetal_compute_client.list_servers()
|
||||
self.assertEqual(1, len(resp))
|
||||
self.assertEqual(self.instance_ids[0], resp[0]['uuid'])
|
||||
self.assertEqual(self.server_ids[0], resp[0]['uuid'])
|
||||
self.assertEqual('active', resp[0]['status'])
|
||||
self.assertIn('name', resp[0])
|
||||
self.assertEqual('mogan tempest instance', resp[0]['description'])
|
||||
self.assertEqual('mogan tempest server', resp[0]['description'])
|
||||
self.assertIn('links', resp[0])
|
||||
|
||||
# Test delete
|
||||
self.baremetal_compute_client.delete_instance(
|
||||
self.instance_ids[0])
|
||||
self._wait_for_instances_status(self.instance_ids[0], 'deleted',
|
||||
10, 900)
|
||||
self.instance_ids.remove(self.instance_ids[0])
|
||||
self.baremetal_compute_client.delete_server(
|
||||
self.server_ids[0])
|
||||
self._wait_for_servers_status(self.server_ids[0], 'deleted', 10, 900)
|
||||
self.server_ids.remove(self.server_ids[0])
|
@ -66,30 +66,30 @@ class BaremetalComputeClient(rest_client.RestClient):
|
||||
body = self.deserialize(body)
|
||||
return rest_client.ResponseBody(resp, body)
|
||||
|
||||
def create_instance(self, **kwargs):
|
||||
uri = "%s/instances" % self.uri_prefix
|
||||
def create_server(self, **kwargs):
|
||||
uri = "%s/servers" % self.uri_prefix
|
||||
body = self.serialize(kwargs)
|
||||
resp, body = self.post(uri, body)
|
||||
self.expected_success(201, resp.status)
|
||||
body = self.deserialize(body)
|
||||
return rest_client.ResponseBody(resp, body)
|
||||
|
||||
def list_instances(self):
|
||||
uri = '%s/instances' % self.uri_prefix
|
||||
def list_servers(self):
|
||||
uri = '%s/servers' % self.uri_prefix
|
||||
resp, body = self.get(uri)
|
||||
self.expected_success(200, resp.status)
|
||||
body = self.deserialize(body)['instances']
|
||||
body = self.deserialize(body)['servers']
|
||||
return rest_client.ResponseBodyList(resp, body)
|
||||
|
||||
def show_instance(self, instance_id):
|
||||
uri = '%s/instances/%s' % (self.uri_prefix, instance_id)
|
||||
def show_server(self, server_id):
|
||||
uri = '%s/servers/%s' % (self.uri_prefix, server_id)
|
||||
resp, body = self.get(uri)
|
||||
self.expected_success(200, resp.status)
|
||||
body = self.deserialize(body)
|
||||
return rest_client.ResponseBody(resp, body)
|
||||
|
||||
def delete_instance(self, instance_id):
|
||||
uri = "%s/instances/%s" % (self.uri_prefix, instance_id)
|
||||
def delete_server(self, server_id):
|
||||
uri = "%s/servers/%s" % (self.uri_prefix, server_id)
|
||||
resp, body = self.delete(uri)
|
||||
self.expected_success(204, resp.status)
|
||||
if body:
|
||||
|
@ -34,76 +34,76 @@ def gen_post_body(**kw):
|
||||
}
|
||||
]
|
||||
return {
|
||||
"name": kw.get("name", "test_instance"),
|
||||
"description": kw.get("description", "this is a test instance"),
|
||||
"instance_type_uuid": kw.get(
|
||||
"instance_type_uuid", "0607b5f3-6111-424d-ba46-f5de39a6fa69"),
|
||||
"name": kw.get("name", "test_server"),
|
||||
"description": kw.get("description", "this is a test server"),
|
||||
"flavor_uuid": kw.get(
|
||||
"flavor_uuid", "0607b5f3-6111-424d-ba46-f5de39a6fa69"),
|
||||
"image_uuid": kw.get(
|
||||
"image_uuid", "efe0a06f-ca95-4808-b41e-9f55b9c5eb98"),
|
||||
"networks": kw.get("networks", fake_networks)
|
||||
}
|
||||
|
||||
|
||||
class TestInstanceAuthorization(v1_test.APITestV1):
|
||||
class TestServerAuthorization(v1_test.APITestV1):
|
||||
|
||||
DENY_MESSAGE = "Access was denied to the following resource: mogan:%s"
|
||||
|
||||
def setUp(self):
|
||||
super(TestInstanceAuthorization, self).setUp()
|
||||
super(TestServerAuthorization, self).setUp()
|
||||
project_id = "0abcdef1-2345-6789-abcd-ef123456abc1"
|
||||
# evil_project is a wicked tenant, used for the unauthorized access test.
|
||||
self.evil_project = "0abcdef1-2345-6789-abcd-ef123456abc9"
|
||||
self.instance1 = utils.create_test_instance(
|
||||
self.server1 = utils.create_test_server(
|
||||
name="T1", project_id=project_id)
|
||||
|
||||
@mock.patch('mogan.engine.api.API.create')
|
||||
@mock.patch('mogan.objects.InstanceType.get')
|
||||
def test_instance_post(self, mock_get, mock_engine_create):
|
||||
@mock.patch('mogan.objects.Flavor.get')
|
||||
def test_server_post(self, mock_get, mock_engine_create):
|
||||
mock_get.side_effect = None
|
||||
mock_engine_create.side_effect = None
|
||||
mock_engine_create.return_value = [self.instance1]
|
||||
mock_engine_create.return_value = [self.server1]
|
||||
body = gen_post_body()
|
||||
self.context.roles = "no-admin"
|
||||
# we cannot prevent the evil tenant; quota will limit him.
|
||||
# Note(Shaohe): quota support is planned
|
||||
self.context.tenant = self.evil_project
|
||||
headers = self.gen_headers(self.context)
|
||||
self.post_json('/instances', body, headers=headers, status=201)
|
||||
self.post_json('/servers', body, headers=headers, status=201)
|
||||
|
||||
def test_instance_get_one_by_owner(self):
|
||||
def test_server_get_one_by_owner(self):
|
||||
# not admin but the owner
|
||||
self.context.tenant = self.instance1.project_id
|
||||
self.context.tenant = self.server1.project_id
|
||||
headers = self.gen_headers(self.context, roles="no-admin")
|
||||
self.get_json('/instances/%s' % self.instance1.uuid, headers=headers)
|
||||
self.get_json('/servers/%s' % self.server1.uuid, headers=headers)
|
||||
|
||||
def test_instance_get_one_by_admin(self):
|
||||
def test_server_get_one_by_admin(self):
|
||||
# when the evil tenant is admin, he can do everything.
|
||||
self.context.tenant = self.evil_project
|
||||
headers = self.gen_headers(self.context, roles="admin")
|
||||
self.get_json('/instances/%s' % self.instance1.uuid, headers=headers)
|
||||
self.get_json('/servers/%s' % self.server1.uuid, headers=headers)
|
||||
|
||||
def test_instance_get_one_unauthorized(self):
|
||||
def test_server_get_one_unauthorized(self):
|
||||
# not admin and not the owner
|
||||
self.context.tenant = self.evil_project
|
||||
headers = self.gen_headers(self.context, roles="no-admin")
|
||||
resp = self.get_json('/instances/%s' % self.instance1.uuid,
|
||||
resp = self.get_json('/servers/%s' % self.server1.uuid,
|
||||
True, headers=headers)
|
||||
error = self.parser_error_body(resp)
|
||||
self.assertEqual(error['faultstring'],
|
||||
self.DENY_MESSAGE % 'instance:get')
|
||||
self.DENY_MESSAGE % 'server:get')
|
||||
|
||||
|
||||
class TestPatch(v1_test.APITestV1):
|
||||
|
||||
def setUp(self):
|
||||
super(TestPatch, self).setUp()
|
||||
self.instance = utils.create_test_instance(name="patch_instance")
|
||||
self.context.tenant = self.instance.project_id
|
||||
self.server = utils.create_test_server(name="patch_server")
|
||||
self.context.tenant = self.server.project_id
|
||||
self.headers = self.gen_headers(self.context, roles="no-admin")
|
||||
|
||||
def test_update_not_found(self):
|
||||
uuid = uuidutils.generate_uuid()
|
||||
response = self.patch_json('/instances/%s' % uuid,
|
||||
response = self.patch_json('/servers/%s' % uuid,
|
||||
[{'path': '/extra/a', 'value': 'b',
|
||||
'op': 'add'}],
|
||||
headers=self.headers,
|
||||
@ -114,17 +114,17 @@ class TestPatch(v1_test.APITestV1):
|
||||
|
||||
@mock.patch.object(timeutils, 'utcnow')
|
||||
def test_replace_singular(self, mock_utcnow):
|
||||
description = 'instance-new-description'
|
||||
description = 'server-new-description'
|
||||
test_time = datetime.datetime(2000, 1, 1, 0, 0)
|
||||
|
||||
mock_utcnow.return_value = test_time
|
||||
response = self.patch_json('/instances/%s' % self.instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % self.server.uuid,
|
||||
[{'path': '/description',
|
||||
'value': description, 'op': 'replace'}],
|
||||
headers=self.headers)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(http_client.OK, response.status_code)
|
||||
result = self.get_json('/instances/%s' % self.instance.uuid,
|
||||
result = self.get_json('/servers/%s' % self.server.uuid,
|
||||
headers=self.headers)
|
||||
self.assertEqual(description, result['description'])
|
||||
return_updated_at = timeutils.parse_isotime(
|
||||
@ -134,16 +134,16 @@ class TestPatch(v1_test.APITestV1):
|
||||
def test_replace_multi(self):
|
||||
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
|
||||
uuid = uuidutils.generate_uuid()
|
||||
instance = utils.create_test_instance(name='test1', uuid=uuid,
|
||||
extra=extra)
|
||||
server = utils.create_test_server(name='test1', uuid=uuid,
|
||||
extra=extra)
|
||||
new_value = 'new value'
|
||||
response = self.patch_json('/instances/%s' % instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % server.uuid,
|
||||
[{'path': '/extra/foo2',
|
||||
'value': new_value, 'op': 'replace'}],
|
||||
headers=self.headers)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(http_client.OK, response.status_code)
|
||||
result = self.get_json('/instances/%s' % instance.uuid,
|
||||
result = self.get_json('/servers/%s' % server.uuid,
|
||||
headers=self.headers)
|
||||
|
||||
extra["foo2"] = new_value
|
||||
@ -151,54 +151,54 @@ class TestPatch(v1_test.APITestV1):
|
||||
|
||||
def test_remove_singular(self):
|
||||
uuid = uuidutils.generate_uuid()
|
||||
instance = utils.create_test_instance(name='test2', uuid=uuid,
|
||||
extra={'a': 'b'})
|
||||
response = self.patch_json('/instances/%s' % instance.uuid,
|
||||
server = utils.create_test_server(name='test2', uuid=uuid,
|
||||
extra={'a': 'b'})
|
||||
response = self.patch_json('/servers/%s' % server.uuid,
|
||||
[{'path': '/description', 'op': 'remove'}],
|
||||
headers=self.headers)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(http_client.OK, response.status_code)
|
||||
result = self.get_json('/instances/%s' % instance.uuid,
|
||||
result = self.get_json('/servers/%s' % server.uuid,
|
||||
headers=self.headers)
|
||||
self.assertIsNone(result['description'])
|
||||
|
||||
# Assert nothing else was changed
|
||||
self.assertEqual(instance.uuid, result['uuid'])
|
||||
self.assertEqual(instance.extra, result['extra'])
|
||||
self.assertEqual(server.uuid, result['uuid'])
|
||||
self.assertEqual(server.extra, result['extra'])
|
||||
|
||||
def test_remove_multi(self):
|
||||
extra = {"foo1": "bar1", "foo2": "bar2", "foo3": "bar3"}
|
||||
uuid = uuidutils.generate_uuid()
|
||||
instance = utils.create_test_instance(name='test3', extra=extra,
|
||||
uuid=uuid, description="foobar")
|
||||
server = utils.create_test_server(name='test3', extra=extra,
|
||||
uuid=uuid, description="foobar")
|
||||
|
||||
# Removing one item from the collection
|
||||
response = self.patch_json('/instances/%s' % instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % server.uuid,
|
||||
[{'path': '/extra/foo2', 'op': 'remove'}],
|
||||
headers=self.headers)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(http_client.OK, response.status_code)
|
||||
result = self.get_json('/instances/%s' % instance.uuid,
|
||||
result = self.get_json('/servers/%s' % server.uuid,
|
||||
headers=self.headers)
|
||||
extra.pop("foo2")
|
||||
self.assertEqual(extra, result['extra'])
|
||||
|
||||
# Removing the collection
|
||||
response = self.patch_json('/instances/%s' % instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % server.uuid,
|
||||
[{'path': '/extra', 'op': 'remove'}],
|
||||
headers=self.headers)
|
||||
self.assertEqual(http_client.OK, response.status_code)
|
||||
result = self.get_json('/instances/%s' % instance.uuid,
|
||||
result = self.get_json('/servers/%s' % server.uuid,
|
||||
headers=self.headers)
|
||||
self.assertEqual({}, result['extra'])
|
||||
|
||||
# Assert nothing else was changed
|
||||
self.assertEqual(instance.uuid, result['uuid'])
|
||||
self.assertEqual(instance.description, result['description'])
|
||||
self.assertEqual(server.uuid, result['uuid'])
|
||||
self.assertEqual(server.description, result['description'])
|
||||
|
||||
def test_remove_non_existent_property_fail(self):
|
||||
response = self.patch_json(
|
||||
'/instances/%s' % self.instance.uuid,
|
||||
'/servers/%s' % self.server.uuid,
|
||||
[{'path': '/extra/non-existent', 'op': 'remove'}],
|
||||
headers=self.headers,
|
||||
expect_errors=True)
|
||||
@ -207,7 +207,7 @@ class TestPatch(v1_test.APITestV1):
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_add_root(self):
|
||||
response = self.patch_json('/instances/%s' % self.instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % self.server.uuid,
|
||||
[{'path': '/description', 'value': 'test',
|
||||
'op': 'add'}],
|
||||
headers=self.headers)
|
||||
@ -215,7 +215,7 @@ class TestPatch(v1_test.APITestV1):
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
|
||||
def test_add_root_non_existent(self):
|
||||
response = self.patch_json('/instances/%s' % self.instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % self.server.uuid,
|
||||
[{'path': '/foo', 'value': 'bar',
|
||||
'op': 'add'}],
|
||||
expect_errors=True,
|
||||
@ -224,7 +224,7 @@ class TestPatch(v1_test.APITestV1):
|
||||
self.assertTrue(response.json['error_message'])
|
||||
|
||||
def test_add_multi(self):
|
||||
response = self.patch_json('/instances/%s' % self.instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % self.server.uuid,
|
||||
[{'path': '/extra/foo1', 'value': 'bar1',
|
||||
'op': 'add'},
|
||||
{'path': '/extra/foo2', 'value': 'bar2',
|
||||
@ -232,13 +232,13 @@ class TestPatch(v1_test.APITestV1):
|
||||
headers=self.headers)
|
||||
self.assertEqual('application/json', response.content_type)
|
||||
self.assertEqual(http_client.OK, response.status_code)
|
||||
result = self.get_json('/instances/%s' % self.instance.uuid,
|
||||
result = self.get_json('/servers/%s' % self.server.uuid,
|
||||
headers=self.headers)
|
||||
expected = {"foo1": "bar1", "foo2": "bar2"}
|
||||
self.assertEqual(expected, result['extra'])
|
||||
|
||||
def test_remove_uuid(self):
|
||||
response = self.patch_json('/instances/%s' % self.instance.uuid,
|
||||
response = self.patch_json('/servers/%s' % self.server.uuid,
|
||||
[{'path': '/uuid', 'op': 'remove'}],
|
||||
expect_errors=True,
|
||||
headers=self.headers)
|
@ -25,13 +25,13 @@ class TestAuthorizeWsgi(base.TestCase):
|
||||
self.ctxt = context.RequestContext(
|
||||
tenant='c18e8a1a870d4c08a0b51ced6e0b6459',
|
||||
user='cdbf77d47f1d4d04ad9b7ff62b672467')
|
||||
self.test_inst = utils.get_test_instance(self.ctxt)
|
||||
self.fake_controller._get_resource.return_value = self.test_inst
|
||||
self.test_server = utils.get_test_server(self.ctxt)
|
||||
self.fake_controller._get_resource.return_value = self.test_server
|
||||
|
||||
def power(self, instance_uuid, target):
|
||||
def power(self, server_uuid, target):
|
||||
pass
|
||||
|
||||
def lock(self, instance_uuid, target):
|
||||
def lock(self, server_uuid, target):
|
||||
pass
|
||||
|
||||
self.fake_power = power
|
||||
@ -41,15 +41,15 @@ class TestAuthorizeWsgi(base.TestCase):
|
||||
def test_authorize_power_action_owner(self, mocked_pecan_request):
|
||||
mocked_pecan_request.context = self.ctxt
|
||||
|
||||
policy.authorize_wsgi("mogan:instance", "set_power_state")(
|
||||
self.fake_power)(self.fake_controller, 'fake_instance_id', 'off')
|
||||
policy.authorize_wsgi("mogan:server", "set_power_state")(
|
||||
self.fake_power)(self.fake_controller, 'fake_server_id', 'off')
|
||||
|
||||
@mock.patch('pecan.request')
|
||||
def test_authorize_power_action_admin(self, mocked_pecan_request):
|
||||
mocked_pecan_request.context = context.get_admin_context()
|
||||
|
||||
policy.authorize_wsgi("mogan:instance", "set_power_state")(
|
||||
self.fake_power)(self.fake_controller, 'fake_instance_id', 'off')
|
||||
policy.authorize_wsgi("mogan:server", "set_power_state")(
|
||||
self.fake_power)(self.fake_controller, 'fake_server_id', 'off')
|
||||
|
||||
@mock.patch('pecan.response')
|
||||
@mock.patch('pecan.request')
|
||||
@ -59,27 +59,27 @@ class TestAuthorizeWsgi(base.TestCase):
|
||||
tenant='non-exist-tenant',
|
||||
user='non-exist-user')
|
||||
|
||||
data = policy.authorize_wsgi("mogan:instance", "set_power_state")(
|
||||
self.fake_power)(self.fake_controller, 'fake_instance_id',
|
||||
data = policy.authorize_wsgi("mogan:server", "set_power_state")(
|
||||
self.fake_power)(self.fake_controller, 'fake_server_id',
|
||||
'reboot')
|
||||
self.assertEqual(403, mocked_pecan_response.status)
|
||||
self.assertEqual('Access was denied to the following resource: '
|
||||
'mogan:instance:set_power_state',
|
||||
'mogan:server:set_power_state',
|
||||
data['faultstring'])
|
||||
|
||||
@mock.patch('pecan.request')
|
||||
def test_authorize_lock_action_owner(self, mocked_pecan_request):
|
||||
mocked_pecan_request.context = self.ctxt
|
||||
|
||||
policy.authorize_wsgi("mogan:instance", "set_lock_state")(
|
||||
self.fake_lock)(self.fake_controller, 'fake_instance_id', True)
|
||||
policy.authorize_wsgi("mogan:server", "set_lock_state")(
|
||||
self.fake_lock)(self.fake_controller, 'fake_server_id', True)
|
||||
|
||||
@mock.patch('pecan.request')
|
||||
def test_authorize_lock_action_admin(self, mocked_pecan_request):
|
||||
mocked_pecan_request.context = context.get_admin_context()
|
||||
|
||||
policy.authorize_wsgi("mogan:instance", "set_lock_state")(
|
||||
self.fake_lock)(self.fake_controller, 'fake_instance_id', True)
|
||||
policy.authorize_wsgi("mogan:server", "set_lock_state")(
|
||||
self.fake_lock)(self.fake_controller, 'fake_server_id', True)
|
||||
|
||||
@mock.patch('pecan.response')
|
||||
@mock.patch('pecan.request')
|
||||
@ -89,10 +89,10 @@ class TestAuthorizeWsgi(base.TestCase):
|
||||
tenant='non-exist-tenant',
|
||||
user='non-exist-user')
|
||||
|
||||
data = policy.authorize_wsgi("mogan:instance", "set_lock_state")(
|
||||
self.fake_lock)(self.fake_controller, 'fake_instance_id',
|
||||
data = policy.authorize_wsgi("mogan:server", "set_lock_state")(
|
||||
self.fake_lock)(self.fake_controller, 'fake_server_id',
|
||||
True)
|
||||
self.assertEqual(403, mocked_pecan_response.status)
|
||||
self.assertEqual('Access was denied to the following resource: '
|
||||
'mogan:instance:set_lock_state',
|
||||
'mogan:server:set_lock_state',
|
||||
data['faultstring'])
|
||||
|
@ -30,12 +30,12 @@ class ConsoleAuthManagerTestCase(test.TestCase):
|
||||
super(ConsoleAuthManagerTestCase, self).setUp()
|
||||
self.manager = manager.ConsoleAuthManager('test-host',
|
||||
'test-consoleauth-topic')
|
||||
self.instance_uuid = 'e7481762-3215-4489-bde5-0068a6bf79d1'
|
||||
self.server_uuid = 'e7481762-3215-4489-bde5-0068a6bf79d1'
|
||||
self.config(backend='oslo_cache.dict', enabled=True,
|
||||
group='cache')
|
||||
self.addCleanup(
|
||||
self.manager.delete_tokens_for_instance, self.context,
|
||||
self.instance_uuid)
|
||||
self.manager.delete_tokens_for_server, self.context,
|
||||
self.server_uuid)
|
||||
|
||||
def test_reset(self):
|
||||
with mock.patch('mogan.engine.rpcapi.EngineAPI') as mock_rpc:
|
||||
@ -51,33 +51,33 @@ class ConsoleAuthManagerTestCase(test.TestCase):
|
||||
self.config(expiration_time=1, group='cache')
|
||||
self.manager.authorize_console(
|
||||
self.context, token, 'shellinabox', '127.0.0.1', 4321,
|
||||
None, self.instance_uuid, None)
|
||||
None, self.server_uuid, None)
|
||||
self.assertIsNotNone(self.manager.check_token(self.context, token))
|
||||
time.sleep(1)
|
||||
self.assertIsNone(self.manager.check_token(self.context, token))
|
||||
|
||||
def test_multiple_tokens_for_instance(self):
|
||||
def test_multiple_tokens_for_server(self):
|
||||
tokens = [u"token" + str(i) for i in range(10)]
|
||||
|
||||
for token in tokens:
|
||||
self.manager.authorize_console(
|
||||
self.context, token, 'shellinabox', '127.0.0.1', 4321,
|
||||
None, self.instance_uuid, None)
|
||||
None, self.server_uuid, None)
|
||||
|
||||
for token in tokens:
|
||||
self.assertIsNotNone(
|
||||
self.manager.check_token(self.context, token))
|
||||
|
||||
def test_delete_tokens_for_instance(self):
|
||||
def test_delete_tokens_for_server(self):
|
||||
tokens = [u"token" + str(i) for i in range(10)]
|
||||
for token in tokens:
|
||||
self.manager.authorize_console(
|
||||
self.context, token, 'shellinabox', '127.0.0.1', 4321,
|
||||
None, self.instance_uuid, None)
|
||||
self.manager.delete_tokens_for_instance(self.context,
|
||||
self.instance_uuid)
|
||||
stored_tokens = self.manager._get_tokens_for_instance(
|
||||
self.instance_uuid)
|
||||
None, self.server_uuid, None)
|
||||
self.manager.delete_tokens_for_server(self.context,
|
||||
self.server_uuid)
|
||||
stored_tokens = self.manager._get_tokens_for_server(
|
||||
self.server_uuid)
|
||||
|
||||
self.assertEqual(len(stored_tokens), 0)
|
||||
|
||||
@ -91,16 +91,16 @@ class ConsoleAuthManagerTestCase(test.TestCase):
|
||||
|
||||
self.manager.authorize_console(
|
||||
self.context, token, 'shellinabox', '127.0.0.1', 4321,
|
||||
None, self.instance_uuid, None)
|
||||
None, self.server_uuid, None)
|
||||
time.sleep(1)
|
||||
self.assertIsNone(self.manager.check_token(self.context, token))
|
||||
|
||||
token1 = u'mytok2'
|
||||
self.manager.authorize_console(
|
||||
self.context, token1, 'shellinabox', '127.0.0.1', 4321,
|
||||
None, self.instance_uuid, None)
|
||||
stored_tokens = self.manager._get_tokens_for_instance(
|
||||
self.instance_uuid)
|
||||
None, self.server_uuid, None)
|
||||
stored_tokens = self.manager._get_tokens_for_server(
|
||||
self.server_uuid)
|
||||
# when trying to store token1, the expired token is removed first.
|
||||
self.assertEqual(len(stored_tokens), 1)
|
||||
self.assertEqual(stored_tokens[0], token1)
|
||||
|
@ -66,7 +66,7 @@ class DbTestCase(base.TestCase):
|
||||
def setUp(self):
|
||||
super(DbTestCase, self).setUp()
|
||||
|
||||
self.dbapi = dbapi.get_instance()
|
||||
self.dbapi = dbapi.get_server()
|
||||
|
||||
global _DB_CACHE
|
||||
if not _DB_CACHE:
|
||||
|
@ -224,7 +224,7 @@ class MigrationCheckersMixin(object):
|
||||
self.fail("Shouldn't have connected")
|
||||
|
||||
def _check_91941bf1ebc9(self, engine, data):
|
||||
nodes = db_utils.get_table(engine, 'instances')
|
||||
nodes = db_utils.get_table(engine, 'servers')
|
||||
col_names = [column.name for column in nodes.c]
|
||||
self.assertIn('created_at', col_names)
|
||||
self.assertIsInstance(nodes.c.provision_updated_at.type,
|
||||
|
@ -13,55 +13,55 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Tests for manipulating Instance Type Extra Specs via the DB API"""
|
||||
"""Tests for manipulating Flavor Extra Specs via the DB API"""
|
||||
|
||||
from mogan.common import exception
|
||||
from mogan.tests.unit.db import base
|
||||
from mogan.tests.unit.db import utils
|
||||
|
||||
|
||||
class DbInstanceTypeExtraSpecsTestCase(base.DbTestCase):
|
||||
class DbFlavorExtraSpecsTestCase(base.DbTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(DbInstanceTypeExtraSpecsTestCase, self).setUp()
|
||||
super(DbFlavorExtraSpecsTestCase, self).setUp()
|
||||
self.context = {}
|
||||
self.instance_type = utils.create_test_instance_type()
|
||||
self.flavor = utils.create_test_flavor()
|
||||
self.specs = {'k1': 'v1', 'k2': 'v2'}
|
||||
|
||||
def test_create_extra_specs(self):
|
||||
self.dbapi.extra_specs_update_or_create(
|
||||
self.context, self.instance_type['uuid'], self.specs)
|
||||
self.context, self.flavor['uuid'], self.specs)
|
||||
|
||||
def test_get_extra_specs(self):
|
||||
self.dbapi.extra_specs_update_or_create(
|
||||
self.context, self.instance_type['uuid'], self.specs)
|
||||
extra_specs = self.dbapi.instance_type_extra_specs_get(
|
||||
self.context, self.instance_type['uuid'])
|
||||
self.context, self.flavor['uuid'], self.specs)
|
||||
extra_specs = self.dbapi.flavor_extra_specs_get(
|
||||
self.context, self.flavor['uuid'])
|
||||
|
||||
self.assertEqual(self.specs, extra_specs)
|
||||
|
||||
def test_get_extra_specs_empty(self):
|
||||
extra_specs = self.dbapi.instance_type_extra_specs_get(
|
||||
self.context, self.instance_type['uuid'])
|
||||
extra_specs = self.dbapi.flavor_extra_specs_get(
|
||||
self.context, self.flavor['uuid'])
|
||||
|
||||
self.assertEqual({}, extra_specs)
|
||||
|
||||
def test_destroy_extra_specs(self):
|
||||
self.dbapi.extra_specs_update_or_create(
|
||||
self.context, self.instance_type['uuid'], self.specs)
|
||||
self.context, self.flavor['uuid'], self.specs)
|
||||
|
||||
self.dbapi.type_extra_specs_delete(
|
||||
self.context, self.instance_type['uuid'], 'k1')
|
||||
extra_specs = self.dbapi.instance_type_extra_specs_get(
|
||||
self.context, self.instance_type['uuid'])
|
||||
self.context, self.flavor['uuid'], 'k1')
|
||||
extra_specs = self.dbapi.flavor_extra_specs_get(
|
||||
self.context, self.flavor['uuid'])
|
||||
|
||||
self.assertEqual({'k2': 'v2'}, extra_specs)
|
||||
|
||||
def test_delete_extra_specs_does_not_exist(self):
|
||||
self.dbapi.extra_specs_update_or_create(
|
||||
self.context, self.instance_type['uuid'], self.specs)
|
||||
self.context, self.flavor['uuid'], self.specs)
|
||||
self.assertRaises(exception.FlavorExtraSpecsNotFound,
|
||||
self.dbapi.type_extra_specs_delete,
|
||||
self.context,
|
||||
self.instance_type['uuid'],
|
||||
self.flavor['uuid'],
|
||||
'k3')
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Tests for manipulating Instance Types via the DB API"""
|
||||
"""Tests for manipulating Flavors via the DB API"""
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
@ -23,55 +23,54 @@ from mogan.tests.unit.db import base
|
||||
from mogan.tests.unit.db import utils
|
||||
|
||||
|
||||
class DbInstanceTypeTestCase(base.DbTestCase):
|
||||
class DbFlavorTestCase(base.DbTestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(DbInstanceTypeTestCase, self).setUp()
|
||||
super(DbFlavorTestCase, self).setUp()
|
||||
self.context = {}
|
||||
self.instance_type = utils.create_test_instance_type()
|
||||
self.flavor = utils.create_test_flavor()
|
||||
|
||||
def test_create_instance_type(self):
|
||||
utils.create_test_instance_type(name='testing')
|
||||
def test_create_flavor(self):
|
||||
utils.create_test_flavor(name='testing')
|
||||
|
||||
def test_create_instance_type_already_exists(self):
|
||||
def test_create_flavor_already_exists(self):
|
||||
self.assertRaises(exception.FlavorAlreadyExists,
|
||||
utils.create_test_instance_type,
|
||||
uuid=self.instance_type['uuid'])
|
||||
utils.create_test_flavor,
|
||||
uuid=self.flavor['uuid'])
|
||||
|
||||
def test_get_instance_type_list(self):
|
||||
uuids = [self.instance_type['uuid']]
|
||||
def test_get_flavor_list(self):
|
||||
uuids = [self.flavor['uuid']]
|
||||
for i in range(1, 6):
|
||||
inst_type = utils.create_test_instance_type(
|
||||
flavor = utils.create_test_flavor(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
name=six.text_type(i))
|
||||
uuids.append(six.text_type(inst_type['uuid']))
|
||||
res = self.dbapi.instance_type_get_all(self.context)
|
||||
uuids.append(six.text_type(flavor['uuid']))
|
||||
res = self.dbapi.flavor_get_all(self.context)
|
||||
res_uuids = [r['uuid'] for r in res]
|
||||
six.assertCountEqual(self, uuids, res_uuids)
|
||||
|
||||
def test_get_instance_type(self):
|
||||
instance_type = self.dbapi.instance_type_get(
|
||||
self.context, self.instance_type['uuid'])
|
||||
def test_get_flavor(self):
|
||||
flavor = self.dbapi.flavor_get(
|
||||
self.context, self.flavor['uuid'])
|
||||
|
||||
self.assertEqual(self.instance_type['uuid'], instance_type['uuid'])
|
||||
self.assertEqual(self.flavor['uuid'], flavor['uuid'])
|
||||
|
||||
def test_get_instance_type_that_does_not_exist(self):
|
||||
def test_get_flavor_that_does_not_exist(self):
|
||||
self.assertRaises(exception.FlavorNotFound,
|
||||
self.dbapi.instance_type_get,
|
||||
self.dbapi.flavor_get,
|
||||
self.context,
|
||||
uuidutils.generate_uuid())
|
||||
|
||||
def test_destroy_instance_type(self):
|
||||
self.dbapi.instance_type_destroy(self.context,
|
||||
self.instance_type['uuid'])
|
||||
def test_destroy_flavor(self):
|
||||
self.dbapi.flavor_destroy(self.context, self.flavor['uuid'])
|
||||
|
||||
self.assertRaises(exception.FlavorNotFound,
|
||||
self.dbapi.instance_type_destroy,
|
||||
self.dbapi.flavor_destroy,
|
||||
self.context,
|
||||
self.instance_type['uuid'])
|
||||
self.flavor['uuid'])
|
||||
|
||||
def test_destroy_instance_type_that_does_not_exist(self):
|
||||
def test_destroy_flavor_that_does_not_exist(self):
|
||||
self.assertRaises(exception.FlavorNotFound,
|
||||
self.dbapi.instance_type_destroy,
|
||||
self.dbapi.flavor_destroy,
|
||||
self.context,
|
||||
uuidutils.generate_uuid())
|
@ -1,119 +0,0 @@
|
||||
# Copyright 2016 Intel
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Tests for manipulating Instances via the DB API"""
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
|
||||
from mogan.common import exception
|
||||
from mogan.tests.unit.db import base
|
||||
from mogan.tests.unit.db import utils
|
||||
|
||||
|
||||
class DbInstanceTestCase(base.DbTestCase):
|
||||
|
||||
def test_instance_create(self):
|
||||
utils.create_test_instance()
|
||||
|
||||
def test_instance_create_with_same_uuid(self):
|
||||
utils.create_test_instance(uuid='uuid', name='instance1')
|
||||
self.assertRaises(exception.InstanceAlreadyExists,
|
||||
utils.create_test_instance,
|
||||
uuid='uuid',
|
||||
name='instance2')
|
||||
|
||||
def test_instance_get_by_uuid(self):
|
||||
instance = utils.create_test_instance()
|
||||
res = self.dbapi.instance_get(self.context, instance.uuid)
|
||||
self.assertEqual(instance.uuid, res.uuid)
|
||||
|
||||
def test_instance_get_not_exist(self):
|
||||
self.assertRaises(exception.InstanceNotFound,
|
||||
self.dbapi.instance_get,
|
||||
self.context,
|
||||
'12345678-9999-0000-aaaa-123456789012')
|
||||
|
||||
def test_instance_get_all(self):
|
||||
uuids_project_1 = []
|
||||
uuids_project_2 = []
|
||||
uuids_project_all = []
|
||||
for i in range(0, 3):
|
||||
instance = utils.create_test_instance(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
project_id='project_1',
|
||||
name=str(i))
|
||||
uuids_project_1.append(six.text_type(instance['uuid']))
|
||||
for i in range(3, 5):
|
||||
instance = utils.create_test_instance(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
project_id='project_2',
|
||||
name=str(i))
|
||||
uuids_project_2.append(six.text_type(instance['uuid']))
|
||||
uuids_project_all.extend(uuids_project_1)
|
||||
uuids_project_all.extend(uuids_project_2)
|
||||
|
||||
# Set project_only to False
|
||||
# get all instances from all projects
|
||||
res = self.dbapi.instance_get_all(self.context, project_only=False)
|
||||
res_uuids = [r.uuid for r in res]
|
||||
six.assertCountEqual(self, uuids_project_all, res_uuids)
|
||||
|
||||
# Set project_only to True
|
||||
# get instances from current project (project_1)
|
||||
self.context.tenant = 'project_1'
|
||||
res = self.dbapi.instance_get_all(self.context, project_only=True)
|
||||
res_uuids = [r.uuid for r in res]
|
||||
six.assertCountEqual(self, uuids_project_1, res_uuids)
|
||||
|
||||
# Set project_only to True
|
||||
# get instances from current project (project_2)
|
||||
self.context.tenant = 'project_2'
|
||||
res = self.dbapi.instance_get_all(self.context, project_only=True)
|
||||
res_uuids = [r.uuid for r in res]
|
||||
six.assertCountEqual(self, uuids_project_2, res_uuids)
|
||||
|
||||
def test_instance_destroy(self):
|
||||
instance = utils.create_test_instance()
|
||||
self.dbapi.instance_destroy(self.context, instance.uuid)
|
||||
self.assertRaises(exception.InstanceNotFound,
|
||||
self.dbapi.instance_get,
|
||||
self.context,
|
||||
instance.uuid)
|
||||
|
||||
def test_instance_destroy_not_exist(self):
|
||||
self.assertRaises(exception.InstanceNotFound,
|
||||
self.dbapi.instance_destroy,
|
||||
self.context,
|
||||
'12345678-9999-0000-aaaa-123456789012')
|
||||
|
||||
def test_instance_update(self):
|
||||
instance = utils.create_test_instance()
|
||||
old_extra = instance.extra
|
||||
new_extra = {'foo': 'bar'}
|
||||
self.assertNotEqual(old_extra, new_extra)
|
||||
|
||||
res = self.dbapi.instance_update(self.context,
|
||||
instance.uuid,
|
||||
{'extra': new_extra})
|
||||
self.assertEqual(new_extra, res.extra)
|
||||
|
||||
def test_instance_update_with_invalid_parameter_value(self):
|
||||
instance = utils.create_test_instance()
|
||||
self.assertRaises(exception.InvalidParameterValue,
|
||||
self.dbapi.instance_update,
|
||||
self.context,
|
||||
instance.uuid,
|
||||
{'uuid': '12345678-9999-0000-aaaa-123456789012'})
|
@ -34,49 +34,49 @@ class DbQuotaUsageTestCase(base.DbTestCase):
|
||||
def setUp(self):
|
||||
super(DbQuotaUsageTestCase, self).setUp()
|
||||
self.context = context.get_admin_context()
|
||||
self.instance = quota.InstanceResource()
|
||||
self.resources = {self.instance.name: self.instance}
|
||||
self.server = quota.ServerResource()
|
||||
self.resources = {self.server.name: self.server}
|
||||
self.project_id = "c18e8a1a870d4c08a0b51ced6e0b6459"
|
||||
|
||||
def test_quota_usage_reserve(self):
|
||||
utils.create_test_quota()
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
r = dbapi.quota_reserve(self.context, self.resources,
|
||||
{'instances': 10},
|
||||
{'instances': 1},
|
||||
{'servers': 10},
|
||||
{'servers': 1},
|
||||
datetime.datetime(2099, 1, 1, 0, 0),
|
||||
CONF.quota.until_refresh, CONF.quota.max_age,
|
||||
project_id=self.project_id)
|
||||
self.assertEqual('instances', r[0].resource_name)
|
||||
self.assertEqual('servers', r[0].resource_name)
|
||||
|
||||
def test_reserve_commit(self):
|
||||
utils.create_test_quota()
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
rs = dbapi.quota_reserve(self.context, self.resources,
|
||||
{'instances': 10},
|
||||
{'instances': 1},
|
||||
{'servers': 10},
|
||||
{'servers': 1},
|
||||
datetime.datetime(2099, 1, 1, 0, 0),
|
||||
CONF.quota.until_refresh, CONF.quota.max_age,
|
||||
project_id=self.project_id)
|
||||
r = dbapi.quota_usage_get_all_by_project(self.context, self.project_id)
|
||||
before_in_use = r['instances']['in_use']
|
||||
before_in_use = r['servers']['in_use']
|
||||
dbapi.reservation_commit(self.context, rs, self.project_id)
|
||||
r = dbapi.quota_usage_get_all_by_project(self.context, self.project_id)
|
||||
after_in_use = r['instances']['in_use']
|
||||
after_in_use = r['servers']['in_use']
|
||||
self.assertEqual(before_in_use + 1, after_in_use)
|
||||
|
||||
def test_reserve_rollback(self):
|
||||
utils.create_test_quota()
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
rs = dbapi.quota_reserve(self.context, self.resources,
|
||||
{'instances': 10},
|
||||
{'instances': 1},
|
||||
{'servers': 10},
|
||||
{'servers': 1},
|
||||
datetime.datetime(2099, 1, 1, 0, 0),
|
||||
CONF.quota.until_refresh, CONF.quota.max_age,
|
||||
project_id=self.project_id)
|
||||
r = dbapi.quota_usage_get_all_by_project(self.context, self.project_id)
|
||||
before_in_use = r['instances']['in_use']
|
||||
before_in_use = r['servers']['in_use']
|
||||
dbapi.reservation_rollback(self.context, rs, self.project_id)
|
||||
r = dbapi.quota_usage_get_all_by_project(self.context, self.project_id)
|
||||
after_in_use = r['instances']['in_use']
|
||||
after_in_use = r['servers']['in_use']
|
||||
self.assertEqual(before_in_use, after_in_use)
|
||||
|
@ -43,7 +43,7 @@ class DbQuotaTestCase(base.DbTestCase):
|
||||
ids_project_1 = []
|
||||
ids_project_2 = []
|
||||
ids_project_all = []
|
||||
resource_names = ['instances', 'instances_type', 'test_resource']
|
||||
resource_names = ['servers', 'servers_type', 'test_resource']
|
||||
for i in range(0, 3):
|
||||
quota = utils.create_test_quota(project_id='project_1',
|
||||
resource_name=resource_names[i])
|
||||
@ -111,4 +111,4 @@ class DbQuotaTestCase(base.DbTestCase):
|
||||
self.context,
|
||||
quota.project_id,
|
||||
quota.resource_name,
|
||||
{'resource_name': 'instance_test'})
|
||||
{'resource_name': 'server_test'})
|
||||
|
@ -13,7 +13,7 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Tests for manipulating Instance Faults via the DB API"""
|
||||
"""Tests for manipulating Server Faults via the DB API"""
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
@ -21,54 +21,54 @@ from mogan.tests.unit.db import base
|
||||
from mogan.tests.unit.db import utils
|
||||
|
||||
|
||||
class DbInstanceFaultTestCase(base.DbTestCase):
|
||||
class DbServerFaultTestCase(base.DbTestCase):
|
||||
def setUp(self):
|
||||
super(DbInstanceFaultTestCase, self).setUp()
|
||||
super(DbServerFaultTestCase, self).setUp()
|
||||
self.ctxt = {}
|
||||
|
||||
def test_create_instance_fault(self):
|
||||
def test_create_server_fault(self):
|
||||
uuid = uuidutils.generate_uuid()
|
||||
# Ensure no faults registered for this instance
|
||||
faults = self.dbapi.instance_fault_get_by_instance_uuids(self.ctxt,
|
||||
[uuid])
|
||||
# Ensure no faults registered for this server
|
||||
faults = self.dbapi.server_fault_get_by_server_uuids(self.ctxt,
|
||||
[uuid])
|
||||
self.assertEqual(0, len(faults[uuid]))
|
||||
|
||||
# Create a fault
|
||||
fault_values = utils.get_test_instance_fault(instance_uuid=uuid)
|
||||
fault = utils.create_test_instance_fault(self.ctxt, instance_uuid=uuid)
|
||||
fault_values = utils.get_test_server_fault(server_uuid=uuid)
|
||||
fault = utils.create_test_server_fault(self.ctxt, server_uuid=uuid)
|
||||
|
||||
ignored_keys = ['created_at', 'updated_at', 'id']
|
||||
self._assertEqualObjects(fault_values, fault, ignored_keys)
|
||||
|
||||
# Retrieve the fault to ensure it was successfully added
|
||||
faults = self.dbapi.instance_fault_get_by_instance_uuids(self.ctxt,
|
||||
[uuid])
|
||||
faults = self.dbapi.server_fault_get_by_server_uuids(self.ctxt,
|
||||
[uuid])
|
||||
self.assertEqual(1, len(faults[uuid]))
|
||||
self._assertEqualObjects(fault, faults[uuid][0])
|
||||
|
||||
def test_get_instance_fault_by_instance(self):
|
||||
"""Ensure we can retrieve faults for instance."""
|
||||
def test_get_server_fault_by_server(self):
|
||||
"""Ensure we can retrieve faults for server."""
|
||||
uuids = [uuidutils.generate_uuid(), uuidutils.generate_uuid()]
|
||||
fault_codes = [404, 500]
|
||||
expected = {}
|
||||
|
||||
# Create faults
|
||||
for uuid in uuids:
|
||||
utils.create_test_instance(self.ctxt, instance_uuid=uuid)
|
||||
utils.create_test_server(self.ctxt, server_uuid=uuid)
|
||||
|
||||
expected[uuid] = []
|
||||
for code in fault_codes:
|
||||
fault = utils.create_test_instance_fault(self.ctxt,
|
||||
instance_uuid=uuid,
|
||||
code=code)
|
||||
fault = utils.create_test_server_fault(self.ctxt,
|
||||
server_uuid=uuid,
|
||||
code=code)
|
||||
# We expect the faults to be returned ordered by created_at in
|
||||
# descending order, so insert the newly created fault at the
|
||||
# front of our list.
|
||||
expected[uuid].insert(0, fault)
|
||||
|
||||
# Ensure faults are saved
|
||||
faults = self.dbapi.instance_fault_get_by_instance_uuids(self.ctxt,
|
||||
uuids)
|
||||
faults = self.dbapi.server_fault_get_by_server_uuids(self.ctxt,
|
||||
uuids)
|
||||
ignored_keys = ['created_at', 'updated_at', 'id']
|
||||
self.assertEqual(len(expected), len(faults))
|
||||
for uuid in uuids:
|
||||
@ -76,17 +76,17 @@ class DbInstanceFaultTestCase(base.DbTestCase):
|
||||
faults[uuid],
|
||||
ignored_keys)
|
||||
|
||||
def test_delete_instance_faults_on_instance_destroy(self):
|
||||
instance = utils.create_test_instance(self.ctxt)
|
||||
fault = utils.create_test_instance_fault(self.ctxt,
|
||||
instance_uuid=instance.uuid)
|
||||
faults = self.dbapi.instance_fault_get_by_instance_uuids(
|
||||
def test_delete_server_faults_on_server_destroy(self):
|
||||
server = utils.create_test_server(self.ctxt)
|
||||
fault = utils.create_test_server_fault(self.ctxt,
|
||||
server_uuid=server.uuid)
|
||||
faults = self.dbapi.server_fault_get_by_server_uuids(
|
||||
self.ctxt,
|
||||
[instance.uuid])
|
||||
self.assertEqual(1, len(faults[instance.uuid]))
|
||||
self._assertEqualObjects(fault, faults[instance.uuid][0])
|
||||
self.dbapi.instance_destroy(self.ctxt, instance.uuid)
|
||||
faults = self.dbapi.instance_fault_get_by_instance_uuids(
|
||||
[server.uuid])
|
||||
self.assertEqual(1, len(faults[server.uuid]))
|
||||
self._assertEqualObjects(fault, faults[server.uuid][0])
|
||||
self.dbapi.server_destroy(self.ctxt, server.uuid)
|
||||
faults = self.dbapi.server_fault_get_by_server_uuids(
|
||||
self.ctxt,
|
||||
[instance.uuid])
|
||||
self.assertEqual(0, len(faults[instance.uuid]))
|
||||
[server.uuid])
|
||||
self.assertEqual(0, len(faults[server.uuid]))
|
119
mogan/tests/unit/db/test_servers.py
Normal file
@ -0,0 +1,119 @@
|
||||
# Copyright 2016 Intel
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""Tests for manipulating Servers via the DB API"""
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
import six
|
||||
|
||||
from mogan.common import exception
|
||||
from mogan.tests.unit.db import base
|
||||
from mogan.tests.unit.db import utils
|
||||
|
||||
|
||||
class DbServerTestCase(base.DbTestCase):
|
||||
|
||||
def test_server_create(self):
|
||||
utils.create_test_server()
|
||||
|
||||
def test_server_create_with_same_uuid(self):
|
||||
utils.create_test_server(uuid='uuid', name='server1')
|
||||
self.assertRaises(exception.ServerAlreadyExists,
|
||||
utils.create_test_server,
|
||||
uuid='uuid',
|
||||
name='server2')
|
||||
|
||||
def test_server_get_by_uuid(self):
|
||||
server = utils.create_test_server()
|
||||
res = self.dbapi.server_get(self.context, server.uuid)
|
||||
self.assertEqual(server.uuid, res.uuid)
|
||||
|
||||
def test_server_get_not_exist(self):
|
||||
self.assertRaises(exception.ServerNotFound,
|
||||
self.dbapi.server_get,
|
||||
self.context,
|
||||
'12345678-9999-0000-aaaa-123456789012')
|
||||
|
||||
def test_server_get_all(self):
|
||||
uuids_project_1 = []
|
||||
uuids_project_2 = []
|
||||
uuids_project_all = []
|
||||
for i in range(0, 3):
|
||||
server = utils.create_test_server(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
project_id='project_1',
|
||||
name=str(i))
|
||||
uuids_project_1.append(six.text_type(server['uuid']))
|
||||
for i in range(3, 5):
|
||||
server = utils.create_test_server(
|
||||
uuid=uuidutils.generate_uuid(),
|
||||
project_id='project_2',
|
||||
name=str(i))
|
||||
uuids_project_2.append(six.text_type(server['uuid']))
|
||||
uuids_project_all.extend(uuids_project_1)
|
||||
uuids_project_all.extend(uuids_project_2)
|
||||
|
||||
# Set project_only to False
|
||||
# get all servers from all projects
|
||||
res = self.dbapi.server_get_all(self.context, project_only=False)
|
||||
res_uuids = [r.uuid for r in res]
|
||||
six.assertCountEqual(self, uuids_project_all, res_uuids)
|
||||
|
||||
# Set project_only to True
|
||||
# get servers from current project (project_1)
|
||||
self.context.tenant = 'project_1'
|
||||
res = self.dbapi.server_get_all(self.context, project_only=True)
|
||||
res_uuids = [r.uuid for r in res]
|
||||
six.assertCountEqual(self, uuids_project_1, res_uuids)
|
||||
|
||||
# Set project_only to True
|
||||
# get servers from current project (project_2)
|
||||
self.context.tenant = 'project_2'
|
||||
res = self.dbapi.server_get_all(self.context, project_only=True)
|
||||
res_uuids = [r.uuid for r in res]
|
||||
six.assertCountEqual(self, uuids_project_2, res_uuids)
|
||||
|
||||
def test_server_destroy(self):
|
||||
server = utils.create_test_server()
|
||||
self.dbapi.server_destroy(self.context, server.uuid)
|
||||
self.assertRaises(exception.ServerNotFound,
|
||||
self.dbapi.server_get,
|
||||
self.context,
|
||||
server.uuid)
|
||||
|
||||
def test_server_destroy_not_exist(self):
|
||||
self.assertRaises(exception.ServerNotFound,
|
||||
self.dbapi.server_destroy,
|
||||
self.context,
|
||||
'12345678-9999-0000-aaaa-123456789012')
|
||||
|
||||
def test_server_update(self):
|
||||
server = utils.create_test_server()
|
||||
old_extra = server.extra
|
||||
new_extra = {'foo': 'bar'}
|
||||
self.assertNotEqual(old_extra, new_extra)
|
||||
|
||||
res = self.dbapi.server_update(self.context,
|
||||
server.uuid,
|
||||
{'extra': new_extra})
|
||||
self.assertEqual(new_extra, res.extra)
|
||||
|
||||
def test_server_update_with_invalid_parameter_value(self):
|
||||
server = utils.create_test_server()
|
||||
self.assertRaises(exception.InvalidParameterValue,
|
||||
self.dbapi.server_update,
|
||||
self.context,
|
||||
server.uuid,
|
||||
{'uuid': '12345678-9999-0000-aaaa-123456789012'})
|
@ -20,8 +20,8 @@ from mogan.common import states
|
||||
from mogan.db import api as db_api
|
||||
|
||||
|
||||
def get_test_instance(**kw):
|
||||
fake_instance_nics = [{
|
||||
def get_test_server(**kw):
|
||||
fake_server_nics = [{
|
||||
'port_id': uuidutils.generate_uuid(),
|
||||
'network_id': 'bf942f63-c284-4eb8-925b-c2fa1a89ed33',
|
||||
'mac_address': '52:54:00:6a:b7:cc',
|
||||
@ -49,12 +49,12 @@ def get_test_instance(**kw):
|
||||
'user_id': kw.get('user_id', 'cdbf77d47f1d4d04ad9b7ff62b672467'),
|
||||
'status': kw.get('status', states.ACTIVE),
|
||||
'power_state': kw.get('power_state', 'power on'),
|
||||
'instance_type_uuid': kw.get('instance_type_uuid',
|
||||
'28708dff-283c-449e-9bfa-a48c93480c86'),
|
||||
'flavor_uuid': kw.get('flavor_uuid',
|
||||
'28708dff-283c-449e-9bfa-a48c93480c86'),
|
||||
'availability_zone': kw.get('availability_zone', 'test_az'),
|
||||
'image_uuid': kw.get('image_uuid',
|
||||
'ac3b2291-b9ef-45f6-8eeb-21ac568a64a5'),
|
||||
'nics': kw.get('nics', fake_instance_nics),
|
||||
'nics': kw.get('nics', fake_server_nics),
|
||||
'node_uuid': kw.get('node_uuid',
|
||||
'f978ef48-d4af-4dad-beec-e6174309bc71'),
|
||||
'launched_at': kw.get('launched_at'),
|
||||
@ -66,23 +66,23 @@ def get_test_instance(**kw):
|
||||
}
|
||||
|
||||
|
||||
def create_test_instance(context={}, **kw):
|
||||
"""Create test instance entry in DB and return Instance DB object.
|
||||
def create_test_server(context={}, **kw):
|
||||
"""Create test server entry in DB and return Server DB object.
|
||||
|
||||
Function to be used to create test Instance objects in the database.
|
||||
Function to be used to create test Server objects in the database.
|
||||
|
||||
:param context: The request context, for access checks.
|
||||
:param kw: kwargs with overriding values for instance's attributes.
|
||||
:returns: Test Instance DB object.
|
||||
:param kw: kwargs with overriding values for server's attributes.
|
||||
:returns: Test Server DB object.
|
||||
|
||||
"""
|
||||
instance = get_test_instance(**kw)
|
||||
server = get_test_server(**kw)
|
||||
# Let DB generate ID if it isn't specified explicitly
|
||||
if 'id' not in kw:
|
||||
del instance['id']
|
||||
dbapi = db_api.get_instance()
|
||||
del server['id']
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.instance_create(context, instance)
|
||||
return dbapi.server_create(context, server)
|
||||
|
||||
|
||||
def get_test_compute_node(**kw):
|
||||
@ -121,7 +121,7 @@ def create_test_compute_node(context={}, **kw):
|
||||
# specified explicitly just delete it.
|
||||
if 'ports' not in kw:
|
||||
del node['ports']
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.compute_node_create(context, node)
|
||||
|
||||
@ -155,7 +155,7 @@ def create_test_compute_port(context={}, **kw):
|
||||
# Let DB generate ID if it isn't specified explicitly
|
||||
if 'id' not in kw:
|
||||
del port['id']
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.compute_port_create(context, port)
|
||||
|
||||
@ -189,12 +189,12 @@ def create_test_compute_disk(context={}, **kw):
|
||||
# Let DB generate ID if it isn't specified explicitly
|
||||
if 'id' not in kw:
|
||||
del disk['id']
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.compute_disk_create(context, disk)
|
||||
|
||||
|
||||
def get_test_instance_type(**kw):
|
||||
def get_test_flavor(**kw):
|
||||
return {
|
||||
'uuid': kw.get('uuid', uuidutils.generate_uuid()),
|
||||
'name': kw.get('name', 'test'),
|
||||
@ -205,25 +205,25 @@ def get_test_instance_type(**kw):
|
||||
}
|
||||
|
||||
|
||||
def create_test_instance_type(context={}, **kw):
|
||||
"""Create test instance type entry in DB and return the DB object.
|
||||
def create_test_flavor(context={}, **kw):
|
||||
"""Create test server type entry in DB and return the DB object.
|
||||
|
||||
Function to be used to create test Instance Type objects in the database.
|
||||
Function to be used to create test Flavor objects in the database.
|
||||
|
||||
:param context: The request context, for access checks.
|
||||
:param kw: kwargs with overriding values for instance type's attributes.
|
||||
:returns: Test Instance Type DB object.
|
||||
:param kw: kwargs with overriding values for the flavor's attributes.
|
||||
:returns: Test Flavor DB object.
|
||||
|
||||
"""
|
||||
instance_type = get_test_instance_type(**kw)
|
||||
dbapi = db_api.get_instance()
|
||||
flavor = get_test_flavor(**kw)
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.instance_type_create(context, instance_type)
|
||||
return dbapi.flavor_create(context, flavor)
|
||||
|
||||
|
||||
def get_test_instance_fault(**kw):
|
||||
def get_test_server_fault(**kw):
|
||||
return {
|
||||
'instance_uuid': kw.get('instance_uuid'),
|
||||
'server_uuid': kw.get('server_uuid'),
|
||||
'code': kw.get('code', 404),
|
||||
'message': kw.get('message', 'message'),
|
||||
'detail': kw.get('detail', 'detail'),
|
||||
@ -232,26 +232,26 @@ def get_test_instance_fault(**kw):
|
||||
}
|
||||
|
||||
|
||||
def create_test_instance_fault(context={}, **kw):
|
||||
"""Create test instance fault entry in DB and return the DB object.
|
||||
def create_test_server_fault(context={}, **kw):
|
||||
"""Create test server fault entry in DB and return the DB object.
|
||||
|
||||
Function to be used to create test Instance Fault objects in the database.
|
||||
Function to be used to create test Server Fault objects in the database.
|
||||
|
||||
:param context: The request context, for access checks.
|
||||
:param kw: kwargs with overriding values for instance fault's attributes.
|
||||
:returns: Test Instance Fault DB object.
|
||||
:param kw: kwargs with overriding values for server fault's attributes.
|
||||
:returns: Test Server Fault DB object.
|
||||
|
||||
"""
|
||||
instance_fault = get_test_instance_fault(**kw)
|
||||
dbapi = db_api.get_instance()
|
||||
server_fault = get_test_server_fault(**kw)
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.instance_fault_create(context, instance_fault)
|
||||
return dbapi.server_fault_create(context, server_fault)
|
||||
|
||||
|
||||
def get_test_quota(**kw):
|
||||
return {
|
||||
'id': kw.get('id', 123),
|
||||
'resource_name': kw.get('resource_name', 'instances'),
|
||||
'resource_name': kw.get('resource_name', 'servers'),
|
||||
'project_id': kw.get('project_id',
|
||||
'c18e8a1a870d4c08a0b51ced6e0b6459'),
|
||||
'hard_limit': kw.get('hard_limit', 10),
|
||||
@ -267,7 +267,7 @@ def create_test_quota(context={}, **kw):
|
||||
Function to be used to create test Quota objects in the database.
|
||||
|
||||
:param context: The request context, for access checks.
|
||||
:param kw: kwargs with overriding values for instance's attributes.
|
||||
:param kw: kwargs with overriding values for server's attributes.
|
||||
:returns: Test Quota DB object.
|
||||
|
||||
"""
|
||||
@ -275,6 +275,6 @@ def create_test_quota(context={}, **kw):
|
||||
# Let DB generate ID if it isn't specified explicitly
|
||||
if 'id' not in kw:
|
||||
del quota['id']
|
||||
dbapi = db_api.get_instance()
|
||||
dbapi = db_api.get_server()
|
||||
|
||||
return dbapi.quota_create(context, quota)
|
||||
|
@ -12,51 +12,51 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
""" Tests for create_instance TaskFlow """
""" Tests for create_server TaskFlow """

import mock
from oslo_context import context

from mogan.engine.baremetal.ironic import IronicDriver
from mogan.engine.flows import create_instance
from mogan.engine.flows import create_server
from mogan.engine import manager
from mogan import objects
from mogan.tests import base
from mogan.tests.unit.objects import utils as obj_utils


class CreateInstanceFlowTestCase(base.TestCase):
class CreateServerFlowTestCase(base.TestCase):

    def setUp(self):
        super(CreateInstanceFlowTestCase, self).setUp()
        super(CreateServerFlowTestCase, self).setUp()
        self.ctxt = context.get_admin_context()

    @mock.patch.object(objects.instance.Instance, 'save')
    @mock.patch.object(create_instance.BuildNetworkTask, '_build_networks')
    @mock.patch.object(objects.server.Server, 'save')
    @mock.patch.object(create_server.BuildNetworkTask, '_build_networks')
    def test_create_network_task_execute(self, mock_build_networks, mock_save):
        fake_engine_manager = mock.MagicMock()
        fake_requested_networks = mock.MagicMock()
        fake_ports = mock.MagicMock()
        task = create_instance.BuildNetworkTask(fake_engine_manager)
        instance_obj = obj_utils.get_test_instance(self.ctxt)
        task = create_server.BuildNetworkTask(fake_engine_manager)
        server_obj = obj_utils.get_test_server(self.ctxt)
        mock_build_networks.return_value = None
        mock_save.return_value = None

        task.execute(
            self.ctxt, instance_obj, fake_requested_networks, fake_ports)
            self.ctxt, server_obj, fake_requested_networks, fake_ports)
        mock_build_networks.assert_called_once_with(self.ctxt,
                                                    instance_obj,
                                                    server_obj,
                                                    fake_requested_networks,
                                                    fake_ports)

    @mock.patch.object(IronicDriver, 'spawn')
    def test_create_instance_task_execute(self, mock_spawn):
    def test_create_server_task_execute(self, mock_spawn):
        flow_manager = manager.EngineManager('test-host', 'test-topic')
        task = create_instance.CreateInstanceTask(
        task = create_server.CreateServerTask(
            flow_manager.driver)
        instance_obj = obj_utils.get_test_instance(self.ctxt)
        server_obj = obj_utils.get_test_server(self.ctxt)
        mock_spawn.side_effect = None

        task.execute(self.ctxt, instance_obj, {'value': 'configdrive'})
        task.execute(self.ctxt, server_obj, {'value': 'configdrive'})
        mock_spawn.assert_called_once_with(
            self.ctxt, instance_obj, 'configdrive')
            self.ctxt, server_obj, 'configdrive')

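The test case above relies entirely on mock.patch.object to isolate the renamed TaskFlow tasks from Ironic and the database. The pattern itself is plain library mocking; the following self-contained sketch shows the same technique with no Mogan imports (Worker is a stand-in class, not project code):

    import unittest
    from unittest import mock


    class Worker(object):
        def build(self, server):
            # Stand-in for BuildNetworkTask._build_networks; never runs in the test.
            raise RuntimeError('should be patched out')


    class PatchPatternTest(unittest.TestCase):
        @mock.patch.object(Worker, 'build')
        def test_build_is_isolated(self, mock_build):
            mock_build.return_value = None
            Worker().build('fake-server')
            # The class attribute was replaced by a MagicMock, so the recorded
            # call has no bound self, just the argument that was passed in.
            mock_build.assert_called_once_with('fake-server')


    if __name__ == '__main__':
        unittest.main()
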
@ -38,22 +38,22 @@ class ComputeAPIUnitTest(base.DbTestCase):
        self.context = context.RequestContext(user=self.user_id,
                                              tenant=self.project_id)

    def _create_instance_type(self):
        inst_type = db_utils.get_test_instance_type()
        inst_type['extra'] = {}
        type_obj = objects.InstanceType(self.context, **inst_type)
    def _create_flavor(self):
        flavor = db_utils.get_test_flavor()
        flavor['extra'] = {}
        type_obj = objects.Flavor(self.context, **flavor)
        type_obj.create(self.context)
        return type_obj

    @mock.patch('mogan.engine.api.API._check_requested_networks')
    def test__validate_and_build_base_options(self, mock_check_nets):
        instance_type = self._create_instance_type()
        flavor = self._create_flavor()
        mock_check_nets.return_value = 3

        base_opts, max_network_count, key_pair = \
            self.engine_api._validate_and_build_base_options(
                self.context,
                instance_type=instance_type,
                flavor=flavor,
                image_uuid='fake-uuid',
                name='fake-name',
                description='fake-descritpion',
@ -67,44 +67,44 @@ class ComputeAPIUnitTest(base.DbTestCase):
        self.assertEqual('fake-user', base_opts['user_id'])
        self.assertEqual('fake-project', base_opts['project_id'])
        self.assertEqual(states.BUILDING, base_opts['status'])
        self.assertEqual(instance_type.uuid, base_opts['instance_type_uuid'])
        self.assertEqual(flavor.uuid, base_opts['flavor_uuid'])
        self.assertEqual({'k1', 'v1'}, base_opts['extra'])
        self.assertEqual('test_az', base_opts['availability_zone'])
        self.assertEqual(None, key_pair)

    @mock.patch.object(objects.Instance, 'create')
    def test__provision_instances(self, mock_inst_create):
        mock_inst_create.return_value = mock.MagicMock()
    @mock.patch.object(objects.Server, 'create')
    def test__provision_servers(self, mock_server_create):
        mock_server_create.return_value = mock.MagicMock()

        base_options = {'image_uuid': 'fake-uuid',
                        'status': states.BUILDING,
                        'user_id': 'fake-user',
                        'project_id': 'fake-project',
                        'instance_type_uuid': 'fake-type-uuid',
                        'flavor_uuid': 'fake-type-uuid',
                        'name': 'fake-name',
                        'description': 'fake-description',
                        'extra': {},
                        'availability_zone': None}
        min_count = 1
        max_count = 2
        self.engine_api._provision_instances(self.context, base_options,
                                             min_count, max_count)
        self.engine_api._provision_servers(self.context, base_options,
                                           min_count, max_count)
        calls = [mock.call() for i in range(max_count)]
        mock_inst_create.assert_has_calls(calls)
        mock_server_create.assert_has_calls(calls)

    @mock.patch.object(engine_rpcapi.EngineAPI, 'create_instance')
    @mock.patch.object(engine_rpcapi.EngineAPI, 'create_server')
    @mock.patch('mogan.engine.api.API._get_image')
    @mock.patch('mogan.engine.api.API._validate_and_build_base_options')
    @mock.patch('mogan.engine.api.API.list_availability_zones')
    def test_create(self, mock_list_az, mock_validate, mock_get_image,
                    mock_create):
        instance_type = self._create_instance_type()
        flavor = self._create_flavor()

        base_options = {'image_uuid': 'fake-uuid',
                        'status': states.BUILDING,
                        'user_id': 'fake-user',
                        'project_id': 'fake-project',
                        'instance_type_uuid': 'fake-type-uuid',
                        'flavor_uuid': 'fake-type-uuid',
                        'name': 'fake-name',
                        'description': 'fake-description',
                        'extra': {'k1', 'v1'},
@ -119,12 +119,12 @@ class ComputeAPIUnitTest(base.DbTestCase):

        res = self.dbapi._get_quota_usages(self.context, self.project_id)
        before_in_use = 0
        if res.get('instances') is not None:
            before_in_use = res.get('instances').in_use
        if res.get('servers') is not None:
            before_in_use = res.get('servers').in_use

        self.engine_api.create(
            self.context,
            instance_type=instance_type,
            flavor=flavor,
            image_uuid='fake-uuid',
            name='fake-name',
            description='fake-descritpion',
@ -136,25 +136,25 @@ class ComputeAPIUnitTest(base.DbTestCase):

        mock_list_az.assert_called_once_with(self.context)
        mock_validate.assert_called_once_with(
            self.context, instance_type, 'fake-uuid', 'fake-name',
            self.context, flavor, 'fake-uuid', 'fake-name',
            'fake-descritpion', 'test_az', {'k1', 'v1'}, requested_networks,
            None, None, max_count)
        self.assertTrue(mock_create.called)
        self.assertTrue(mock_get_image.called)
        res = self.dbapi._get_quota_usages(self.context, self.project_id)
        after_in_use = res.get('instances').in_use
        after_in_use = res.get('servers').in_use
        self.assertEqual(before_in_use + 2, after_in_use)

    @mock.patch('mogan.engine.api.API.list_availability_zones')
    def test_create_with_invalid_az(self, mock_list_az):
        instance_type = mock.MagicMock()
        flavor = mock.MagicMock()
        mock_list_az.return_value = {'availability_zones': ['invalid_az']}

        self.assertRaises(
            exception.AZNotFound,
            self.engine_api.create,
            self.context,
            instance_type,
            flavor,
            'fake-uuid',
            'fake-name',
            'fake-descritpion',
@ -169,13 +169,13 @@ class ComputeAPIUnitTest(base.DbTestCase):
    @mock.patch('mogan.engine.api.API.list_availability_zones')
    def test_create_over_quota_limit(self, mock_list_az, mock_validate,
                                     mock_get_image):
        instance_type = self._create_instance_type()
        flavor = self._create_flavor()

        base_options = {'image_uuid': 'fake-uuid',
                        'status': states.BUILDING,
                        'user_id': 'fake-user',
                        'project_id': 'fake-project',
                        'instance_type_uuid': 'fake-type-uuid',
                        'flavor_uuid': 'fake-type-uuid',
                        'name': 'fake-name',
                        'description': 'fake-description',
                        'extra': {'k1', 'v1'},
@ -191,7 +191,7 @@ class ComputeAPIUnitTest(base.DbTestCase):
            exception.OverQuota,
            self.engine_api.create,
            self.context,
            instance_type,
            flavor,
            'fake-uuid',
            'fake-name',
            'fake-descritpion',
@ -204,116 +204,116 @@ class ComputeAPIUnitTest(base.DbTestCase):
            min_count,
            max_count)

    def _create_fake_instance_obj(self, fake_instance):
        fake_instance_obj = objects.Instance(self.context, **fake_instance)
        fake_instance_obj.create(self.context)
        return fake_instance_obj
    def _create_fake_server_obj(self, fake_server):
        fake_server_obj = objects.Server(self.context, **fake_server)
        fake_server_obj.create(self.context)
        return fake_server_obj

    def test_lock_by_owner(self):
        fake_instance = db_utils.get_test_instance(
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id)
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        self.engine_api.lock(self.context, fake_instance_obj)
        self.assertTrue(fake_instance_obj.locked)
        self.assertEqual('owner', fake_instance_obj.locked_by)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        self.engine_api.lock(self.context, fake_server_obj)
        self.assertTrue(fake_server_obj.locked)
        self.assertEqual('owner', fake_server_obj.locked_by)

    def test_unlock_by_owner(self):
        fake_instance = db_utils.get_test_instance(
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        self.engine_api.unlock(self.context, fake_instance_obj)
        self.assertFalse(fake_instance_obj.locked)
        self.assertEqual(None, fake_instance_obj.locked_by)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        self.engine_api.unlock(self.context, fake_server_obj)
        self.assertFalse(fake_server_obj.locked)
        self.assertEqual(None, fake_server_obj.locked_by)

    def test_lock_by_admin(self):
        fake_instance = db_utils.get_test_instance(
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id)
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        admin_context = context.get_admin_context()
        self.engine_api.lock(admin_context, fake_instance_obj)
        self.assertTrue(fake_instance_obj.locked)
        self.assertEqual('admin', fake_instance_obj.locked_by)
        self.engine_api.lock(admin_context, fake_server_obj)
        self.assertTrue(fake_server_obj.locked)
        self.assertEqual('admin', fake_server_obj.locked_by)

    def test_unlock_by_admin(self):
        fake_instance = db_utils.get_test_instance(
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        admin_context = context.get_admin_context()
        self.engine_api.unlock(admin_context, fake_instance_obj)
        self.assertFalse(fake_instance_obj.locked)
        self.assertEqual(None, fake_instance_obj.locked_by)
        self.engine_api.unlock(admin_context, fake_server_obj)
        self.assertFalse(fake_server_obj.locked)
        self.assertEqual(None, fake_server_obj.locked_by)

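The lock/unlock tests above and the locked-server tests below encode a single policy: a non-admin request on a locked server raises exception.ServerIsLocked, while an admin context bypasses the lock. A self-contained sketch of that check, not Mogan's implementation, just the behaviour the assertions imply:

    class ServerIsLocked(Exception):
        """Stand-in for the renamed exception; illustrative only."""


    def check_server_lock(context, server):
        # Owners and other non-admins are blocked once a server is locked;
        # admin contexts may still delete, power, or rebuild it.
        if getattr(server, 'locked', False) and not getattr(context, 'is_admin', False):
            raise ServerIsLocked('server %s is locked' % server.uuid)
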
    @mock.patch('mogan.engine.api.API._delete_instance')
    def test_delete_locked_instance_with_non_admin(self, mock_deleted):
        fake_instance = db_utils.get_test_instance(
    @mock.patch('mogan.engine.api.API._delete_server')
    def test_delete_locked_server_with_non_admin(self, mock_deleted):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        self.assertRaises(exception.InstanceIsLocked,
        fake_server_obj = self._create_fake_server_obj(fake_server)
        self.assertRaises(exception.ServerIsLocked,
                          self.engine_api.delete,
                          self.context, fake_instance_obj)
                          self.context, fake_server_obj)
        mock_deleted.assert_not_called()

    @mock.patch.object(engine_rpcapi.EngineAPI, 'set_power_state')
    def test_power_locked_instance_with_non_admin(self, mock_powered):
        fake_instance = db_utils.get_test_instance(
    def test_power_locked_server_with_non_admin(self, mock_powered):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        self.assertRaises(exception.InstanceIsLocked,
        fake_server_obj = self._create_fake_server_obj(fake_server)
        self.assertRaises(exception.ServerIsLocked,
                          self.engine_api.power,
                          self.context, fake_instance_obj, 'reboot')
                          self.context, fake_server_obj, 'reboot')
        mock_powered.assert_not_called()

    @mock.patch('mogan.engine.api.API._delete_instance')
    def test_delete_locked_instance_with_admin(self, mock_deleted):
        fake_instance = db_utils.get_test_instance(
    @mock.patch('mogan.engine.api.API._delete_server')
    def test_delete_locked_server_with_admin(self, mock_deleted):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        admin_context = context.get_admin_context()
        self.engine_api.delete(admin_context, fake_instance_obj)
        self.engine_api.delete(admin_context, fake_server_obj)
        self.assertTrue(mock_deleted.called)

    @mock.patch.object(engine_rpcapi.EngineAPI, 'set_power_state')
    def test_power_locked_instance_with_admin(self, mock_powered):
        fake_instance = db_utils.get_test_instance(
    def test_power_locked_server_with_admin(self, mock_powered):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        admin_context = context.get_admin_context()
        self.engine_api.power(admin_context, fake_instance_obj, 'reboot')
        self.engine_api.power(admin_context, fake_server_obj, 'reboot')
        self.assertTrue(mock_powered.called)

    @mock.patch.object(engine_rpcapi.EngineAPI, 'rebuild_instance')
    def test_rebuild_locked_instance_with_non_admin(self, mock_rebuild):
        fake_instance = db_utils.get_test_instance(
    @mock.patch.object(engine_rpcapi.EngineAPI, 'rebuild_server')
    def test_rebuild_locked_server_with_non_admin(self, mock_rebuild):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        self.assertRaises(exception.InstanceIsLocked,
        fake_server_obj = self._create_fake_server_obj(fake_server)
        self.assertRaises(exception.ServerIsLocked,
                          self.engine_api.rebuild,
                          self.context, fake_instance_obj)
                          self.context, fake_server_obj)
        mock_rebuild.assert_not_called()

    @mock.patch.object(engine_rpcapi.EngineAPI, 'rebuild_instance')
    def test_rebuild_locked_instance_with_admin(self, mock_rebuild):
        fake_instance = db_utils.get_test_instance(
    @mock.patch.object(engine_rpcapi.EngineAPI, 'rebuild_server')
    def test_rebuild_locked_server_with_admin(self, mock_rebuild):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id,
            locked=True, locked_by='owner')
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        admin_context = context.get_admin_context()
        self.engine_api.rebuild(admin_context, fake_instance_obj)
        self.engine_api.rebuild(admin_context, fake_server_obj)
        self.assertTrue(mock_rebuild.called)

    @mock.patch.object(engine_rpcapi.EngineAPI, 'rebuild_instance')
    def test_rebuild_instance(self, mock_rebuild):
        fake_instance = db_utils.get_test_instance(
    @mock.patch.object(engine_rpcapi.EngineAPI, 'rebuild_server')
    def test_rebuild_server(self, mock_rebuild):
        fake_server = db_utils.get_test_server(
            user_id=self.user_id, project_id=self.project_id)
        fake_instance_obj = self._create_fake_instance_obj(fake_instance)
        self.engine_api.rebuild(self.context, fake_instance_obj)
        fake_server_obj = self._create_fake_server_obj(fake_server)
        self.engine_api.rebuild(self.context, fake_server_obj)
        self.assertTrue(mock_rebuild.called)

    def test_list_availability_zone(self):
Some files were not shown because too many files have changed in this diff.