Improve the functional tests

- Execute test groups serially to make sure no more than 2 database
  instances are created at the same time.
- Remove some unnecessary tests
- Remove unneeded datastores, e.g. 'Test_Datastore_1'
- Remove unsupported trovestack subcommands
- Move unsupported DIB elements to the 'deprecated-elements' folder
- Decrease the default value of 'agent_call_high_timeout' to 5 minutes
- Add initial_delay for the polling task
- Use the socket file to connect to the database instead of the
  localhost IP (illustrated below)

Change-Id: Ie5030a671fbeb453eafa6cbe04e08da7b52e33c9
Lingxian Kong 2020-01-10 18:37:58 +13:00
parent 54987b60a7
commit 602c4d42de
157 changed files with 765 additions and 4853 deletions
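
As a rough illustration of two of the changes above, the socket-file connection and the lowered 'agent_call_high_timeout', here is a minimal sketch (the socket path and the use of devstack's iniset helper are assumptions, not part of this commit):

    # Connect through the local unix socket instead of the loopback IP;
    # the socket path is distro/datastore dependent and only an example.
    mysql --socket=/var/run/mysqld/mysqld.sock -u root -e "SELECT 1;"
    # ...rather than:
    # mysql -h 127.0.0.1 -u root -e "SELECT 1;"

    # Lower the guest agent call timeout to 5 minutes (300 seconds) in trove.conf.
    iniset /etc/trove/trove.conf DEFAULT agent_call_high_timeout 300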


@@ -142,6 +142,9 @@
 devstack_localrc:
 TROVE_RESIZE_TIME_OUT: 1800
 trove_resize_time_out: 1800
+trove_test_datastore: 'mysql'
+trove_test_group: 'mysql'
+trove_test_datastore_version: '5.7'
 - job:
 name: trove-grenade
@@ -196,6 +199,7 @@
 vars:
 trove_test_datastore: mariadb
 trove_test_group: mariadb-supported-single
+trove_test_datastore_version: 10.4
 devstack_localrc:
 TROVE_ENABLE_IMAGE_BUILD: false
@@ -205,6 +209,7 @@
 vars:
 trove_test_datastore: mariadb
 trove_test_group: mariadb-supported-multi
+trove_test_datastore_version: 10.4
 devstack_localrc:
 TROVE_ENABLE_IMAGE_BUILD: false
@@ -214,6 +219,7 @@
 vars:
 trove_test_datastore: mysql
 trove_test_group: mysql-supported-single
+trove_test_datastore_version: 5.7
 - job:
 name: trove-scenario-mysql-multi
@@ -221,6 +227,7 @@
 vars:
 trove_test_datastore: mysql
 trove_test_group: mysql-supported-multi
+trove_test_datastore_version: 5.7
 - job:
 name: trove-scenario-percona-multi


@@ -390,9 +390,11 @@ function setup_mgmt_network() {
 die_if_not_set $LINENO network_id "Failed to create network: $NET_NAME, project: ${PROJECT_ID}"
 if [[ "$IP_VERSION" =~ 4.* ]]; then
-NEW_SUBNET_ID=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE})
+net_subnet_id=$(create_mgmt_subnet_v4 ${PROJECT_ID} ${network_id} ${SUBNET_NAME} ${SUBNET_RANGE})
-openstack router add subnet $ROUTER_ID $NEW_SUBNET_ID
+# 'openstack router add' has a bug that cound't show the error message
+# openstack router add subnet ${ROUTER_ID} ${net_subnet_id} --debug
 fi
 # Trove doesn't support IPv6 for now.
 # if [[ "$IP_VERSION" =~ .*6 ]]; then
 # NEW_IPV6_SUBNET_ID=$(create_subnet_v6 ${PROJECT_ID} ${network_id} ${IPV6_SUBNET_NAME})
@@ -454,32 +456,25 @@ function create_guest_image {
 ${TROVE_IMAGE_OS_RELEASE} \
 true
-image_name="trove-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}"
+image_name="trove-datastore-${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE}"
 image_file=$HOME/images/${image_name}.qcow2
 if [ ! -f ${image_file} ]; then
-echo "Image file was not found at ${image_file}. Probably it was not created."
+echo "Image file was not found at ${image_file}"
 return 1
 fi
-ACTIVE=1
-INACTIVE=0
 echo "Add the image to glance"
 glance_image_id=$(openstack --os-region-name RegionOne --os-password ${SERVICE_PASSWORD} \
 --os-project-name service --os-username trove \
-image create ${TROVE_IMAGE_OS}-${TROVE_IMAGE_OS_RELEASE}-${TROVE_DATASTORE_TYPE} \
+image create ${image_name} \
 --disk-format qcow2 --container-format bare --property hw_rng_model='virtio' --file ${image_file} \
 -c id -f value)
 echo "Register the image in datastore"
 $TROVE_MANAGE datastore_update $TROVE_DATASTORE_TYPE ""
-$TROVE_MANAGE datastore_version_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION $TROVE_DATASTORE_TYPE $glance_image_id "" $ACTIVE
+$TROVE_MANAGE datastore_version_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION $TROVE_DATASTORE_TYPE $glance_image_id "" 1
 $TROVE_MANAGE datastore_update $TROVE_DATASTORE_TYPE $TROVE_DATASTORE_VERSION
-# just for tests
-$TROVE_MANAGE datastore_version_update "$TROVE_DATASTORE_TYPE" "inactive_version" "manager1" $glance_image_id "" $INACTIVE
-$TROVE_MANAGE datastore_update Test_Datastore_1 ""
 echo "Add parameter validation rules if available"
 if [ -f $DEST/trove/trove/templates/$TROVE_DATASTORE_TYPE/validation-rules.json ]; then
 $TROVE_MANAGE db_load_datastore_config_parameters "$TROVE_DATASTORE_TYPE" "$TROVE_DATASTORE_VERSION" \
@@ -546,6 +541,8 @@ function config_trove_network {
 openstack network list
 echo "Neutron subnet list:"
 openstack subnet list
+echo "Neutron router:"
+openstack router show ${ROUTER_ID} -f yaml
 echo "ip route:"
 sudo ip route
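
For reference, with example values substituted (mysql/5.7, and assuming $TROVE_MANAGE resolves to the trove-manage CLI), the registration step above amounts to:

    trove-manage datastore_update mysql ""
    trove-manage datastore_version_update mysql 5.7 mysql <glance-image-id> "" 1
    trove-manage datastore_update mysql 5.7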


@@ -56,6 +56,8 @@ if is_service_enabled neutron; then
 TROVE_MGMT_NETWORK_NAME=${TROVE_MGMT_NETWORK_NAME:-"trove-mgmt"}
 TROVE_MGMT_SUBNET_NAME=${TROVE_MGMT_SUBNET_NAME:-${TROVE_MGMT_NETWORK_NAME}-subnet}
 TROVE_MGMT_SUBNET_RANGE=${TROVE_MGMT_SUBNET_RANGE:-"192.168.254.0/24"}
+TROVE_MGMT_SUBNET_START=${TROVE_MGMT_SUBNET_START:-"192.168.254.2"}
+TROVE_MGMT_SUBNET_END=${TROVE_MGMT_SUBNET_END:-"192.168.254.200"}
 else
 TROVE_HOST_GATEWAY=${NETWORK_GATEWAY:-10.0.0.1}
 fi
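
The two new variables bound the allocation pool of the management subnet. A hedged example of overriding them via devstack's local.conf (placeholder values, not defaults from this change):

    # [[local|localrc]] section of local.conf
    TROVE_MGMT_SUBNET_RANGE=10.10.10.0/24
    TROVE_MGMT_SUBNET_START=10.10.10.2
    TROVE_MGMT_SUBNET_END=10.10.10.200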


@ -1,360 +0,0 @@
.. _create_trove_instance:
==============================
How to create a trove instance
==============================
While creating a trove instance, I often have problems with cinder
volumes and nova servers, this is due to my lack of knowledge in the area.
This post is to help describe my journey on creating a trove instance.
----------------
Installing trove
----------------
I use the integration tools provided by trove to install the required services.
This is already covered in the install guide.
Install trove
.. code-block:: bash
/trove/integration/scripts$ ./trovestack install
Once that completes, I use the command kick-start that gets a datastore
ready for us to use and target for our trove instance. This shows the
mysql datastore.
.. code-block:: bash
/trove/integration/scripts$ ./trovestack kick-start mysql
Note: This command doesn't give you a completion message.
You can view the available datastores by running the following command
.. code-block:: bash
$ trove datastore-list
+--------------------------------------+------------------+
| ID | Name |
+--------------------------------------+------------------+
| 137c27ee-d491-4a54-90ab-06307e9f6bf6 | mysql |
| aea3d4c5-9c2e-48ae-b100-527b18d4eb02 | Test_Datastore_1 |
| b8583e8c-8177-480e-889e-a73c5290b558 | test_ds |
+--------------------------------------+------------------+
Once that is done, view the image that was built for the datastore you have
kick-started and identify the resources required for it.
.. code-block:: bash
$ openstack image list
+--------------------------------------+--------------------------+--------+
| ID | Name | Status |
+--------------------------------------+--------------------------+--------+
| 37d4b996-14c2-4981-820e-3ac87bb4c5a2 | cirros-0.3.5-x86_64-disk | active |
| 2d7d930a-d606-4934-8602-851207546fee | ubuntu_mysql | active |
+--------------------------------------+--------------------------+--------+
Grab the ID from the list and run the following command to view the size of
the image.
.. code-block:: bash
$ openstack image show ubuntu_mysql
+------------------+------------------------------------------------------+
| Field | Value |
+------------------+------------------------------------------------------+
| checksum | 9facdf0670ccb58ea27bf665e4fdcdf5 |
| container_format | bare |
| created_at | 2017-05-26T14:35:39Z |
| disk_format | qcow2 |
| file | /v2/images/2d7d930a-d606-4934-8602-851207546fee/file |
| id | 2d7d930a-d606-4934-8602-851207546fee |
| min_disk | 0 |
| min_ram | 0 |
| name | ubuntu_mysql |
| owner | e765230cd96f47f294f910551ec3c1f4 |
| protected | False |
| schema | /v2/schemas/image |
| size | 633423872 |
| status | active |
| tags | |
| updated_at | 2017-05-26T14:35:42Z |
| virtual_size | None |
| visibility | public |
+------------------+------------------------------------------------------+
Take the value that says size, this is 633423872 in bytes. Cinder volumes are
in gigabytes so 633423872 becomes:
633423872 / 1024
618578 # KB
618578 / 1024
604 # MB
604 / 1024
0 # < 1 GB so we will round up.
Then test that you can create the cinder volume:
.. code-block:: bash
$ cinder create --name my-v 1
+--------------------------------+--------------------------------------+
| Property | Value |
+--------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2017-05-26T16:37:55.000000 |
| description | None |
| encrypted | False |
| id | 7a2da60f-cc1b-4798-ba7a-1f0215c74615 |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | my-v |
| os-vol-host-attr:host | None |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | e765230cd96f47f294f910551ec3c1f4 |
| replication_status | None |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | creating |
| updated_at | None |
| user_id | cf1e59dc2e4d4aeca51aa050faac15c2 |
| volume_type | lvmdriver-1 |
+--------------------------------+--------------------------------------+
Next, verify the cinder volume status has moved from creating to available.
.. code-block:: bash
$ cinder show my-v
+--------------------------------+--------------------------------------+
| Property | Value |
+--------------------------------+--------------------------------------+
| attachments | [] |
| availability_zone | nova |
| bootable | false |
| consistencygroup_id | None |
| created_at | 2017-05-26T16:37:55.000000 |
| description | None |
| encrypted | False |
| id | 7a2da60f-cc1b-4798-ba7a-1f0215c74615 |
| metadata | {} |
| migration_status | None |
| multiattach | False |
| name | my-v |
| os-vol-host-attr:host | ubuntu@lvmdriver-1#lvmdriver-1 |
| os-vol-mig-status-attr:migstat | None |
| os-vol-mig-status-attr:name_id | None |
| os-vol-tenant-attr:tenant_id | e765230cd96f47f294f910551ec3c1f4 |
| replication_status | None |
| size | 1 |
| snapshot_id | None |
| source_volid | None |
| status | available |
| updated_at | 2017-05-26T16:37:56.000000 |
| user_id | cf1e59dc2e4d4aeca51aa050faac15c2 |
| volume_type | lvmdriver-1 |
+--------------------------------+--------------------------------------+
Ok, now we know that works so lets delete it.
.. code-block:: bash
$ cinder delete my-v
Next is to choose a server flavor that fits the requirements of your datastore
and do not exceed your computer hardware limitations.
.. code-block:: bash
$ trove flavor-list
+------+--------------------------+--------+-------+------+-----------+
| ID | Name | RAM | vCPUs | Disk | Ephemeral |
+------+--------------------------+--------+-------+------+-----------+
| 1 | m1.tiny | 512 | 1 | 1 | 0 |
| 10 | test.tiny-3 | 512 | 1 | 3 | 0 |
| 10e | test.eph.tiny-3 | 512 | 1 | 3 | 1 |
| 10er | test.eph.tiny-3.resize | 528 | 2 | 3 | 1 |
| 10r | test.tiny-3.resize | 528 | 2 | 3 | 0 |
| 15 | test.small-3 | 768 | 1 | 3 | 0 |
| 15e | test.eph.small-3 | 768 | 1 | 3 | 1 |
| 15er | test.eph.small-3.resize | 784 | 2 | 3 | 1 |
| 15r | test.small-3.resize | 784 | 2 | 3 | 0 |
| 16 | test.small-4 | 768 | 1 | 4 | 0 |
| 16e | test.eph.small-4 | 768 | 1 | 4 | 1 |
| 16er | test.eph.small-4.resize | 784 | 2 | 4 | 1 |
| 16r | test.small-4.resize | 784 | 2 | 4 | 0 |
| 17 | test.small-5 | 768 | 1 | 5 | 0 |
| 17e | test.eph.small-5 | 768 | 1 | 5 | 1 |
| 17er | test.eph.small-5.resize | 784 | 2 | 5 | 1 |
| 17r | test.small-5.resize | 784 | 2 | 5 | 0 |
| 2 | m1.small | 2048 | 1 | 20 | 0 |
| 20 | test.medium-4 | 1024 | 1 | 4 | 0 |
| 20e | test.eph.medium-4 | 1024 | 1 | 4 | 1 |
| 20er | test.eph.medium-4.resize | 1040 | 2 | 4 | 1 |
| 20r | test.medium-4.resize | 1040 | 2 | 4 | 0 |
| 21 | test.medium-5 | 1024 | 1 | 5 | 0 |
| 21e | test.eph.medium-5 | 1024 | 1 | 5 | 1 |
| 21er | test.eph.medium-5.resize | 1040 | 2 | 5 | 1 |
| 21r | test.medium-5.resize | 1040 | 2 | 5 | 0 |
| 25 | test.large-5 | 2048 | 1 | 5 | 0 |
| 25e | test.eph.large-5 | 2048 | 1 | 5 | 1 |
| 25er | test.eph.large-5.resize | 2064 | 2 | 5 | 1 |
| 25r | test.large-5.resize | 2064 | 2 | 5 | 0 |
| 26 | test.large-10 | 2048 | 1 | 10 | 0 |
| 26e | test.eph.large-10 | 2048 | 1 | 10 | 1 |
| 26er | test.eph.large-10.resize | 2064 | 2 | 10 | 1 |
| 26r | test.large-10.resize | 2064 | 2 | 10 | 0 |
| 27 | test.large-15 | 2048 | 1 | 15 | 0 |
| 27e | test.eph.large-15 | 2048 | 1 | 15 | 1 |
| 27er | test.eph.large-15.resize | 2064 | 2 | 15 | 1 |
| 27r | test.large-15.resize | 2064 | 2 | 15 | 0 |
| 3 | m1.medium | 4096 | 2 | 40 | 0 |
| 30 | test.fault_1-1 | 512 | 1 | 1 | 0 |
| 30e | test.eph.fault_1-1 | 512 | 1 | 1 | 1 |
| 31 | test.fault_2-5 | 131072 | 1 | 5 | 0 |
| 31e | test.eph.fault_2-5 | 131072 | 1 | 5 | 1 |
| 4 | m1.large | 8192 | 4 | 80 | 0 |
| 42 | m1.nano | 64 | 1 | 0 | 0 |
| 451 | m1.heat | 512 | 1 | 0 | 0 |
| 5 | m1.xlarge | 16384 | 8 | 160 | 0 |
| 84 | m1.micro | 128 | 1 | 0 | 0 |
| c1 | cirros256 | 256 | 1 | 0 | 0 |
| d1 | ds512M | 512 | 1 | 5 | 0 |
| d2 | ds1G | 1024 | 1 | 10 | 0 |
| d3 | ds2G | 2048 | 2 | 10 | 0 |
| d4 | ds4G | 4096 | 4 | 20 | 0 |
+------+--------------------------+--------+-------+------+-----------+
The flavor sizes are in megabytes, check your computer disk space and pick a
flavor less than your limitations.
.. code-block:: bash
$ df -h
Filesystem Size Used Avail Use% Mounted on
udev 7.9G 0 7.9G 0% /dev
tmpfs 1.6G 162M 1.5G 11% /run
/dev/mapper/ubuntu--vg-root 33G 11G 21G 34% /
tmpfs 7.9G 4.0K 7.9G 1% /dev/shm
tmpfs 5.0M 0 5.0M 0% /run/lock
tmpfs 7.9G 0 7.9G 0% /sys/fs/cgroup
/dev/vda1 472M 102M 346M 23% /boot
tmpfs 1.6G 0 1.6G 0% /run/user/1000
/dev/loop0 6.0G 650M 5.4G 11% /opt/stack/data/swift/drives/sdb1
I have a lot of partitions I don't understand but ubuntu--vg-root is the one
setup by LVM during the install and it is the largest one so I'm going to use 21G
as my upper limit. Now I only need 1G, this information is still good to know when
you are dealing with multiple instances, larger images, or limited disk space.
Flavors also use RAM so it's important to check your free memory.
.. code-block:: bash
$ free -h
total used free shared buff/cache available
Mem: 15G 5.1G 5.0G 150M 5.5G 10G
Swap: 15G 4.1M 15G
I have given my VM 16GB RAM and it shows I have 5GB free. So In order to be safe,
I will choose test-small-3 (ID 15), this is RAM 768 and disk size 3GB. The disk size must be
greater than 604MB from the ubuntu_mysql image requirements, but we rounded to 1GB to
be safe.
After all of this we are ready to create our trove instance.
.. code-block:: bash
$ trove create my-inst 15 --size 1
+-------------------------+--------------------------------------+
| Property | Value |
+-------------------------+--------------------------------------+
| created | 2017-05-26T16:53:06 |
| datastore | mysql |
| datastore_version | 5.6 |
| encrypted_rpc_messaging | True |
| flavor | 15 |
| id | 39f8ac9e-2935-40fb-8b09-8a963fb235bd |
| name | my-inst |
| region | RegionOne |
| server_id | None |
| status | BUILD |
| tenant_id | e765230cd96f47f294f910551ec3c1f4 |
| updated | 2017-05-26T16:53:06 |
| volume | 1 |
| volume_id | None |
+-------------------------+--------------------------------------+
Now we view the details to see if it is successful.
.. code-block:: bash
$ trove show my-inst
+-------------------------+--------------------------------------+
| Property | Value |
+-------------------------+--------------------------------------+
| created | 2017-05-26T16:53:07 |
| datastore | mysql |
| datastore_version | 5.6 |
| encrypted_rpc_messaging | True |
| flavor | 15 |
| id | 39f8ac9e-2935-40fb-8b09-8a963fb235bd |
| name | my-inst |
| region | RegionOne |
| server_id | 62399b7e-dec1-4606-9297-3b3711a62d68 |
| status | BUILD |
| tenant_id | e765230cd96f47f294f910551ec3c1f4 |
| updated | 2017-05-26T16:53:13 |
| volume | 1 |
| volume_id | da3b3951-7f7a-4c71-86b9-f0059da814f8 |
+-------------------------+--------------------------------------+
Notice, status still says BUILD but we now have a server_id and volume_id.
After waiting a few moments, check it again.
.. code-block:: bash
$ trove show my-inst
+-------------------------+--------------------------------------+
| Property | Value |
+-------------------------+--------------------------------------+
| created | 2017-05-26T16:53:07 |
| datastore | mysql |
| datastore_version | 5.6 |
| encrypted_rpc_messaging | True |
| flavor | 15 |
| id | 39f8ac9e-2935-40fb-8b09-8a963fb235bd |
| name | my-inst |
| region | RegionOne |
| server_id | 62399b7e-dec1-4606-9297-3b3711a62d68 |
| status | ACTIVE |
| tenant_id | e765230cd96f47f294f910551ec3c1f4 |
| updated | 2017-05-26T16:53:13 |
| volume | 1 |
| volume_id | da3b3951-7f7a-4c71-86b9-f0059da814f8 |
| volume_used | 0.1 |
+-------------------------+--------------------------------------+
The status is now set to ACTIVE and you are done!


@@ -11,4 +11,3 @@ functionality, the following resources are provided.
 design
 testing
-how_to_create_a_trove_instance.rst


@@ -33,21 +33,8 @@ For an in-depth look at the project's design and structure, see the
 - `Trove`_
 - `Trove Client`_
-* `Trove Wiki`_ on OpenStack
 * `Trove API Documentation`_ on docs.openstack.org
-* `Trove Blueprints`_ on storyboard.openstack.org
+* `Trove storyboard`_ on storyboard.openstack.org
-* `Trove Bugs`_ on storyboard.openstack.org
-Guest Images
-============
-In order to use Trove, you need to have Guest Images for each
-datastore and version. These images are loaded into Glance and
-registered with Trove.
-For those wishing to develop guest images, please refer to the
-:ref:`build_guest_images` page.
 Search Trove Documentation
@@ -56,9 +43,7 @@ Search Trove Documentation
 * :ref:`search`
-.. _Trove Wiki: https://wiki.openstack.org/wiki/Trove
 .. _Trove: https://opendev.org/openstack/trove
 .. _Trove Client: https://opendev.org/openstack/python-troveclient
 .. _Trove API Documentation: https://docs.openstack.org/api-ref/database/
-.. _Trove Blueprints: https://storyboard.openstack.org/#!/project/openstack/trove
+.. _Trove storyboard: https://storyboard.openstack.org/#!/project/openstack/trove
-.. _Trove Bugs: https://storyboard.openstack.org/#!/project/openstack/trove


@@ -1,6 +1,5 @@
 {
 "report_directory":"rdli-test-report",
-"start_services": false,
 "test_mgmt":false,
 "use_local_ovz":false,
@@ -16,7 +15,6 @@
 "nova_conf":"/home/vagrant/nova.conf",
 "keystone_code_root":"/opt/stack/keystone",
 "keystone_conf":"/etc/keystone/keystone.conf",
-"keystone_use_combined":true,
 "trove_code_root":"/opt/stack/trove",
 "trove_conf":"/tmp/trove.conf",
 "trove_version":"v1.0",


@@ -37,23 +37,11 @@
 ],
 "flavors": null,
 "white_box":false,
-"start_services": %startservices%,
 "test_mgmt":false,
 "use_local_ovz":false,
 "use_venv":false,
-"glance_code_root":"%glance_path%",
-"glance_api_conf":"/vagrant/conf/glance-api.conf",
-"glance_reg_conf":"/vagrant/conf/glance-reg.conf",
-"glance_images_directory": "/glance_images",
-"glance_image": "debian-squeeze-x86_64-openvz.tar.gz",
 "report_directory":"%report_directory%",
 "usr_bin_dir":"%bin_path%",
-"nova_code_root":"%nova_path%",
-"nova_conf":"/home/vagrant/nova.conf",
-"keystone_code_root":"%keystone_path%",
-"keystone_conf":"/etc/keystone/keystone.conf",
-"keystone_use_combined":true,
-"trove_code_root":"%trove_path%",
 "trove_conf":"/tmp/trove.conf",
 "trove_version":"v1.0",
 "trove_api_updated":"2012-08-01T00:00:00Z",


@ -1,3 +0,0 @@
Sets up a MySQL server install in the image.
TODO: auto-tune settings based on host resources or metadata service.


@@ -6,5 +6,3 @@
 dd if=/tmp/in_target.d/trove-guest.service of=/etc/systemd/system/trove-guest.service
 systemctl enable trove-guest.service


@@ -6,6 +6,7 @@
 GUEST_UNIT_DROPINS="/etc/systemd/system/trove-guest.service.d"
 mkdir -v -p ${GUEST_UNIT_DROPINS}
-echo -e '[Service]\nEnvironment=REQUESTS_CA_BUNDLE=/etc/ssl/certs' > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf
+cat <<EOF > ${GUEST_UNIT_DROPINS}/30-use-system-certificates.conf
+[Service]
+Environment=REQUESTS_CA_BUNDLE=/etc/ssl/certs
+EOF


@@ -1 +1 @@
 ubuntu-mariadb


@@ -14,17 +14,17 @@ apt-key adv --recv-keys --keyserver hkp://keyserver.ubuntu.com:80 0xF1656F24C74C
 curl -sS https://downloads.mariadb.com/MariaDB/mariadb_repo_setup |
 bash -s -- --mariadb-server-version="mariadb-10.4" --skip-key-import --skip-maxscale
+apt-get install -y -qq apt-transport-https ca-certificates gnupg2
 # NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html
 wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
 dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
-apt-get install -y -qq apt-transport-https ca-certificates
-apt-get update -qq
 # Disable password prompt
 debconf-set-selections <<< "mariadb-server mysql-server/root_password password ''"
 debconf-set-selections <<< "mariadb-server mysql-server/root_password_again password ''"
+apt-get update -qq
 apt-get install -y -qq --allow-unauthenticated mariadb-server mariadb-client galera-4 libmariadb3 mariadb-backup mariadb-common
 cat <<EOF >/etc/mysql/conf.d/no_perf_schema.cnf
@@ -34,5 +34,6 @@ EOF
 chown mysql:mysql /etc/mysql/my.cnf
 rm -f /etc/init.d/mysql
 systemctl daemon-reload
 systemctl enable mariadb


@@ -8,44 +8,15 @@ set -o xtrace
 export DEBIAN_FRONTEND=noninteractive
-apt-get --allow-unauthenticated -y install mysql-client mysql-server
+apt-get --allow-unauthenticated -y install mysql-client mysql-server gnupg2
-# Xenial provides mysql 5.7 which requires percona-xtrabackup-24
-PXB_VERSION_OVERRIDE=24
-#PKGS=$(apt-cache search percona-xtrabackup-${PXB_VERSION_OVERRIDE})
-#if [[ "$PKGS" == *"percona-xtrabackup-$PXB_VERSION_OVERRIDE"* ]]; then
-# apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE}
-#else
-# # Architecture is not supported by percona website. Compile and install it
-# PXB_VERSION=${PXB_VERSION_OVERRIDE:0:1}.${PXB_VERSION_OVERRIDE:1:1}
-#
-# apt-get --allow-unauthenticated -y install build-essential flex bison automake autoconf \
-# libtool cmake libaio-dev mysql-client libncurses-dev zlib1g-dev \
-# libgcrypt11-dev libev-dev libcurl4-gnutls-dev vim-common
-#
-# pushd /tmp
-#
-# git clone https://github.com/percona/percona-xtrabackup.git
-# cd percona-xtrabackup
-# git checkout $PXB_VERSION
-#
-# mkdir /tmp/boost
-# cmake -DDOWNLOAD_BOOST=1 -DWITH_BOOST=/tmp/boost -DBUILD_CONFIG=xtrabackup_release -DWITH_MAN_PAGES=OFF && make -j4
-# make install
-# ln -s /usr/local/xtrabackup/bin/* /usr/bin/
-#
-# dpkg -P build-essential automake autoconf libtool cmake
-# apt-get -y clean
-#
-# popd
-#
-# rm -rf /tmp/boost /tmp/percona-xtrabackup
-#fi
 # NOTE(lxkong): Refer to https://www.percona.com/doc/percona-xtrabackup/2.4/installation/apt_repo.html
 wget https://repo.percona.com/apt/percona-release_latest.$(lsb_release -sc)_all.deb
 dpkg -i percona-release_latest.$(lsb_release -sc)_all.deb
 apt-get update
+# Xenial provides mysql 5.7 which requires percona-xtrabackup-24
+PXB_VERSION_OVERRIDE=24
 apt-get --allow-unauthenticated -y install percona-xtrabackup-${PXB_VERSION_OVERRIDE}
 cat >/etc/mysql/conf.d/no_perf_schema.cnf <<_EOF_
@@ -58,7 +29,6 @@ mv /etc/mysql/my.cnf.fallback /etc/mysql/my.cnf
 chown mysql:mysql /etc/mysql/my.cnf
 cat >/etc/mysql/my.cnf <<_EOF_
 [mysql]
 !includedir /etc/mysql/conf.d/
 _EOF_


@@ -1 +1 @@
 ubuntu-postgresql


@@ -1,7 +1,9 @@
 [Unit]
 Description=Trove Guest
-After=syslog.target
-After=network.target
+After=syslog.target network.target
+[Install]
+WantedBy=multi-user.target
 [Service]
 Type=simple
@@ -13,25 +15,23 @@ Group=GUEST_USERNAME
 # CONTROLLER=192.168.32.151
 EnvironmentFile=/etc/trove/controller.conf
-ExecStartPre=/bin/bash -c "sudo mkdir -p GUEST_LOGDIR ; sudo chown GUEST_USERNAME:root GUEST_LOGDIR"
+ExecStartPre=/bin/bash -c "sudo mkdir -p GUEST_LOGDIR"
 # If ~/trove-installed does not exist, copy the trove source from
 # the user's development environment, then touch the sentinel file
-ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed"
+ExecStartPre=/bin/bash -c "test -e /home/GUEST_USERNAME/trove-installed || sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:PATH_TROVE/ /home/GUEST_USERNAME/trove && touch /home/GUEST_USERNAME/trove-installed"
 # If /etc/trove does not exist, create it and then copy the trove-guestagent.conf
 # from /etc/trove on the user's development environment,
-ExecStartPre=/bin/bash -c "test -d /etc/trove/conf.d || sudo mkdir -p /etc/trove/conf.d && sudo -u GUEST_USERNAME rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/ && sudo mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/conf.d/trove-guestagent.conf"
+ExecStartPre=/bin/bash -c "test -d /etc/trove/conf.d || sudo mkdir -p /etc/trove/conf.d && sudo rsync -e 'ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -i /home/GUEST_USERNAME/.ssh/id_rsa' -avz --exclude='.*' HOST_SCP_USERNAME@$CONTROLLER:/etc/trove/trove-guestagent.conf ~GUEST_USERNAME/ && sudo mv ~GUEST_USERNAME/trove-guestagent.conf /etc/trove/conf.d/trove-guestagent.conf"
-ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove"
+ExecStartPre=/bin/bash -c "sudo chown -R GUEST_USERNAME:root /etc/trove /home/GUEST_USERNAME/trove GUEST_LOGDIR"
-ExecStart=/home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d
+# Start trove-guest.service
+ExecStart=/bin/bash -c "/home/GUEST_USERNAME/trove/contrib/trove-guestagent --config-dir=/etc/trove/conf.d"
+# Give a reasonable amount of time for the server to start up/shut down
 TimeoutSec=300
+Restart=on-failure
-# PgSql doesn't play nice with PrivateTmp
+# PostgreSQL doesn't play nice with PrivateTmp
 PrivateTmp=false
-[Install]
-WantedBy=multi-user.target


@@ -66,7 +66,9 @@ function build_vm() {
 elementes="$elementes pip-cache"
 elementes="$elementes guest-agent"
 else
+# Install guest agent dependencies, user, etc.
 elementes="$elementes ${guest_os}-guest"
+# Install guest agent service
 elementes="$elementes ${guest_os}-${guest_release}-guest"
 fi
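
The comments above mark which DIB elements each branch pulls in. For a concrete guest, e.g. guest_os=ubuntu and guest_release=xenial (example values only), the second branch expands to:

    elementes="$elementes ubuntu-guest"          # guest agent dependencies, user, etc.
    elementes="$elementes ubuntu-xenial-guest"   # trove-guest systemd service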


@ -514,10 +514,6 @@ function get_field() {
done done
} }
function get_glance_id () {
echo `$@ | grep ' id ' | get_field 2`
}
function set_bin_path() { function set_bin_path() {
if is_fedora; then if is_fedora; then
sed -i "s|%bin_path%|/usr/bin|g" $TEST_CONF sed -i "s|%bin_path%|/usr/bin|g" $TEST_CONF
@ -526,35 +522,16 @@ function set_bin_path() {
fi fi
} }
function set_mysql_pkg() {
if is_fedora; then
MYSQL_PKG="mysql-community-server"
MYSQL_VER="5.6"
else
if [[ "$RELEASE" == "xenial" || "$RELEASE" == "bionic" ]]; then
MYSQL_PKG="mysql-server-5.7"
MYSQL_VER="5.7"
else
MYSQL_PKG="mysql-server-5.6"
MYSQL_VER="5.6"
fi
fi
}
function cmd_set_datastore() { function cmd_set_datastore() {
local IMAGEID=$1 local IMAGEID=$1
local DATASTORE_TYPE=$2 local DATASTORE_TYPE=$2
local RESTART_TROVE=${3:-$(get_bool RESTART_TROVE "true")}
# rd_manage datastore_update <datastore_name> <default_version> # rd_manage datastore_update <datastore_name> <default_version>
rd_manage datastore_update "$DATASTORE_TYPE" "" rd_manage datastore_update "$DATASTORE_TYPE" ""
PACKAGES=${PACKAGES:-""} PACKAGES=${PACKAGES:-""}
if [ "$DATASTORE_TYPE" == "mysql" ]; then if [ "$DATASTORE_TYPE" == "mysql" ]; then
set_mysql_pkg VERSION="5.7"
PACKAGES=${PACKAGES:-$MYSQL_PKG}
VERSION=$MYSQL_VER
elif [ "$DATASTORE_TYPE" == "percona" ]; then elif [ "$DATASTORE_TYPE" == "percona" ]; then
PACKAGES=${PACKAGES:-"percona-server-server-5.6"} PACKAGES=${PACKAGES:-"percona-server-server-5.6"}
VERSION="5.6" VERSION="5.6"
@ -562,7 +539,6 @@ function cmd_set_datastore() {
PACKAGES=${PACKAGES:-"percona-xtradb-cluster-server-5.6"} PACKAGES=${PACKAGES:-"percona-xtradb-cluster-server-5.6"}
VERSION="5.6" VERSION="5.6"
elif [ "$DATASTORE_TYPE" == "mariadb" ]; then elif [ "$DATASTORE_TYPE" == "mariadb" ]; then
PACKAGES=${PACKAGES:-"mariadb-server"}
VERSION="10.4" VERSION="10.4"
elif [ "$DATASTORE_TYPE" == "mongodb" ]; then elif [ "$DATASTORE_TYPE" == "mongodb" ]; then
PACKAGES=${PACKAGES:-"mongodb-org"} PACKAGES=${PACKAGES:-"mongodb-org"}
@ -577,7 +553,6 @@ function cmd_set_datastore() {
PACKAGES=${PACKAGES:-"couchbase-server"} PACKAGES=${PACKAGES:-"couchbase-server"}
VERSION="2.2.0" VERSION="2.2.0"
elif [ "$DATASTORE_TYPE" == "postgresql" ]; then elif [ "$DATASTORE_TYPE" == "postgresql" ]; then
PACKAGES=${PACKAGES:-"postgresql-9.6"}
VERSION="9.6" VERSION="9.6"
elif [ "$DATASTORE_TYPE" == "couchdb" ]; then elif [ "$DATASTORE_TYPE" == "couchdb" ]; then
PACKAGES=${PACKAGES:-"couchdb"} PACKAGES=${PACKAGES:-"couchdb"}
@ -593,28 +568,14 @@ function cmd_set_datastore() {
exit 1 exit 1
fi fi
sed -i "s/%datastore_type%/$DATASTORE_TYPE/g" $TEST_CONF # trove-manage datastore_version_update <datastore_name> <version_name> <datastore_manager> <image_id> <packages> <active>
sed -i "s/%datastore_version%/$VERSION/g" $TEST_CONF
#rd_manage datastore_version_update <datastore_name> <version_name> <datastore_manager> <image_id> <packages> <active>
rd_manage datastore_version_update "$DATASTORE_TYPE" "$VERSION" "$DATASTORE_TYPE" $IMAGEID "$PACKAGES" 1 rd_manage datastore_version_update "$DATASTORE_TYPE" "$VERSION" "$DATASTORE_TYPE" $IMAGEID "$PACKAGES" 1
rd_manage datastore_version_update "$DATASTORE_TYPE" "inactive_version" "manager1" $IMAGEID "" 0
rd_manage datastore_update "$DATASTORE_TYPE" "$VERSION" rd_manage datastore_update "$DATASTORE_TYPE" "$VERSION"
rd_manage datastore_update Test_Datastore_1 ""
if [ -f "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json ]; then if [ -f "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json ]; then
# add the configuration parameters to the database for the kick-start datastore # add the configuration parameters to the database for the kick-start datastore
rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json rd_manage db_load_datastore_config_parameters "$DATASTORE_TYPE" "$VERSION" "$PATH_TROVE"/trove/templates/$DATASTORE_TYPE/validation-rules.json
fi fi
if [[ "${RESTART_TROVE}" == true ]]; then
cmd_stop
fi
iniset $TROVE_CONF DEFAULT default_datastore "$DATASTORE_TYPE"
sleep 1.5
if [[ "${RESTART_TROVE}" == true ]]; then
cmd_start
fi
} }
############################################################################### ###############################################################################
@ -681,7 +642,8 @@ function install_test_packages() {
} }
function mod_confs() { function mod_confs() {
DATASTORE_TYPE=$1 local DATASTORE_TYPE=$1
local DATASTORE_VERSION=$2
exclaim "Running mod_confs ..." exclaim "Running mod_confs ..."
sudo install -b --mode 0664 $TROVESTACK_SCRIPTS/conf/test_begin.conf $TEST_CONF sudo install -b --mode 0664 $TROVESTACK_SCRIPTS/conf/test_begin.conf $TEST_CONF
@ -701,20 +663,9 @@ function mod_confs() {
cat $DATASTORE_CONF | sudo tee -a $TEST_CONF > /dev/null cat $DATASTORE_CONF | sudo tee -a $TEST_CONF > /dev/null
cat $TROVESTACK_SCRIPTS/conf/test_end.conf | sudo tee -a $TEST_CONF > /dev/null cat $TROVESTACK_SCRIPTS/conf/test_end.conf | sudo tee -a $TEST_CONF > /dev/null
#When running in the gate, don't start services
if [ "${DEVSTACK_GATE_TROVE}" == "1" ]; then
sed -i "s,%startservices%,false,g" ${TEST_CONF}
else
sed -i "s,%startservices%,true,g" ${TEST_CONF}
fi
#Add the paths to the test conf #Add the paths to the test conf
sed -i "s,%report_directory%,$TROVE_REPORT_DIR,g" $TEST_CONF sed -i "s,%report_directory%,$TROVE_REPORT_DIR,g" $TEST_CONF
sed -i "s,%keystone_path%,$PATH_KEYSTONE,g" $TEST_CONF
sed -i "s,%nova_path%,$PATH_NOVA,g" $TEST_CONF
sed -i "s,%glance_path%,$PATH_GLANCE,g" $TEST_CONF
sed -i "s,%trove_path%,$PATH_TROVE,g" $TEST_CONF
sed -i "s,%service_host%,$SERVICE_HOST,g" $TEST_CONF sed -i "s,%service_host%,$SERVICE_HOST,g" $TEST_CONF
sed -i "s,%swifth_path%,$PATH_SWIFT,g" $TEST_CONF
# Add the region name into test.conf # Add the region name into test.conf
sed -i "s/%region_name%/${REGION_NAME}/g" $TEST_CONF sed -i "s/%region_name%/${REGION_NAME}/g" $TEST_CONF
@ -759,8 +710,10 @@ function mod_confs() {
iniset $TROVE_CONF $DATASTORE_TYPE num_config_servers_per_cluster 1 iniset $TROVE_CONF $DATASTORE_TYPE num_config_servers_per_cluster 1
fi fi
set_bin_path sed -i "s/%datastore_type%/$DATASTORE_TYPE/g" $TEST_CONF
sed -i "s/%datastore_version%/${DATASTORE_VERSION}/g" $TEST_CONF
set_bin_path
} }
function setup_cluster_configs() { function setup_cluster_configs() {
@ -794,6 +747,7 @@ function add_test_flavors() {
function cmd_test_init() { function cmd_test_init() {
local DATASTORE_TYPE=$1 local DATASTORE_TYPE=$1
local DATASTORE_VERSION=$2
if [ -z "${DATASTORE_TYPE}" ]; then if [ -z "${DATASTORE_TYPE}" ]; then
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
@ -806,7 +760,7 @@ function cmd_test_init() {
install_test_packages "${DATASTORE_TYPE}" install_test_packages "${DATASTORE_TYPE}"
exclaim "Modifying test.conf and guest.conf with appropriate values." exclaim "Modifying test.conf and guest.conf with appropriate values."
mod_confs "${DATASTORE_TYPE}" mod_confs "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
exclaim "Creating Test Flavors." exclaim "Creating Test Flavors."
add_test_flavors add_test_flavors
@ -823,7 +777,7 @@ function cmd_build_image() {
local output=$6 local output=$6
if [[ -z "$output" ]]; then if [[ -z "$output" ]]; then
image_name="trove-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}" image_name="trove-datastore-${IMAGE_GUEST_OS}-${IMAGE_GUEST_RELEASE}-${IMAGE_DATASTORE_TYPE}"
image_folder=$HOME/images image_folder=$HOME/images
output="${image_folder}/${image_name}" output="${image_folder}/${image_name}"
fi fi
@ -840,13 +794,17 @@ function cmd_build_image() {
build_guest_image $IMAGE_DATASTORE_TYPE $IMAGE_GUEST_OS $IMAGE_GUEST_RELEASE $DEV_MODE ${guest_username} $output build_guest_image $IMAGE_DATASTORE_TYPE $IMAGE_GUEST_OS $IMAGE_GUEST_RELEASE $DEV_MODE ${guest_username} $output
} }
# Build guest image and upload to Glance, register the datastore and configuration parameters.
# We could skip the image build and upload by:
# 1. MYSQL_IMAGE_ID is passed, or
# 2. There is an image in Glance contains the datastore name
function cmd_build_and_upload_image() { function cmd_build_and_upload_image() {
local datastore_type=$1 local datastore_type=$1
local restart_trove=${2:-$(get_bool RESTART_TROVE "true")} local guest_os=${2:-"ubuntu"}
local guest_os=${3:-"ubuntu"} local guest_release=${3:-"xenial"}
local guest_release=${4:-"xenial"} local dev_mode=${4:-"true"}
local dev_mode=${5:-"true"} local guest_username=${5:-"ubuntu"}
local guest_username=${6:-"ubuntu"} local output_dir=${6:-"$HOME/images"}
if [ -z "${datastore_type}" ]; then if [ -z "${datastore_type}" ]; then
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
@ -861,21 +819,20 @@ function cmd_build_and_upload_image() {
glance_imageid=$(openstack $CLOUD_ADMIN_ARG image list | grep "$datastore_type" | awk 'NR==1 {print}' | awk '{print $2}') glance_imageid=$(openstack $CLOUD_ADMIN_ARG image list | grep "$datastore_type" | awk 'NR==1 {print}' | awk '{print $2}')
if [[ -z $glance_imageid ]]; then if [[ -z $glance_imageid ]]; then
cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} mkdir -p ${output_dir}
name=trove-datastore-${guest_os}-${guest_release}-${datastore_type}
output=${output_dir}/$name.qcow2
cmd_build_image ${datastore_type} ${guest_os} ${guest_release} ${dev_mode} ${guest_username} $output
image_folder=$HOME/images glance_imageid=$(openstack ${CLOUD_ADMIN_ARG} image create $name --public --disk-format qcow2 --container-format bare --file $output --property hw_rng_model='virtio' -c id -f value)
qcow_image=`find $image_folder -name '*.qcow2'`
image_url="file://$qcow_image"
glance_imageid=`get_glance_id upload_image $image_url`
[[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1 [[ -z "$glance_imageid" ]] && echo "Glance upload failed!" && exit 1
echo "IMAGE ID: $glance_imageid"
fi fi
fi fi
echo "IMAGEID: $glance_imageid" exclaim "Using Glance image ID: $glance_imageid"
exclaim "Updating Datastores" exclaim "Updating Datastores"
cmd_set_datastore "${glance_imageid}" "${datastore_type}" "${restart_trove}" cmd_set_datastore "${glance_imageid}" "${datastore_type}"
} }
@ -1037,7 +994,7 @@ function cmd_int_tests() {
fi fi
cd $TROVESTACK_SCRIPTS cd $TROVESTACK_SCRIPTS
if [ $# -lt 1 ]; then if [ $# -lt 1 ]; then
args="--group=blackbox" args="--group=mysql"
else else
args="$@" args="$@"
fi fi
@ -1055,23 +1012,6 @@ function cmd_int_tests() {
python $args python $args
} }
function cmd_int_tests_simple() {
exclaim "Running Trove Simple Integration Tests..."
cd $TROVESTACK_SCRIPTS
if [ $# -lt 1 ]; then
args="--group=simple_blackbox"
else
args="$@"
fi
# -- verbose makes it prettier.
# -- logging-clear-handlers keeps the novaclient and other things from
# spewing logs to stdout.
args="$INT_TEST_OPTIONS -B $TROVESTACK_TESTS/integration/int_tests.py --verbose --logging-clear-handlers $args"
echo "python $args"
python $args
}
function cmd_int_tests_white_box() { function cmd_int_tests_white_box() {
export PYTHONPATH=$PYTHONPATH:$PATH_TROVE export PYTHONPATH=$PYTHONPATH:$PATH_TROVE
export PYTHONPATH=$PYTHONPATH:$PATH_NOVA export PYTHONPATH=$PYTHONPATH:$PATH_NOVA
@ -1259,49 +1199,45 @@ function cmd_clean() {
function cmd_kick_start() { function cmd_kick_start() {
local DATASTORE_TYPE=$1 local DATASTORE_TYPE=$1
local RESTART_TROVE=${2:-$(get_bool RESTART_TROVE "true")} local DATASTORE_VERSION=$2
if [ -z "${DATASTORE_TYPE}" ]; then if [ -z "${DATASTORE_TYPE}" ]; then
exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}" exclaim "${COLOR_RED}Datastore argument was not specified.${COLOR_NONE}"
exit 1 exit 1
fi fi
exclaim "Running kick-start for $DATASTORE_TYPE (restart trove: $RESTART_TROVE)" exclaim "Running kick-start for $DATASTORE_TYPE"
dump_env dump_env
cmd_test_init "${DATASTORE_TYPE}" cmd_test_init "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
export GUEST_OS=${GUEST_OS:-"ubuntu"}
export GUEST_OS_RELEASE=${GUEST_OS_RELEASE:-"xenial"}
export GUEST_OS_USERNAME=${GUEST_OS_USERNAME:-"ubuntu"}
export DEV_MOEE=${DEV_MODE:-"true"}
cmd_build_and_upload_image "${DATASTORE_TYPE}" "${RESTART_TROVE}" "${GUEST_OS}" "${GUEST_OS_RELEASE}" "${DEV_MOEE}" "${GUEST_OS_USERNAME}"
} }
# Start functional test. The guest image should be created and registered in
# appropriate datastore before the test, the configuration parameters should
# also be loaded as well. DevStack has done all of that.
function cmd_gate_tests() { function cmd_gate_tests() {
local DATASTORE_TYPE=${1:-'mysql'} local DATASTORE_TYPE=${1:-'mysql'}
local TEST_GROUP=${2:-${DATASTORE_TYPE}} local TEST_GROUP=${2:-${DATASTORE_TYPE}}
local HOST_SCP_USERNAME=${3:-$(whoami)} local DATASTORE_VERSION=${3:-'5.7'}
local GUEST_USERNAME=${4:-'ubuntu'} local HOST_SCP_USERNAME=${4:-$(whoami)}
# We're not using devstack-gate in Zuul v3 job local GUEST_USERNAME=${5:-'ubuntu'}
if [[ $GATE_JOB_VER == "v2" ]]; then
local ESCAPED_PATH_TROVE=${5:-'\/opt\/stack\/new\/trove'}
fi
exclaim "Running cmd_gate_tests ..." exclaim "Running cmd_gate_tests ..."
export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/gate-tests-report/} export REPORT_DIRECTORY=${REPORT_DIRECTORY:=$HOME/gate-tests-report/}
export TROVE_REPORT_DIR=$HOME/gate-tests-report/ export TROVE_REPORT_DIR=$HOME/gate-tests-report/
TROVESTACK_DUMP_ENV=true export TROVESTACK_DUMP_ENV=true
export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"} export SSH_DIR=${SSH_DIR:-"$HOME/.ssh"}
# The user used to connect the db instance. # The user is used to connect with the db instance during testing.
export TROVE_TEST_SSH_USER=${TROVE_TEST_SSH_USER:-"ubuntu"} export TROVE_TEST_SSH_USER=${TROVE_TEST_SSH_USER:-"ubuntu"}
# This var is used to ssh into the db instance during the test. # This var is used to ssh into the db instance during testing.
export TROVE_TEST_SSH_KEY_FILE=${SSH_DIR}/id_rsa export TROVE_TEST_SSH_KEY_FILE=${SSH_DIR}/id_rsa
cd $TROVESTACK_SCRIPTS cd $TROVESTACK_SCRIPTS
local RESTART_TROVE=false
cmd_kick_start "${DATASTORE_TYPE}" "${RESTART_TROVE}" # Build and upload guest image, register datastore version.
cmd_build_and_upload_image ${DATASTORE_TYPE}
cmd_kick_start "${DATASTORE_TYPE}" "${DATASTORE_VERSION}"
cmd_int_tests --group=$TEST_GROUP cmd_int_tests --group=$TEST_GROUP
} }
@ -1441,42 +1377,13 @@ function run_command() {
fi fi
case "$1" in case "$1" in
"install" ) cmd_install;;
"test-init" ) shift; cmd_test_init $@;;
"build-image" ) shift; cmd_build_image $@;; "build-image" ) shift; cmd_build_image $@;;
"initialize" ) cmd_initialize;; "upload-image" ) shift; cmd_build_and_upload_image $@;;
"unit-tests" ) cmd_unit_tests;;
"start-deps" ) cmd_start_deps;;
"stop-deps" ) cmd_stop_deps;;
"start" ) cmd_start;;
"int-tests" ) shift; cmd_int_tests $@;; "int-tests" ) shift; cmd_int_tests $@;;
"int-tests-wb" ) shift; cmd_int_tests_white_box $@;;
"simple-tests") shift; cmd_int_tests_simple $@;;
"stop" ) cmd_stop;;
"restart" ) cmd_stop; cmd_start;;
"wipe-logs" ) cmd_wipe_logs;;
"rd-sql" ) shift; cmd_rd_sql $@;;
"fake-sql" ) shift; cmd_fake_sql $@;;
"run-ci" ) shift; cmd_run_ci $@;;
"vagrant-ssh" ) shift; cmd_vagrant_ssh $@;;
"debug" ) shift; echo "Enabling debugging."; \ "debug" ) shift; echo "Enabling debugging."; \
set -o xtrace; TROVESTACK_DUMP_ENV=true; run_command $@;; set -o xtrace; TROVESTACK_DUMP_ENV=true; run_command $@;;
"clear" ) shift; cmd_clear $@;;
"clean" ) shift; cmd_clean $@;;
"run" ) shift; cmd_run $@;;
"kick-start" ) shift; cmd_kick_start $@;;
"dsvm-gate-tests" ) shift; export GATE_JOB_VER=v2; \
cmd_gate_tests $@;;
"gate-tests" ) shift; cmd_gate_tests $@;; "gate-tests" ) shift; cmd_gate_tests $@;;
"run-fake" ) shift; cmd_run_fake $@;;
"start-fake" ) shift; cmd_start_fake $@;;
"update-projects" ) cmd_clone_projects force_update \
$TROVESTACK_SCRIPTS/projects-list \
$TROVESTACK_SCRIPTS/image-projects-list;;
"reset-task" ) shift; cmd_reset_task $@;;
"wipe-queues" ) shift; cmd_wipe_queues $@;; "wipe-queues" ) shift; cmd_wipe_queues $@;;
"repl" ) shift; cmd_repl $@;;
"help" ) print_usage;;
* ) * )
echo "'$1' not a valid command" echo "'$1' not a valid command"
exit 1 exit 1
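
With the unsupported subcommands removed, the remaining trovestack entry points look roughly like this (a sketch based on the case statement and function signatures above; all values are examples):

    ./trovestack build-image mysql ubuntu xenial true ubuntu   # build a guest image locally
    ./trovestack upload-image mysql                            # build and register the image in Glance/Trove
    ./trovestack gate-tests mysql mysql 5.7                    # datastore, test group, datastore version
    ./trovestack int-tests --group=mysql                       # run a functional test group directly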


@@ -1,7 +1,5 @@
 {
 "report_directory":"rdli-test-report",
-"start_services": false,
 "white_box":false,
 "test_mgmt":false,
@@ -18,7 +16,6 @@
 "nova_conf":"/home/vagrant/nova.conf",
 "keystone_code_root":"/opt/stack/keystone",
 "keystone_conf":"/etc/keystone/keystone.conf",
-"keystone_use_combined":true,
 "trove_code_root":"/opt/stack/trove",
 "trove_conf":"/tmp/trove.conf",
 "trove_version":"v1.0",
@@ -29,9 +26,8 @@
 "trove_max_accepted_volume_size": 1000,
 "trove_max_instances_per_user": 55,
 "trove_max_volumes_per_user": 100,
-"use_nova_volume": false,
 "use_reaper":false,
 "root_removed_from_instance_api": true,
 "root_timestamp_disabled": false,
 "openvz_disabled": false,
 "management_api_disabled": true,
@@ -43,6 +39,6 @@
 "users_page_size": 20,
 "rabbit_runs_locally":false,
 "dns_instance_entry_factory":"trove.dns.rsdns.driver.RsDnsInstanceEntryFactory",
 "sentinel": null
 }


@@ -112,44 +112,33 @@ def _clean_up():
 def import_tests():
-# TODO(tim.simpson): Import these again once white box test functionality
-# is restored.
-# from tests.dns import check_domain
-# from tests.dns import concurrency
-# from tests.dns import conversion
 # The DNS stuff is problematic. Not loading the other tests allow us to
 # run its functional tests only.
 ADD_DOMAINS = os.environ.get("ADD_DOMAINS", "False") == 'True'
 if not ADD_DOMAINS:
-from tests.api import delete_all
+# F401 unused imports needed for tox tests
-from tests.api import instances_pagination
+from trove.tests.api import backups # noqa
-from tests.api import instances_states
+from trove.tests.api import configurations # noqa
-from tests.dns import dns
+from trove.tests.api import databases # noqa
-from tests import initialize
+from trove.tests.api import datastores # noqa
-from tests.smoke import instance
+from trove.tests.api import instances as rd_instances # noqa
-from tests.volumes import driver
+from trove.tests.api import instances_actions as acts # noqa
+from trove.tests.api import instances_delete # noqa
+from trove.tests.api import instances_resize # noqa
+from trove.tests.api import limits # noqa
+from trove.tests.api.mgmt import datastore_versions # noqa
+from trove.tests.api.mgmt import instances_actions as mgmt_acts # noqa
+from trove.tests.api import replication # noqa
+from trove.tests.api import root # noqa
+from trove.tests.api import user_access # noqa
+from trove.tests.api import users # noqa
+from trove.tests.api import versions # noqa
+from trove.tests.db import migrations # noqa
 # Groups that exist as core int-tests are registered from the
 # trove.tests.int_tests module
 from trove.tests import int_tests
-# Groups defined in trove/integration, or any other externally
-# defined groups can be registered here
-heavy_black_box_groups = [
-"dbaas.api.instances.pagination",
-"dbaas.api.instances.delete",
-"dbaas.api.instances.status",
-"dbaas.api.instances.down",
-"dbaas.api.mgmt.hosts.update",
-"fake.dbaas.api.mgmt.instances",
-"fake.dbaas.api.mgmt.accounts.broken",
-"fake.dbaas.api.mgmt.allaccounts"
-]
-proboscis.register(groups=["heavy_blackbox"],
-depends_on_groups=heavy_black_box_groups)
 def run_main(test_importer):


@ -1,57 +0,0 @@
#!/usr/bin/env bash
# Specify the path to the Trove repo as argument one.
# This script will create a .pid file and report in the current directory.
set -e
if [ $# -lt 1 ]; then
echo "Please give the path to the Trove repo as argument one."
exit 5
else
TROVE_PATH=$1
fi
if [ $# -lt 2 ]; then
echo "Please give the path to the Trove Client as argument two."
exit 5
else
TROVECLIENT_PATH=$2
fi
shift;
shift;
PID_FILE="`pwd`.pid"
function start_server() {
pushd $TROVE_PATH
bin/start_server.sh --pid_file=$PID_FILE
popd
}
function stop_server() {
if [ -f $PID_FILE ];
then
pushd $TROVE_PATH
bin/stop_server.sh $PID_FILE
popd
else
echo "The pid file did not exist, so not stopping server."
fi
}
function on_error() {
echo "Something went wrong!"
stop_server
}
trap on_error EXIT # Proceed to trap - END in event of failure.
TROVE_CLIENT_PATH=$TROVECLIENT_PATH tox -e py26
start_server
.tox/py26/bin/pip install -U $TROVECLIENT_PATH
PYTHONPATH=$PYTHONPATH:$TROVECLIENT_PATH .tox/py26/bin/python int_tests.py \
--conf=localhost.test.conf -- $@
stop_server
trap - EXIT
echo "Ran tests successfully. :)"
exit 0


@ -1,30 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from setuptools import setup
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="Trove Integration Tests",
version="0.0.9.9",
author='OpenStack',
description="Runs integration tests on Ridley.",
license='Apache',
py_modules=[],
packages=['tests'],
scripts=[]
)


@ -1 +0,0 @@
Integration tests.


@ -1,32 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from proboscis import test
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
GROUP = "dbaas.api.instances.delete"
@test(groups=[GROUP])
def delete_all():
"""Delete every single one."""
user = CONFIG.users.find_user(Requirements(is_admin=False))
dbaas = create_dbaas_client(user)
instances = dbaas.instances.list()
for instance in instances:
instance.delete()


@ -1,219 +0,0 @@
from proboscis import after_class
from proboscis import before_class
from proboscis import test
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_is_not
from proboscis.asserts import assert_is_none
from proboscis.asserts import assert_true
from troveclient.compat import exceptions
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
class TestBase(object):
def set_up(self):
"""Create a ton of instances."""
reqs = Requirements(is_admin=False)
self.user = CONFIG.users.find_user(reqs)
self.dbaas = create_dbaas_client(self.user)
def delete_instances(self):
chunk = 0
while True:
chunk += 1
attempts = 0
instances = self.dbaas.instances.list()
if len(instances) == 0:
break
# Sit around and try to delete this chunk.
while True:
instance_results = []
attempts += 1
deleted_count = 0
for instance in instances:
try:
instance.delete()
result = "[w]"
except exceptions.UnprocessableEntity:
result = "[W]"
except exceptions.NotFound:
result = "[O]"
deleted_count += 1
except Exception:
result = "[X]"
instance_results.append(result)
print("Chunk %d, attempt %d : %s"
% (chunk, attempts, ",".join(instance_results)))
if deleted_count == len(instances):
break
def create_instances(self):
self.ids = []
for index in range(self.max):
name = "multi-%03d" % index
result = self.dbaas.instances.create(name, 1,
{'size': 1}, [], [])
self.ids.append(result.id)
# Sort the list of IDs in order, so we can confirm the lists pagination
# returns is also sorted correctly.
self.ids.sort()
@staticmethod
def assert_instances_sorted_by_ids(instances):
# Assert that the strings are always increasing.
last_id = ""
for instance in instances:
assert_true(last_id < instance.id)
def print_list(self, instances):
print("Length = %d" % len(instances))
print(",".join([instance.id for instance in instances]))
def test_pagination(self, requested_limit, requested_marker,
expected_length, expected_marker, expected_last_item):
instances = self.dbaas.instances.list(limit=requested_limit,
marker=requested_marker)
marker = instances.next
self.print_list(instances)
# Better get as many as we asked for.
assert_equal(len(instances), expected_length)
# The last item returned should be the expected one.
assert_equal(instances[-1].id, expected_last_item)
# Because limit < count, the marker must be something.
if expected_marker:
assert_is_not(marker, None)
assert_equal(marker, expected_marker)
else:
assert_is_none(marker)
self.assert_instances_sorted_by_ids(instances)
@test(runs_after_groups=["dbaas.guest.shutdown"],
groups=['dbaas.api.instances.pagination'])
class SimpleCreateAndDestroy(TestBase):
"""
It turns out a big part of guaranteeing pagination works is to make sure
we can create a big batch of instances and delete them without problems.
Even in fake mode though its worth it to check this is the case.
"""
max = 5
@before_class
def set_up(self):
"""Create a ton of instances."""
super(SimpleCreateAndDestroy, self).set_up()
self.delete_instances()
@test
def spin_up(self):
self.create_instances()
@after_class(always_run=True)
def tear_down(self):
self.delete_instances()
@test(runs_after_groups=["dbaas.guest.shutdown"],
groups=['dbaas.api.instances.pagination'])
class InstancePagination50(TestBase):
max = 50
@before_class
def set_up(self):
"""Create a ton of instances."""
super(InstancePagination50, self).set_up()
self.delete_instances()
self.create_instances()
@after_class(always_run=True)
def tear_down(self):
"""Tear down all instances."""
self.delete_instances()
@test
def pagination_short(self):
self.test_pagination(requested_limit=10, requested_marker=None,
expected_length=10, expected_marker=self.ids[9],
expected_last_item=self.ids[9])
@test
def pagination_default(self):
self.test_pagination(requested_limit=None, requested_marker=None,
expected_length=20, expected_marker=self.ids[19],
expected_last_item=self.ids[19])
@test
def pagination_full(self):
self.test_pagination(requested_limit=50, requested_marker=None,
expected_length=20, expected_marker=self.ids[19],
expected_last_item=self.ids[19])
@test(runs_after_groups=["dbaas.guest.shutdown"],
groups=['dbaas.api.instances.pagination'])
class InstancePagination20(TestBase):
max = 20
@before_class
def set_up(self):
"""Create a ton of instances."""
super(InstancePagination20, self).set_up()
self.delete_instances()
self.create_instances()
@after_class(always_run=True)
def tear_down(self):
"""Tear down all instances."""
self.delete_instances()
@test
def pagination_short(self):
self.test_pagination(requested_limit=10, requested_marker=None,
expected_length=10, expected_marker=self.ids[9],
expected_last_item=self.ids[9])
@test
def pagination_default(self):
self.test_pagination(requested_limit=None, requested_marker=None,
expected_length=20, expected_marker=None,
expected_last_item=self.ids[19])
@test
def pagination_full(self):
self.test_pagination(requested_limit=20, requested_marker=None,
expected_length=20, expected_marker=None,
expected_last_item=self.ids[19])
@test
def pagination_overkill(self):
self.test_pagination(requested_limit=30, requested_marker=None,
expected_length=20, expected_marker=None,
expected_last_item=self.ids[19])
@test
def pagination_last_half(self):
self.test_pagination(requested_limit=10, requested_marker=self.ids[9],
expected_length=10, expected_marker=None,
expected_last_item=self.ids[19])
@test
def pagination_third_quarter(self):
self.test_pagination(requested_limit=5, requested_marker=self.ids[9],
expected_length=5, expected_marker=self.ids[14],
expected_last_item=self.ids[14])
@test
def pagination_fourth_quarter(self):
self.test_pagination(requested_limit=20, requested_marker=self.ids[14],
expected_length=5, expected_marker=None,
expected_last_item=self.ids[19])
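
The assertions above exercise the limit/marker contract: each page returns at most `limit` items and, when more remain, `instances.next` carries the marker to pass back on the next call. A hedged sketch of a client walking every page under that contract (the function name and page size are arbitrary, the client calls mirror the test):

from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements


def list_all_instance_ids(page_size=10):
    """Collect the ids of every instance, one page at a time."""
    user = CONFIG.users.find_user(Requirements(is_admin=False))
    dbaas = create_dbaas_client(user)
    ids = []
    marker = None
    while True:
        page = dbaas.instances.list(limit=page_size, marker=marker)
        ids.extend(instance.id for instance in page)
        marker = page.next  # None once the final page has been returned
        if not marker:
            return ids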


@ -1,76 +0,0 @@
# Copyright 2012 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
GROUP = "dbaas.api.instances.status"
from proboscis import before_class
from proboscis import test
from proboscis.asserts import assert_equal
from trove.tests.config import CONFIG
from trove.tests.util import create_dbaas_client
from trove.tests.util.users import Requirements
from trove.common.utils import poll_until
@test(groups=[GROUP])
class InstanceStatusTests(object):
@before_class
def set_up(self):
reqs = Requirements(is_admin=False)
self.user = CONFIG.users.find_user(reqs)
self.dbaas = create_dbaas_client(self.user)
@test
def test_create_failure_on_volume_prov_failure(self):
# Fake nova will fail a volume of size 9.
response = self.dbaas.instances.create('volume_fail', 1,
{'size': 9}, [])
poll_until(lambda: self.dbaas.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)
instance = self.dbaas.instances.get(response.id)
print("Status: %s" % instance.status)
assert_equal(instance.status, "ERROR",
"Instance did not drop to error after volume prov failure.")
@test
def test_create_failure_on_server_failure(self):
# Fake nova will fail a server whose name ends with 'SERVER_ERROR'.
response = self.dbaas.instances.create('test_SERVER_ERROR', 1,
{'size': 1}, [])
poll_until(lambda: self.dbaas.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)
instance = self.dbaas.instances.get(response.id)
print("Status: %s" % instance.status)
assert_equal(instance.status, "ERROR",
"Instance did not drop to error after server prov failure.")
###TODO(ed-): We don't at present have a way to test DNS in FAKE_MODE.
@test(enabled=False)
def test_create_failure_on_dns_failure(self):
# TODO(ed-): Throw a DNS-specific monkeywrench into the works
response = self.dbaas.instances.create('test_DNS_ERROR', 1,
{'size': 1}, [])
poll_until(lambda: self.dbaas.instances.get(response.id),
lambda instance: instance.status == 'ERROR',
time_out=10)
instance = self.dbaas.instances.get(response.id)
print("Status: %s" % instance.status)
assert_equal(instance.status, "ERROR",
"Instance did not drop to error after DNS prov failure.")


@ -1,171 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Checks that the domain specified in the flag file exists and is valid.
If you define the environment variable ADD_DOMAINS=True when running the tests,
they will create the domain if it's not found (see below for details).
"""
import time
from proboscis import test
from proboscis import before_class
from proboscis.asserts import assert_equal
from proboscis.asserts import assert_not_equal
from proboscis.decorators import time_out
from trove.tests.config import CONFIG
WHITE_BOX = CONFIG.white_box
RUN_DNS = CONFIG.values.get("trove_dns_support", False)
if WHITE_BOX:
from nova import utils
from nova import flags
import rsdns
from trove.dns.rsdns.driver import create_client_with_flag_values
from trove.dns.driver import DnsEntry
from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
from trove.dns.rsdns.driver import RsDnsDriver
from trove.dns.rsdns.driver import RsDnsZone
from trove.utils import poll_until
FLAGS = flags.FLAGS
TEST_CONTENT = "126.1.1.1"
TEST_NAME = "hiwassup.%s" % FLAGS.dns_domain_name
DNS_DOMAIN_ID = None
@test(groups=["rsdns.domains", "rsdns.show_entries"],
enabled=WHITE_BOX and RUN_DNS)
class ClientTests(object):
@before_class
def increase_logging(self):
import httplib2
httplib2.debuglevel = 1
@test
def can_auth(self):
self.client = create_client_with_flag_values()
self.client.authenticate()
@test(depends_on=[can_auth])
def list_domains(self):
domains = self.client.domains.list()
print(domains)
@test(groups=["rsdns.domains"], depends_on=[ClientTests],
enabled=WHITE_BOX and RUN_DNS)
class RsDnsDriverTests(object):
"""Tests the RS DNS Driver."""
def create_domain_if_needed(self):
"""Adds the domain specified in the flags."""
print("Creating domain %s" % self.driver.default_dns_zone.name)
future = self.driver.dns_client.domains.create(
self.driver.default_dns_zone.name)
while not future.ready:
time.sleep(2)
print("Got something: %s" % future.resource)
with open('/home/vagrant/dns_resource.txt', 'w') as f:
f.write('%r\n' % future.result[0].id)
global DNS_DOMAIN_ID
DNS_DOMAIN_ID = future.result[0].id
print("The domain should have been created with id=%s" % DNS_DOMAIN_ID)
@test
@time_out(2 * 60)
def ensure_domain_specified_in_flags_exists(self):
"""Make sure the domain in the FLAGS exists."""
self.driver = RsDnsDriver(raise_if_zone_missing=False)
assert_not_equal(None, self.driver.default_dns_zone)
def zone_found():
zones = self.driver.get_dns_zones()
print("Retrieving zones.")
for zone in zones:
print("zone %s" % zone)
if zone.name == self.driver.default_dns_zone.name:
self.driver.default_dns_zone.id = zone.id
global DNS_DOMAIN_ID
DNS_DOMAIN_ID = zone.id
return True
return False
if zone_found():
return
self.create_domain_if_needed()
for i in range(5):
if zone_found():
return
self.fail("""Could not find default dns zone.
This happens when they clear the staging DNS service of data.
To fix it, manually run the tests as follows:
$ ADD_DOMAINS=True python int_tests.py
and if all goes well the tests will create a new domain
record.""")
@test(depends_on=[ensure_domain_specified_in_flags_exists],
enabled=WHITE_BOX and FLAGS.dns_domain_name != "dbaas.rackspace.com")
def delete_all_entries(self):
"""Deletes all entries under the default domain."""
list = self.driver.get_entries()
for entry in list:
if entry.type == "A":
self.driver.delete_entry(name=entry.name, type=entry.type,
dns_zone=entry.dns_zone)
# It takes a while for them to be deleted.
poll_until(lambda: self.driver.get_entries_by_name(TEST_NAME),
lambda list: len(list) == 0,
sleep_time=4, time_out=60)
@test(depends_on=[delete_all_entries])
def create_test_entry(self):
fullname = TEST_NAME
entry = DnsEntry(name=fullname, content=TEST_CONTENT, type="A",
ttl=3600)
self.driver.create_entry(entry)
list = None
for i in range(500):
list = self.driver.get_entries_by_name(name=fullname)
if len(list) > 0:
break
time.sleep(1)
print("This is the list: %r" % list)
assert_equal(1, len(list))
list2 = self.driver.get_entries_by_content(content=TEST_CONTENT)
assert_equal(1, len(list2))
@test(depends_on=[delete_all_entries])
def create_test_rsdns_entry(self):
"""Create an entry using the RsDnsInstanceEntryFactory."""
instance = {'uuid': '000136c0-effa-4711-a747-a5b9fbfcb3bd', 'id': '10'}
ip = "10.100.2.7"
factory = RsDnsInstanceEntryFactory(dns_domain_id=DNS_DOMAIN_ID)
entry = factory.create_entry(instance)
entry.content = ip
self.driver.create_entry(entry)
entries = self.driver.get_entries_by_name(name=entry.name)
assert_equal(1, len(entries))
assert_equal(ip, entries[0].content)
assert_equal(FLAGS.dns_ttl, entries[0].ttl)
@test(depends_on=[create_test_entry])
def delete_test_entry(self):
fullname = TEST_NAME
self.driver.delete_entry(fullname, "A")
# It takes a while for them to be deleted.
poll_until(lambda: self.driver.get_entries_by_name(TEST_NAME),
lambda list: len(list) == 0,
sleep_time=2, time_out=60)
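
The create/verify/delete cycle above boils down to writing an A record and polling the driver until the eventually consistent backend shows it. A condensed sketch of that cycle using the same driver API as the test (the helper name is illustrative):

from trove.common.utils import poll_until
from trove.dns.driver import DnsEntry


def create_a_record(driver, name, address, ttl=3600, time_out=120):
    """Create an A record and wait until the driver can see it."""
    entry = DnsEntry(name=name, content=address, type="A", ttl=ttl)
    driver.create_entry(entry)
    # The DNS backend is eventually consistent, so poll until it appears.
    poll_until(lambda: driver.get_entries_by_name(name=name),
               lambda entries: len(entries) > 0,
               sleep_time=4, time_out=time_out)
    return entry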


@ -1,111 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
This test recreates an issue we had with eventlet. In the logs, we'd see that
the JSON response was malformed; instead of JSON, it contained the following
string:
Second simultaneous read on fileno 5 detected. Unless you really know what
you're doing, make sure that only one greenthread can read any particular
socket. Consider using a pools.Pool. If you do know what you're doing and want
to disable this error, call
eventlet.debug.hub_multiple_reader_prevention(False)
It is perhaps the most helpful error message ever created.
The root issue was that a subclass of httplib2.Http was created at program
start and shared across all threads.
Using the old (broken) RsDNS client code, this test recreates the greatest
error message ever.
"""
try:
import eventlet
CAN_USE_EVENTLET = True
except ImportError:
CAN_USE_EVENTLET = False
import uuid
from proboscis import before_class
from proboscis import test
from proboscis.asserts import assert_true
from trove.tests.config import CONFIG
WHITE_BOX = CONFIG.white_box
RUN_DNS = CONFIG.values.get("trove_dns_support", False)
if CONFIG.white_box:
from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
from nova import flags
from nova import utils
FLAGS = flags.FLAGS
@test(groups=["rsdns.eventlet"], enabled=CAN_USE_EVENTLET)
class RsdnsEventletTests(object):
"""Makes sure the RSDNS client can be used from multiple green threads."""
def assert_record_created(self, index):
msg = "Record %d wasn't created!" % index
assert_true(index in self.new_records, msg)
@before_class(enabled=WHITE_BOX and RUN_DNS)
def create_driver(self):
"""Creates the DNS Driver used in subsequent tests."""
self.driver = utils.import_object(FLAGS.dns_driver)
self.entry_factory = RsDnsInstanceEntryFactory()
self.test_uuid = uuid.uuid4().hex
self.new_records = {}
def make_record(self, index):
"""Creates a record with the form 'eventlet-%s-%d'."""
uuid = "eventlet-%s-%d" % (self.test_uuid, index)
instance = {'uuid': uuid}
entry = self.entry_factory.create_entry(instance)
entry.name = uuid + "." + self.entry_factory.default_dns_zone.name
entry.content = "123.123.123.123"
self.driver.create_entry(entry)
self.new_records[index] = True
@test(enabled=WHITE_BOX and RUN_DNS)
def use_dns_from_a_single_thread(self):
"""Add DNS records one at a time."""
self.new_records = {}
for index in range(-1, -5, -1):
self.make_record(index)
self.assert_record_created(index)
@test(enabled=WHITE_BOX and RUN_DNS)
def use_dns_from_multiple_greenthreads(self):
"""Add multiple DNS records at once."""
self.new_records = {}
def make_record(index):
def __cb():
self.make_record(index)
self.assert_record_created(index)
return index
return __cb
pile = eventlet.GreenPile()
indices = range(1, 4)
for index in indices:
pile.spawn(make_record(index))
list(pile) # Wait for them to finish
for index in indices:
self.assert_record_created(index)
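
As the module docstring explains, the failure came from a single shared httplib2.Http object being read by several greenthreads at once; the cure is one client per greenthread. A standalone illustration of that fix with eventlet.GreenPile (the URLs below are placeholders, not Trove endpoints):

import eventlet
eventlet.monkey_patch()

import httplib2


def fetch(url):
    # A fresh Http object per greenthread means no two greenthreads ever
    # read from the same socket.
    client = httplib2.Http()
    response, _body = client.request(url, "GET")
    return url, response.status


pile = eventlet.GreenPile()
for url in ("http://localhost:8779/", "http://localhost:9292/"):
    pile.spawn(fetch, url)

for url, status in pile:  # iterating the pile waits for each result
    print("%s -> %s" % (url, status))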


@ -1,103 +0,0 @@
# Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests classes which convert RS style-entries to Nova DNS entries."""
import hashlib
import unittest
from proboscis import test
from trove.tests.config import CONFIG
if CONFIG.white_box:
from nova import flags
from rsdns.client.records import Record
from trove.dns.rsdns.driver import EntryToRecordConverter
from trove.dns.rsdns.driver import RsDnsInstanceEntryFactory
from trove.dns.rsdns.driver import RsDnsZone
FLAGS = flags.FLAGS
driver = None
DEFAULT_ZONE = RsDnsZone(1, "dbaas.rackspace.org")
TEST_CONTENT = "126.1.1.1"
TEST_NAME = "hiwassup.dbaas.rackspace.org"
@test(groups=["unit", "rsdns.conversion"],
enabled=CONFIG.white_box)
class ConvertingNovaEntryNamesToRecordNames(unittest.TestCase):
def setUp(self):
self.converter = EntryToRecordConverter(DEFAULT_ZONE)
self.fake_zone = RsDnsZone(id=5, name="blah.org")
def test_normal_name(self):
long_name = self.converter.name_to_long_name("hi", self.fake_zone)
self.assertEqual("hi.blah.org", long_name)
def test_short_name(self):
long_name = self.converter.name_to_long_name("", self.fake_zone)
self.assertEqual("", long_name)
def test_long_name(self):
long_name = self.converter.name_to_long_name("blah.org.",
self.fake_zone)
self.assertEqual("blah.org..blah.org", long_name)
@test(groups=["unit", "rsdns.conversion"],
enabled=CONFIG.white_box)
class ConvertingRecordsToEntries(unittest.TestCase):
def setUp(self):
self.converter = EntryToRecordConverter(DEFAULT_ZONE)
self.fake_zone = RsDnsZone(id=5, name="blah.org")
def test_normal_name(self):
record = Record(None, {"id": 5, "name": "hi.blah.org",
"data": "stacker.com blah@blah 13452378",
"ttl": 5,
"type": "SOA"})
entry = self.converter.record_to_entry(record=record,
dns_zone=self.fake_zone)
self.assertEqual("stacker.com blah@blah 13452378", entry.content)
self.assertEqual("hi.blah.org", entry.name)
self.assertEqual("5", str(entry.ttl))
self.assertEqual("SOA", entry.type)
@test(groups=["rsdns.conversion"],
enabled=CONFIG.white_box)
class WhenCreatingAnEntryForAnInstance(unittest.TestCase):
# This isn't a unit test because RsDnsInstanceEntryFactory connects to the
# service.
def setUp(self):
self.creator = RsDnsInstanceEntryFactory()
def test_should_concatenate_strings(self):
instance = {'id': '56',
'uuid': '000136c0-effa-4711-a747-a5b9fbfcb3bd'}
entry = self.creator.create_entry(instance)
expected_name = "%s.%s" % (hashlib.sha1(instance['uuid']).hexdigest(),
FLAGS.dns_domain_name)
self.assertEqual(expected_name, entry.name,
msg="Entry name should match - %s" % entry.name)
self.assertIsNone(entry.content)
self.assertEqual("A", entry.type)
self.assertEqual(FLAGS.dns_ttl, entry.ttl)
self.assertIsNone(entry.priority)
self.assertEqual(FLAGS.dns_domain_name, entry.dns_zone.name)
if not entry.dns_zone.id:
self.fail(msg="DNS Zone Id should not be empty")
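
The last test pins down the naming rule: the entry name is the SHA-1 of the instance UUID, dotted with the configured DNS domain. Written out on its own (the domain below is only an example value for dns_domain_name):

import hashlib

instance_uuid = "000136c0-effa-4711-a747-a5b9fbfcb3bd"
dns_domain_name = "dbaas.rackspace.org"  # example value

expected_name = "%s.%s" % (
    hashlib.sha1(instance_uuid.encode("utf-8")).hexdigest(),
    dns_domain_name)
print(expected_name)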


@ -1,104 +0,0 @@
import unittest
from proboscis import test
from trove.tests.api.instances import instance_info
from trove.tests.api.instances import GROUP_START as INSTANCE_START
from trove.tests.api.instances import GROUP_TEST
from trove.tests.api.instances import GROUP_STOP as INSTANCE_STOP
from trove.tests.config import CONFIG
from trove.common.utils import import_object
from trove.common.utils import poll_until
WHITE_BOX = CONFIG.white_box
if WHITE_BOX:
# TODO(tim.simpson): Restore this once white box functionality can be
# added back to this test module.
pass
# import rsdns
# from nova import flags
# from nova import utils
# from trove import exception
# from trove.utils import poll_until
# FLAGS = flags.FLAGS
dns_driver = None
GROUP = "dbaas.guest.dns"
@test(groups=[GROUP, GROUP_TEST])
class Setup(unittest.TestCase):
"""Creates the DNS Driver and entry factory used in subsequent tests."""
def test_create_rs_dns_driver(self):
global dns_driver
dns_driver = import_object(FLAGS.dns_driver)
def expected_dns_entry():
"""Returns expected DNS entry for this instance.
:rtype: Instance of :class:`DnsEntry`.
"""
return create_dns_entry(instance_info.local_id, instance_info.id)
@test(depends_on_classes=[Setup],
depends_on_groups=[INSTANCE_START],
groups=[GROUP, GROUP_TEST])
class WhenInstanceIsCreated(unittest.TestCase):
"""Make sure the DNS name was provisioned.
This class actually calls the DNS driver to confirm the entry that should
exist for the given instance does exist.
"""
def test_dns_entry_should_exist(self):
entry = expected_dns_entry()
if entry:
def get_entries():
return dns_driver.get_entries_by_name(entry.name)
try:
poll_until(get_entries, lambda entries: len(entries) > 0,
sleep_time=2, time_out=60)
except exception.PollTimeOut:
self.fail("Did not find name " + entry.name + \
" in the entries, which were as follows:"
+ str(dns_driver.get_entries()))
@test(depends_on_classes=[Setup, WhenInstanceIsCreated],
depends_on_groups=[INSTANCE_STOP],
groups=[GROUP])
class AfterInstanceIsDestroyed(unittest.TestCase):
"""Make sure the DNS name is removed along with an instance.
Because the compute manager calls the DNS manager with an RPC cast, it can
take a while, so we wait up to 60 seconds for the entry to disappear.
"""
def test_dns_entry_exist_should_be_removed_shortly_thereafter(self):
entry = expected_dns_entry()
if not entry:
return
def get_entries():
return dns_driver.get_entries_by_name(entry.name)
try:
poll_until(get_entries, lambda entries: len(entries) == 0,
sleep_time=2, time_out=60)
except exception.PollTimeOut:
# Manually delete the rogue item
dns_driver.delete_entry(entry.name, entry.type, entry.dns_zone)
self.fail("The DNS entry was never deleted when the instance "
"was destroyed.")


@ -21,27 +21,14 @@ from tests.util.services import Service
from trove.tests.config import CONFIG
FAKE = CONFIG.fake_mode
START_SERVICES = (not FAKE) and CONFIG.values.get('start_services', False)
START_NOVA_NETWORK = (START_SERVICES and
not CONFIG.values.get('neutron_enabled',
False))
KEYSTONE_ALL = CONFIG.values.get('keystone_use_combined', True)
USE_NOVA_VOLUME = CONFIG.values.get('use_nova_volume', False)
dbaas_image = None
instance_name = None
success_statuses = ["build", "active"]
def dbaas_url():
return str(CONFIG.values.get("dbaas_url"))
def nova_url():
return str(CONFIG.values.get("nova_client")['url'])
class Daemon(object):
"""Starts a daemon."""
@ -74,95 +61,3 @@ class Daemon(object):
self.service = Service(cmds)
if not self.service.is_service_alive():
self.service.start()
@test(groups=["services.initialize"],
enabled=START_SERVICES and (not KEYSTONE_ALL))
def start_keystone_all():
"""Starts the Keystone API."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/keystone-all",
extra_cmds=['--config-file'],
conf_file_name="keystone_conf").run()
@test(groups=["services.initialize", "services.initialize.glance"],
enabled=START_SERVICES)
def start_glance_registry():
"""Starts the Glance Registry."""
Daemon(alternate_path="/usr/bin/glance-registry",
conf_file_name="glance_reg_conf",
service_path_root="usr_bin_dir",
service_path="%s/glance-registry").run()
@test(groups=["services.initialize", "services.initialize.glance"],
depends_on=[start_glance_registry], enabled=START_SERVICES)
def start_glance_api():
"""Starts the Glance API."""
Daemon(alternate_path="/usr/bin/glance-api",
conf_file_name="glance_reg_conf",
service_path_root="usr_bin_dir",
service_path="%s/glance-api").run()
@test(groups=["services.initialize"], depends_on_classes=[start_glance_api],
enabled=START_NOVA_NETWORK)
def start_nova_network():
"""Starts the Nova Network Service."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/nova-network",
extra_cmds=['--config-file='],
conf_file_name="nova_conf").run()
@test(groups=["services.initialize"], enabled=START_SERVICES)
def start_scheduler():
"""Starts the Scheduler Service."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/nova-scheduler",
extra_cmds=['--config-file='],
conf_file_name="nova_conf").run()
@test(groups=["services.initialize"],
depends_on_classes=[start_glance_api],
enabled=START_SERVICES)
def start_compute():
"""Starts the Nova Compute Service."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/nova-compute",
extra_cmds=['--config-file='],
conf_file_name="nova_conf").run()
@test(groups=["services.initialize"], depends_on_classes=[start_scheduler],
enabled=START_SERVICES and USE_NOVA_VOLUME)
def start_volume():
"""Starts the Nova Compute Service."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/nova-volume",
extra_cmds=['--config-file='],
conf_file_name="nova_conf").run()
@test(groups=["services.initialize"],
depends_on_classes=[start_glance_api, start_nova_network, start_compute,
start_volume],
enabled=START_SERVICES)
def start_nova_api():
"""Starts the Nova Compute Service."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/nova-api",
extra_cmds=['--config-file='],
conf_file_name="nova_conf").run()
@test(groups=["services.initialize"],
depends_on_classes=[start_nova_api],
enabled=START_SERVICES)
def start_trove_api():
"""Starts the Trove Service."""
Daemon(service_path_root="usr_bin_dir",
service_path="%s/trove-api",
extra_cmds=['--config-file='],
conf_file_name="trove_conf").run()
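
The removed startup functions all follow the same proboscis pattern: groups tags a test, depends_on / depends_on_classes orders it after its prerequisites, and enabled switches it off entirely. A toy example of that ordering, independent of Trove (the function bodies are placeholders):

from proboscis import test

START_SERVICES = True  # stand-in for the config-driven flags above


@test(groups=["services.initialize"], enabled=START_SERVICES)
def start_registry():
    print("registry started")


@test(groups=["services.initialize"], depends_on=[start_registry],
      enabled=START_SERVICES)
def start_api():
    # Only runs after start_registry has passed.
    print("api started")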


@ -1,103 +0,0 @@
from proboscis.asserts import assert_equal
from proboscis import test
from proboscis import before_class
from trove.common.utils import poll_until
from trove.tests.util import create_client
class InstanceGenerator(object):
def __init__(self, client, status=None, name=None, flavor=None,
account_id=None, created_at=None, databases=None, users=None,
volume_size=None):
self.client = client
self.status = status
self.name = name
self.flavor = flavor
self.account_id = account_id
self.databases = databases
self.users = users
self.volume_size = volume_size
self.id = None
def create_instance(self):
#make the call to create the instance
instance = self.client.instances.create(self.name, self.flavor,
self.volume_size, self.databases, self.users)
self.client.assert_http_code(200)
#verify we are in a build state
assert_equal(instance.status, "BUILD")
#pull out the ID
self.id = instance.id
return instance
def wait_for_build_to_finish(self):
poll_until(lambda: self.client.instance.get(self.id),
lambda instance: instance.status != "BUILD",
time_out=600)
def get_active_instance(self):
instance = self.client.instance.get(self.id)
self.client.assert_http_code(200)
#check the container name
assert_equal(instance.name, self.name)
#pull out volume info and verify
assert_equal(str(instance.volume_size), str(self.volume_size))
#pull out the flavor and verify
assert_equal(str(instance.flavor), str(self.flavor))
return instance
@test(groups=['smoke', 'positive'])
class CreateInstance(object):
@before_class
def set_up(self):
client = create_client(is_admin=False)
name = 'test_createInstance_container'
flavor = 1
volume_size = 1
db_name = 'test_db'
databases = [
{
"name": db_name
}
]
users = [
{
"name": "lite",
"password": "litepass",
"databases": [{"name": db_name}]
}
]
#create the Instance
instance = InstanceGenerator(client, name=name,
flavor=flavor,
volume_size=volume_size,
databases=databases, users=users)
instance.create_instance()
#wait for the instance
instance.wait_for_build_to_finish()
#get the active instance
inst = instance.get_active_instance()
#list out the databases for our instance and verify the db name
dbs = client.databases.list(inst.id)
client.assert_http_code(200)
assert_equal(len(dbs), 1)
assert_equal(dbs[0].name, db_name)
client.instance.delete(inst.id)
client.assert_http_code(202)
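
Stripped of the assertions, the smoke test above is the usual create, wait for BUILD to end, then re-read flow. A compact sketch of that flow with the same client helper (the function name is illustrative; the volume is passed as a {'size': n} dict and the read call is assumed to be client.instances.get, matching the other API tests in this series):

from proboscis.asserts import assert_equal

from trove.common.utils import poll_until
from trove.tests.util import create_client


def create_and_wait(name="smoke-instance", flavor=1, volume_size=1,
                    time_out=600):
    """Create an instance and return it once it has left BUILD."""
    client = create_client(is_admin=False)
    instance = client.instances.create(name, flavor, {'size': volume_size},
                                       [], [])
    assert_equal("BUILD", instance.status)
    # instances.get is assumed here; the original test used client.instance.get.
    poll_until(lambda: client.instances.get(instance.id),
               lambda inst: inst.status != "BUILD",
               sleep_time=10, time_out=time_out)
    return client.instances.get(instance.id)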


@ -1,25 +0,0 @@
# Copyright (c) 2011 OpenStack, LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
:mod:`volumes` -- Tests for volumes.
===================================
"""
""""Tests for Volumes."""
# Is a set of tests written directly against the VolumeManager and VolumeClient
# classes which doesn't require standing up Nova daemons or anything.
VOLUMES_DRIVER = "trove.volumes.driver"
