From 5e1c266859ef26e5f82e08ee3879dfaba21215f1 Mon Sep 17 00:00:00 2001 From: Le Tian Ren Date: Thu, 8 May 2014 17:01:18 +0800 Subject: [PATCH] Initial code base commit. Change-Id: Id1e336028fa662ddee865841ac7b6c31a316f854 Closes-Bug: #1317383 --- cinder-powervc/.project | 21 + cinder-powervc/.pydevproject | 10 + cinder-powervc/bin/cinder-powervc | 63 + cinder-powervc/init/openstack-cinder-powervc | 103 + cinder-powervc/powervc/__init__.py | 9 + cinder-powervc/powervc/volume/__init__.py | 0 .../powervc/volume/driver/__init__.py | 9 + .../powervc/volume/driver/powervc.py | 310 ++ .../powervc/volume/driver/service.py | 280 ++ .../powervc/volume/manager/__init__.py | 9 + .../powervc/volume/manager/constants.py | 61 + .../powervc/volume/manager/manager.py | 1316 ++++++ cinder-powervc/run_tests.sh | 192 + cinder-powervc/test/__init__.py | 0 cinder-powervc/test/cinderclienttest.py | 30 + cinder-powervc/test/fake_volume_type.py | 51 + cinder-powervc/test/powervc/__init__.py | 0 .../test/powervc/volume/__init__.py | 0 .../test/powervc/volume/driver/__init__.py | 0 .../volume/driver/test_powervc_cinder.py | 225 + .../test/powervc/volume/manager/__init__.py | 0 .../powervc/volume/manager/test_manager.py | 121 + common-powervc/.project | 24 + common-powervc/.pydevproject | 10 + common-powervc/etc/powervc.conf | 232 + .../logrotate.d/openstack-powervc-driver | 10 + common-powervc/powervc/__init__.py | 9 + common-powervc/powervc/common/__init__.py | 9 + .../powervc/common/client/__init__.py | 9 + .../powervc/common/client/config.py | 42 + .../powervc/common/client/delegate.py | 83 + .../common/client/extensions/__init__.py | 9 + .../powervc/common/client/extensions/base.py | 16 + .../common/client/extensions/cinder.py | 200 + .../common/client/extensions/glance.py | 57 + .../powervc/common/client/extensions/nova.py | 489 +++ .../powervc/common/client/factory.py | 64 + .../powervc/common/client/patch/__init__.py | 9 + .../powervc/common/client/patch/cinder.py | 27 + .../powervc/common/client/patch/glance.py | 117 + .../powervc/common/client/patch/neutron.py | 25 + .../powervc/common/client/patch/nova.py | 24 + .../powervc/common/client/service.py | 422 ++ common-powervc/powervc/common/config.py | 107 + common-powervc/powervc/common/constants.py | 82 + common-powervc/powervc/common/exception.py | 73 + common-powervc/powervc/common/gettextutils.py | 17 + common-powervc/powervc/common/messaging.py | 499 +++ common-powervc/powervc/common/netutils.py | 115 + common-powervc/powervc/common/utils.py | 829 ++++ common-powervc/run_tests.sh | 192 + common-powervc/test/__init__.py | 9 + common-powervc/test/common/__init__.py | 9 + common-powervc/test/common/client/__init__.py | 9 + .../test/common/client/test_cinder.py | 459 ++ .../test/common/client/test_delegate.py | 53 + .../test/common/client/test_glance.py | 177 + .../test/common/client/test_nova.py | 561 +++ .../test/common/powervc_test_1.conf | 228 + .../test/common/powervc_test_2.conf | 228 + common-powervc/test/common/test_config.py | 62 + common-powervc/test/common/test_messaging.py | 64 + common-powervc/test/common/test_netutils.py | 45 + common-powervc/test/common/test_utils.py | 1045 +++++ glance-powervc/.project | 19 + glance-powervc/.pydevproject | 11 + glance-powervc/bin/glance-powervc | 62 + glance-powervc/init/openstack-glance-powervc | 102 + glance-powervc/powervc/__init__.py | 9 + glance-powervc/powervc/glance/__init__.py | 9 + .../powervc/glance/common/__init__.py | 9 + .../powervc/glance/common/config.py | 45 + 
.../powervc/glance/common/constants.py | 99 + .../powervc/glance/manager/__init__.py | 9 + .../powervc/glance/manager/manager.py | 3815 +++++++++++++++++ glance-powervc/run_tests.sh | 192 + glance-powervc/test/__init__.py | 9 + .../test/test_glance_client.py.fails | 55 + glance-powervc/test/test_messaging.py.fails | 254 ++ neutron-powervc/.project | 21 + neutron-powervc/.pydevproject | 10 + neutron-powervc/bin/neutron-powervc | 120 + neutron-powervc/bin/neutron-powervc-agent | 53 + neutron-powervc/etc/powervc-neutron.conf | 58 + .../init/openstack-neutron-powervc | 104 + neutron-powervc/powervc/__init__.py | 9 + neutron-powervc/powervc/neutron/__init__.py | 9 + .../powervc/neutron/agent/__init__.py | 9 + .../neutron/agent/neutron_powervc_agent.py | 1422 ++++++ .../powervc/neutron/api/__init__.py | 9 + .../powervc/neutron/api/client_rpc.py | 117 + .../powervc/neutron/api/powervc_rpc.py | 115 + .../powervc/neutron/client/__init__.py | 9 + .../neutron/client/local_os_bindings.py | 253 ++ .../neutron/client/neutron_client_bindings.py | 328 ++ .../neutron/client/powervc_bindings.py | 145 + .../powervc/neutron/common/__init__.py | 9 + .../powervc/neutron/common/constants.py | 107 + .../powervc/neutron/common/utils.py | 278 ++ .../powervc/neutron/db/__init__.py | 9 + .../powervc/neutron/db/powervc_db_v2.py | 343 ++ .../powervc/neutron/db/powervc_models_v2.py | 41 + neutron-powervc/run_tests.sh | 192 + neutron-powervc/test/__init__.py | 9 + neutron-powervc/test/fake_os_network.py | 33 + neutron-powervc/test/fake_powervc_network.py | 53 + neutron-powervc/test/rpc_client.py | 82 + neutron-powervc/test/rpc_listener.py | 93 + neutron-powervc/test/test_PVCRpcCallbacks.py | 73 + .../test/test_neutron_powervc_agent.py | 132 + neutron-powervc/test/test_powervc_db_v2.py | 250 ++ neutron-powervc/test/test_utils.py | 73 + nova-powervc/.project | 21 + nova-powervc/.pydevproject | 7 + nova-powervc/bin/nova-powervc | 26 + nova-powervc/init/openstack-nova-powervc | 103 + nova-powervc/pep8.txt | 0 nova-powervc/powervc/__init__.py | 0 nova-powervc/powervc/nova/__init__.py | 0 nova-powervc/powervc/nova/cmd/__init__.py | 29 + nova-powervc/powervc/nova/cmd/compute.py | 32 + nova-powervc/powervc/nova/common/__init__.py | 9 + nova-powervc/powervc/nova/common/config.py | 56 + nova-powervc/powervc/nova/common/exception.py | 19 + nova-powervc/powervc/nova/driver/__init__.py | 0 .../powervc/nova/driver/compute/__init__.py | 0 .../powervc/nova/driver/compute/computes.py | 248 ++ .../powervc/nova/driver/compute/constants.py | 71 + .../powervc/nova/driver/compute/manager.py | 1843 ++++++++ .../nova/driver/compute/task_states.py | 16 + .../powervc/nova/driver/virt/__init__.py | 0 .../nova/driver/virt/powervc/__init__.py | 9 + .../nova/driver/virt/powervc/driver.py | 1408 ++++++ .../nova/driver/virt/powervc/pvc_vm_states.py | 17 + .../nova/driver/virt/powervc/rpcapi.py | 71 + .../nova/driver/virt/powervc/service.py | 1314 ++++++ .../nova/driver/virt/powervc/sync/__init__.py | 0 .../driver/virt/powervc/sync/flavorsync.py | 200 + .../powervc/nova/extension/__init__.py | 9 + .../nova/extension/extended_powervm.py | 146 + nova-powervc/powervc/utils.py | 122 + nova-powervc/run_tests.sh | 192 + nova-powervc/test/__init__.py | 9 + nova-powervc/test/compute/__init__.py | 9 + .../test/compute/test_sync_instance.py | 122 + nova-powervc/test/fake_ctx.py | 30 + nova-powervc/test/fake_os_flavor.py | 51 + nova-powervc/test/fake_os_image.py | 48 + nova-powervc/test/fake_os_instance.py | 104 + nova-powervc/test/fake_pvc_flavor.py | 44 + 
nova-powervc/test/fake_pvc_image.py | 56 + nova-powervc/test/fake_pvc_instance.py | 119 + nova-powervc/test/nova/__init__.py | 9 + nova-powervc/test/nova/driver/__init__.py | 9 + .../test/nova/driver/virt/__init__.py | 9 + .../test/nova/driver/virt/powervc/__init__.py | 9 + .../nova/driver/virt/powervc/sync/__init__.py | 9 + .../virt/powervc/sync/testflavorsync.py | 83 + .../nova/driver/virt/powervc/test_driver.py | 709 +++ .../nova/driver/virt/powervc/test_startvm.py | 61 + .../nova/driver/virt/powervc/test_stopvm.py | 63 + nova-powervc/test/sample_pvc_instance.json | 1 + nova-powervc/test/test_utils.py | 57 + 163 files changed, 26406 insertions(+) create mode 100644 cinder-powervc/.project create mode 100644 cinder-powervc/.pydevproject create mode 100644 cinder-powervc/bin/cinder-powervc create mode 100644 cinder-powervc/init/openstack-cinder-powervc create mode 100644 cinder-powervc/powervc/__init__.py create mode 100644 cinder-powervc/powervc/volume/__init__.py create mode 100644 cinder-powervc/powervc/volume/driver/__init__.py create mode 100644 cinder-powervc/powervc/volume/driver/powervc.py create mode 100644 cinder-powervc/powervc/volume/driver/service.py create mode 100644 cinder-powervc/powervc/volume/manager/__init__.py create mode 100644 cinder-powervc/powervc/volume/manager/constants.py create mode 100644 cinder-powervc/powervc/volume/manager/manager.py create mode 100755 cinder-powervc/run_tests.sh create mode 100644 cinder-powervc/test/__init__.py create mode 100644 cinder-powervc/test/cinderclienttest.py create mode 100644 cinder-powervc/test/fake_volume_type.py create mode 100644 cinder-powervc/test/powervc/__init__.py create mode 100644 cinder-powervc/test/powervc/volume/__init__.py create mode 100644 cinder-powervc/test/powervc/volume/driver/__init__.py create mode 100644 cinder-powervc/test/powervc/volume/driver/test_powervc_cinder.py create mode 100644 cinder-powervc/test/powervc/volume/manager/__init__.py create mode 100644 cinder-powervc/test/powervc/volume/manager/test_manager.py create mode 100644 common-powervc/.project create mode 100644 common-powervc/.pydevproject create mode 100644 common-powervc/etc/powervc.conf create mode 100644 common-powervc/logrotate.d/openstack-powervc-driver create mode 100644 common-powervc/powervc/__init__.py create mode 100644 common-powervc/powervc/common/__init__.py create mode 100644 common-powervc/powervc/common/client/__init__.py create mode 100644 common-powervc/powervc/common/client/config.py create mode 100644 common-powervc/powervc/common/client/delegate.py create mode 100644 common-powervc/powervc/common/client/extensions/__init__.py create mode 100644 common-powervc/powervc/common/client/extensions/base.py create mode 100644 common-powervc/powervc/common/client/extensions/cinder.py create mode 100644 common-powervc/powervc/common/client/extensions/glance.py create mode 100644 common-powervc/powervc/common/client/extensions/nova.py create mode 100644 common-powervc/powervc/common/client/factory.py create mode 100644 common-powervc/powervc/common/client/patch/__init__.py create mode 100644 common-powervc/powervc/common/client/patch/cinder.py create mode 100644 common-powervc/powervc/common/client/patch/glance.py create mode 100644 common-powervc/powervc/common/client/patch/neutron.py create mode 100644 common-powervc/powervc/common/client/patch/nova.py create mode 100644 common-powervc/powervc/common/client/service.py create mode 100644 common-powervc/powervc/common/config.py create mode 100644 
common-powervc/powervc/common/constants.py create mode 100644 common-powervc/powervc/common/exception.py create mode 100644 common-powervc/powervc/common/gettextutils.py create mode 100644 common-powervc/powervc/common/messaging.py create mode 100644 common-powervc/powervc/common/netutils.py create mode 100644 common-powervc/powervc/common/utils.py create mode 100755 common-powervc/run_tests.sh create mode 100644 common-powervc/test/__init__.py create mode 100644 common-powervc/test/common/__init__.py create mode 100644 common-powervc/test/common/client/__init__.py create mode 100644 common-powervc/test/common/client/test_cinder.py create mode 100644 common-powervc/test/common/client/test_delegate.py create mode 100644 common-powervc/test/common/client/test_glance.py create mode 100644 common-powervc/test/common/client/test_nova.py create mode 100644 common-powervc/test/common/powervc_test_1.conf create mode 100644 common-powervc/test/common/powervc_test_2.conf create mode 100644 common-powervc/test/common/test_config.py create mode 100644 common-powervc/test/common/test_messaging.py create mode 100644 common-powervc/test/common/test_netutils.py create mode 100644 common-powervc/test/common/test_utils.py create mode 100644 glance-powervc/.project create mode 100644 glance-powervc/.pydevproject create mode 100644 glance-powervc/bin/glance-powervc create mode 100644 glance-powervc/init/openstack-glance-powervc create mode 100644 glance-powervc/powervc/__init__.py create mode 100644 glance-powervc/powervc/glance/__init__.py create mode 100644 glance-powervc/powervc/glance/common/__init__.py create mode 100644 glance-powervc/powervc/glance/common/config.py create mode 100644 glance-powervc/powervc/glance/common/constants.py create mode 100644 glance-powervc/powervc/glance/manager/__init__.py create mode 100644 glance-powervc/powervc/glance/manager/manager.py create mode 100755 glance-powervc/run_tests.sh create mode 100644 glance-powervc/test/__init__.py create mode 100644 glance-powervc/test/test_glance_client.py.fails create mode 100644 glance-powervc/test/test_messaging.py.fails create mode 100644 neutron-powervc/.project create mode 100644 neutron-powervc/.pydevproject create mode 100755 neutron-powervc/bin/neutron-powervc create mode 100755 neutron-powervc/bin/neutron-powervc-agent create mode 100644 neutron-powervc/etc/powervc-neutron.conf create mode 100644 neutron-powervc/init/openstack-neutron-powervc create mode 100644 neutron-powervc/powervc/__init__.py create mode 100644 neutron-powervc/powervc/neutron/__init__.py create mode 100644 neutron-powervc/powervc/neutron/agent/__init__.py create mode 100644 neutron-powervc/powervc/neutron/agent/neutron_powervc_agent.py create mode 100644 neutron-powervc/powervc/neutron/api/__init__.py create mode 100644 neutron-powervc/powervc/neutron/api/client_rpc.py create mode 100644 neutron-powervc/powervc/neutron/api/powervc_rpc.py create mode 100644 neutron-powervc/powervc/neutron/client/__init__.py create mode 100644 neutron-powervc/powervc/neutron/client/local_os_bindings.py create mode 100644 neutron-powervc/powervc/neutron/client/neutron_client_bindings.py create mode 100644 neutron-powervc/powervc/neutron/client/powervc_bindings.py create mode 100644 neutron-powervc/powervc/neutron/common/__init__.py create mode 100644 neutron-powervc/powervc/neutron/common/constants.py create mode 100644 neutron-powervc/powervc/neutron/common/utils.py create mode 100644 neutron-powervc/powervc/neutron/db/__init__.py create mode 100644 
neutron-powervc/powervc/neutron/db/powervc_db_v2.py create mode 100644 neutron-powervc/powervc/neutron/db/powervc_models_v2.py create mode 100755 neutron-powervc/run_tests.sh create mode 100644 neutron-powervc/test/__init__.py create mode 100644 neutron-powervc/test/fake_os_network.py create mode 100644 neutron-powervc/test/fake_powervc_network.py create mode 100755 neutron-powervc/test/rpc_client.py create mode 100755 neutron-powervc/test/rpc_listener.py create mode 100644 neutron-powervc/test/test_PVCRpcCallbacks.py create mode 100644 neutron-powervc/test/test_neutron_powervc_agent.py create mode 100644 neutron-powervc/test/test_powervc_db_v2.py create mode 100644 neutron-powervc/test/test_utils.py create mode 100644 nova-powervc/.project create mode 100644 nova-powervc/.pydevproject create mode 100644 nova-powervc/bin/nova-powervc create mode 100644 nova-powervc/init/openstack-nova-powervc create mode 100644 nova-powervc/pep8.txt create mode 100644 nova-powervc/powervc/__init__.py create mode 100644 nova-powervc/powervc/nova/__init__.py create mode 100644 nova-powervc/powervc/nova/cmd/__init__.py create mode 100644 nova-powervc/powervc/nova/cmd/compute.py create mode 100644 nova-powervc/powervc/nova/common/__init__.py create mode 100644 nova-powervc/powervc/nova/common/config.py create mode 100644 nova-powervc/powervc/nova/common/exception.py create mode 100644 nova-powervc/powervc/nova/driver/__init__.py create mode 100644 nova-powervc/powervc/nova/driver/compute/__init__.py create mode 100644 nova-powervc/powervc/nova/driver/compute/computes.py create mode 100644 nova-powervc/powervc/nova/driver/compute/constants.py create mode 100644 nova-powervc/powervc/nova/driver/compute/manager.py create mode 100644 nova-powervc/powervc/nova/driver/compute/task_states.py create mode 100644 nova-powervc/powervc/nova/driver/virt/__init__.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/__init__.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/driver.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/pvc_vm_states.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/rpcapi.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/service.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/sync/__init__.py create mode 100644 nova-powervc/powervc/nova/driver/virt/powervc/sync/flavorsync.py create mode 100644 nova-powervc/powervc/nova/extension/__init__.py create mode 100644 nova-powervc/powervc/nova/extension/extended_powervm.py create mode 100644 nova-powervc/powervc/utils.py create mode 100755 nova-powervc/run_tests.sh create mode 100644 nova-powervc/test/__init__.py create mode 100644 nova-powervc/test/compute/__init__.py create mode 100644 nova-powervc/test/compute/test_sync_instance.py create mode 100644 nova-powervc/test/fake_ctx.py create mode 100644 nova-powervc/test/fake_os_flavor.py create mode 100644 nova-powervc/test/fake_os_image.py create mode 100644 nova-powervc/test/fake_os_instance.py create mode 100644 nova-powervc/test/fake_pvc_flavor.py create mode 100644 nova-powervc/test/fake_pvc_image.py create mode 100644 nova-powervc/test/fake_pvc_instance.py create mode 100644 nova-powervc/test/nova/__init__.py create mode 100644 nova-powervc/test/nova/driver/__init__.py create mode 100644 nova-powervc/test/nova/driver/virt/__init__.py create mode 100644 nova-powervc/test/nova/driver/virt/powervc/__init__.py create mode 100644 nova-powervc/test/nova/driver/virt/powervc/sync/__init__.py create mode 
100644 nova-powervc/test/nova/driver/virt/powervc/sync/testflavorsync.py create mode 100644 nova-powervc/test/nova/driver/virt/powervc/test_driver.py create mode 100644 nova-powervc/test/nova/driver/virt/powervc/test_startvm.py create mode 100644 nova-powervc/test/nova/driver/virt/powervc/test_stopvm.py create mode 100644 nova-powervc/test/sample_pvc_instance.json create mode 100644 nova-powervc/test/test_utils.py diff --git a/cinder-powervc/.project b/cinder-powervc/.project new file mode 100644 index 0000000..9526b0f --- /dev/null +++ b/cinder-powervc/.project @@ -0,0 +1,21 @@ + + + cinder-powervc + + + cinder + cinder-client + common-powervc + oslo + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/cinder-powervc/.pydevproject b/cinder-powervc/.pydevproject new file mode 100644 index 0000000..4d328ce --- /dev/null +++ b/cinder-powervc/.pydevproject @@ -0,0 +1,10 @@ + + + + + +/cinder-powervc + +python 2.7 +Default + diff --git a/cinder-powervc/bin/cinder-powervc b/cinder-powervc/bin/cinder-powervc new file mode 100644 index 0000000..244320a --- /dev/null +++ b/cinder-powervc/bin/cinder-powervc @@ -0,0 +1,63 @@ +#!/usr/bin/env python +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +"""Starter script for the PowerVC cinder-volume Service.""" + +import os +import sys +import eventlet +import traceback + +# If ../powervc/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python. +POSSIBLE_TOPDIR = os.path.normpath(os.path.join( + os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) + +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from cinder.openstack.common import gettextutils + +# TODO RYKAL +# This should go in the base __init__ folder I think +gettextutils.install('cinder') + +from cinder import utils +from cinder.openstack.common import log as logging +from cinder.openstack.common import service +from cinder.common import config as cinder_config +from powervc.common import config +# NOTE: parse config before import manager +config.parse_power_config(sys.argv, 'cinder') + +from powervc.volume.manager import manager + +eventlet.patcher.monkey_patch(os=False, socket=True, time=True) + +logging.setup('powervc') + + +LOG = logging.getLogger(__name__) + +if __name__ == '__main__': + try: + logging.setup('powervc') + utils.monkey_patch() + LOG.info(_('Launching PowerVC Driver StorageManager service...')) + launcher = service.ServiceLauncher() + launcher.launch_service(manager.PowerVCCinderManager()) + launcher.wait() + LOG.info(_('PowerVC Driver StorageManager service ended')) + except Exception: + traceback.print_exc() + raise diff --git a/cinder-powervc/init/openstack-cinder-powervc b/cinder-powervc/init/openstack-cinder-powervc new file mode 100644 index 0000000..0c2a131 --- /dev/null +++ b/cinder-powervc/init/openstack-cinder-powervc @@ -0,0 +1,103 @@ +#!/bin/sh +# +# openstack-cinder-powervc OpenStack PowerVC Cinder Manager +# +# chkconfig: - 98 02 +# description: Provides PowerVC manage-to support. 
+ +### BEGIN INIT INFO +# Provides: +# Required-Start: $remote_fs $network $syslog +# Required-Stop: $remote_fs $syslog +# Default-Stop: 0 1 6 +# Short-Description: OpenStack PowerVC Cinder Manager +# Description: +### END INIT INFO + +. /etc/rc.d/init.d/functions + +suffix=powervc +prog=openstack-cinder-powervc +exec="/opt/ibm/openstack/powervc-driver/bin/cinder-$suffix" +config="/etc/powervc/powervc.conf" +cinderconf="/etc/cinder/cinder.conf" +pidfile="/var/run/$suffix/cinder-$suffix.pid" +logfile="/var/log/$suffix/cinder-$suffix.log" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +start() { + [ -x $exec ] || exit 5 + [ -f $config ] || exit 6 + echo -n $"Starting $prog: " + daemon --user powervc --pidfile $pidfile "$exec --config-file $config --config-file $cinderconf --logfile $logfile &>/dev/null & echo \$! > $pidfile" + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $? diff --git a/cinder-powervc/powervc/__init__.py b/cinder-powervc/powervc/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/cinder-powervc/powervc/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/cinder-powervc/powervc/volume/__init__.py b/cinder-powervc/powervc/volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder-powervc/powervc/volume/driver/__init__.py b/cinder-powervc/powervc/volume/driver/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/cinder-powervc/powervc/volume/driver/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/cinder-powervc/powervc/volume/driver/powervc.py b/cinder-powervc/powervc/volume/driver/powervc.py new file mode 100644 index 0000000..59f53b6 --- /dev/null +++ b/cinder-powervc/powervc/volume/driver/powervc.py @@ -0,0 +1,310 @@ +from __future__ import absolute_import +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+import logging
+import sys
+
+from cinder import exception
+from cinder.openstack.common import log as cinderLogging
+from cinder.volume.driver import VolumeDriver
+from cinderclient.exceptions import NotFound
+from oslo.config import cfg
+from powervc.common import config
+from powervc.common import constants as common_constants
+from powervc.common.gettextutils import _
+from powervc.volume.manager import constants
+from powervc.volume.driver import service
+
+volume_driver_opts = [
+
+    # Ignore delete errors so an exception is not thrown during a
+    # delete. When set to true, this allows the volume to be deleted
+    # on the hosting OS even if an exception occurs. When set to false,
+    # exceptions during delete prevent the volume from being deleted
+    # on the hosting OS.
+    cfg.BoolOpt('volume_driver_ignore_delete_error', default=False)
+]
+
+CONF = config.CONF
+CONF.register_opts(volume_driver_opts, group='powervc')
+
+LOG = cinderLogging.getLogger(__name__)
+
+
+def _load_power_config(argv):
+    """
+    Loads the powervc config.
+    """
+    # Cinder is typically started with the --config-file option.
+    # This prevents the default config files from loading since
+    # the oslo config code will only load those
+    # config files as specified on the command line.
+    # If cinder is started with the
+    # --config-file option then append our powervc.conf file to
+    # the command line so it gets loaded as well.
+    for arg in argv:
+        if arg == '--config-file' or arg.startswith('--config-file='):
+            argv[len(argv):] = ["--config-file"] + \
+                [cfg.find_config_files(project='powervc',
+                                       prog='powervc')[0]]
+            break
+
+    config.parse_power_config(argv, 'cinder')
+
+_load_power_config(sys.argv)
+
+# must load powervc config before importing factory when
+# called with import utils for a driver
+from powervc.common.client import factory
+
+
+class PowerVCDriver(VolumeDriver):
+
+    """
+    Implements the cinder volume driver for PowerVC.
+    """
+
+    def __init__(self, *args, **kwargs):
+        CONF.log_opt_values(LOG, logging.INFO)
+        self._service = service.PowerVCService()
+        if not service.PowerVCService._client:
+            service.PowerVCService._client = factory.POWERVC.new_client(str(
+                common_constants.SERVICE_TYPES.volume))
+
+    def check_for_setup_error(self):
+        """
+        Checks for setup errors. Nothing to do for powervc.
+        """
+        pass
+
+    def initialize_connection(self, volume, connector):
+        """
+        Allow connection to connector and return connection info.
+        In the PowerVC cinder driver, it does not need to be implemented.
+        """
+        LOG.debug("Enter - initialize_connection")
+        connection_info = {'driver_volume_type': '', 'data': {}}
+        LOG.debug("Exit - initialize_connection")
+        return connection_info
+
+    def validate_connector(self, connector):
+        """
+        Fail if connector doesn't contain all the data needed by driver.
+        In the PowerVC cinder driver, it does not need to be implemented.
+        """
+        return True
+
+    def terminate_connection(self, volume_ref, connector, force):
+        """Do nothing since connection is not used"""
+        pass
+
+    def create_export(self, context, volume):
+        """
+        Exports the volume. Nothing to do for powervc
+        """
+        pass
+
+    def accept_transfer(self, context, volume_ref, new_user, new_project):
+        """
+        Accept a volume that has been offered for transfer.
+        Nothing to do for powervc
+        """
+        pass
+
+    def create_cloned_volume(self, volume_ref, srcvol_ref):
+        """
+        Clone a volume from an existing volume.
+        Currently not supported by powervc.
+        Add stub to pass tempest.
+ """ + pass + + def copy_image_to_volume(self, context, volume_ref, image_service, + image_id): + """ + Copy a glance image to a volume. + Currently not supported by powervc. + Add stub to pass tempest. + """ + pass + + def copy_volume_to_image(self, context, volume, image_service, image_meta): + """ + Upload an exsiting volume into powervc as a glance image + Currently not supported by powervc. + Add stub to pass tempest. + """ + pass + + def create_snapshot(self, snapshot_ref): + """ + Create a snapshot. + Currently not supported by powervc. + Add stub to pass tempest. + """ + pass + + def delete_snapshot(self, snapshot_ref): + """ + Delete a snapshot. + Currently not supported by powervc. + Add stub to pass tempest. + """ + pass + + def create_volume_from_snapshot(self, volume, snapshot_ref): + """ + Create a volume from the snapshot. + Currently not supported by powervc. + Add stub to pass tempest. + """ + pass + + def extend_volume(self, volume, new_size): + """ + Extend a volume size. + Currently not supported by powervc. + Add stub to pass tempest. + """ + pass + + def create_volume(self, volume): + """ + Creates a volume with the specified volume attributes + + :returns: a dictionary of updates to the volume db, for example + adding metadata + """ + LOG.info(_("Creating volume with volume: %s."), volume) + size = getattr(volume, 'size', None) + display_name = getattr(volume, 'display_name', None) + display_description = getattr(volume, 'display_description', None) + volume_type_obj = getattr(volume, 'volume_type', None) + metadatas = getattr(volume, 'volume_metadata', None) + meta = {} + if metadatas: + # Use map() to get a list of 'key', 'value' tuple + # dict() can convert a list of tuple to dict obj + meta = dict(map(lambda m: (getattr(m, 'key'), + getattr(m, 'value')), metadatas)) + + if (size is None): + raise exception.InvalidVolume(reason='size is None') + LOG.info(_("Creating volume %s of size %sG."), + self._get_vol_name(volume), + size) + + volume_data_updates = self._service.create_volume( + local_volume_id=volume.id, + size=size, + display_name=display_name, + display_description=display_description, + metadata=meta, + volume_type=getattr(volume_type_obj, 'id', + None)) + + return volume_data_updates + + def delete_volume(self, volume): + """ + Deletes the specfied volume from powervc + """ + try: + LOG.info(_("Deleting volume %s."), self._get_vol_name(volume)) + + pvc_volume_id = None + for metaDataItem in volume.volume_metadata: + if metaDataItem.key == constants.LOCAL_PVC_PREFIX + 'id': + pvc_volume_id = metaDataItem.value + break + + if pvc_volume_id is not None: + self._service.delete_volume(pvc_volume_id) + else: + LOG.warning(_("Volume metadata does not " + "contain a powervc volume identifier.")) + + except NotFound: + LOG.debug(_("Volume id %s was already deleted on powervc"), + pvc_volume_id) + LOG.info(_("Volume %s deleted."), self._get_vol_name(volume)) + except Exception as e: + if CONF.powervc.volume_driver_ignore_delete_error: + LOG.error(_("Volume %s deleted, however the following " + "error occurred " + "which prevented the backing volume in PowerVC " + "from being deleted: %s"), + self._get_vol_name(volume), + str(e)) + else: + raise + + def ensure_export(self, context, volume): + """ + Makes sure the volume is exported. Nothing to do for powervc + """ + pass + + def remove_export(self, context, volume): + """ + Removes the export. 
+        Nothing to do for powervc
+        """
+        pass
+
+    def get_volume_stats(self, refresh=False):
+        """
+        Gets the volume statistics for this driver. Cinder periodically
+        calls this to get the latest volume stats. The stats are stored in
+        the instance attribute called _stats
+        """
+        if refresh:
+            self._update_volume_status()
+
+        return self._stats
+
+    def _update_volume_status(self):
+        """
+        Retrieve volume stats info from powervc.
+        For now just make something up
+        """
+        LOG.debug(_("Getting volume stats from powervc"))
+
+        # get accessible storage providers list
+        sp_list = self._list_storage_providers()
+        free_capacity_gb = 0
+        total_capacity_gb = 0
+        for sp in sp_list:
+            free_capacity_gb += getattr(sp, 'free_capacity_gb', 0)
+            total_capacity_gb += getattr(sp, 'total_capacity_gb', 0)
+
+        data = {}
+        data["volume_backend_name"] = constants.POWERVC_VOLUME_BACKEND
+        data["vendor_name"] = 'IBM'
+        data["driver_version"] = 1.0
+        data["storage_protocol"] = 'Openstack'
+        data['total_capacity_gb'] = total_capacity_gb
+        data['free_capacity_gb'] = free_capacity_gb
+        data['reserved_percentage'] = 0
+        data['QoS_support'] = False
+
+        self._stats = data
+        LOG.debug(self._stats)
+
+    def _list_storage_providers(self):
+        return self._service.list_storage_providers()
+
+    def _get_vol_name(self, volume):
+        """
+        Returns the name of the volume or its id
+        """
+        name = getattr(volume, 'display_name', None)
+        if name:
+            return name
+        else:
+            return volume.id
diff --git a/cinder-powervc/powervc/volume/driver/service.py b/cinder-powervc/powervc/volume/driver/service.py
new file mode 100644
index 0000000..05d659d
--- /dev/null
+++ b/cinder-powervc/powervc/volume/driver/service.py
@@ -0,0 +1,280 @@
+from __future__ import absolute_import
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import httplib
+
+from cinderclient import exceptions
+from cinder.openstack.common import log as logging
+from powervc.common import constants as common_constants
+from powervc.common.gettextutils import _
+from powervc.volume.manager import constants
+from cinder import exception
+from cinder import db
+from cinder import context
+from cinder.openstack.common import loopingcall
+
+LOG = logging.getLogger(__name__)
+
+
+class PowerVCService(object):
+
+    """A service that exposes PowerVC functionality.
+    The services provided here are called by the driver.
+    The services leverage the cinder client to interface to PowerVC.
+    This design keeps the driver and client interface clean and simple
+    and provides a workspace for any data manipulation and utility work
+    that may need to be done.
+ """ + _client = None + + def __init__(self, pvc_client=None): + """Initializer.""" + from powervc.common.client import factory + if(PowerVCService._client is None): + PowerVCService._client = \ + factory.POWERVC.new_client( + str(common_constants.SERVICE_TYPES.volume)) + + # Add version checking as required + + def create_volume(self, local_volume_id, size, snapshot_id=None, + source_volid=None, + display_name=None, display_description=None, + volume_type=None, user_id=None, + project_id=None, availability_zone=None, + metadata=None, imageRef=None): + """ + Creates a volume on powervc + """ + + # Use the standard cinderclient to create volume + # TODO Do not pass metadata to PowerVC currently as we don't + # know if this has a conflict with PowerVC design. + pvc_volume = PowerVCService._client.volumes.create(size, + snapshot_id, + source_volid, + display_name, + display_description, + volume_type, + user_id, + project_id, + availability_zone, + {}, + imageRef) + + # update powervc uuid to db immediately to avoid duplicated + # synchronization + additional_volume_data = {} + additional_volume_data['metadata'] = metadata + additional_volume_data['metadata'][constants.LOCAL_PVC_PREFIX + 'id'] \ + = pvc_volume.id + db.volume_update(context.get_admin_context(), + local_volume_id, + additional_volume_data) + LOG.info(_("Volume %s start to create with PVC UUID: %s"), + local_volume_id, pvc_volume.id) + + temp_status = getattr(pvc_volume, 'status', None) + if temp_status == constants.STATUS_CREATING: + LOG.debug(_( + 'wait until created volume status is available or ERROR')) + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_state_change, pvc_volume.id, + getattr(pvc_volume, 'status', None), + constants.STATUS_AVAILABLE, + constants.STATUS_CREATING) + + try: + timer.start(interval=10).wait() + # set status to available + additional_volume_data['status'] = \ + constants.STATUS_AVAILABLE + except: + latest_pvc_volume = PowerVCService._client.volumes.get( + pvc_volume.id) + additional_volume_data['status'] = getattr(latest_pvc_volume, + 'status', '') + else: + LOG.debug(_('Not in creating status, just set as powerVC status')) + additional_volume_data['status'] = temp_status + + # return updated volume status information + return additional_volume_data + + def _wait_for_state_change(self, volume_id, original_state, expected_state, + middle_state): + """ + Utility method to wait for a volume to change to the + expected state. + The process of some operation contains three states. + + during the operation. If the operation has no middle state, + it can be set as original state. 
+ """ + volume = None + try: + volume = PowerVCService._client.volumes.get(volume_id) + except exceptions.NotFound: + raise exception.VolumeNotFound('volume not found: %s' % + volume_id) + + if volume.status == expected_state: + LOG.debug( + "Operation %(vm_id)s successfully, " + + "status changed to %(state)s" + % {'vm_id': volume.id, 'state': expected_state}) + raise loopingcall.LoopingCallDone() + if (volume.status != original_state and + volume.status != expected_state and + volume.status != middle_state): + raise exception.InvalidVolume() + + def delete_volume(self, pvc_volume_id): + """ + Deletes the specified powervc volume id from powervc + """ + LOG.debug(_("Deleting pvc volume: %s"), pvc_volume_id) + if not pvc_volume_id: + raise AttributeError(_("Powervc volume identifier must be " + "specified")) + existed_pvc_volume = None + try: + existed_pvc_volume = PowerVCService._client.volumes.get( + pvc_volume_id) + except exceptions.NotFound: + LOG.critical(_("pvc: %s no longer existed in powervc, ignore"), + pvc_volume_id) + raise + + temp_status = getattr(existed_pvc_volume, 'status', None) + if temp_status == constants.STATUS_DELETING: + # Volume in deleting status, do not perform delete operation + # again + LOG.warning( + _("pvc: %s is deleting in powervc, wait for status"), + pvc_volume_id) + else: + # volume available for deleting, perform delete opeartion + PowerVCService._client.volumes.delete(pvc_volume_id) + + LOG.debug(_( + 'wait until created volume deleted or status is ERROR')) + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_state_change, existed_pvc_volume.id, + getattr(existed_pvc_volume, 'status', None), + '', + constants.STATUS_DELETING) + + try: + timer.start(interval=10).wait() + except exception.VolumeNotFound: + # deleted complete + LOG.info(_("pvc: %s deleted successfully"), + pvc_volume_id) + except exception.InvalidVolume: + LOG.critical(_("pvc: %s deleted failed, "), + pvc_volume_id) + # when delete failed raise exception + raise exception.CinderException( + _('Volume deletion failed for id: %s'), + pvc_volume_id) + + def _validate_response(self, response): + """ + Validates an HTTP response to a REST API request made by this service. + + The method will simply return if the HTTP error code indicates success + (i.e. between 200 and 300). + Any other errors, this method will raise the exception. + Note: Appropriate exceptions to be added... 
+        The cinder client throws an exception for 404
+
+        :param response: the HTTP response to validate
+        """
+        if response is None:
+            return
+        httpResponse = response[0]
+        # Any non-successful response >399 is an error
+        if httpResponse.status_code >= httplib.BAD_REQUEST:
+            LOG.critical(_("Service: got this response: %s")
+                         % httpResponse)
+            LOG.debug("Service: got this response: %s"
+                      % httpResponse)
+            raise exceptions.BadRequest(httpResponse)
+
+    def list_volume_types(self):
+        return PowerVCService._client.volume_types.list()
+
+    def get_volume_type(self, vol_type_id):
+        return PowerVCService._client.volume_types.get(vol_type_id)
+
+    def get_volume_type_by_name(self, volume_type_name):
+        pvc_volume_type = None
+
+        if volume_type_name is None or PowerVCService._client is None:
+            return pvc_volume_type
+
+        pvc_volume_type_list = self.list_volume_types()
+
+        if pvc_volume_type_list is None:
+            return pvc_volume_type
+
+        for volume_type in pvc_volume_type_list:
+            if volume_type_name == volume_type._info["name"]:
+                pvc_volume_type = volume_type
+                break
+
+        return pvc_volume_type
+
+    def get_volumes(self):
+        pvc_volumes = None
+
+        if PowerVCService._client is None:
+            return pvc_volumes
+
+        pvc_volumes = PowerVCService._client.volumes.list()
+
+        return pvc_volumes
+
+    def get_volume_by_name(self, display_name):
+        pvc_volume = None
+
+        if display_name is None or PowerVCService._client is None:
+            return pvc_volume
+
+        pvc_volume_list = self.get_volumes()
+        if pvc_volume_list is None:
+            return pvc_volume
+
+        for volume in pvc_volume_list:
+            if display_name == volume._info["display_name"]:
+                pvc_volume = volume
+                break
+
+        return pvc_volume
+
+    def get_volume_by_id(self, volume_id):
+        pvc_volume = None
+
+        if volume_id is None or PowerVCService._client is None:
+            return pvc_volume
+
+        try:
+            pvc_volume = PowerVCService._client.volumes.get(volume_id)
+        except exceptions.NotFound:
+            LOG.debug("get_volume_by_id volume %s not found"
+                      % volume_id)
+            pvc_volume = None
+
+        return pvc_volume
+
+    def list_storage_providers(self):
+        return PowerVCService._client.storage_providers.list()
diff --git a/cinder-powervc/powervc/volume/manager/__init__.py b/cinder-powervc/powervc/volume/manager/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/cinder-powervc/powervc/volume/manager/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/cinder-powervc/powervc/volume/manager/constants.py b/cinder-powervc/powervc/volume/manager/constants.py
new file mode 100644
index 0000000..062fa84
--- /dev/null
+++ b/cinder-powervc/powervc/volume/manager/constants.py
@@ -0,0 +1,61 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+All constants.
+"""
+# Instance metadata keys that will store pvc related info
+# in the local nova DB.
+PVC_TENANT = "pvc_tenant"  # project in pvc
+PVC_SCG = "pvc_scg"  # pvc storage connection group
+PVC_ID = "pvc_id"  # pvc instance uuid
+
+PPC64 = "ppc64"  # Found on the wiki
+# The default image for pvc instance if no match found.
+DEFAULT_IMG = "SCE Default Image"
+DEFAULT_SCG = "storage connection group"
+
+# Suffix to append to sync event notifications
+SYNC_EVENT_SUFFIX = 'sync'
+
+LOCAL_PVC_VOLUME_TYPE_PREFIX = 'pvc:'
+
+LOCAL_PVC_PREFIX = 'pvc:'
+
+# The composite PowerVC storage backend
+POWERVC_VOLUME_BACKEND = 'powervc'
+
+# PowerVC volume & volume type notification events that we listen for
+EVENT_VOLUME_TYPE_CREATE = 'volume_type.create'
+EVENT_VOLUME_TYPE_DELETE = 'volume_type.delete'
+EVENT_VOLUME_TYPE_EXTRA_SPECS_CREATE = 'volume_type_extra_specs.create'
+EVENT_VOLUME_TYPE_EXTRA_SPECS_UPDATE = 'volume_type_extra_specs.update'
+EVENT_VOLUME_TYPE_EXTRA_SPECS_DELETE = 'volume_type_extra_specs.delete'
+
+EVENT_VOLUME_CREATE_START = 'volume.create.start'
+EVENT_VOLUME_CREATE_END = 'volume.create.end'
+EVENT_VOLUME_DELETE_START = 'volume.delete.start'
+EVENT_VOLUME_DELETE_END = 'volume.delete.end'
+EVENT_VOLUME_UPDATE = 'volume.update'
+EVENT_VOLUME_ATTACH_START = 'volume.attach.start'
+EVENT_VOLUME_ATTACH_END = 'volume.attach.end'
+EVENT_VOLUME_DETACH_START = 'volume.detach.start'
+EVENT_VOLUME_DETACH_END = 'volume.detach.end'
+EVENT_VOLUME_IMPORT_START = 'volume.import.start'
+EVENT_VOLUME_IMPORT_END = 'volume.import.end'
+
+# PowerVC volume operation status
+STATUS_AVAILABLE = 'available'
+STATUS_ERROR = 'error'
+STATUS_CREATING = 'creating'
+STATUS_DELETING = 'deleting'
+
+# Multi-backend configuration option for PowerVCDriver
+BACKEND_POWERVCDRIVER = "powervcdriver"
diff --git a/cinder-powervc/powervc/volume/manager/manager.py b/cinder-powervc/powervc/volume/manager/manager.py
new file mode 100644
index 0000000..a2f10b8
--- /dev/null
+++ b/cinder-powervc/powervc/volume/manager/manager.py
@@ -0,0 +1,1316 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+# from cinderclient.v1 import client
+import cinder.db.sqlalchemy.models
+import sys
+import logging
+
+from oslo.config import cfg
+from cinder import db
+from cinder import context
+from cinder import service as taskservice
+from cinder.openstack.common import service
+from cinder.openstack.common import log
+from powervc.common import config
+from powervc.common.gettextutils import _
+from powervc.volume.manager import constants
+from powervc.common import messaging
+from powervc.volume.driver import service as pvcservice
+from powervc.common import utils
+from powervc.common.client import delegate as ctx_delegate
+
+CONF = config.CONF
+LOG = log.getLogger(__name__)
+
+volume_sync_opts = [
+    cfg.IntOpt('volume_sync_interval',
+               default=20,
+               help=_('Volume periodic sync interval specified in '
+                      'seconds.')),
+    cfg.IntOpt('full_volume_sync_frequency',
+               default=30,
+               help=_('How many volume sync intervals between full volume '
+                      'syncs. Only volumes known to be out of sync are '
+                      'synced on the interval except after this many '
+                      'intervals when all volumes are synced.')),
+    cfg.IntOpt('volume_type_sync_interval',
+               default=20,
+               help=_('Volume type periodic sync interval specified in '
+                      'seconds.')),
+    cfg.IntOpt('full_volume_type_sync_frequency',
+               default=30,
+               help=_('How many volume type sync intervals between full '
+                      'volume type syncs. Only volume types known to be '
+                      'out of sync are synced on the interval except after '
+                      'this many intervals when all volume types are '
+                      'synced.')),
+]
+
+CONF.register_opts(volume_sync_opts, group='powervc')
+
+
+class PowerVCCinderManager(service.Service):
+
+    """
+    Manages the synchronization of volume types and volumes
+    TODO
+    """
+
+    def __init__(self):
+        '''
+        Constructor
+        '''
+        super(PowerVCCinderManager, self).__init__()
+        self._load_power_config(sys.argv)
+
+        self._service = pvcservice.PowerVCService()
+
+        ctx = self._get_context()
+
+        self._staging_cache = utils.StagingCache()
+
+        if not utils.get_utils().validate_scgs():
+            LOG.error(_('Cinder-powervc service terminated, Invalid Storage'
+                        ' Connectivity Group specified.'))
+            sys.exit(1)
+
+        # Keep track of whether or not we need to sync all volume types on
+        # the next volume type sync interval.
+        self.full_volume_type_sync_required = False
+        self.full_volume_sync_required = False
+        self.sync_volume_types = {}
+        self.sync_volumes = {}
+
+        # Delete volumes first!
+        # It will try to delete un-referred volume types
+        self._synchronize_volumes(ctx)
+        self._synchronize_volume_types(ctx)
+
+        # Uncomment the line below to start cinder-volume along with
+        # cinder-powervc
+#        self.start_volume_service()
+
+        # Listen for out-of-band PowerVC changes
+        self._create_powervc_listeners(ctx)
+
+        # Set up periodic polling to sync volumes and volume types
+        self._start_periodic_volume_type_sync(ctx)
+        self._start_periodic_volume_sync(ctx)
+
+    def _get_context(self):
+        # lazy import the factory to avoid connecting to the environment
+        # when the manager is loaded
+        from powervc.common.client import factory
+        keystone = factory.LOCAL.keystone
+        orig_ctx = context.get_admin_context()
+        orig_ctx.project_id = keystone.tenant_id
+        orig_ctx.user_id = keystone.user_id
+
+        return ctx_delegate.context_dynamic_auth_token(orig_ctx, keystone)
+
+    def _load_power_config(self, argv):
+        """
+        Loads the powervc config.
+        """
+        # Cinder is typically started with the --config-file option.
+        # This prevents the default config files from loading since
+        # the oslo config code will only load those
+        # config files as specified on the command line.
+        # If cinder is started with the
+        # --config-file option then append our powervc.conf file to
+        # the command line so it gets loaded as well.
+        for arg in argv:
+            if arg == '--config-file' or arg.startswith('--config-file='):
+                argv[len(argv):] = ["--config-file"] + \
+                    [cfg.find_config_files(project='powervc',
+                                           prog='powervc')[0]]
+                break
+
+        config.parse_power_config(argv, 'cinder')
+        CONF.log_opt_values(LOG, logging.INFO)
+
+    def start_volume_service(self):
+        """
+        Creates and starts a cinder-volume service
+        """
+        if CONF.enabled_backends:
+            for backend in CONF.enabled_backends:
+                host = "%s@%s" % (CONF.host, backend)
+                self.volume_service = \
+                    taskservice.Service.create(host=host,
+                                               service_name=backend)
+        else:
+            self.volume_service = \
+                taskservice.Service.create(binary='cinder-volume')
+        self.volume_service.start()
+
+    def _create_powervc_listeners(self, ctx):
+        """
+        Listen for out-of-band changes made in PowerVC.
+
+        This method creates the connection to the PowerVC Qpid broker and
+        sets up handlers so that any changes made directly in PowerVC are
+        reflected in the local OS.
+
+        :param: ctx The security context
+        """
+        LOG.debug("Enter _create_powervc_listeners method")
+        # Function to call if we lose the Qpid connection and then get it
+        # back
+
+        def reconnect_handler():
+            LOG.debug('Re-established connection to Qpid broker, sync all '
+                      'volume types on next sync interval')
+            self.full_volume_type_sync_required = True
+
+        # Create Qpid connection and listener
+        LOG.debug("Building connection with AMQP server")
+        conn = messaging.PowerVCConnection(
+            reconnect_handler=reconnect_handler,
+            context=ctx,
+            log=logging)
+        LOG.debug("Creating message listener to listen for PowerVC events")
+        listener = conn.create_listener('cinder', 'notifications.info')
+
+        # Volume type creation
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_TYPE_CREATE)
+        listener.register_handler(constants.EVENT_VOLUME_TYPE_CREATE,
+                                  self._handle_powervc_volume_type_create)
+
+        # Volume type deletion
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_TYPE_DELETE)
+        listener.register_handler(constants.EVENT_VOLUME_TYPE_DELETE,
+                                  self._handle_powervc_volume_type_delete)
+
+        # Volume type extra spec changes
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_TYPE_EXTRA_SPECS_UPDATE)
+        listener.register_handler(
+            [constants.EVENT_VOLUME_TYPE_EXTRA_SPECS_UPDATE],
+            self._handle_powervc_volume_type_extra_spec_update)
+
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_CREATE_END)
+        listener.register_handler([constants.EVENT_VOLUME_CREATE_END],
+                                  self._handle_powervc_volume_create)
+
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_IMPORT_END)
+        listener.register_handler([constants.EVENT_VOLUME_IMPORT_END],
+                                  self._handle_powervc_volume_create)
+
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_DELETE_END)
+        listener.register_handler([constants.EVENT_VOLUME_DELETE_END],
+                                  self._handle_powervc_volume_delete)
+
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_UPDATE)
+        listener.register_handler([constants.EVENT_VOLUME_UPDATE],
+                                  self._handle_powervc_volume_update)
+
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_ATTACH_END)
+        listener.register_handler([constants.EVENT_VOLUME_ATTACH_END],
+                                  self._handle_powervc_volume_update)
+
+        LOG.debug(_("Register event handler for %s event ")
+                  % constants.EVENT_VOLUME_DETACH_END)
+        listener.register_handler([constants.EVENT_VOLUME_DETACH_END],
+                                  self._handle_powervc_volume_update)
+
+        LOG.debug("Starting to listen...... ")
+        conn.start()
+        LOG.debug("Exit _create_powervc_listeners method")
+
+    def _periodic_volume_type_sync(self, context, vol_type_ids=None):
+        """
+        Called to synchronize volume types after initial boot. This does
+        almost the same thing as the synchronize that happens on boot
+        except this function will check that the volume type states meet
+        certain requirements before adding, removing, or updating them
+        locally.
+
+        :param: context The security context
+        :param: vol_type_ids List of PowerVC volume type IDs to sync
+        """
+        LOG.info(_("Starting volume type synchronization..."))
+        # Some counters to record volume types modified
+        count_new_vol_types = 0
+        count_updated_vol_types = 0
+        count_deleted_vol_types = 0
+
+        # If a list of volume type IDs is passed in then this is a targeted
+        # sync operation and not a full sync.
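+        # Illustration of the two call shapes this method supports
+        # ('uuid-1' is a hypothetical PowerVC volume type ID):
+        #     self._periodic_volume_type_sync(ctx)                # full sync
+        #     self._periodic_volume_type_sync(ctx,
+        #                                     vol_type_ids=['uuid-1'])
+        # Targeted IDs come from the self.sync_volume_types map of volume
+        # types marked as out of sync.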
+        is_full_sync = not vol_type_ids
+
+        # If this is a full sync then reset the marked volume types map,
+        # otherwise just remove the volume types we are about to update.
+        # Do this up front so that we minimize the likelihood of losing
+        # any volume types that might get marked during the sync operation.
+        if is_full_sync:
+            self.sync_volume_types = {}
+        else:
+            for vol_type_id in vol_type_ids:
+                del self.sync_volume_types[vol_type_id]
+
+        # Get both lists from local DB and PowerVC
+        pvc_vol_types = {}
+        local_vol_types = {}
+        if is_full_sync:
+            pvc_vol_types = self._service.list_volume_types()
+            local_vol_types = self._get_all_local_pvc_volume_types(context)
+        else:
+            pvc_vol_types = [self._service.get_volume_type(x)
+                             for x in vol_type_ids]
+
+        # Sync from PowerVC to the local cinder DB, to insert new volume
+        # types and update existing ones.
+        for index, pvc_vol_type in enumerate(pvc_vol_types):
+            # If we are syncing a set of given PowerVC volume type IDs then
+            # we first check if the PowerVC volume type exists. If it
+            # doesn't then we attempt to delete the corresponding local
+            # volume type and move on.
+            if not is_full_sync and pvc_vol_type is None:
+                matched_vol_types = self._get_local_volume_type_by_pvc_id(
+                    context, vol_type_ids[index])
+                for local_vol_type in matched_vol_types:
+                    if self._unregister_volume_types(
+                            context, local_vol_type.get('id')):
+                        count_deleted_vol_types += 1
+                continue
+
+            # Convert the PowerVC volume type object to a dictionary
+            pvc_volume_type = pvc_vol_type.__dict__
+            matched_vol_types = self._get_local_volume_type_by_pvc_id(
+                context, pvc_volume_type.get('id'))
+
+            # If not found locally then try to add the new volume type
+            if len(matched_vol_types) == 0:
+                if self._insert_pvc_volume_type(context, pvc_volume_type):
+                    count_new_vol_types += 1
+                continue
+
+            if len(matched_vol_types) > 1:
+                LOG.warning('More than one local volume type matches one '
+                            'PowerVC volume type: %s' %
+                            (pvc_volume_type.get('id')))
+            local_vol_type = matched_vol_types[0]
+
+            # A local volume type exists so try to update it
+            if self._sync_existing_volume_type(
+                    context, local_vol_type, pvc_volume_type):
+                count_updated_vol_types += 1
+
+        # Sync from the local cinder DB to PowerVC, to remove stale volume
+        # types that are not in PowerVC anymore. This only happens during
+        # a full sync of all volume types.
+        for local_vol_type in local_vol_types:
+            if not self._is_valid_pvc_volume_type(context,
+                                                  local_vol_types[
+                                                      local_vol_type],
+                                                  pvc_vol_types):
+                if self._unregister_volume_types(
+                        context, local_vol_types[local_vol_type].get('id')):
+                    count_deleted_vol_types += 1
+
+        LOG.info("""
+                 *******************************
+                 Volume type sync is complete.
+                 Full sync: %(full_sync)s
+                 [ %(insert)s inserted,
+                   %(update)s updated,
+                   %(delete)s deleted ]
+                 *******************************
+                 """ %
+                 {'full_sync': is_full_sync,
+                  'insert': count_new_vol_types,
+                  'update': count_updated_vol_types,
+                  'delete': count_deleted_vol_types})
+
+    def _start_periodic_volume_type_sync(self, context):
+        """
+        Initialize the periodic syncing of volume types from PowerVC into
+        the local OS. The volume_type_sync_interval config property
+        determines how often the sync will occur, and the
+        full_volume_type_sync_frequency config property determines the
+        number of marked volume type sync operations between full volume
+        type syncs.
+
+        :param: context The security context
+        """
+        # Enforce some minimum values for the sync interval properties
+        # TODO: Minimum values should at least be documented
+        conf_sync = CONF.powervc.volume_type_sync_interval
+        conf_full_sync = CONF.powervc.full_volume_type_sync_frequency
+        sync_interval = conf_sync if conf_sync > 10 else 10
+        full_sync_frequency = conf_full_sync if conf_full_sync > 2 else 2
+        self._volume_type_sync_counter = 0
+
+        def sync():
+            """Called on the volume type sync intervals"""
+            self._volume_type_sync_counter += 1
+
+            try:
+                # Check if it's time to do a full sync
+                if self.full_volume_type_sync_required or \
+                        self._volume_type_sync_counter == \
+                        full_sync_frequency:
+                    self.full_volume_type_sync_required = False
+                    self._volume_type_sync_counter = 0
+                    LOG.debug('Syncing all volume types on interval')
+                    self._periodic_volume_type_sync(context)
+                    return
+
+                # If there are no marked volume types to sync stop here
+                vol_type_ids = self.sync_volume_types.keys()
+                if len(vol_type_ids) == 0:
+                    return
+
+                LOG.debug('Syncing marked volume types')
+                self._periodic_volume_type_sync(context,
+                                                vol_type_ids=vol_type_ids)
+            except Exception as e:
+                LOG.exception(_("Error occurred during volume type "
+                                "synchronization: %s"), str(e))
+                LOG.info(_("Volume type synchronization will occur at the "
+                           "next scheduled interval."))
+
+        self.tg.add_timer(sync_interval, sync)
+
+    def _handle_powervc_volume_type_create(self, context, message):
+        """
+        Handle volume type create messages sent from PowerVC.
+
+        :param: context The security context
+        :param: message The AMQP message sent from OpenStack (dictionary)
+        """
+        LOG.debug("Handling notification: %s" % message.get('event_type'))
+
+        payload = message.get('payload')
+        vol_type = payload.get('volume_types')
+        if vol_type is None:
+            LOG.warning("Null volume type in volume_type.create "
+                        "notification")
+            return
+
+        pvc_vol_type_id = vol_type.get('id')
+        if pvc_vol_type_id is None:
+            LOG.warning("Null volume type id in volume_type.create "
+                        "notification")
+            return
+
+        # Check for a matching local volume type
+        matched_vol_types = self.\
+            _get_local_volume_type_by_pvc_id(context, pvc_vol_type_id)
+        # If the volume type already exists locally then ignore
+        if len(matched_vol_types) > 0:
+            LOG.debug('Volume type already exists locally')
+            return
+
+        # Filter out volume types in SCGs that are not specified in
+        # powervc.conf
+        extra_specs = vol_type.get('extra_specs', {})
+        # condition 1: volume type has no extra_specs, add
+        if not extra_specs:
+            LOG.info(_("No extra_specs in storage template, just add"))
+            self._insert_pvc_volume_type(context, vol_type)
+        else:
+            volume_backend_name = (extra_specs.
+                                   get('capabilities:volume_backend_name',
+                                       ''))
+            # condition 2: extra_specs has no volume_backend_name, return
+            if not volume_backend_name:
+                LOG.info(_('No volume_backend_name specified, return'))
+                return
+
+            accessible_storage_providers = utils.get_utils().\
+                get_multi_scg_accessible_storage_providers(None, None)
+            if not accessible_storage_providers:
+                LOG.info(_("No accessible_storage_providers, return"))
+                return
+
+            # condition 3: extra_specs's volume_backend_name ==
+            # accessible storage provider's storage_hostname, add
+            for storage_provider in accessible_storage_providers:
+                storage_hostname = getattr(storage_provider,
+                                           'storage_hostname', '')
+                if volume_backend_name == storage_hostname:
+                    self._insert_pvc_volume_type(context, vol_type)
+
+    def _handle_powervc_volume_type_delete(self, context, message):
+        """
+        Handle volume type delete messages sent from PowerVC.
+ + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + LOG.debug("Handling notification: %s" % message.get('event_type')) + + payload = message.get('payload') + vol_type = payload.get('volume_types') + if(vol_type is None): + LOG.warning("Null volume type, ignore volume type delete " + "notification") + return + + pvc_vol_type_id = vol_type.get('id') + if(pvc_vol_type_id is None): + LOG.warning("Null volume type id, ignore volume type delete") + return + + # Check for a matching local volume type + matched_vol_types = self.\ + _get_local_volume_type_by_pvc_id(context, pvc_vol_type_id) + # If the volume type does not exist locally then ignore + if len(matched_vol_types) == 0: + LOG.debug('Volume type does not exist locally') + return + # Remove the volume type from the local OS + self._unregister_volume_types(context, pvc_vol_type_id) + + def _handle_powervc_volume_type_extra_spec_update(self, context, message): + """ + Handle volume type extra spec update messages sent from PowerVC. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + event_type = message.get('event_type') + LOG.debug("Handling notification: %s" % event_type) + payload = message.get('payload') + pvc_vol_type_id = payload.get('type_id') + if(pvc_vol_type_id is None): + LOG.debug('Null volume type id, ignore extra specs update') + return + + # Get the matching local volume type + matched_vol_types = self.\ + _get_local_volume_type_by_pvc_id(context, pvc_vol_type_id) + + # If the volume type does not exist locally then ignore + if len(matched_vol_types) == 0: + LOG.debug('Volume type does not exist locally') + # defer the insert to periodical check + return + + # Warn if more than one local volume type matches the PowerVC one + if len(matched_vol_types) > 1: + LOG.warning('More than one volume type in DB ' + 'matches one PowerVC volume type: %s' % (pvc_vol_type_id)) + # TODO: We should do something about this but scheduling a sync + # won't help since that does nothing to remove duplicate local + # volume types. + + # Get the PowerVC volume type so we can compare it to the local one + local_vol_type = matched_vol_types[0] + pvc_volume_type = self._service.get_volume_type(pvc_vol_type_id) + + # Warn if the PowerVC volume type is not found + if pvc_volume_type is None: + LOG.warning('PowerVC volume type could not be found: %s' % + (pvc_vol_type_id)) + return + + self._sync_existing_volume_type(context, + local_vol_type, + pvc_volume_type.__dict__) + + def _synchronize_volumes(self, context): + """ + Synchronize volumes + """ + local_volumes = self._get_all_local_volumes(context) + pvc_volumes = self._service.get_volumes() + + self._synchronize_volumes_ex(context, local_volumes, pvc_volumes) + + def _delete_unused_volume_types(self, context, + local_volume_types, + pvc_volume_types): + """ + Delete local volume types that no longer exist in PowerVC + """ + if local_volume_types is None: + local_volume_types = self._get_all_local_pvc_volume_types(context) + if pvc_volume_types is None: + pvc_volume_types = self._service.list_volume_types() + + count_deleted_volume_types = 0 + for local_volume_type in local_volume_types: + if not self._is_valid_pvc_volume_type(context, + local_volume_types[ + local_volume_type], + pvc_volume_types): + # If it is not valid in pvc, also delete it from the local DB.
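+ # Note: _unregister_volume_types removes the row directly from the + # local database; no cinder API call or RPC is issued.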
+ self._unregister_volume_types(context, + local_volume_types[ + local_volume_type]. + get('id')) + count_deleted_volume_types += 1 + return count_deleted_volume_types + + def _synchronize_volume_types(self, context): + """ + Synchronize volume types + """ + # Some counters to record volume types modified. + count_new_volume_types = 0 + count_updated_volume_types = 0 + + local_volume_types = self._get_all_local_pvc_volume_types(context) + pvc_volume_types = self._service.list_volume_types() + + # Sync. from local cinder DB ---> PowerVC, + # to remove invalid volume types that are not in pvc anymore. + count_deleted_volume_types = ( + self._delete_unused_volume_types(context, + local_volume_types, + pvc_volume_types)) + + # Sync. from PowerVC ---> local cinder DB, + # to insert new volume types and update existing ones + for volume_type in pvc_volume_types: + # Convert the object to a dictionary, + # because some field names have spaces. + pvc_volume_type = volume_type.__dict__ + matched_volume_types = self.\ + _get_matched_volume_type_by_pvc_id( + local_volume_types, + pvc_volume_type.get('id')) + if len(matched_volume_types) == 0: + # Not found + self._insert_pvc_volume_type(context, pvc_volume_type) + count_new_volume_types += 1 + else: + # Found + if len(matched_volume_types) > 1: + LOG.warning("More than one volume type in DB matches " + "one PowerVC volume type: " + + pvc_volume_type.get('id')) + self._sync_existing_volume_type(context, + matched_volume_types[0], + pvc_volume_type) + count_updated_volume_types += 1 + + LOG.info(""" + ******************************* + Initial volume type sync. is complete. + [ %(insert)s inserted, + %(update)s updated, + %(delete)s deleted ] + ******************************* + """ % + {'insert': count_new_volume_types, + 'update': count_updated_volume_types, + 'delete': count_deleted_volume_types}) + + def _get_all_local_pvc_volume_types(self, context): + """ Get all local volume types that are mapped from PowerVC""" + all_types = db.volume_type_get_all(context) + filters = [] + # Filter out non-powervc volume types + for each in all_types: + name = all_types[each]['name'] + if(not name.startswith(constants.LOCAL_PVC_VOLUME_TYPE_PREFIX)): + filters.append(name) + + for name in filters: + del all_types[name] + + return all_types + + def _get_local_volume_type_by_pvc_id(self, context, pvcid): + """ Get a local volume type by a PowerVC uuid.""" + local_volume_types = self._get_all_local_pvc_volume_types(context) + return self._get_matched_volume_type_by_pvc_id( + local_volume_types, pvcid) + + def _get_matched_volume_type_by_pvc_id(self, local_volume_types, pvcid): + """ Get the local volume types matching a PowerVC uuid.""" + matches = [] + for item in local_volume_types: + volume_type_id = local_volume_types[item].get('id') + if(volume_type_id == pvcid): + matches.append(local_volume_types[item]) + return matches + + def _mask_pvc_volume_type_name(self, + pvc_volume_type_name, + storage_backend): + if pvc_volume_type_name is None: + return pvc_volume_type_name + + if storage_backend is None: + storage_backend = '' + + return constants.LOCAL_PVC_VOLUME_TYPE_PREFIX + \ + storage_backend + ':' + pvc_volume_type_name + + def _insert_pvc_volume_type(self, context, pvc_volume_type): + storage_backend = '' + extra_specs = pvc_volume_type.get('extra_specs') + if(extra_specs is None): + extra_specs = {} + elif ('capabilities:volume_backend_name' in extra_specs): + storage_backend = \ + extra_specs['capabilities:volume_backend_name'] + # Overwrite the volume backend name +
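# so that every synced volume type is served by the PowerVC cinder + # backend (constants.POWERVC_VOLUME_BACKEND) on the hosting OS +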
extra_specs['capabilities:volume_backend_name'] = \ + constants.POWERVC_VOLUME_BACKEND + + volume_type = { + 'id': pvc_volume_type.get('id'), + 'name': self._mask_pvc_volume_type_name( + pvc_volume_type.get('name'), storage_backend), + 'extra_specs': extra_specs + } + ret = None + try: + ret = db.volume_type_create(context, volume_type) + except Exception as e: + ret = None + LOG.debug(_("Failed to create volume type %s, Exception: %s") + % (volume_type['name'], e)) + return ret + + def _sync_existing_volume_type(self, context, + local_volume_type, pvc_volume_type): + if local_volume_type is None or pvc_volume_type is None: + return False + extra_specs = pvc_volume_type.get('extra_specs') + if(extra_specs is None): + extra_specs = {} + # overwrite the volume backend name + extra_specs['capabilities:volume_backend_name'] = \ + constants.POWERVC_VOLUME_BACKEND + + try: + db.volume_type_extra_specs_update_or_create( + context, local_volume_type.get('id'), + extra_specs) + except Exception as e: + LOG.debug(_("Failed to update volume type %s, Exception: %s") + % (local_volume_type.get('id'), e)) + return False + + return True + + def _unregister_volume_types(self, ctx, vol_type_id): + """ + Unregister the volume type from the local database. This does not use + the Cinder API, which would send an RPC to have the volume type + deleted. The volume type has already been removed from PowerVC, so we + just remove it from the local database. + """ + # If no volume type id was given then ignore + if vol_type_id is None: + LOG.debug('Volume type id is None, ignore') + return False + + try: + db.volume_type_destroy(ctx, vol_type_id) + except Exception as e: + LOG.debug(_("Failed to delete volume type %s, Exception: %s") + % (vol_type_id, e)) + return False + + return True + + def _is_valid_pvc_volume_type(self, context, + local_volume_type, pvc_volume_types): + found = False + for volume_type in pvc_volume_types: + pvc_vol_type = volume_type.__dict__ + if (local_volume_type.get('id') == pvc_vol_type.get('id')): + found = True + break + return found + + def _handle_powervc_volume_create(self, context, message): + """ + Handle volume create messages sent from PowerVC. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + LOG.debug("Handling notification: %s" % message.get('event_type')) + + payload = message.get('payload') + pvc_volume_id = payload.get('volume_id') + + # If the volume already exists locally then ignore + local_volume = self._get_local_volume_by_pvc_id(context, pvc_volume_id) + if local_volume is not None: + LOG.debug('Volume already exists locally') + return + + volume = self._service.get_volume_by_id(pvc_volume_id) + if volume is not None: + volume_id = volume.__dict__.get("id") + scg_accessible_volumes = self._service.get_volumes() + for accessible_volume in scg_accessible_volumes: + accessible_volume_id = accessible_volume.__dict__.get("id") + if(accessible_volume_id == volume_id): + self._insert_pvc_volume(context, volume.__dict__) + return + + LOG.debug('Volume not accessible, ignored!') + return + + def _handle_powervc_volume_delete(self, context, message): + """ + Handle volume delete messages sent from PowerVC.
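+ If a matching local shadow volume exists, it is removed from the + local database.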
+ + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + LOG.debug("Handling notification: %s" % message.get('event_type')) + + payload = message.get('payload') + pvc_volume_id = payload.get('volume_id') + + # If the volume does not already exist locally then ignore + local_volume = self._get_local_volume_by_pvc_id(context, pvc_volume_id) + if local_volume is None: + LOG.debug('Volume is non-existent locally, ignore delete handle') + return + + self._unregister_volumes(context, local_volume.get('id')) + + def _handle_powervc_volume_update(self, context, message): + """ + Handle volume update messages sent from PowerVC. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + LOG.debug("Handling notification: %s" % message.get('event_type')) + + payload = message.get('payload') + pvc_volume_id = payload.get('volume_id') + + local_volume = self._get_local_volume_by_pvc_id(context, pvc_volume_id) + if local_volume is None: + LOG.debug('Volume is non-existent locally, ignore update handle') + return + + pvc_volume = self._service.get_volume_by_id(pvc_volume_id) + if pvc_volume is not None: + self._sync_existing_volume(context, + local_volume, + pvc_volume.__dict__) + else: + LOG.debug('Tried to sync the updated volume but it could not ' + 'be found in PowerVC') + + def _get_all_local_volumes(self, context): + local_pvc_volumes = [] + try: + db_matches = db.volume_get_all(context, + marker=None, + limit=None, + sort_key='created_at', + sort_dir='desc') + for local_volume in db_matches: + if self._get_pvc_id_from_local_volume(local_volume) is not None: + local_pvc_volumes.append(local_volume) + except Exception as e: + local_pvc_volumes = None + LOG.debug(_('Failed to get all local volumes, \ + Exception: %s') % (e)) + + return local_pvc_volumes + + def _get_local_volume_by_pvc_id(self, context, pvc_id, is_map=True): + """ Get a local volume by its PowerVC volume id.""" + ret_volume = None + if pvc_id is None: + return ret_volume + + if is_map is False: + try: + ret_volume = db.volume_get(context, pvc_id) + except Exception: + ret_volume = None + LOG.debug(_('Volume %s could not be found.') % pvc_id) + else: + all_local_volumes = None + try: + all_local_volumes = self._get_all_local_volumes(context) + except Exception: + all_local_volumes = None + + if all_local_volumes is not None: + for volume in all_local_volumes: + temp = self._get_pvc_id_from_local_volume(volume) + if temp == pvc_id: + ret_volume = volume + break + + return ret_volume + + def _get_local_volume_type_by_id(self, + context, + volume_type_id, + inactive=False): + """ Get a local volume type by volume type id.""" + ret_volume_type = None + try: + ret_volume_type = db.api.volume_type_get(context=context, + id=volume_type_id) + except Exception as e: + ret_volume_type = None + LOG.debug(_("Failed to get local volume type by id [%s]. \ + Exception: %s") % (volume_type_id, e)) + + return ret_volume_type + + def _get_local_volume_type_by_name(self, + context, + volume_type_name, + inactive=False): + ret_volume_type = None + if volume_type_name is None: + return ret_volume_type + + try: + ret_volume_type = db.api.volume_type_get_by_name(context, + volume_type_name) + except Exception as e: + ret_volume_type = None + LOG.debug(_("Failed to get local volume type by name [%s].
\ + Exception: %s") % (volume_type_name, e)) + + return ret_volume_type + + def _exist_local_volume_type(self, + context, + volume_type_id, + searchInactive): + """ Check if exist volume type by volume type id .""" + if volume_type_id is None: + return False + + volume_type = self._get_local_volume_type_by_id(context, + volume_type_id, + False) + if volume_type is not None: + return True + + if searchInactive is True: + volume_type = self._get_local_volume_type_by_id(context, + volume_type_id, + True) + if volume_type is not None: + return True + + return False + + def _get_matched_volume_by_pvc_id(self, local_volumes, pvcid): + """ Get a local instance by a PowerVC uuid.""" + matches = [] + if local_volumes is None or pvcid is None: + return matches + + for item in local_volumes: + volume_id = self._get_pvc_id_from_local_volume(item) + if(volume_id == pvcid): + matches.append(item) + return matches + + def _is_valid_pvc_volume(self, context, + local_volume, pvc_volumes): + found = False + + for volume in pvc_volumes: + pvc_volume = volume.__dict__ + local_volume_id = self._get_pvc_id_from_local_volume(local_volume) + if (local_volume_id == pvc_volume.get('id')): + found = True + break + + return found + + def _sync_existing_volume(self, context, local_volume, pvc_volume): + ret = False + if local_volume is None or pvc_volume is None: + LOG.debug('Local volume or PVC volume is none and ignore it') + return ret + + if not self._staging_cache.is_valid: + LOG.warning(_("Staging user or project invalid." + " Skipping volume sync.")) + return ret + + values = self._get_values_from_volume(context, + pvc_volume, + local_volume) + + try: + db.volume_update(context, local_volume.get('id'), values) + ret = True + except Exception as e: + ret = False + LOG.debug(_("Failed to update volume [%s] existed. Exception: %s") + % (local_volume.get('display_name'), e)) + + return ret + + def _unregister_volumes(self, context, volume_id): + """ + Unregister the volume from the local database. This does not use + the Cinder API which would send an RPC to have the instance deleted. + The instance has already been removed from PowerVC so we just send our + own notifications locally and remove it from the database. + """ + ret = False + + if volume_id is None: + LOG.debug('Volume id is none and ignore it') + return ret + + try: + db.volume_destroy(context, volume_id) + ret = True + except Exception as e: + ret = False + LOG.debug(_("Failed to delete local volume %s, Exception: %s") + % (volume_id, e)) + + return ret + + def _insert_pvc_volume(self, context, volume): + """ Create one volume""" + if volume is None: + LOG.debug("Volume is None, cannot insert it") + return + + volume_info = volume + volume_type = volume_info.get('volume_type') + volume_display_name = volume_info.get('display_name') + + if volume_type is None or volume_type == 'None': + LOG.debug(_("Volume type is None for volume: %s") + % volume_display_name) + else: + LOG.debug("Check if exist volume type in local hosting OS, \ + only including active") + pvc_volume_type = None + try: + pvc_volume_type = self._service \ + .get_volume_type_by_name(volume_type) + except Exception as e: + LOG.debug(_("Failed to get volume type from " + "PowerVC by name [%s]. 
Exception: %s") + % (volume_type, e)) + + if pvc_volume_type is not None: + dict_pvc_volume_type = pvc_volume_type.__dict__ + + exist_volume_type = self.\ + _exist_local_volume_type(context, + dict_pvc_volume_type.get("id"), + False) + if exist_volume_type is False: + LOG.debug(_('''Volume type [%s] is non-existent, + insert into hosting OS''') % volume_type) + + try: + self._insert_pvc_volume_type(context, + dict_pvc_volume_type) + except Exception: + LOG.debug("Failed to insert volume type") + LOG.debug("Insert volume type successfully") + else: + LOG.debug(_("Volume type [%s] existed") % volume_type) + + values = self._get_values_from_volume(context, volume) + + if values is None: + LOG.warning(_("Staging user or project invalid." + " Skipping volume sync.")) + return None + else: + try: + db.volume_create(context, values) + except Exception as e: + LOG.debug(_("Failed to create volume %s. Exception: %s") + % (str(values), str(e))) + return None + + LOG.debug(_("Create volume %s successfully") % values) + + def _get_values_from_volume(self, context, volume, local_volume=None): + if volume is None: + return None + + project_id = None + user_id = None + + if local_volume is None: + user_id, project_id = \ + self._staging_cache.get_staging_user_and_project() + if user_id is None: + LOG.warning(_("Staging user or project invalid.")) + return None + else: + project_id = local_volume.get('project_id') + user_id = local_volume.get('user_id') + + metadata = volume.get('metadata') + if metadata is None: + metadata = {} + + metadata[constants.LOCAL_PVC_PREFIX + 'os-vol-tenant-attr:tenant_id']\ + = volume.get('os-vol-tenant-attr:tenant_id') + + health_value = None + health_status = volume.get('health_status') + if health_status is not None: + health_value = health_status.get('health_value') + metadata[constants.LOCAL_PVC_PREFIX + 'health_status.health_value']\ + = health_value + + metadata[constants.LOCAL_PVC_PREFIX + 'os-vol-host-attr:host'] \ + = volume.get('os-vol-host-attr:host') + metadata[constants.LOCAL_PVC_PREFIX + 'id'] \ + = volume.get('id') + + # Get volume type id + volume_type_id = None + volume_type_name = volume.get('volume_type') + if(volume_type_name is not None and volume_type_name != 'None'): + storage_backend = volume.get('os-vol-host-attr:host') + local_volume_type_name = self._mask_pvc_volume_type_name( + volume_type_name, storage_backend) + if local_volume_type_name is not None: + volume_type = self.\ + _get_local_volume_type_by_name(context, + local_volume_type_name) + if volume_type is not None: + volume_type_id = volume_type.get('id') + + # Get attachment information + attachments = volume.get('attachments') +# attach_time = None + attach_status = None + attached_host = None + mountpoint = None + instance_uuid = None + if attachments is not None and len(attachments) > 0: + attach_status = 'attached' + attach = attachments[0] + attached_host = attach.get('host_name') + mountpoint = attach.get('device') + # Here instance_uuid also can be assigned metadata['instance_uuid'] + # metadata['instance_uuid'] equal to attach['server_id'] + instance_uuid = attach.get('server_id') + + instance_uuid = self._get_local_instance_id(instance_uuid) + + bootable = 0 + if volume.get('bootable') == 'true': + bootable = 1 + + host = CONF.host + if CONF.enabled_backends is not None and\ + constants.BACKEND_POWERVCDRIVER in CONF.enabled_backends: + host = "%s@%s" % (CONF.host, constants.BACKEND_POWERVCDRIVER) + values = {'display_name': (volume.get('display_name', '') or 'None'), + 
'display_description': volume.get('display_description'), + # 'volume_type_id': volume_type_id, + # 'id': volume['id'], + 'status': volume.get('status'), + 'host': host, + 'size': volume.get('size'), + 'availability_zone': volume.get('availability_zone'), + 'bootable': bootable, + 'snapshot_id': volume.get('snapshot_id'), + 'source_volid': volume.get('source_volid'), + 'metadata': metadata, + 'project_id': project_id, + 'user_id': user_id, + 'attached_host': attached_host, + 'mountpoint': mountpoint, + 'instance_uuid': instance_uuid, + 'attach_status': attach_status + } + + if(volume_type_id is not None): + values['volume_type_id'] = volume_type_id + + return values + + def _start_periodic_volume_sync(self, context): + """ + Initialize the periodic syncing of volumes from PowerVC into the + local OS. The volume_sync_interval config property determines + how often the sync will occur, and the + full_volume_sync_frequency config property determines the + number of marked volume sync operations between full volume syncs. + + :param: context The security context + """ + # Enforce some minimum values for the sync interval properties + # TODO: Minimum values should at least be documented + conf_sync = CONF.powervc.volume_sync_interval + conf_full_sync = CONF.powervc.full_volume_sync_frequency + sync_interval = conf_sync if conf_sync > 10 else 10 + full_sync_frequency = conf_full_sync if conf_full_sync > 2 else 2 + self._volume_sync_counter = 0 + + def sync(): + """Called on the volume sync intervals""" + self._volume_sync_counter += 1 + + try: + local_volumes = None + is_full_sync = True + # Check if it's time to do a full sync + if self.full_volume_sync_required or \ + self._volume_sync_counter == full_sync_frequency: + self.full_volume_sync_required = False + self._volume_sync_counter = 0 + local_volumes = self._get_all_local_volumes(context) + LOG.debug('Syncing all volumes on interval') + else: + # If there are no marked volumes to sync stop here + if len(self.sync_volumes) == 0: + return + is_full_sync = False + local_volumes = self.sync_volumes + LOG.debug('Syncing marked volumes') + + pvc_volumes = self._service.get_volumes() + self._synchronize_volumes_ex(context, local_volumes, + pvc_volumes, is_full_sync) + except Exception as e: + LOG.exception(_("Error occurred during volume " + "synchronization: %s."), e) + LOG.info(_("Volume synchronization will occur at the next " + "scheduled interval.")) + + self.tg.add_timer(sync_interval, sync) + + def _synchronize_volumes_ex(self, + context, + local_volumes, + pvc_volumes, + is_full_sync=True): + """ + Synchronize volumes + """ + LOG.info(_("Volume synchronization started...")) + if pvc_volumes is None: + pvc_volumes = [] + + if local_volumes is None: + local_volumes = [] + + count_created_volumes = 0 + count_updated_volumes = 0 + count_deleted_volumes = 0 + + # Local ---> Powervc + # First delete local unused volumes + for local_volume in local_volumes: + if not self._is_valid_pvc_volume(context, + local_volume, + pvc_volumes): + # If it is not valid in pvc, also delete it from the local DB.
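+ # (DB-only removal; _unregister_volumes issues no cinder API call)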
+ + self._unregister_volumes(context, local_volume.get('id')) + count_deleted_volumes += 1 + + # Try to delete unused volume types. + # Passing None forces fetching info from both the local DB and PowerVC + deleted_volume_types = ( + self._delete_unused_volume_types(context, + local_volume_types=None, + pvc_volume_types=None)) + LOG.info(' Deleted %i unused volume-types when syncing volumes' + % deleted_volume_types) + + # Powervc ---> Local + for volume in pvc_volumes: + pvc_volume = volume.__dict__ + matched_volumes = self._get_matched_volume_by_pvc_id( + local_volumes, + pvc_volume.get('id')) + if len(matched_volumes) == 0: + self._insert_pvc_volume(context, pvc_volume) + count_created_volumes += 1 + else: + if len(matched_volumes) > 1: + LOG.warning("More than one volume in DB matches " + "one PowerVC volume: " + pvc_volume.get('id')) + # TODO: We should do something about this but scheduling + # a sync won't help since that does nothing to remove + # duplicate local volumes. + self._sync_existing_volume(context, + matched_volumes[0], + pvc_volume) + count_updated_volumes += 1 + + LOG.info(""" + ******************************* + Volume sync. is complete. + Full sync: %(full_sync)s + [ %(insert)s inserted, + %(update)s updated, + %(delete)s deleted ] + ******************************* + """ % + {'full_sync': is_full_sync, + 'insert': count_created_volumes, + 'update': count_updated_volumes, + 'delete': count_deleted_volumes}) + + def _get_local_instance_id(self, pvc_instance_id, is_map=True): + ret_instance_id = pvc_instance_id + if is_map is False: + return ret_instance_id + + if pvc_instance_id is None: + return ret_instance_id + + from powervc.common.constants import SERVICE_TYPES + # lazy import factory to avoid connecting to the env when the + # manager is loaded + from powervc.common.client import factory + novaclient = factory.LOCAL.get_client(str(SERVICE_TYPES.compute)) + local_instances = novaclient.manager.list_all_servers() + for inst in local_instances: + metadata = inst._info['metadata'] + meta_pvc_id = None + if 'pvc_id' in metadata: + meta_pvc_id = metadata['pvc_id'] + + if meta_pvc_id == pvc_instance_id: + ret_instance_id = inst._info['id'] + break + + return ret_instance_id + + def _get_pvc_id_from_local_volume(self, local_volume, is_map=True): + ret_pvc_id = None + + if local_volume is None: + return ret_pvc_id + + if is_map is False: + ret_pvc_id = local_volume.get('id') + else: + id_key = constants.LOCAL_PVC_PREFIX + 'id' + if isinstance(local_volume, cinder.db.sqlalchemy.models.Volume): + metadata = local_volume.get('volume_metadata') + for item in metadata: + if id_key == item['key']: + ret_pvc_id = item['value'] + break + elif isinstance(local_volume, dict): + metadata = local_volume.get('metadata') + if metadata is not None and id_key in metadata: + ret_pvc_id = metadata[id_key] + + return ret_pvc_id diff --git a/cinder-powervc/run_tests.sh b/cinder-powervc/run_tests.sh new file mode 100755 index 0000000..53c10d9 --- /dev/null +++ b/cinder-powervc/run_tests.sh @@ -0,0 +1,192 @@ +#!/bin/bash + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +set -eu + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run PowerVC Cinder's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." + echo " -n, --no-recreate-db Don't recreate the test database." + echo " -x, --stop Stop running tests after the first error or failure." + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run flake8" + echo " -8, --8 Just run flake8, don't show PEP8 text for each error" + echo " -P, --no-pep8 Don't run flake8" + echo " -c, --coverage Generate coverage report" + echo " -h, --help Print this usage message" + echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" + echo " --standard-threads Don't do the eventlet threading monkeypatch." + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_flake8=1;; + -8|--8) short_flake8=1;; + -P|--no-pep8) no_flake8=1;; + -c|--coverage) coverage=1;; + --standard-threads) + export STANDARD_THREADS=1 + ;; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseargs= +noseopts= +wrapper="" +just_flake8=0 +short_flake8=0 +no_flake8=0 +coverage=0 +recreate_db=1 +update=0 + +for arg in "$@"; do + process_option $arg +done + +# If enabled, tell nose to collect coverage data +if [ $coverage -eq 1 ]; then + noseopts="$noseopts --with-coverage --cover-package=cinder-powervc" +fi + +function run_tests { + # Just run the test suites in current environment + ${wrapper} $NOSETESTS + # If we get some short import error right away, print the error log directly + RESULT=$? + if [ "$RESULT" -ne "0" ]; + then + ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` + if [ "$ERRSIZE" -lt "40" ]; + then + cat run_tests.log + fi + fi + return $RESULT +} + +function run_flake8 { + FLAGS=--show-pep8 + if [ $# -gt 0 ] && [ 'short' == ''$1 ] + then + FLAGS='' + fi + + + echo "Running flake8 ..." + # Just run flake8 in current environment + #echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + RESULT=${PIPESTATUS[0]} + return $RESULT +} + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." 
+ python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + RESULT=$? + echo "RESULT $RESULT" + exit $RESULT +fi + +if [ $short_flake8 -eq 1 ]; then + run_flake8 short + RESULT=$? + exit $RESULT +fi + +run_tests +RESULT=$? + +# NOTE(sirp): we only want to run flake8 when we're running the full-test +# suite, not when we're running tests individually. To handle this, we need to +# distinguish between options (noseopts), which begin with a '-', and arguments +# (noseargs). +if [ -z "$noseargs" ]; then + if [ $no_flake8 -eq 0 ]; then + run_flake8 + TMP_RESULT=$? + RESULT=$(($TMP_RESULT + $RESULT)) + fi +fi + +if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + ${wrapper} coverage html -d covhtml -i +fi + +exit $RESULT \ No newline at end of file diff --git a/cinder-powervc/test/__init__.py b/cinder-powervc/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder-powervc/test/cinderclienttest.py b/cinder-powervc/test/cinderclienttest.py new file mode 100644 index 0000000..057d66f --- /dev/null +++ b/cinder-powervc/test/cinderclienttest.py @@ -0,0 +1,30 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" +Simple cinder client tests + +TODO: Convert to pyunit and use config file +""" + +import powervc.common.constants as constants + +from powervc.common import config +config.parse_power_config((), 'powervc') +import powervc.common.client.factory as clients + + +cinder_client = clients.POWERVC.new_client(str(constants.SERVICE_TYPES.volume)) + +print '=' * 10, 'Listing volumes', '=' * 10 +vol_list = cinder_client.volumes.list() +for vol in vol_list: + print str(vol.display_name), str(vol.display_description), \ + vol.id diff --git a/cinder-powervc/test/fake_volume_type.py b/cinder-powervc/test/fake_volume_type.py new file mode 100644 index 0000000..c441a67 --- /dev/null +++ b/cinder-powervc/test/fake_volume_type.py @@ -0,0 +1,51 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +""" + The class FakeVolumeType is used to produce the fake + data of the OpenStack cinder volume type +""" + +import datetime + + +class FakeVolumeType(): + + volume_type = dict() + + items = { + 'created_at', + 'updated_at', + 'deleted_at', + 'deleted', + 'id', + 'name', + 'extra_specs' + } + + def __init__(self): + + self.volume_type['id'] = "18b28659-966d-4913-bdda-2ca3cc68fb59" + self.volume_type['created_at'] = \ + datetime.datetime(2013, 8, 12, 5, 59, 25) + self.volume_type['updated_at'] = \ + datetime.datetime(2013, 8, 12, 5, 59, 25) + self.volume_type['deleted_at'] = None + self.volume_type['deleted'] = False + self.volume_type['name'] = "mengxd-01" + self.volume_type['extra_specs'] = { + 'drivers:rsize': '2', + 'drivers:storage_pool': 'r3-c3-ch1-jhusta', + 'capabilities:volume_backend_name': 'shared_v7000_1' + } + + def update(self, **update): + + self.volume_type.update(**update) diff --git a/cinder-powervc/test/powervc/__init__.py b/cinder-powervc/test/powervc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder-powervc/test/powervc/volume/__init__.py b/cinder-powervc/test/powervc/volume/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder-powervc/test/powervc/volume/driver/__init__.py b/cinder-powervc/test/powervc/volume/driver/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder-powervc/test/powervc/volume/driver/test_powervc_cinder.py b/cinder-powervc/test/powervc/volume/driver/test_powervc_cinder.py new file mode 100644 index 0000000..5c4dedd --- /dev/null +++ b/cinder-powervc/test/powervc/volume/driver/test_powervc_cinder.py @@ -0,0 +1,225 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" +# mock module +import mock +import sys +import stubout +import unittest + +sys.modules['powervc.common.client'] = mock.MagicMock() +# import _ +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') +from powervc.common import config +from cinder import exception +from cinder import db +from powervc.volume.driver.service import PowerVCService + +import six + + +class StorageProvider(): + def __init__(self, i): + self.free_capacity_gb = (i + 1) * 5 + self.total_capacity_gb = (i + 1) * 10 + + +class VolumeMetadataWithPVCID(): + + def __init__(self, pvc_id="1234"): + self.key = "pvc:id" + self.value = pvc_id + + +class Volume(): + def __init__(self, info): + self._info = info + self._add_details(info) + + def setattr(self, key, val): + self.__setattr__(key, val) + + def _add_details(self, info): + for (k, v) in six.iteritems(info): + try: + setattr(self, k, v) + except AttributeError: + # In this case we already defined the attribute on the class + pass + + +class PowerVCDriverTestCase(unittest.TestCase): + stubs = stubout.StubOutForTesting() + + def setUp(self): + super(PowerVCDriverTestCase, self).setUp() + + self.stubs.Set(PowerVCService, '_client', mock.MagicMock()) + # we need mock load config file before import PowerVCDriver class + config.parse_power_config = mock.MagicMock() + config.CONF.log_opt_values = mock.MagicMock() + from powervc.volume.driver.powervc import PowerVCDriver + self.powervc_cinder_driver = PowerVCDriver() + + def test_create_volume_no_size_raise_exception(self): + self.assertRaises(exception.InvalidVolume, + self.powervc_cinder_driver.create_volume, + None) + + def test_create_volume_succeed(self): + # local volume passed to driver + vol = {'id': 1234, + 'size': 1} + volume = Volume(vol) + # fake volume after call creating volume from pvc + ret_vol_after_created = {'id': 4321, + 'status': 'creating'} + ret_volume_after_created = Volume(ret_vol_after_created) + # fake volume after call get volume from pvc + ret_vol_get = {'id': 4321, + 'status': 'available'} + ret_volume_get = Volume(ret_vol_get) + + # mock create volume restAPI + PowerVCService._client.volumes.create = \ + mock.MagicMock(return_value=ret_volume_after_created) + # mock get volume restAPI + PowerVCService._client.volumes.get = \ + mock.MagicMock(return_value=ret_volume_get) + # mock db access operation + db.volume_update = mock.MagicMock(return_value=None) + + dic = self.powervc_cinder_driver.create_volume(volume) + self.assertEqual({'status': 'available', + 'metadata': {'pvc:id': 4321}}, + dic, "return vol doesn't match") + + def test_create_volume_failed(self): + # local volume passed to driver + vol = {'id': 1234, + 'size': 1} + volume = Volume(vol) + # fake volume after call creating volume from pvc + ret_vol_after_created = {'id': 4321, + 'status': 'creating'} + ret_volume_after_created = Volume(ret_vol_after_created) + # fake volume after call get volume from pvc + ret_vol_get = {'id': 4321, + 'status': 'error'} + ret_volume_get = Volume(ret_vol_get) + + # mock create volume restAPI + PowerVCService._client.volumes.create = \ + mock.MagicMock(return_value=ret_volume_after_created) + # mock get volume restAPI + PowerVCService._client.volumes.get = \ + mock.MagicMock(return_value=ret_volume_get) + # mock db access operation + db.volume_update = mock.MagicMock(return_value=None) + + dic = self.powervc_cinder_driver.create_volume(volume) + self.assertEqual({'status': 'error', + 
'metadata': {'pvc:id': 4321}}, + dic, "return vol doesn't match") + + def test_create_volume_not_found(self): + # local volume passed to driver + vol = {'id': 1234, + 'size': 1} + volume = Volume(vol) + # fake volume after call creating volume from pvc + ret_vol_after_created = {'id': 4321, + 'status': 'creating'} + ret_volume_after_created = Volume(ret_vol_after_created) + # fake volume after call get volume from pvc + ret_vol_get = {'id': 4321, + 'status': 'error'} + ret_volume_get = Volume(ret_vol_get) + + # mock create volume restAPI + PowerVCService._client.volumes.create = \ + mock.MagicMock(return_value=ret_volume_after_created) + # mock get volume restAPI + # the first call raises an exception, + # the second returns an error volume + PowerVCService._client.volumes.get = \ + mock.MagicMock(side_effect=[exception.NotFound, + ret_volume_get]) + # mock db access operation + db.volume_update = mock.MagicMock(return_value=None) + + dic = self.powervc_cinder_driver.create_volume(volume) + self.assertEqual({'status': 'error', + 'metadata': {'pvc:id': 4321}}, + dic, "return vol doesn't match") + + def test_delete_volume_success(self): + # fake volume which will be passed to the driver service + vol_info = {'id': 1234, + 'size': 1} + volume = Volume(vol_info) + setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")]) + # fake existing volume + existed_vol_info = {"status": 'available', 'id': 1234} + existed_volume_get = Volume(existed_vol_info) + + # fake volume after delete + after_delete_vol_info = {"status": '', 'id': 1234} + after_delete_volume_get = Volume(after_delete_vol_info) + + # mock rest API + PowerVCService._client.volumes.get = \ + mock.MagicMock(side_effect=[existed_volume_get, + after_delete_volume_get]) + + self.powervc_cinder_driver.delete_volume(volume) + + def test_delete_volume_no_powervc_attribute_error(self): + # fake volume which will be passed to the driver service + vol_info = {'id': 1234, 'size': 1} + volume = Volume(vol_info) + self.assertRaises(AttributeError, + self.powervc_cinder_driver.delete_volume, + volume) + + def test_delete_volume_not_found_exception(self): + vol_info = {'id': 1234, 'size': 1} + volume = Volume(vol_info) + setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")]) + + PowerVCService._client.volumes.get = \ + mock.MagicMock(side_effect=exception.NotFound()) + + self.assertRaises(exception.NotFound, + self.powervc_cinder_driver.delete_volume, + volume) + + def test_get_volume_stats(self): + # fake a storage provider list + ret_sp = [StorageProvider(i) for i in range(10)] + # mock rest api + PowerVCService._client.storage_providers.list = \ + mock.MagicMock(return_value=ret_sp) + # fake an expected return dictionary + expected_ret_dic = {} + expected_ret_dic["volume_backend_name"] = 'powervc' + expected_ret_dic["vendor_name"] = 'IBM' + expected_ret_dic["driver_version"] = 1.0 + expected_ret_dic["storage_protocol"] = 'Openstack' + expected_ret_dic['total_capacity_gb'] = 550 + expected_ret_dic['free_capacity_gb'] = 275 + expected_ret_dic['reserved_percentage'] = 0 + expected_ret_dic['QoS_support'] = False + + ret_dic = self.powervc_cinder_driver.get_volume_stats(True) + + self.assertEqual(expected_ret_dic, + ret_dic, + "returned stats should match") diff --git a/cinder-powervc/test/powervc/volume/manager/__init__.py b/cinder-powervc/test/powervc/volume/manager/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/cinder-powervc/test/powervc/volume/manager/test_manager.py
b/cinder-powervc/test/powervc/volume/manager/test_manager.py new file mode 100644 index 0000000..fbeff1e --- /dev/null +++ b/cinder-powervc/test/powervc/volume/manager/test_manager.py @@ -0,0 +1,121 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" +from cinder.openstack.common import gettextutils +gettextutils.install('cinder') +import unittest +import mox +from powervc.volume.manager.manager import PowerVCCinderManager +from powervc.volume.driver.service import PowerVCService + +fake_volume_type = {'id': '', + 'name': 'fake_volume_type' + } + + +fake_volume = {'display_name': 'fake_volume', + 'display_description': 'This is a fake volume', + 'volume_type_id': '', + 'status': '', + 'host': 'powervc', + 'size': 1, + 'availability_zone': 'nova', + 'bootable': 0, + 'snapshot_id': '', + 'source_volid': '', + 'metadata': {}, + 'project_id': 'admin', + 'user_id': 'admin', + 'attached_host': 'fake_attached_host', + 'mountpoint': '', + 'instance_uuid': '', + 'attach_status': ''} + +fake_message = {'payload': {'volume_id': '', 'display_name': ''}} + +fake_context = {} + + +class FakeDBVolume(): + def __init__(self): + pass + + +class FakeVolume(): + def __init__(self): + pass + + __dict__ = fake_volume + + +class FakePowerVCService(PowerVCService): + def __init__(self): + pass + +fake_db_volume = FakeDBVolume() + + +class Test(unittest.TestCase): + def setUp(self): + self.moxer = mox.Mox() + + def __init__(self): + pass + + PowerVCCinderManager.__init__ = __init__ + self.manager = PowerVCCinderManager() + + def tearDown(self): + pass + + def test_handle_powervc_volume_create_not_create(self): + self.manager._service = self.moxer.CreateMock(PowerVCService) + self.moxer.StubOutWithMock(self.manager, + '_get_local_volume_by_pvc_id') + self.moxer.StubOutWithMock(self.manager._service, + 'get_volume_by_id') + self.moxer.StubOutWithMock(self.manager, '_insert_pvc_volume') + + pvc_id = '' + self.manager._get_local_volume_by_pvc_id(fake_context, pvc_id)\ + .AndReturn(fake_db_volume) + + self.moxer.ReplayAll() + + self.manager._handle_powervc_volume_create(fake_context, fake_message) + + self.moxer.UnsetStubs() + self.moxer.VerifyAll() + + def test_handle_powervc_volume_create_create(self): + self.manager._service = self.moxer.CreateMock(PowerVCService) + self.moxer.StubOutWithMock(self.manager, + '_get_local_volume_by_pvc_id') + self.moxer.StubOutWithMock(self.manager._service, + 'get_volume_by_id') + self.moxer.StubOutWithMock(self.manager, '_insert_pvc_volume') + + pvc_id = '' + volume_id = '' + fake_volume_instance = FakeVolume() + self.manager._get_local_volume_by_pvc_id(fake_context, pvc_id)\ + .AndReturn(None) + self.manager._service.get_volume_by_id(volume_id)\ + .AndReturn(fake_volume_instance) + + self.moxer.ReplayAll() + + self.manager._handle_powervc_volume_create(fake_context, fake_message) + + self.moxer.UnsetStubs() + self.moxer.VerifyAll() + +if __name__ == "__main__": + #import sys;sys.argv = ['', 'Test.testName'] + unittest.main() diff --git a/common-powervc/.project b/common-powervc/.project new file mode 100644 index 0000000..b0e1f88 --- /dev/null +++ b/common-powervc/.project @@ -0,0 +1,24 @@ + + + common-powervc + + + cinder-client + glance-client + keystone-client + neutron-client + nova + nova-client + oslo + + + + org.python.pydev.PyDevBuilder + + + + + + 
org.python.pydev.pythonNature + + diff --git a/common-powervc/.pydevproject b/common-powervc/.pydevproject new file mode 100644 index 0000000..b798607 --- /dev/null +++ b/common-powervc/.pydevproject @@ -0,0 +1,10 @@ + + + + + +/common-powervc + +python 2.7 +Default + diff --git a/common-powervc/etc/powervc.conf b/common-powervc/etc/powervc.conf new file mode 100644 index 0000000..93b8699 --- /dev/null +++ b/common-powervc/etc/powervc.conf @@ -0,0 +1,232 @@ +# This file contains configuration properties that affect how the powervc driver functions and how it +# communicates with the PowerVC server. Most properties have default values based on a default +# PowerVC configuration. However, some connection properties, such as the PowerVC host name, +# do not have default values and must be configured prior to running the powervc driver. These +# properties are marked with the text INPUT REQUIRED. You may also have to change other +# properties depending on your environment and how your PowerVC server is configured. For +# more information, see the Smart Cloud Entry Administration Guide. + +[DEFAULT] +# The following group of properties needs to be configured +# in order for the PowerVC Driver to be able to authenticate with keystone +# and obtain information from it that might be required to run background +# tasks (such as discovering a new image), or simply to connect to a +# secured Glance. +# When running secured Glance, make sure the 'auth_strategy' property in +# nova.conf is set to 'keystone'. + +# Log info messages +verbose = true + + +[openstack] +# Authentication url to authenticate with keystone (string value) +auth_url = http://localhost:5000/v2.0 + +# v2.0 or v3 +keystone_version = v2.0 + +# Tenant name for connecting to keystone in admin context (string value) +admin_tenant_name = demo + +# Username for connecting to keystone in admin context (string value) +admin_user = demo + +# Password for connecting to keystone in admin context (string value) +admin_password = openstack + +# For local SSL connections, specify the path and filename of the cacert file +#connection_cacert = + +http_insecure = True + +# Region name for the local OpenStack. Must be set to the correct name when the +# local OpenStack is configured to work in a multi-region environment. +#region_name = + +# +# Qpid connection information +# + +# Qpid broker hostname (string value) +qpid_hostname = + +# Qpid broker port (integer value) +qpid_port = 5672 + +# Username for qpid connection (string value) +qpid_username = + +# Password for qpid connection (string value) +qpid_password = + +# Transport to use, either 'tcp'(default) or 'ssl' +qpid_protocol = tcp + +[powervc] + +# Full class name for the manager for PowerVC Manager Service (string value) +powervc_manager = powervc.nova.driver.compute.manager.PowerVCCloudManager + +# Full class name for the driver for PowerVC Driver Service (string value) +powervc_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver + +# +# Connection information for PowerVC.
+# + +# Authentication url of the PowerVC to connect to +# INPUT REQUIRED +# Provide 'host' portion by updating it to the hostname of the PowerVC system +auth_url = https://host/powervc/openstack/identity/v3 + +# v2.0 or v3 +keystone_version = v3 + +# Username for PowerVC connection (string value) +admin_user = root + +# Password for PowerVC connection (string value) +admin_password = passw0rd + +# Tenant name for PowerVC connection (string value) +admin_tenant_name = ibm-default + +# For PowerVC SSL connections, specify the path and filename of the cacert file +# INPUT REQUIRED +# Provide the cacert file by copying it from its install location on the +# PowerVC host (e.g. /etc/pki/tls/certs/powervc.crt) to the local hosting +# OpenStack system. +#connection_cacert = + +# Value of insecure option for PowerVC connections (Default=True) +# INPUT REQUIRED +# Change to False when using a secure connection and providing a cacert file. +http_insecure = True + +# Value of authorization token expiration stale duration (Default=3600) +# INPUT REQUIRED +# Due to a PowerVC requirement, all REST API clients need to pre-refresh the +# authorization token at least 1 hour before expiration +expiration_stale_duration = 3600 + +# The names of the storage connectivity groups supported by our driver +# INPUT REQUIRED +# Provide the PowerVC storage connectivity group (SCG) names by getting the name +# from the PowerVC system, or using the PowerVC default SCG of "Any host, all VIOS". +# If you want to specify more than one SCG, just add more +# storage_connectivity_group entries +# Note: The value of this property must exactly match the value as specified on the +# PowerVC server, including case, punctuation, and spaces. +storage_connectivity_group = Any host, all VIOS +#storage_connectivity_group = + +# +# Qpid connection information for PowerVC +# + +# Qpid broker hostname (string value) +# INPUT REQUIRED +# Change 'host' to the hostname of the PowerVC system +qpid_hostname = host + +# Qpid broker port (integer value) + +# uncomment following line for non-ssl +# qpid_port = 5672 +qpid_port = 5671 + +# Username for qpid connection (string value) +qpid_username = powervc_qpid + +# Password for qpid connection (string value) +# INPUT REQUIRED +# Provide the qpid connection password from the PowerVC system +# by using the cat command on the pw.file in the directory where +# PowerVC is installed (e.g. cat /opt/ibm/powervc/data/pw.file) +qpid_password = + +# Transport to use, either 'tcp'(default) or 'ssl' + +# uncomment following line for non-ssl +# qpid_protocol = tcp +qpid_protocol = ssl + +# +# Sync variables +# + +# The name of the staging project (string value) +# If not set defaults to 'Public'. If set the named project should exist and +# be accessible by the staging_user. +staging_project_name = Public + +# The name of the staging user (string value) +# If not set defaults to 'admin'. If set the user should exist and +# have access to the project identified by staging_project_name. +staging_user = admin + +# The prefix that will be added to the flavor name from PowerVC +# and stored (string value). This should be unique for every +# connection to help distinguish the flavors +flavor_prefix = PVC- + +# This is a list of PowerVC flavor names that should be synced. +# If no flavor name is specified, then all flavors are synced. +flavor_white_list = + +# This is a list of PowerVC flavor names that should not be synced.
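+# For example (hypothetical flavor names; the list syntax is assumed to be +# comma separated): +# flavor_black_list = m1.tiny,m1.large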
+flavor_black_list = + +# The periodic flavor sync interval in seconds. +flavor_sync_interval = 300 + +# Instance periodic sync interval specified in seconds +instance_sync_interval = 20 + +# How many instance sync intervals between full instance syncs. Only instances +# known to be out of sync are synced on the interval except after this many +# intervals when all instances are synced. +full_instance_sync_frequency = 30 + +# Image periodic sync interval specified in seconds. This is the time from the end +# of one successful image periodic sync operation to the start of the next. +image_periodic_sync_interval_in_seconds = 300 + +# The time in seconds between image sync retry attempts if an error was +# encountered during an image sync operation +image_sync_retry_interval_time_in_seconds = 60 + +# The maximum number of images to return. The default is 500 images. If your PowerVC +# has more than 500 images, this limit should be increased to include all images. +image_limit = 500 + +# Volume periodic sync interval specified in seconds +volume_sync_interval = 20 + +# How many volume sync intervals between full volume syncs. +# Only volumes known to be out of sync are synced on the interval +# except after this many intervals when all volumes are synced. +full_volume_sync_frequency = 30 + +# Volume type periodic sync interval specified in seconds +volume_type_sync_interval = 20 + +# How many volume type sync intervals between full volume type syncs. +# Only volume types known to be out of sync are synced on the interval +# except after this many intervals when all volume types are synced. +full_volume_type_sync_frequency = 30 + +# Ignore delete errors so an exception is not thrown during a +# delete. When set to true, this allows the volume to be deleted +# on the hosting OS even if an exception occurs. When set to false, +# exceptions during delete prevent the volume from being deleted +# on the hosting OS. +volume_driver_ignore_delete_error = False + +# The number of times to check whether attaching/detaching the volume succeeded +volume_max_try_times = 12 + +# Minimum loop interval and initial delay, in seconds, for long-running tasks. +longrun_loop_interval = 7 +longrun_initial_delay = 10 diff --git a/common-powervc/logrotate.d/openstack-powervc-driver b/common-powervc/logrotate.d/openstack-powervc-driver new file mode 100644 index 0000000..fe35245 --- /dev/null +++ b/common-powervc/logrotate.d/openstack-powervc-driver @@ -0,0 +1,10 @@ +compress + +/var/log/powervc/*.log { + weekly + rotate 4 + missingok + compress + minsize 100k + size 50M +} diff --git a/common-powervc/powervc/__init__.py b/common-powervc/powervc/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/powervc/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/powervc/common/__init__.py b/common-powervc/powervc/common/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/powervc/common/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/powervc/common/client/__init__.py b/common-powervc/powervc/common/client/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/powervc/common/client/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/powervc/common/client/config.py b/common-powervc/powervc/common/client/config.py new file mode 100644 index 0000000..1d431c9 --- /dev/null +++ b/common-powervc/powervc/common/client/config.py @@ -0,0 +1,42 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +from powervc.common import config +from powervc.common import netutils + +CONF = config.CONF + +# http client opts from config file normalized +# to keystone client form +OS_OPTS = None +PVC_OPTS = None + + +def _build_base_http_opts(config_section, opt_map): + configuration = CONF[config_section] + opt_map['tenant_name'] = configuration['admin_tenant_name'] + opt_map['username'] = configuration['admin_user'] + opt_map['password'] = configuration['admin_password'] + opt_map['cacert'] = configuration['connection_cacert'] + opt_map['insecure'] = configuration['http_insecure'] + if opt_map['insecure'] is False: + opt_map['auth_url'] = netutils.hostname_url(configuration['auth_url']) + else: + opt_map['auth_url'] = configuration['auth_url'] + return opt_map + + +# init client opts for powervc and openstack only once +if OS_OPTS is None: + OS_OPTS = _build_base_http_opts('openstack', {}) + # support multiple regions on the local openstack + OS_OPTS['region_name'] = CONF['openstack']['region_name'] +if PVC_OPTS is None: + PVC_OPTS = _build_base_http_opts('powervc', {}) diff --git a/common-powervc/powervc/common/client/delegate.py b/common-powervc/powervc/common/client/delegate.py new file mode 100644 index 0000000..fabde65 --- /dev/null +++ b/common-powervc/powervc/common/client/delegate.py @@ -0,0 +1,83 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + + +def new_composite_deletgate(delegates): + """create and return a new object which delegates + calls to the delegates. the facade object returned + from this method allows you to extend functionality + of existing objects using containment rather than + inheritance. + + for example suppose you have obj1 which has method + x() and you have obj2 which has method y(). you can + create a single view of those objects like this: + + composite = new_composite_deletgate([obj1, obj2]) + composite.x() # calls x() on obj1 + composite.y() # calls y() on obj2 + + :param delegates: a list of objects which make up the + delegates. when a method call or attr access is made + on the returned wrapper, the list of delegates will + be tried in order until an object is found with the + attr.
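+ attribute lookups that no delegate satisfies resolve to None + rather than raising AttributeError (see __getattribute__ below).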
+ """ + + class CompositeDelegator(object): + def __init__(self, *args): + super(CompositeDelegator, self).__init__() + + def __getattribute__(self, name): + for instance in delegates: + if hasattr(instance, name): + attr = instance.__getattribute__(name) + if hasattr(attr, '__call__'): + def _f(*args, **kwargs): + return attr(*args, **kwargs) + return _f + else: + return attr + return None + + return CompositeDelegator() + + +def context_dynamic_auth_token(ctx, keystone): + """ + create a delegate specifically for security context + This is because security context need to access renew + auth_token for each request. But this property in context + is static. Delegate this auth_token property to keystone + dynamic property auth_token. + + Every context created for long live usage should wrap + this delegate to ensure it always uses the newest + auth_token for every REST request + """ + + class ContextDAT(ctx.__class__): + def __init__(self): + super(ctx.__class__, self).__init__() + + def __getattribute__(self, name): + if name != 'auth_token': + if hasattr(ctx, name): + attr = ctx.__getattribute__(name) + if hasattr(attr, '__call__'): + def _f(*args, **kwargs): + return attr(*args, **kwargs) + return _f + else: + return attr + else: + return keystone.auth_token + + return ContextDAT() diff --git a/common-powervc/powervc/common/client/extensions/__init__.py b/common-powervc/powervc/common/client/extensions/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/powervc/common/client/extensions/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/powervc/common/client/extensions/base.py b/common-powervc/powervc/common/client/extensions/base.py new file mode 100644 index 0000000..05dfe11 --- /dev/null +++ b/common-powervc/powervc/common/client/extensions/base.py @@ -0,0 +1,16 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + + +class ClientExtension(object): + """base class for all extensions. + """ + def __init__(self, client): + self.client = client diff --git a/common-powervc/powervc/common/client/extensions/cinder.py b/common-powervc/powervc/common/client/extensions/cinder.py new file mode 100644 index 0000000..d3f8890 --- /dev/null +++ b/common-powervc/powervc/common/client/extensions/cinder.py @@ -0,0 +1,200 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
+*************************************************************
+"""
+
+import six
+import urllib
+
+try:
+    from urllib import urlencode
+except ImportError:
+    from urllib.parse import urlencode
+
+from cinderclient import base as client_base
+from cinderclient.v1 import volumes
+from cinderclient.v1 import volume_types
+from powervc.common.client.extensions import base
+from powervc.common import utils
+
+
+class Client(base.ClientExtension):
+
+    def __init__(self, client):
+        super(Client, self).__init__(client)
+        # Initialize Storage Provider Manager
+        self.storage_providers = StorageProviderManager(client)
+        # Initialize PVC specific Volume Manager
+        self.volumes = PVCVolumeManager(client)
+        # Initialize PVC specific StorageTemplate Manager
+        self.volume_types = PVCStorageTemplateManager(client)
+        # any extensions to std cinder client go below
+
+
+class StorageProvider(client_base.Resource):
+    """
+    Entity class for StorageProvider
+    """
+    def __repr__(self):
+        return ("<StorageProvider: %s, storage_hostname: %s>" %
+                (self.id, self.storage_hostname))
+
+
+class StorageProviderManager(client_base.Manager):
+    """
+    Manager class for StorageProvider.
+    Currently the get and list functions for StorageProvider
+    are implemented.
+    """
+    resource_class = StorageProvider
+
+    def get(self, spUUID):
+        """
+        Get a StorageProvider.
+
+        :param spUUID: UUID of the `StorageProvider` to get.
+        :rtype: :class:`StorageProvider`
+        """
+        return self._get("/storage-providers/%s" % spUUID,
+                         "storage_provider")
+
+    def list(self, detailed=True, search_opts=None,
+             scgUUID=None,
+             scgName=None):
+        """
+        Get a list of the storage providers filtered by a specified
+        SCG UUID or SCG name. If both SCG UUID and SCG name are specified,
+        the UUID takes priority.
+
+        :rtype: list of :class:`StorageProvider`
+        """
+        # Get accessible storage providers by SCG
+        if scgUUID or scgName:
+            return (utils.get_utils().
                    get_scg_accessible_storage_providers(
+                        scgUUID=scgUUID, scgName=scgName,
+                        detailed=detailed, search_opts=search_opts)
+                    )
+        else:
+            return (utils.get_utils().
+                    get_multi_scg_accessible_storage_providers(
+                        None, None, detailed=detailed, search_opts=search_opts)
+                    )
+
+    def list_all_providers(self, detailed=True, search_opts=None):
+        """
+        Get a list of StorageProvider.
+        Optional detailed returns detailed StorageProvider info.
+
+        :rtype: list of :class:`StorageProvider`
+        """
+        if search_opts is None:
+            search_opts = {}
+
+        qparams = {}
+
+        for opt, val in six.iteritems(search_opts):
+            if val:
+                qparams[opt] = val
+
+        query_string = "?%s" % urlencode(qparams) if qparams else ""
+
+        detail = ""
+        if detailed:
+            detail = "/detail"
+        return self._list("/storage-providers%s%s" %
+                          (detail, query_string),
+                          "storage_providers")
+
+
+class PVCVolumeManager(volumes.VolumeManager):
+    """
+    The PVC specific VolumeManager that gets and lists volumes
+    filtered by Storage Connectivity Group
+    """
+    def list(self, detailed=True, search_opts=None,
+             scgUUID=None,
+             scgName=None):
+        """
+        Get a list of the volumes filtered by a specified SCG UUID
+        or SCG name. If both SCG UUID and SCG name are specified, the UUID
+        takes priority.
+
+        :rtype: list of :class:`Volume`
+        """
+        # Get accessible volumes by SCG
+        if scgUUID or scgName:
+            return (utils.get_utils().
+                    get_scg_accessible_volumes(scgUUID=scgUUID,
+                                               scgName=scgName,
+                                               detailed=detailed,
+                                               search_opts=search_opts))
+        else:
+            return (utils.get_utils().
+ get_multi_scg_accessible_volumes(None, + None, + detailed=detailed, + search_opts=search_opts) + ) + + def list_all_volumes(self, detailed=True, search_opts=None): + """ + Get a list of all volumes. + + :rtype: list of :class:`Volume` + """ + if search_opts is None: + search_opts = {} + + qparams = {} + + for opt, val in six.iteritems(search_opts): + if val: + qparams[opt] = val + + query_string = "?%s" % urlencode(qparams) if qparams else "" + + detail = "" + if detailed: + detail = "/detail" + + return self._list("/volumes%s%s" % (detail, query_string), + "volumes") + + +class PVCStorageTemplateManager(volume_types.VolumeTypeManager): + """ + The PVC specified StorageTemplateManager that list Storage Templates + (VolumeType in OpenStack) which filtered by Storage Connectivity Group + """ + + def list(self, scgUUID=None, scgName=None): + """ + Get a list of the Storage Template that filtered by a specified + SCG UUID or SCG name, if both SCG UUID and SCG name are specified, + UUID has the high priority to check. + + :rtype: list of :class:`VolumeType` + """ + # Get accessible volumes by SCG + if scgUUID or scgName: + return (utils.get_utils(). + get_scg_accessible_storage_templates(scgUUID=scgUUID, + scgName=scgName)) + else: + return (utils.get_utils(). + get_multi_scg_accessible_storage_templates(None, + None)) + + def list_all_storage_templates(self): + """ + Get a list of all Storage Templates + + :rtype: list of :class:`VolumeType`. + """ + return self._list("/types", "volume_types") diff --git a/common-powervc/powervc/common/client/extensions/glance.py b/common-powervc/powervc/common/client/extensions/glance.py new file mode 100644 index 0000000..28f7348 --- /dev/null +++ b/common-powervc/powervc/common/client/extensions/glance.py @@ -0,0 +1,57 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +import powervc.common.client.extensions.base as base + + +class Client(base.ClientExtension): + + def __init__(self, client): + super(Client, self).__init__(client) + +###################Images functions########################## + def listImages(self): + return [image for image in self.client.images.list()] + + def getImage(self, image_id): + return self.client.images.get(image_id) + + def getImageFile(self, image_id): + return self.client.images.data(image_id) + + def deleteImage(self, image_id): + return self.client.images.delete(image_id) + + def updateImage(self, image_id, **kwargs): + return self.client.images.update(image_id, **kwargs) + +##################Image member functions####################### + def listImageMembers(self, image_id): + return [imageMember for imageMember in + self.client.image_members.list(image_id)] + + def deleteImageMember(self, image_id, member_id): + self.client.image_members.delete(image_id, member_id) + + def updateImageMember(self, image_id, member_id, member_status): + return self.client.image_members.update(image_id, member_id, + member_status) + + def createImageMember(self, image_id, member_id): + return self.client.image_members.create(image_id, member_id) + +##################Image tag functions (v2 only)################ + def updateImageTag(self, image_id, tag_value): + if self.client_version == 2: + return self.client.image_tags.update(image_id, tag_value) + + def deleteImageTag(self, image_id, tag_value): + if self.client_version == 2: + return self.client.image_tags.delete(image_id, tag_value) diff --git a/common-powervc/powervc/common/client/extensions/nova.py b/common-powervc/powervc/common/client/extensions/nova.py new file mode 100644 index 0000000..26c088d --- /dev/null +++ b/common-powervc/powervc/common/client/extensions/nova.py @@ -0,0 +1,489 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import six +import urllib +import base64 +from novaclient import base as client_base +from novaclient.v1_1 import servers +from novaclient.v1_1 import hypervisors +from novaclient.v1_1 import images +from novaclient.v1_1 import flavors +from novaclient.v1_1 import volumes +from novaclient.v1_1.volume_types import VolumeType +from novaclient.openstack.common import strutils +from powervc.common.client.extensions import base +from powervc.common import utils +import logging + +LOG = logging.getLogger(__name__) + + +class Client(base.ClientExtension): + + def __init__(self, client): + super(Client, self).__init__(client) + self.manager = PVCServerManager(client) + self.servers = servers + self.hypervisors = hypervisors.HypervisorManager(client) + self.images = images.ImageManager(client) + self.flavors = flavors.FlavorManager(client) + self.storage_connectivity_groups = \ + StorageConnectivityGroupManager(client) + self.volumes = volumes.VolumeManager(client) + self.scg_images = SCGImageManager(client) + # any extensions to std nova client go below + + +class PVCServerManager(servers.ServerManager): + """ + This ServerManager class is specific for PowerVC booting a VM. + As the PowerVC boot API does not follow the standard openstack boot API, + need to rewrite the default boot method to satisfy powerVC boot restAPI + content. 
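+
+    As a sketch, the flavor argument handed to _boot below is a dict
+    whose keys follow the code in this class (the values here are
+    made up for illustration):
+
+        {'memory_mb': 4096, 'vcpus': 2, 'root_gb': 60,
+         'extra_specs': {}}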
+ """ + + def list(self, detailed=True, search_opts=None, + scgUUID=None, + scgName=None): + """ + Get a list of the Servers that filtered by a specified SCG UUID + or SCG name, if both SCG UUID and SCG name are specified, UUID has the + high priority to check. + + :rtype: list of :class:`Server` + """ + if scgUUID or scgName: + return utils.get_utils().get_scg_accessible_servers(scgUUID, + scgName, + detailed, + search_opts + ) + else: + # This will get all scgs accessible servers + return utils.get_utils().\ + get_multi_scg_accessible_servers(None, + None, + detailed, + search_opts + ) + + def list_all_servers(self, detailed=True, search_opts=None): + """ + Get a list of all servers without filters. + Optional detailed returns details server info. + Optional reservation_id only returns instances with that + reservation_id. + + :rtype: list of :class:`Server` + """ + if search_opts is None: + search_opts = {} + + qparams = {} + + for opt, val in six.iteritems(search_opts): + if val: + qparams[opt] = val + + query_string = "?%s" % urllib.urlencode(qparams) if qparams else "" + + detail = "" + if detailed: + detail = "/detail" + return self._list("/servers%s%s" % (detail, query_string), "servers") + + + # This function was copied from (/usr/lib/python2.6/site-packages/ + # novaclient/v1_1/servers.py) before, but changes needed when activation + # data contains userdata and files, because in a boot action, local OS + # novaclient's _boot will read them from CLI or GUI firstly, then when our + # driver is triggered, this version of _boot should just forward the data + # or file content to PowerVC without any reading, otherwise error happens. + # RTC/172018, add support to boot server with activation data. + def _boot(self, resource_url, response_key, name, image, flavor, + meta=None, files=None, userdata=None, reservation_id=None, + return_raw=False, min_count=None, max_count=None, + security_groups=None, key_name=None, availability_zone=None, + block_device_mapping=None, nics=None, scheduler_hints=None, + config_drive=None, admin_pass=None, **kwargs): + """Create (boot) a new server. + + :param name: Server Name. + :param image: The string of PowerVC `Image` UUID to boot with. + :param flavor: The :dict of `Flavor` that need to boot onto. + :param meta: A dict of arbitrary key/value metadata to store for this + server. A maximum of five entries is allowed, and both + keys and values must be 255 characters or less. + :param files: A dict of files to overrwrite on the server upon boot. + Keys are file names (i.e. ``/etc/passwd``) and values + are the file contents (either as a string or as a + file-like object). A maximum of five entries is allowed, + and each file must be 10k or less. + :param userdata: user data to pass to make config drive this can be a + file type object as well or a string. PowerVC don't use + metadata server for security considerations. + :param reservation_id: a UUID for the set of servers being requested. + :param return_raw: If True, don't try to coearse the result into + a Resource object. + :param security_groups: list of security group names + :param key_name: (optional extension) name of keypair to inject into + the instance + :param availability_zone: Name of the availability zone for instance + placement. + :param block_device_mapping: A dict of block device mappings for this + server. + :param nics: (optional extension) an ordered list of nics to be + added to this server, with information about + connected networks, fixed ips, etc. 
+ :param scheduler_hints: (optional extension) arbitrary key-value pairs + specified by the client to help boot an instance. + :param config_drive: (optional extension) value for config drive + either boolean, or volume-id + :param admin_pass: admin password for the server. + """ + body = {"server": { + "name": name, + "imageRef": image, + "flavor": {}, + }} + + # Add the flavor information to PowerVC for booting VM + body["server"]["flavor"]['ram'] = flavor['memory_mb'] + body["server"]["flavor"]['vcpus'] = flavor['vcpus'] + body["server"]["flavor"]['disk'] = flavor['root_gb'] + body["server"]["flavor"]['OS-FLV-EXT-DATA:ephemeral'] = \ + flavor.get('OS-FLV-EXT-DATA:ephemeral', 0) + body["server"]["flavor"]['extra_specs'] = flavor['extra_specs'] + + # If hypervisor ID specified: + if kwargs.get("hypervisor", None): + body["server"]['hypervisor_hostname'] = kwargs["hypervisor"] + + if userdata: + # RTC/172018 -- start + # comment out the following, already done by local OS nova client + # if hasattr(userdata, 'read'): + # userdata = userdata.read() + + # userdata = strutils.safe_encode(userdata) + # body["server"]["user_data"] = base64.b64encode(userdata) + body["server"]["user_data"] = userdata + # RTC/172018 -- end + if meta: + body["server"]["metadata"] = meta + if reservation_id: + body["server"]["reservation_id"] = reservation_id + if key_name: + body["server"]["key_name"] = key_name + if scheduler_hints: + body['os:scheduler_hints'] = scheduler_hints + if config_drive: + body["server"]["config_drive"] = config_drive + if admin_pass: + body["server"]["adminPass"] = admin_pass + if not min_count: + min_count = 1 + if not max_count: + max_count = min_count + body["server"]["min_count"] = min_count + body["server"]["max_count"] = max_count + + if security_groups: + body["server"]["security_groups"] = ([{'name': sg} + for sg in security_groups]) + + # Files are a slight bit tricky. They're passed in a "personality" + # list to the POST. Each item is a dict giving a file name and the + # base64-encoded contents of the file. We want to allow passing + # either an open file *or* some contents as files here. 
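+        # For illustration (an assumed input, not from the original
+        # code): files passed as [('/etc/motd', 'hello')] become
+        # personality entries of the form
+        # {'path': '/etc/motd', 'contents': 'aGVsbG8=\n'} below,
+        # since str.encode('base64') appends a trailing newline.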
+
+        if files:
+            personality = body['server']['personality'] = []
+            # RTC/172018 -- start
+            # comment out the following, already done by local OS nova client
+            # for filepath, file_or_string in files.items():
+            #     if hasattr(file_or_string, 'read'):
+            #         data = file_or_string.read()
+            #     else:
+            #         data = file_or_string
+
+            for file in files:
+                personality.append({
+                    'path': file[0],
+                    'contents': file[1].encode('base64'),
+                })
+            # RTC/172018 -- end
+
+        if availability_zone:
+            body["server"]["availability_zone"] = availability_zone
+
+        # Block device mappings are passed as a list of dictionaries
+        if block_device_mapping:
+            bdm = body['server']['block_device_mapping'] = []
+            for device_name, mapping in block_device_mapping.items():
+                #
+                # The mapping is in the format:
+                # <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
+                #
+                bdm_dict = {'device_name': device_name}
+
+                mapping_parts = mapping.split(':')
+                id_ = mapping_parts[0]
+                if len(mapping_parts) == 1:
+                    bdm_dict['volume_id'] = id_
+                if len(mapping_parts) > 1:
+                    type_ = mapping_parts[1]
+                    if type_.startswith('snap'):
+                        bdm_dict['snapshot_id'] = id_
+                    else:
+                        bdm_dict['volume_id'] = id_
+                if len(mapping_parts) > 2:
+                    bdm_dict['volume_size'] = mapping_parts[2]
+                if len(mapping_parts) > 3:
+                    bdm_dict['delete_on_termination'] = mapping_parts[3]
+                bdm.append(bdm_dict)
+
+        if nics is not None:
+            # NOTE(tr3buchet): nics can be an empty list
+            all_net_data = []
+            for nic_info in nics:
+                net_data = {}
+                # if value is empty string, do not send value in body
+                if nic_info.get('net-id'):
+                    net_data['uuid'] = nic_info['net-id']
+                if nic_info.get('v4-fixed-ip'):
+                    net_data['fixed_ip'] = nic_info['v4-fixed-ip']
+                if nic_info.get('port-id'):
+                    net_data['port'] = nic_info['port-id']
+                all_net_data.append(net_data)
+            body['server']['networks'] = all_net_data
+
+        return self._create(resource_url, body, response_key,
+                            return_raw=return_raw, **kwargs)
+
+    def _resize_pvc(self, server, info, **kwargs):
+        """
+        This method is used to override the resize in the
+        class ServerManager
+        """
+        return self._action('resize', server, info=info, **kwargs)
+
+    def list_instance_storage_viable_hosts(self, server):
+        """
+        Get a list of hosts compatible with this server.
+        Used for getting candidate host hypervisors from powervc for
+        live migration. We need to do things a bit differently here
+        since there apparently is not a common schema for the content
+        returned. See below:
+
+        {
+            "8233E8B_100008P": {
+                "host": "8233E8B_100008P"
+            },
+            "8233E8B_100043P": {
+                "host": "8233E8B_100043P"
+            }
+        }
+
+        :param server: ID of the :class:`Server` to get.
+        :rtype: dict
+        """
+        url = "/storage-viable-hosts?instance_uuid=%s"\
+            % (client_base.getid(server))
+
+        _resp, body = self.api.client.get(url)
+        return body
+
+
+class StorageConnectivityGroup(client_base.Resource):
+    """
+    Entity class for StorageConnectivityGroup
+    """
+    def __repr__(self):
+        return ("<StorageConnectivityGroup: %s, display_name: %s>" %
+                (self.id, self.display_name))
+
+    def list_all_volumes(self):
+        """
+        Get a list of accessible volumes for this SCG.
+
+        :rtype: list of :class:`Volume`
+        """
+        return self.manager.list_all_volumes(self.id)
+
+    def list_all_volume_types(self):
+        """
+        Get a list of accessible volume types for this SCG.
+
+        :rtype: list of :class:`VolumeType`
+        """
+        return self.manager.list_all_volume_types(self.id)
+
+
+class StorageConnectivityGroupManager(client_base.Manager):
+    """
+    Manager class for StorageConnectivityGroup.
+    Currently the get and list functions for StorageConnectivityGroup
+    are implemented.
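+
+    a minimal usage sketch (the client wiring here is illustrative,
+    not part of this module):
+
+        scg = nova_client.storage_connectivity_groups.get(scg_uuid)
+        volumes = scg.list_all_volumes()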
+ """ + resource_class = StorageConnectivityGroup + + def get(self, scgUUID): + """ + Get a StorageConnectivityGroup. + + :param server: UUID `StorageConnectivityGroup` to get. + :rtype: :class:`Server` + """ + try: + return self._get("/storage-connectivity-groups/%s" % scgUUID, + "storage_connectivity_group") + except Exception as e: + # If PowerVC Express installations in IVM mode + # would receive BadRequest + LOG.error('A problem was encountered while getting the ' + ' Storage Connectivity Group %s: %s ' + % (scgUUID, str(e))) + return None + + def list_for_image(self, imageUUID): + """ + Get a list of StorageConnectivityGroups for the specified image. If + an error occurs getting the SCGs for an image, an exception is logged + and raised. + + :param: imageUUID The image UUID: + :rtype: list of :class:`StorageConnectivityGroup` + """ + try: + return self._list("/images/%s/storage-connectivity-groups" % + imageUUID, "storage_connectivity_groups") + except Exception as e: + LOG.error('A problem was encountered while getting a list of ' + 'Storage Connectivity Groups for image %s: %s ' + % (imageUUID, str(e))) + raise e + + def list_all_volumes(self, scgUUID): + """ + Get a list of accessible volume for this SCG. + + :rtype: list of :class:`Volume` + """ + try: + return self._list("/storage-connectivity-groups/%s/volumes" + % scgUUID, "volumes", volumes.Volume) + except Exception as e: + LOG.error('A problem was encountered while getting a list of ' + 'accessible volumes for scg %s: %s ' + % (scgUUID, str(e))) + raise e + + def list_all_volume_types(self, scgUUID): + """ + Get a list of accessible volume types for this SCG. + + :rtype: list of :class:`VolumeType` + """ + try: + return self._list("/storage-connectivity-groups/%s/volume-types" + % scgUUID, "volume-types", VolumeType) + except Exception as e: + LOG.error('A problem was encountered while getting a list of ' + 'accessible volume types for scg %s: %s ' + % (scgUUID, str(e))) + raise e + + def list(self, detailed=True, search_opts=None): + """ + Get a list of StorageConnectivityGroups. + Optional detailed returns details StorageConnectivityGroup info. + + :rtype: list of :class:`StorageConnectivityGroup` + """ + if search_opts is None: + search_opts = {} + + qparams = {} + + for opt, val in six.iteritems(search_opts): + if val: + qparams[opt] = val + + query_string = "?%s" % urllib.urlencode(qparams) if qparams else "" + + detail = "" + if detailed: + detail = "/detail" + try: + return self._list("/storage-connectivity-groups%s%s" % + (detail, query_string), + "storage_connectivity_groups") + except Exception as e: + # If PowerVC Express installations in IVM mode + # would receive BadRequest + LOG.error('A problem was encountered while getting a list' + ' of Storage Connectivity Groups: %s ' + % str(e)) + return [] + + +class SCGImage(client_base.Resource): + """ + Entity class for SCGImage + """ + def __repr__(self): + return ("" % + (self.id, self.name)) + + +class SCGImageManager(client_base.Manager): + """ + Manager class for SCGImage + Currently the list function for SCGImages in a StorageConnectivityGroup, + and the image identifiers of SCGImages in a StorageConnectivityGroup is + implemented. + """ + resource_class = SCGImage + + def list(self, scgUUID): + """ + Get a list of SCGImages for the specified StorageConnectivityGroup. If + an error occurs getting the SCGImages, and exception is logged and + raised. 
+
+        :param: scgUUID The StorageConnectivityGroup UUID:
+        :rtype: list of :class:`SCGImage`
+        """
+        try:
+            return self._list("/storage-connectivity-groups/%s/images" %
+                              scgUUID, "images")
+        except Exception as e:
+            LOG.error('A problem was encountered while getting a list of '
+                      'images for Storage Connectivity Group \'%s\': %s '
+                      % (scgUUID, str(e)))
+            raise e
+
+    def list_ids(self, scgUUID):
+        """
+        Get a list of SCGImage identifiers for the specified
+        StorageConnectivityGroup. If an error occurs getting the SCGImage ids,
+        an exception is logged and raised.
+
+        :param: scgUUID The StorageConnectivityGroup UUID:
+        :rtype: list of :class:`SCGImage` identifiers
+        """
+        ids = []
+        SCGImages = self.list(scgUUID)
+        if SCGImages:
+            for image in SCGImages:
+                ids.append(image.id)
+        return ids
diff --git a/common-powervc/powervc/common/client/factory.py b/common-powervc/powervc/common/client/factory.py
new file mode 100644
index 0000000..0821f7e
--- /dev/null
+++ b/common-powervc/powervc/common/client/factory.py
@@ -0,0 +1,64 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import powervc.common.client.service as service
+from powervc.common.client.config import CONF as CONF
+from powervc.common.client.config import OS_OPTS as OS_OPTS
+from powervc.common.client.config import PVC_OPTS as PVC_OPTS
+from powervc.common.constants import SERVICE_TYPES as SERVICE_TYPES
+
+"""sample usage
+
+New PowerVC v1 glance client:
+
+    pvc_glance_v1 = factory.POWERVC.get_client(
+        str(constants.SERVICE_TYPES.image), 'v1')
+
+New PowerVC glance client for latest known version:
+
+    pvc_latest_glance = factory.POWERVC.get_client(
+        str(constants.SERVICE_TYPES.image))
+
+List the known versions of the PowerVC cinder service:
+
+    pvc_cinder_versions = factory.POWERVC.get_versions(
+        str(constants.SERVICE_TYPES.volume))
+
+List the service types on the local openstack host:
+
+    known_lcl_service_types = factory.LOCAL.get_service_types()
+
+Get a reference to the keystone client for PowerVC:
+
+    pvc_keystone = factory.POWERVC.keystone
+
+"""
+
+# global access to local openstack and powervc services
+LOCAL = None
+POWERVC = None
+
+
+if LOCAL is None:
+    keystone = service.KeystoneService(str(SERVICE_TYPES.identity),
+                                       CONF['openstack']['keystone_version'],
+                                       OS_OPTS['auth_url'], OS_OPTS,
+                                       None).new_client()
+    LOCAL = service.ClientServiceCatalog(OS_OPTS, keystone)
+
+if POWERVC is None:
+    keystone_opts = PVC_OPTS.copy()
+    keystone_opts['stale_duration']\
+        = CONF['powervc']['expiration_stale_duration']
+    keystone = service.KeystoneService(str(SERVICE_TYPES.identity),
+                                       CONF['powervc']['keystone_version'],
+                                       PVC_OPTS['auth_url'], keystone_opts,
+                                       None).new_client()
+    POWERVC = service.ClientServiceCatalog(PVC_OPTS, keystone)
diff --git a/common-powervc/powervc/common/client/patch/__init__.py b/common-powervc/powervc/common/client/patch/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/common-powervc/powervc/common/client/patch/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/common-powervc/powervc/common/client/patch/cinder.py b/common-powervc/powervc/common/client/patch/cinder.py
new file mode 100644
index 0000000..b9affdd
--- /dev/null
+++ b/common-powervc/powervc/common/client/patch/cinder.py
@@ -0,0 +1,27 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+
+def patch_client(service_wrapper, client):
+    org = client.client._cs_request
+
+    """patch the _cs_request method of the cinder client and inject
+    a keystone managed token and management url. this allows us
+    to ensure a valid token is maintained and also to support keystone
+    v3 apis.
+    """
+    def _authd_cs_request(url, method, **kwargs):
+        # patch cinders HTTPClient to use our keystone for tokens
+        # and support for non standard URLs
+        client.client.auth_token = service_wrapper.keystone.auth_token
+        client.client.management_url = service_wrapper.management_url
+        return org(url, method, **kwargs)
+
+    client.client._cs_request = _authd_cs_request
diff --git a/common-powervc/powervc/common/client/patch/glance.py b/common-powervc/powervc/common/client/patch/glance.py
new file mode 100644
index 0000000..f177e96
--- /dev/null
+++ b/common-powervc/powervc/common/client/patch/glance.py
@@ -0,0 +1,117 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import warlock
+
+
+def patch_client(service_wrapper, client):
+
+    http_client = client
+    if hasattr(client, 'http_client'):
+        http_client = client.http_client
+
+    org_http_request = http_client._http_request
+
+    """
+    Patch the _http_request method of the glance client and inject
+    a keystone managed token and management url. this allows us
+    to ensure a valid token is maintained and also to support keystone
+    v3 apis.
+    """
+    def _patched_http_request(url, method, **kwargs):
+        # patch glance HTTPClient to use our keystone for tokens
+        # and support for non standard URLs
+        if http_client.endpoint_path and\
+                not http_client.endpoint_path.endswith('/'):
+            http_client.endpoint_path += '/'
+        http_client.auth_token = service_wrapper.keystone.auth_token
+        if url.startswith('/'):
+            url = url[1:]
+        return org_http_request(url, method, **kwargs)
+
+    http_client._http_request = _patched_http_request
+
+    def _patched_raw_request(method, url, **kwargs):
+        '''
+        Patch the http raw_request method to fix a problem: if there is no
+        image data, set the content-type in the headers to application/json.
+        Failure to do so can lead to errors during image updates and creates.
+        '''
+        kwargs.setdefault('headers', {})
+        if 'body' in kwargs:
+            if kwargs['body'] is None:
+                kwargs['headers'].setdefault('Content-Type',
+                                             'application/json')
+            else:
+                kwargs['headers'].setdefault('Content-Type',
+                                             'application/octet-stream')
+            if (hasattr(kwargs['body'], 'read')
+                    and method.lower() in ('post', 'put')):
+                # We use 'Transfer-Encoding: chunked' because
+                # body size may not always be known in advance.
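+                # For illustration: a file-like body such as
+                # open(image_path, 'rb') streams here without a
+                # Content-Length header once chunked is set
+                # (image_path is an assumed caller variable).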
+ kwargs['headers']['Transfer-Encoding'] = 'chunked' + else: + kwargs['headers'].setdefault('Content-Type', + 'application/json') + return _patched_http_request(url, method, **kwargs) + + http_client.raw_request = _patched_raw_request + + """ + Patch v2 glanceclient controller for update image + """ + ver = str(client).split('.')[1] + if ver != 'v2': + # if not v2 client, nothing else to do + return + + org_image_controller = client.images + + def _patched_image_update(image_id, remove_props=None, **kwargs): + """ + Update attributes of an image. + + This is patched to fix an issue. The Content-Type should reflect v2.1 + since that is the version of the patch schema that is used. + + :param image_id: ID of the image to modify. + :param remove_props: List of property names to remove + :param **kwargs: Image attribute names and their new values. + """ + image = org_image_controller.get(image_id) + for (key, value) in kwargs.items(): + try: + setattr(image, key, value) + except warlock.InvalidOperation as e: + raise TypeError(unicode(e)) + + if remove_props is not None: + cur_props = image.keys() + new_props = kwargs.keys() + #NOTE(esheffield): Only remove props that currently exist on the + # image and are NOT in the properties being updated / added + props_to_remove = set(cur_props).intersection( + set(remove_props).difference(new_props)) + + for key in props_to_remove: + delattr(image, key) + + url = '/v2/images/%s' % image_id + hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'} + http_client.raw_request('PATCH', url, + headers=hdrs, + body=image.patch) + + #NOTE(bcwaldon): calling image.patch doesn't clear the changes, so + # we need to fetch the image again to get a clean history. This is + # an obvious optimization for warlock + return org_image_controller.get(image_id) + + org_image_controller.update = _patched_image_update diff --git a/common-powervc/powervc/common/client/patch/neutron.py b/common-powervc/powervc/common/client/patch/neutron.py new file mode 100644 index 0000000..aa72b52 --- /dev/null +++ b/common-powervc/powervc/common/client/patch/neutron.py @@ -0,0 +1,25 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + + +def patch_client(service_wrapper, client): + org_auth_and_fetch = client.httpclient.authenticate_and_fetch_endpoint_url + + """patch the authenticate_and_fetch_endpoint_url method to inject + our own managed keystone token and endpoint + """ + def _patched_auth_and_fetch(): + # inject our keystone managed token + client.httpclient.auth_token = service_wrapper.keystone.auth_token + client.httpclient.endpoint_url = service_wrapper.management_url + return org_auth_and_fetch() + + client.httpclient.authenticate_and_fetch_endpoint_url = \ + _patched_auth_and_fetch diff --git a/common-powervc/powervc/common/client/patch/nova.py b/common-powervc/powervc/common/client/patch/nova.py new file mode 100644 index 0000000..b9e06fe --- /dev/null +++ b/common-powervc/powervc/common/client/patch/nova.py @@ -0,0 +1,24 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + + +def patch_client(service_wrapper, client): + + """ wrapper the _cs_request call in an authenticated version + of it so we can reuse our keystone connection + """ + org_cs_request = client.client._cs_request + + def _authd_cs_request(url, method, **kwargs): + client.client.auth_token = service_wrapper.keystone.auth_token + client.client.management_url = service_wrapper.management_url + return org_cs_request(url, method, **kwargs) + + client.client._cs_request = _authd_cs_request diff --git a/common-powervc/powervc/common/client/service.py b/common-powervc/powervc/common/client/service.py new file mode 100644 index 0000000..2392023 --- /dev/null +++ b/common-powervc/powervc/common/client/service.py @@ -0,0 +1,422 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import urlparse +import re +import powervc.common.client.delegate as delegate +from glanceclient.openstack.common import importutils +from powervc.common.constants import SERVICE_TYPES as SERVICE_TYPES +from powervc.common import netutils + + +class AbstractService(object): + """a stub over a service endpoint which permits consumers + to create openstack python clients directly from this object. + """ + def __init__(self, svc_type, version, url, base_args, keystone): + self.svc_type = svc_type + self.version = version + self.url = url + self.base_args = base_args.copy() + self.keystone = keystone + self.base_name = SERVICE_TYPES[svc_type].to_codename() + self.client_version = version = version.replace('.', '_') + self.clazz = self._lookup_client() + self.extension = self._lookup_extension() + self.management_url = url + + def _extend(self, client, client_extension=None, *extension_args): + if self.extension is None and client_extension is None: + return client + delegates = [] + if client_extension is not None: + delegates.append(client_extension(client, *extension_args)) + if self.extension is not None: + delegates.append(self.extension(client, *extension_args)) + delegates.append(client) + # extend the base client using a mixin type delegate + return delegate.new_composite_deletgate(delegates) + + def _patch(self, client): + try: + # if applicable patch the client + module = (importutils. + import_module("powervc.common.client.patch.%s" % + (self.base_name))) + module.patch_client(self, client) + except ImportError: + pass + return client + + def _lookup_client(self): + return importutils.import_class("%sclient.%s.client.Client" % + (self.base_name, + self.get_client_version())) + + def _lookup_extension(self): + try: + return (importutils. 
+ import_class("powervc.common.client.extensions.%s.Client" % + (self.base_name))) + except ImportError: + return None + return None + + def _chomp_version(self, version): + match = re.search('(v[0-9])[_]*[0-9]*', version, re.IGNORECASE) + if match: + version = match.group(1) + return version + + def _init_std_client(self): + return self._patch(self.clazz(self.base_args['username'], + self.base_args['password'], + self.base_args['tenant_name'], + self.base_args['auth_url'], + self.base_args['insecure'], + cacert=self.base_args['cacert'])) + + def new_client(self, client_extension=None, *extension_args): + """build and return a new python client for this service + + :param client_extension: the optional subclass of + powervc.common.client.extensions.base to extend the python client with. + :param extension_args: optional arguments to pass to the client + extension when it is created. + """ + return self._extend(self._init_std_client(), client_extension, + *extension_args) + + def get_client_version(self): + """returns the version of the client for this service + """ + return self.client_version + + +class KeystoneService(AbstractService): + """wrappers keystone service endpoint + """ + def __init__(self, *kargs): + super(KeystoneService, self).__init__(*kargs) + + def new_client(self, client_extension=None, *extension_args): + return self._extend(self.clazz(**self.base_args), client_extension, + *extension_args) + + def get_client_version(self): + if self.client_version == 'v3_0': + return 'v3' + return self.client_version + + +class CinderService(AbstractService): + """wrappers cinder service endpoint + """ + def __init__(self, *kargs): + super(CinderService, self).__init__(*kargs) + + def get_client_version(self): + return self._chomp_version(self.client_version) + + +class NovaService(AbstractService): + """wrappers nova service endpoint + """ + def __init__(self, *kargs): + super(NovaService, self).__init__(*kargs) + + def get_client_version(self): + if re.search('v2', self.client_version) is not None: + return 'v1_1' + return self.client_version + + +class GlanceService(AbstractService): + """wrappers glance service endpoint + """ + def __init__(self, *kargs): + super(GlanceService, self).__init__(*kargs) + + def new_client(self, client_extension=None, *extension_args): + url = self.url + if not url.endswith('/'): + url += '/' + return (self. + _extend(self. 
+ _patch(self.clazz(url, token=self.keystone.auth_token, + insecure=self.base_args['insecure'], + cacert=self.base_args['cacert'])), + client_extension, *extension_args)) + + def get_client_version(self): + return self._chomp_version(self.client_version) + + +class NeutronService(AbstractService): + """wrappers neutron service endpoint + """ + def __init__(self, *kargs): + super(NeutronService, self).__init__(*kargs) + + def new_client(self, client_extension=None, *extension_args): + return self._extend(self._patch(self.clazz( + username=self.base_args['username'], + tenant_name=self.base_args['tenant_name'], + password=self.base_args['password'], + auth_url=self.base_args['auth_url'], + endpoint_url=self.management_url, + insecure=self.base_args['insecure'], + token=self.keystone.auth_token, + ca_cert=self.base_args['cacert'])), + client_extension, *extension_args) + + def get_client_version(self): + if self.client_version.startswith('v1'): + return 'v2_0' + return self.client_version + + +class ClientServiceCatalog(object): + """provides a simple catalog of openstack services + for a single host and permits consumers to query + those services based on service types, versions + as well as create new python clients from the service + directly. + """ + def __init__(self, base_client_opts, keystone): + self.base_opts = base_client_opts + self.keystone = keystone + + # validate authN + self.token = self.keystone.auth_token + + self.host = urlparse.urlsplit(self.base_opts['auth_url']).hostname + self.endpoints = {} + self.blacklist = [str(SERVICE_TYPES.s3), str(SERVICE_TYPES.ec2), + str(SERVICE_TYPES.ttv)] + + self._discover_services() + + def new_client(self, svc_type, client_extension=None, *extension_args): + """creates a new python client for the given service type + using the most recent version of the service in the catalog. + + :param svc_type: the service type to create a client for + :param client_extension: the optional extension to decorate + the base client with + :param extension_args: optional arguments to pass to the client + extension when it is created. + """ + service_versions = self.get_services(svc_type) + if service_versions: + return service_versions[0].new_client(client_extension, + *extension_args) + return None + + def get_services(self, svc_type, version_filter=None): + """queries this catalogs services based on service type + and version filter. + + :param svc_type: the type of service to query. + :param version_filter: a filter string to indicate the + service version the caller wants. if None the highest + version of the service is returned. + """ + if svc_type not in self.endpoints: + return None + versions = self.endpoints[svc_type] + if version_filter is None: + return versions[max(versions, key=str)] + for version in versions.keys(): + if version.find(version_filter) > -1: + return versions[version] + return None + + def get_versions(self, svc_type): + """return a list of the versions for the given service type + + :param svc_type: the type of service to query + """ + if svc_type not in self.endpoints: + return None + return self.endpoints[svc_type].keys() + + def get_version(self, svc_type, version_filter=None): + """query a service to determine if a given version exists. + + :param svc_type: the service type to query. + :param version_filter: a string to search for in the version. + if None the most recent version of the service type is returned. 
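+
+        an illustrative call (the service type constant comes from
+        powervc.common.constants):
+
+            catalog.get_version(str(SERVICE_TYPES.volume), 'v1')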
+ """ + if svc_type not in self.endpoints: + return None + for version in self.endpoints[svc_type].keys(): + if not version_filter or version.find(version_filter) > -1: + return version + return None + + def get_service_types(self): + """returns a list of all service types in this catalog. + """ + return self.endpoints.keys() + + def get_token(self): + """returns a keystone token for the host this catalog + belongs to. + """ + return self.keystone.auth_token + + def get_client(self, svc_type, version_filter=None, client_extension=None, + *extension_args): + """creates a new python cient for the given service type + and version. + + :param svc_type: the service type to create a client for. + :param version_filter: a string to search for in the version + the caller wants. if None the most recent version is used. + :param client_extension: the optional class to extend + the client with + """ + services = self.get_services(svc_type, version_filter) + if not services: + return None + return services[0].new_client(client_extension, *extension_args) + + def _parse_link_href(self, links): + hrefs = [] + for link_meta in links: + if link_meta['rel'] == 'self': + href = self._filter_host(link_meta['href']) + hrefs.append(href) + return hrefs + + def _filter_host(self, loc): + # endpoint urls from base api query will often + # return localhost in the url; resolve those + return loc.replace('localhost', + self.host).replace('127.0.0.1', + self.host).replace('0.0.0.0', + self.host) + + def _parse_version_meta(self, ver, ver_map={}): + ver_map[ver['id']] = self._parse_link_href(ver['links']) + return ver_map + + def _parse_version(self, response_json, url): + if response_json is not None: + if 'version' in response_json: + return {response_json['version']['id']: + [self._filter_host(url)]} + elif 'versions' in response_json: + services = {} + versions = response_json['versions'] + if 'values' in versions: + versions = versions['values'] + for version_meta in versions: + if 'status' in version_meta and \ + version_meta['status'] == 'CURRENT': + ver = version_meta['id'] + if not ver in services: + services[ver] = [] + services[ver].append(self._filter_host(url)) + return services + return None + + def _parse_version_from_url(self, url): + for seg in reversed(url.split('/')): + match = re.search('^(v[0-9][.]?[0-9]?$)', seg, re.IGNORECASE) + if match: + return match.group(0) + return None + + def _build_wrappered_services(self, version_map, svc_type): + services = {} + for version in version_map.keys(): + wrappers = [] + for s_url in version_map[version]: + if svc_type == (str(SERVICE_TYPES.compute) or + svc_type == str(SERVICE_TYPES.computev3)): + wrappers.append(NovaService(svc_type, version, + s_url, self.base_opts, + self.keystone)) + elif svc_type == str(SERVICE_TYPES.image): + wrappers.append(GlanceService(svc_type, version, + s_url, self.base_opts, + self.keystone)) + elif svc_type == str(SERVICE_TYPES.identity): + # keystone is a special case as the auth url given + # in the base opts may not match the auth url from + # the catalog + keystone_opts = self.base_opts.copy() + keystone_opts['auth_url'] = s_url + wrappers.append(KeystoneService(svc_type, version, + s_url, keystone_opts, + self.keystone)) + elif svc_type == str(SERVICE_TYPES.volume): + wrappers.append(CinderService(svc_type, version, + s_url, self.base_opts, + self.keystone)) + elif svc_type == str(SERVICE_TYPES.network): + wrappers.append(NeutronService(svc_type, version, + s_url, self.base_opts, + self.keystone)) + services[version] = 
wrappers + return services + + def _query_endpoint(self, url): + # query the endpoint to get version info + client = netutils.JSONRESTClient(self.get_token()) + urldata = urlparse.urlsplit(url) + host = urldata.scheme + '://' + urldata.netloc + segments = filter(lambda x: x != '', urldata.path.split('/')) + if not segments: + segments = [''] + # chomp uri until we find base of endpoint + for segment in segments[:] or ['']: + endpoint_url = "%s/%s/" % (host, '/'.join(segments)) + segments.pop() + response = None + try: + response = client.get(endpoint_url) + except: + continue + versions = self._parse_version(response, url) + if versions is not None: + return versions + return {'v1': [url]} + + def _build_endpoint_services(self, url, svc_type): + # try to parse from the url + ver = self._parse_version_from_url(url) + if ver is not None: + return self._build_wrappered_services({ver: [url]}, svc_type) + versions = self._query_endpoint(url) + return self._build_wrappered_services(versions, svc_type) + + def _normalize_catalog_entry(self, entry): + for key in entry.keys(): + if re.search('url', key, re.IGNORECASE): + entry[key] = self._filter_host(entry[key]) + if self.keystone.version == 'v2.0': + # keystone v2.0 entries differ from v3; normalize + entry['url'] = entry['publicURL'] + return entry + + def _discover_services(self): + public_eps = (self.keystone. + service_catalog.get_endpoints(endpoint_type='publicURL')) + self.endpoints = {} + for svc_type in public_eps.keys(): + if svc_type in self.blacklist: + continue + for entry in public_eps[svc_type]: + entry = self._normalize_catalog_entry(entry) + self.endpoints[svc_type] = \ + self._build_endpoint_services(entry['url'], svc_type) diff --git a/common-powervc/powervc/common/config.py b/common-powervc/powervc/common/config.py new file mode 100644 index 0000000..6da10d5 --- /dev/null +++ b/common-powervc/powervc/common/config.py @@ -0,0 +1,107 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +"""Config file utility + +""" +import constants + +from oslo.config import cfg +CONF = cfg.CONF + + +def parse_power_config(argv, base_project, base_prog=None): + """ + Loads configuration information from powervc.conf as well as a project + specific file. Expectation is that all powervc config options will be in + the common powervc.conf file and the base_project will represent open stack + component configuration like nova.conf or cinder.conf. A base_prog file + name can be optionally specified as well. That is a specific file name to + use from the specified open stack component. This function should only be + called once, in the startup path of a component (probably as soon as + possible since many modules will have a dependency on the config options). + """ + # Ensure that we only try to load the config once. Loading it a second + # time will result in errors. 
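+    # An illustrative call (component names are the caller's choice):
+    #     parse_power_config(sys.argv, 'nova')
+    # loads nova.conf plus the common powervc.conf exactly once.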
+ if hasattr(parse_power_config, 'power_config_loaded'): + return + + if base_project and base_project.startswith('powervc-'): + default_files = cfg.find_config_files(project='powervc', + prog=base_project) + else: + default_files = cfg.find_config_files(project=base_project, + prog=(base_project + if base_prog is None + else base_prog)) + default_files.extend(cfg.find_config_files(project='powervc', + prog='powervc')) + # reduce duplicates + default_files = list(set(default_files)) + CONF(argv[1:], default_config_files=default_files) + parse_power_config.power_config_loaded = True + +FILE_OPTIONS = { + '': [], + 'openstack': [ + # Keystone info + cfg.StrOpt('auth_url', default='http://localhost:5000/v2.0/'), + cfg.StrOpt('admin_user'), + cfg.StrOpt('admin_password', secret=True), + cfg.StrOpt('admin_tenant_name'), + cfg.StrOpt('connection_cacert', default=None), + cfg.BoolOpt('http_insecure', default=False), + cfg.StrOpt('keystone_version', default="v3"), + cfg.StrOpt('region_name', default=None), + # Hosting OS Qpid connection info + cfg.StrOpt('qpid_hostname'), + cfg.IntOpt('qpid_port', default=5672), + cfg.StrOpt('qpid_username', default='anonymous'), + cfg.StrOpt('qpid_password', secret=True, default=''), + cfg.StrOpt('qpid_protocol', default='tcp')], + 'powervc': [ + # Keystone info + cfg.StrOpt('auth_url', default='http://localhost:5000/v2.0/'), + cfg.StrOpt('admin_user'), + cfg.StrOpt('admin_password', secret=True), + cfg.StrOpt('admin_tenant_name'), + cfg.StrOpt('connection_cacert', default=None), + cfg.StrOpt('powervc_default_image_name', + default='PowerVC Default Image'), + cfg.BoolOpt('http_insecure', default=False), + cfg.StrOpt('keystone_version', default="v3"), + cfg.IntOpt('expiration_stale_duration', default=3600), + # Hosting OS Qpid connection info + cfg.StrOpt('qpid_hostname'), + cfg.IntOpt('qpid_port', default=5672), + cfg.StrOpt('qpid_username', default='anonymous'), + cfg.StrOpt('qpid_password', secret=True, default=''), + cfg.StrOpt('qpid_protocol', default='tcp'), + # manager + cfg.StrOpt('powervc_manager', + default='powervc.compute.manager.PowerVCCloudManager'), + # driver + cfg.StrOpt('powervc_driver', + default='powervc.virt.powervc.driver.PowerVCDriver'), + cfg.MultiStrOpt('storage_connectivity_group'), + # Hosting OS staging project name. This project must exist in the + # hosting OS + cfg.StrOpt('staging_project_name', + default=constants.DEFAULT_STAGING_PROJECT_NAME), + cfg.StrOpt('staging_user', + default=constants.DEFAULT_STAGING_USER_NAME)] +} + +for section in FILE_OPTIONS: + for option in FILE_OPTIONS[section]: + if section: + CONF.register_opt(option, group=section) + else: + CONF.register_opt(option) diff --git a/common-powervc/powervc/common/constants.py b/common-powervc/powervc/common/constants.py new file mode 100644 index 0000000..70dee4a --- /dev/null +++ b/common-powervc/powervc/common/constants.py @@ -0,0 +1,82 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+All Common PowerVC Driver Constants
+"""
+
+# The user domain default value
+DEFAULT_USER_DOMAIN_NAME = 'Default'
+
+# The project domain default value
+DEFAULT_PROJECT_DOMAIN_NAME = 'Default'
+
+# The default staging project name
+DEFAULT_STAGING_PROJECT_NAME = 'Public'
+
+# The default staging user name
+DEFAULT_STAGING_USER_NAME = 'admin'
+
+# The property key used to store a PowerVC resource UUID in
+# a hosting OS resource.
+POWERVC_UUID_KEY = 'powervc_uuid'
+
+# The property key used to mark a powervc image with the
+# corresponding powervc driver image uuid.
+LOCAL_UUID_KEY = 'powervcdriver_uuid'
+
+# OpenStack instance identifier
+LOCAL_OS = 'local'
+POWERVC_OS = 'powervc'
+
+# AMQP topic for the communication between nova and neutron
+PVC_TOPIC = 'powervcrpc'
+
+# Storage Type that SCG can access
+SCG_SUPPORTED_STORAGE_TYPE = 'fc'
+
+
+class ServiceType(object):
+    """Wraps a service type to its project codename.
+    """
+    def __init__(self, svc_type, codename):
+        self.svc_type = svc_type
+        self.codename = codename
+
+    def __str__(self):
+        return self.svc_type
+
+    def to_codename(self):
+        """Returns the codename of this service.
+        """
+        return self.codename
+
+
+class ServiceTypes(object):
+    """The service types known to this infrastructure which can be
+    referenced using attr based notation.
+    """
+    def __init__(self):
+        self.volume = ServiceType('volume', 'cinder')
+        self.compute = ServiceType('compute', 'nova')
+        self.network = ServiceType('network', 'neutron')
+        self.identity = ServiceType('identity', 'keystone')
+        self.computev3 = ServiceType('computev3', 'nova')
+        self.image = ServiceType('image', 'glance')
+        self.s3 = ServiceType('s3', 'nova')
+        self.ec2 = ServiceType('ec2', 'nova')
+        self.ttv = ServiceType('ttv', 'ttv')
+
+    def __getitem__(self, name):
+        if name in self.__dict__:
+            return self.__dict__[name]
+        return None
+
+SERVICE_TYPES = ServiceTypes()
diff --git a/common-powervc/powervc/common/exception.py b/common-powervc/powervc/common/exception.py
new file mode 100644
index 0000000..acdd791
--- /dev/null
+++ b/common-powervc/powervc/common/exception.py
@@ -0,0 +1,73 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+PowerVC Driver Common Exceptions
+"""
+
+from powervc.common.gettextutils import _
+
+_FATAL_EXCEPTION_FORMAT_ERRORS = False
+
+
+class CommonException(Exception):
+    """
+    PowerVC Driver Common Exception
+
+    To correctly use this class, inherit from it and define a 'message'
+    property. That message will get printed with the keyword arguments
+    provided to the constructor.
+    """
+    message = _('An unknown exception occurred')
+
+    def __init__(self, message=None, *args, **kwargs):
+        if not message:
+            message = self.message
+        try:
+            message = message % kwargs
+        except Exception:
+            if _FATAL_EXCEPTION_FORMAT_ERRORS:
+                raise
+            else:
+                # at least get the core message out if something happened
+                pass
+
+        super(CommonException, self).__init__(message)
+
+
+class StorageConnectivityGroupNotFound(CommonException):
+    """
+    Exception thrown when the PowerVC Storage Connectivity Group specified
+    cannot be found.
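+
+    a minimal usage sketch (the group name is illustrative):
+
+        raise StorageConnectivityGroupNotFound(scg='my_scg')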
+ + :param scg: The PowerVC Storage Connectivity Group name or id + """ + message = _('The PowerVC Storage Connectivity Group \'%(scg)s\' was not ' + 'found.') + + +class StagingProjectNotFound(CommonException): + """ + Exception thrown when the staging project specified in the conf cannot be + found. + + :param name: The name of the staging project which was not found. + """ + message = _('The staging project \'%(name)s\' was not found.') + + +class StagingUserNotFound(CommonException): + """ + Exception thrown when the staging user specified in the conf cannot be + found. + + :param name: The name of the staging user which was not found. + """ + message = _('The staging user \'%(name)s\' was not found.') diff --git a/common-powervc/powervc/common/gettextutils.py b/common-powervc/powervc/common/gettextutils.py new file mode 100644 index 0000000..c58ab41 --- /dev/null +++ b/common-powervc/powervc/common/gettextutils.py @@ -0,0 +1,17 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import gettext + +t = gettext.translation('powervc-driver-common', fallback=True) + + +def _(msg): + return t.ugettext(msg) diff --git a/common-powervc/powervc/common/messaging.py b/common-powervc/powervc/common/messaging.py new file mode 100644 index 0000000..beb6935 --- /dev/null +++ b/common-powervc/powervc/common/messaging.py @@ -0,0 +1,499 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" +This module contains Qpid connection utilities that can be used to connect +to a Qpid message broker and listen for notifications. + +Examples: + + # Import common messaging module + from powervc.common import messaging + + # Connect to host OS Qpid broker and handle instance update notifications. + conn = messaging.LocalConnection( + reconnect_handler=self.handle_qpid_reconnect) + listener = conn.create_listener('nova', 'notifications.info') + listener.register_handler('compute.instance.update', + self._handle_instance_update) + conn.start() + + # Connect to PowerVC Qpid broker and handle two event types with a single + # handler function. + conn = messaging.PowerVCConnection() + listener = conn.create_listener('nova', 'notifications.info') + listener.register_handler(['compute.instance.create.start', + 'compute.instance.create.end'], + self._handle_instance_create) + conn.start() + + # Connect to PowerVC Qpid broker and handle any instance notifications. + conn = messaging.PowerVCConnection() + listener = conn.create_listener('nova', 'notifications.info') + listener.register_handler('compute.instance.*', + self._handle_instance_notifications) + conn.start() +""" + +import sys +import threading +import traceback +import fnmatch +import json + +from time import sleep + +from qpid.messaging import Connection +from qpid.messaging.exceptions import ConnectionError + +from oslo.config import cfg + +from powervc.common.gettextutils import _ + +CONF = cfg.CONF + + +def log(log, level, msg): + """ + Log a message. + + :param: log The log to write to. 
+ :param: level The logging level for the message + :param: msg The message to log + """ + if not log: + return + if level == 'critical': + log.critical(msg) + elif level == 'error': + log.error(msg) + elif level == 'warn': + log.warn(msg) + elif level == 'info': + log.info(msg) + elif level == 'debug': + log.debug(msg) + + +class QpidConnection(object): + """ + This class represents a connection to a Qpid broker. A QpidConnection must + be created in order to send or receive AMQP messages using a Qpid broker. + """ + + def __init__(self, url, username, password, transport='tcp', + reconnection_interval=60, reconnect_handler=None, + context=None, log=None): + """ + Create a new connection to a Qpid message broker in order to send or + receive AMQP messages. + + :param: url URL for the Qpid connection, e.g. 9.10.49.164:5672 + :param: username Qpid username + :param: password Qpid password + :param: transport Transport mechanism, one of tcp, tcp+tls, + or ssl (alias for tcp+tls). + :param: reconnection_interval Interval in seconds between reconnect + attempts. + :param: reconnect_handler The function to call upon reconnecting to + the Qpid broker after connection was lost and + then reestablished. This function will be called after the + connections is reestablished but before the listeners are + started up again. It is not passed any parameters. + :param: context The security context + :param: log The logging module used for logging messages. If not + provided then no logging will be done. + """ + self.url = url + self.username = username + self.password = password + self.context = context + self.log = log.getLogger(__name__) if log else None + self.transport = transport + self.reconnection_interval = reconnection_interval + self.reconnect_handler = reconnect_handler + self._listeners = [] + self._is_connected = False + + def create_listener(self, exchange, topic): + """ + Create a new listener on the given exchange for the given topic. + + :param: exchange The name of the Qpid exchange, e.g. 'nova' + :param: topic The topic to listen for, e.g. 'notifications.info' + :returns: A new QpidListener that will listen for messages on the + given exchange and topic. + """ + listener = QpidListener(self, exchange, topic) + self._listeners.append(listener) + return listener + + def start(self, is_reconnect=False): + """ + Initiate the Qpid connection and start up any listeners. + + :param: is_reconnect True if this method is called as part of a + reconnect attempt, False otherwise + :raise: ConnectionError if a connection cannot be established + """ + # If the Qpid broker URL is not specified (or just the hostname is not + # specified) then we can't make a connection. + if not self.url or self.url.startswith(':'): + log(self.log, 'warn', _('Qpid broker not specified, cannot start ' + 'connection.')) + return + + if not self._is_connected: + self.conn = Connection(self.url, username=self.username, + password=self.password, + transport=self.transport) + try: + self.conn.open() + except ConnectionError as e: + log(self.log, 'critical', _('Cannot connect to Qpid message ' + 'broker: %s') % (e.message)) + # close this connection when encounter connection error + # otherwise, it will leave an ESTABLISHED connection + # to qpid server forever. 
+ if self.conn is not None: + self.conn.close() + raise e + + self._is_connected = True + + if is_reconnect and self.reconnect_handler: + self.reconnect_handler() + + for listener in self._listeners: + listener._start(self.conn) + + log(self.log, 'info', _('Connected to Qpid message broker: ' + '%s@%s') % (self.username, self.url)) + + def _reconnect(self): + """ + Attempt to reconnect to the Qpid message broker in intervals until the + connection comes back. + """ + self.conn = None + + class ReconnectionThread(threading.Thread): + def __init__(self, qpid_connection): + super(ReconnectionThread, self).__init__( + name='ReconnectionThread') + self.qpid_connection = qpid_connection + + def run(self): + while not self.qpid_connection._is_connected: + try: + self.qpid_connection.start(is_reconnect=True) + except ConnectionError: + sleep(self.qpid_connection.reconnection_interval) + pass + + reconnection_thread = ReconnectionThread(self) + reconnection_thread.start() + + def set_reconnect_handler(self, reconnect_handler): + """ + Set the function to call upon reconnecting to the Qpid broker after + connection is lost and then reestablished. + + :param: reconnect_handler The function to call upon reconnecting. + """ + self.reconnect_handler = reconnect_handler + + +class PowerVCConnection(QpidConnection): + """ + This class represents a connection to the PowerVC Qpid broker as defined + in the configuration property files. + """ + + def __init__(self, reconnect_handler=None, context=None, log=None): + """ + Create a new connection to the PowerVC Qpid message broker in order + to send or receive AMQP messages. + + :param: reconnect_handler The function to call upon reconnecting to + the Qpid broker after connection was lost and + then reestablished. This function will be called after the + connection is reestablished but before the listeners are + started up again. It is not passed any parameters. + :param: context The security context + :param: log The logging module used for logging messages. If not + provided then no logging will be done. + """ + if CONF.powervc.qpid_protocol == 'ssl': + transport = 'ssl' + else: + transport = 'tcp' + super(PowerVCConnection, + self).__init__('%s:%d' % (CONF.powervc.qpid_hostname, + CONF.powervc.qpid_port), + CONF.powervc.qpid_username, + CONF.powervc.qpid_password, + reconnect_handler=reconnect_handler, + context=context, log=log, + transport=transport) + + +class LocalConnection(QpidConnection): + """ + This class represents a connection to the local OS Qpid broker as defined + in the configuration property files. + """ + + def __init__(self, reconnect_handler=None, context=None, log=None): + """ + Create a new connection to the local OS Qpid message broker in order + to send or receive AMQP messages. + + :param: reconnect_handler The function to call upon reconnecting to + the Qpid broker after connection was lost and + then reestablished. This function will be called after the + connection is reestablished but before the listeners are + started up again. It is not passed any parameters. + :param: context The security context + :param: log The logging module used for logging messages. If not + provided then no logging will be done. 
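+
+        A minimal usage sketch (exchange, topic, and handler names are
+        illustrative):
+
+            conn = LocalConnection(log=logging)
+            listener = conn.create_listener('cinder', 'notifications.info')
+            listener.register_handler('volume.create.end',
+                                      self._handle_volume_create)
+            conn.start()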
+ """ + if CONF.openstack.qpid_protocol == 'ssl': + transport = 'ssl' + else: + transport = 'tcp' + super(LocalConnection, + self).__init__('%s:%d' % (CONF.openstack.qpid_hostname, + CONF.openstack.qpid_port), + CONF.openstack.qpid_username, + CONF.openstack.qpid_password, + reconnect_handler=reconnect_handler, + context=context, log=log, + transport=transport) + + +class QpidListener(object): + ''' + This class is used to listen for AMQP message notifications. It should + probably not be instantiated directly. First create a QpidConnection and + then add a QpidListener to the connection using the + QpidConnection.create_listener() method. + ''' + + def __init__(self, qpid_connection, exchange, topic): + """ + Create a new QpidListener object to listen for AMQP messages. + + :param: qpid_connection The QpidConnection object used for connecting + to the Qpid message broker. + :param: exchange The name of the Qpid exchange, e.g. 'nova' + :param: topic The topic to listen for, e.g. 'notifications.info' + """ + self.qpid_connection = qpid_connection + self.exchange = exchange + self.topic = topic + self._handler_map = {} + self._count_since_acknowledge = 0 + + def register_handler(self, event_type, handler): + """ + Register a handler function for one or more message notification event + types. The handler function will be called when a message is + received that matches the event type. The handler function will be + passed two arguments: the security context and a dictionary containing + the message attributes. The message attributes include: event_type, + timestamp, message_id, priority, publisher_id, payload. + + The following wildcards are allowed when registering an event type + handler (see the documentation for fnmatch): + + * matches everything + ? matches any single character + [seq] matches any character in seq + [!seq] matches any character not in seq + + For example, registering the following event type handler will cause + the handler function to be called for any event type starting with + 'compute.instance.'. + + listener = conn.register_handler('compute.instance.*', + self.handle_instance_messages) + + If a single notification event type matches multiple registered + handlers, each matching handler will be called. The order in which the + handlers are called is not guaranteed. If the execution order is + important for the multiple handlers of a single event type then ensure + that only a single handler will be called for the event type and + perform the multiple operations in the single handler. + + :param: event_type The event type or list of event types to associate + with the handler + :param: handler The handler function to handle a message with the given + event type + """ + if not isinstance(event_type, list): + event_type = [event_type] + for et in event_type: + self._handler_map[et] = handler + + def unregister_handler(self, event_type): + """ + Stop handling the given message notification event type. + + :param: event_type The event type to unregister + """ + try: + self._handler_map.pop(event_type) + except KeyError: + log(self.qpid_connection.log, 'warn', + _('There is no handler for this event type: %s') % event_type) + + def _start(self, connection): + """ + Start listening for messages. This method should probably not be called + directly. After creating a QpidConnection and adding listeners using + the create_listener() method, use the QpidConnection.start() method to + start listening for messages. The QpidConnection will start up all of + the listeners. 
+ + :param: connection The qpid.messaging.endpoints.Connection object used + to establish the connection to the message broker. + """ + self.session = connection.session('%s/%s' % + (self.exchange, self.topic)) + addr_opts = { + "create": "always", + "node": { + "type": "topic", + "x-declare": { + "durable": True, + "auto-delete": True + }, + }, + } + + connection_info = "%s / %s ; %s" % (self.exchange, self.topic, + json.dumps(addr_opts)) + self.receiver = self.session.receiver(connection_info) + log(self.qpid_connection.log, 'debug', + _('QpidListener session info: %s') % (json.dumps(connection_info))) + + """ + A listener blocks while it waits for the next message on the queue, + so we initiate a thread to run the listening function. + """ + t = threading.Thread(target=self._listen) + t.start() + + def _has_more_messages(self): + ''' + Determine if there are any new messages in the queue. + + :returns: True if there are messages on the queue, False otherwise + ''' + return bool(self.receiver) + + def _next_message(self): + ''' + Wait for the next message on the queue. + + :returns: The raw message object from the message queue + ''' + return self.receiver.fetch() + + def _acknowledge(self): + ''' + Acknowledge a message has been received. + ''' + self.session.acknowledge(sync=False) + + def _get_handlers(self, event_type): + """ + Get a list of all the registered handlers that match the given event + type. + """ + handlers = [] + for event_type_pattern in self._handler_map: + if fnmatch.fnmatch(event_type, event_type_pattern): + handlers.append(self._handler_map.get(event_type_pattern)) + return handlers + + def _dispatch(self, message): + ''' + Dispatch a message to its specific handler. + + :param: message A dictionary containing the OpenStack message + notification attributes (event_type, timestamp, + message_id, priority, publisher_id, payload) + ''' + event_type = message.get('event_type') + handlers = self._get_handlers(event_type) + log_ = self.qpid_connection.log + self._count_since_acknowledge += 1 + + try: + if handlers: + log(log_, 'debug', _('Dispatching message to handlers')) + log(log_, 'info', _('Qpid listener received ' + 'message of event type: %s' + % message['event_type'])) + for handler in handlers: + handler(self.qpid_connection.context, message) + except Exception, e: + log(log_, 'error', _('Error handling message: %s: %s. Message: ' + '%s.') % (Exception, e, message)) + + # Print stack trace + exc_type, exc_value, exc_traceback = sys.exc_info() + log(log_, 'error', _('error type %s') % (exc_type)) + log(log_, 'error', _('error object %s') % (exc_value)) + log(log_, 'error', ''.join(traceback.format_tb(exc_traceback))) + finally: + if self._count_since_acknowledge > 100: + self._count_since_acknowledge = 0 + self._acknowledge() + + def _resolve_message(self, raw_message): + ''' + Resolves the given raw message obtained from the Qpid message queue + into a message that can be dispatched to a handler function. 
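+
+        An illustrative resolved message (all values assumed):
+
+            {'event_type': 'compute.instance.update',
+             'timestamp': '2013-06-18 10:00:00.000000',
+             'message_id': '<uuid>',
+             'priority': 'INFO',
+             'publisher_id': 'compute.host1',
+             'payload': {...}}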
+
+        :param: raw_message A raw message obtained from the Qpid message
+                queue
+        :returns: A dictionary containing the following keys:
+                  event_type, timestamp, message_id, priority, publisher_id,
+                  payload
+        '''
+        content = raw_message.content
+        message = dict()
+        for attr in ['event_type', 'timestamp', 'message_id', 'priority',
+                     'publisher_id', 'payload']:
+            message[attr] = content.get(attr)
+        log(self.qpid_connection.log, 'debug', _('Qpid listener received '
+                                                 'message: %s') % (message))
+        return message
+
+    def _listen(self):
+        '''
+        Handle messages when they arrive on the message queue.
+        '''
+        while True:
+            try:
+                if self._has_more_messages():
+                    raw_message = self._next_message()
+                    message = self._resolve_message(raw_message)
+                    self._dispatch(message)
+                else:
+                    break
+            except ConnectionError, e:
+                # Use 'warn' here; the log() helper does not recognize the
+                # 'warning' level and would silently drop the message.
+                log(self.qpid_connection.log, 'warn',
+                    _("Connection error: %s") % (e))
+                self.qpid_connection._is_connected = False
+                self.qpid_connection._reconnect()
+                break
diff --git a/common-powervc/powervc/common/netutils.py b/common-powervc/powervc/common/netutils.py
new file mode 100644
index 0000000..7b85029
--- /dev/null
+++ b/common-powervc/powervc/common/netutils.py
@@ -0,0 +1,115 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import json
+import socket
+import urllib2
+import urlparse
+
+
+def is_ipv4_address(ip_or_host):
+    """Determines if a netloc is an IPv4 address.
+
+    :param ip_or_host: the host/ip to check
+    """
+    try:
+        socket.inet_aton(ip_or_host)
+        return True
+    except Exception:
+        return False
+
+
+def hostname_url(url):
+    """Converts the URL into its FQHN form.
+    This requires DNS to be set up on the OS or the hosts table
+    to be updated.
+
+    :param url: the url to convert to FQHN form
+    """
+    frags = urlparse.urlsplit(url)
+    if is_ipv4_address(frags.hostname) is True:
+        return url
+    try:
+        fqhn, alist, ip = socket.gethostbyaddr(frags.hostname)
+    except Exception:
+        # likely no DNS configured, return initial url
+        return url
+    port_str = ''
+    if frags.port is not None:
+        port_str = ':' + str(frags.port)
+    return frags.scheme + '://' + fqhn + port_str + frags.path
+
+
+def extract_url_segment(url, needles):
+    """Searches the url segments for the first occurrence
+    of an element in the list of search keys.
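+
+    For example (illustrative), searching
+    'http://host:8774/v2/volumes' with needles ['volumes', 'servers']
+    returns 'volumes'.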
+ + :param url: the url or uri to search + :param needles: the keys to search for + """ + for seg in reversed(url.split('/')): + if seg in needles: + return seg + return None + + +class JSONRESTClient(object): + """a simple json rest client + """ + def __init__(self, token): + self.token = token + + def get(self, url): + """perform a http GET on the url + + :param url: the url to GET + """ + return self._rest_call(url) + + def post(self, url, json_body): + """perform a http POST on the url + + :param url: the url to POST + :param json_body: the body to POST + """ + return self._rest_call(url, 'POST', json_body) + + def put(self, url, json_body): + """perform a http PUT on the url + + :param url: the url to PUT + :param json_body: the body to PUT + """ + return self._rest_call(url, 'PUT', json_body) + + def delete(self, url): + """perform an http DELETE on the url + + :param url: the url to DELETE + """ + return self._rest_call(url, 'DELETE') + + def _rest_call(self, url, method='GET', json_body=None): + request = urllib2.Request(url) + request.add_header('Content-Type', 'application/json;charset=utf8') + request.add_header('Accept', 'application/json') + request.add_header('User-Agent', 'python-client') + if self.token: + request.add_header('X-Auth-Token', self.token) + if json_body: + request.add_data(json.dumps(json_body)) + request.get_method = lambda: method + try: + response = urllib2.urlopen(request) + except urllib2.HTTPError as e: + if e.code == 300: + return json.loads(e.read()) + raise e + return json.loads(response.read()) diff --git a/common-powervc/powervc/common/utils.py b/common-powervc/powervc/common/utils.py new file mode 100644 index 0000000..dc867eb --- /dev/null +++ b/common-powervc/powervc/common/utils.py @@ -0,0 +1,829 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" +import logging +import exception +import os +import sys +import threading +import time + +from eventlet.semaphore import Semaphore +from glanceclient.openstack.common import importutils +from powervc.common import config +from powervc.common import constants +from powervc.common.exception import StorageConnectivityGroupNotFound +from powervc.common.gettextutils import _ + +LOG = logging.getLogger(__name__) + +CONF = config.CONF +DEFAULT_TTL = 600 + + +class TimeLivedCache(object): + """ + The base class to provide the functionality of a timed cache. + The default refresh time is 10 mins. + """ + def __init__(self, ttl=DEFAULT_TTL): + self._cache = {} + self._last_updated = -1 + self._lock = threading.Lock() + self.ttl = ttl + + def _cache_resources(self): + """ + Refreshes the cached values if the cached time has expired, + or if there are no cached values. + """ + now = round(time.time()) + if now - self._last_updated < self.ttl and len(self._cache) != 0: + return + with self._lock: + if now - self._last_updated < self.ttl: + return + self._cache = self._get_cache() + LOG.debug(_("Updated %s at %s. 
Last update: %s") % + (str(self), now, self._last_updated)) + self._last_updated = now + + def _get_cache(self): + tmp_cache = {} + resources = self._get_resources() + if resources: + for resource in resources: + tmp_cache[self._id_for_resource(resource)] = resource + return tmp_cache + + def list(self): + """ + Returns the cached values + """ + self._cache_resources() + return self._cache.values() + + def _id_for_resource(self, resource): + raise NotImplementedError() + + def _get_resources(self): + raise NotImplementedError() + + +class GreenTimeLivedCache(TimeLivedCache): + """ + Extend the TimeLivedCache to use green thread. + """ + def __init__(self, ttl=DEFAULT_TTL): + super(GreenTimeLivedCache, self).__init__(ttl) + # Replace with the semaphore. + self._lock = Semaphore() + + +class VolumeCache(GreenTimeLivedCache): + """ + Caches the volumes + """ + def __init__(self, driver, ttl=DEFAULT_TTL): + assert driver + self._driver = driver + super(VolumeCache, self).__init__(ttl) + + def _get_resources(self): + return self._driver.cache_volume_data() + + def _get_cache(self): + return self._get_resources() + + def set_by_id(self, pvc_id, local_id): + with self._lock: + self._cache[pvc_id] = local_id + + def get_by_id(self, pvc_id, default=None): + self._cache_resources() + if (len(self._cache) != 0): + if pvc_id in self._cache: + LOG.info(_("Found volume id equals: '%s'" % pvc_id)) + return self._cache[pvc_id] + LOG.info(_("No volume found which equals: '%s'" % pvc_id)) + return default + + +class SCGCache(GreenTimeLivedCache): + """ + Caches the SCGs. + """ + def __init__(self, nova, ttl=DEFAULT_TTL): + assert nova + self._nova = nova + super(SCGCache, self).__init__(ttl) + + def __str__(self): + return _("Storage Connectivity Group Cache") + + def _id_for_resource(self, resource): + return resource.display_name + + def _get_resources(self): + """ + Calls the api to get all SCGs + """ + return self._nova.storage_connectivity_groups.list(detailed=True) + + def by_name(self, name, default=None): + """ + Returns the SCG by name + """ + self._cache_resources() + if (len(self._cache) != 0): + if name in self._cache: + LOG.info(_("Found scg which name equals: '%s'" % name)) + return self._cache[name] + LOG.info(_("No scg found which equals name: '%s'" % name)) + return default + + def by_id(self, scg_id, default=None): + """ + Returns the SCG by id + """ + self._cache_resources() + if (len(self._cache) != 0): + for scg in self.list(): + if scg.id == scg_id: + LOG.info(_("Found scg which equals id: '%s'" % scg_id)) + return scg + LOG.info(_("No scg found which equals id: '%s'" % scg_id)) + return default + +__lock = threading.Lock() +__utils = None + + +def get_utils(): + """ + Returns a singleton Utils object + """ + global __lock + global __utils + if __utils is not None: + return __utils + with __lock: + if __utils is not None: + return __utils + __utils = Utils() + return __utils + + +class Utils(object): + """ + This Utils class leverages the pvcnovaclient and pvccinderclient + to retrieve the Storage Connectivity Group, Storage Providers and + Storage Templates information, etc. 
+ + Usage sample: + username = 'root' + password = 'passw0rd' + tenant = 'ibm-default' + auth_url = 'https://z3-9-5-127-193.rch.nimbus.kstart.ibm.com/\ + powervc/openstack/admin/v3' + cacert = '/home/osadmin/z3-9-5-127-193.rch.nimbus.kstart.ibm.com' + + utils = utils.Utils(username=username, + api_key=password, + project_id=tenant, + auth_url=auth_url, + insecure=False, + cacert=cacert) + sps = utils.get_scg_accessible_storage_providers() + sts = utils.get_scg_accessible_storage_templates() + volumes = utils.get_scg_accessible_volumes() + """ + def __init__(self): + factory = importutils.import_module('powervc.common.client.factory') + self._novaclient = factory.POWERVC.new_client( + str(constants.SERVICE_TYPES.compute)) + self._cinderclient = factory.POWERVC.new_client( + str(constants.SERVICE_TYPES.volume)) + self._localkeystoneclient = factory.LOCAL.new_client( + str(constants.SERVICE_TYPES.identity)) + self.scg_cache = self.get_scg_cache(self._novaclient) + + def get_scg_cache(self, novaclient): + """ + Return the SCGCache object. + """ + return SCGCache(novaclient) + + def get_all_scgs(self): + """ + Get all Storage Connectivity Groups from PowerVC + + :returns: A list of all Storage Connectivity Groups on PowerVC + """ + return self.scg_cache.list() + + def get_our_scg_list(self): + """ + If SCG names are specified in our configuration, see if the scgs exist. + If they do not exist, raise an exception. If they exist, return the scg + list for the name specified. If no SCG name is specified, return + [] for the scg list. + + :returns: The StorageConnectivityGroup object list if found, else [] + :raise StorageConnectivityGroupNotFound: if the Storage Connectivity + Groups could not be found on PowerVC + """ + our_scg_list = [] + scg_to_use_list = CONF['powervc'].storage_connectivity_group + for scg_to_use in scg_to_use_list: + if scg_to_use: + scg = self.scg_cache.by_name(scg_to_use) + if scg is not None: + LOG.debug(_('PowerVC Storage Connectivity Group \'%s\' ' + 'found.'), scg.display_name) + our_scg = scg + our_scg_list.append(our_scg) + else: + # If a SCG is specified and it's not found on the PowerVC, + # raise an exception. + LOG.error(_('The PowerVC Storage Connectivity Group' + ' \'%s\' was not found.'), scg_to_use) + raise StorageConnectivityGroupNotFound(scg=scg_to_use) + else: + LOG.error(_('No Storage Connectivity Group is specified in ' + 'the configuration settings.')) + return our_scg_list + + def validate_scgs(self): + """ + Validate the SCG name specified in the configuration, + Return validated SCG list if successful + Return [] if SCGs are not specified in the configuration file OR + SCG specified is not found in PowerVC. + """ + validated_scgs = [] + try: + validated_scgs = self.get_our_scg_list() + except StorageConnectivityGroupNotFound: + return [] + return validated_scgs + + def get_scg_by_scgName(self, scg_name): + """ + Get the SCG by scgName + """ + return self.scg_cache.by_name(scg_name) + + def get_scg_by_scgUUID(self, scg_uuid): + """ + Get the SCG by uuid + """ + return self.scg_cache.by_id(scg_uuid) + + def get_scg_id_by_scgName(self, scg_name): + """ + Get the SCG_ID by scg_name + """ + if scg_name == "": + return "" + # If no scg_name is found, None is returned. 
+        scg = self.get_scg_by_scgName(scg_name)
+        if scg is not None:
+            return scg.id
+        return ""
+
+    def get_multi_scg_accessible_servers(self, scg_uuid_list, scg_name_list,
+                                         detailed=True, search_opts=None):
+        """
+        Get accessible virtual servers by the specified SCG UUID list
+        or SCG name list.
+        If both the SCG UUID list and the SCG name list are specified, the
+        UUID list takes precedence. If neither is specified, get all servers.
+        """
+        class WrapServer():
+            def __init__(self, server):
+                self.server = server
+
+            def __eq__(self, other):
+                if isinstance(other, WrapServer):
+                    return self.server.id == other.server.id
+                else:
+                    return False
+
+            def __hash__(self):
+                return hash(self.server.id)
+
+        wrap_servers = set()
+        if scg_uuid_list:
+            for scg_uuid in scg_uuid_list:
+                scg_servers = self.get_scg_accessible_servers(scg_uuid,
+                                                              None,
+                                                              detailed,
+                                                              search_opts)
+                wrap_scg_servers = [WrapServer(scg_server)
+                                    for scg_server in scg_servers]
+                wrap_servers.update(wrap_scg_servers)
+            return [wrap_server.server for wrap_server in wrap_servers]
+
+        if not scg_name_list:
+            scg_name_list = CONF.powervc.storage_connectivity_group
+
+        if scg_name_list:
+            for scg_name in scg_name_list:
+                scg_servers = self.get_scg_accessible_servers(None,
+                                                              scg_name,
+                                                              detailed,
+                                                              search_opts)
+                wrap_scg_servers = [WrapServer(scg_server)
+                                    for scg_server in scg_servers]
+                wrap_servers.update(wrap_scg_servers)
+            return [wrap_server.server for wrap_server in wrap_servers]
+
+    def get_scg_accessible_servers(self, scgUUID=None, scgName=None,
+                                   detailed=True, search_opts=None):
+        """
+        Get accessible virtual servers by the specified SCG UUID or scgName.
+        If both the SCG UUID and SCG name are specified, the UUID takes
+        precedence. If neither is specified, get all servers.
+        """
+        scg = None
+        # If no scgUUID specified.
+        if not scgUUID:
+            if scgName:
+                # If scgName specified, then search by scgName
+                scg = self.get_scg_by_scgName(scgName)
+            else:
+                # If scgName not specified, return None
+                scg = None
+        else:
+            LOG.debug("Specified scgUUID: '%s'" % scgUUID)
+            # retrieve scg by scgUUID
+            scg = self.scg_cache.by_id(scgUUID)
+
+        if not scg:
+            # If no scg, then it's an IVM based PowerVC,
+            # return all servers
+            return self._novaclient.manager.list_all_servers(detailed,
+                                                             search_opts)
+
+        # accessible_storage_servers to return
+        accessible_storage_servers = []
+        all_servers = self._novaclient.manager.list_all_servers(detailed,
+                                                                search_opts)
+
+        # Filter the servers for the SCG
+        for server in all_servers:
+            server_scg = getattr(server, 'storage_connectivity_group_id',
+                                 None)
+            if server_scg and server_scg == scg.id:
+                accessible_storage_servers.append(server)
+            elif server_scg is None:
+                # onboarding VMs
+                accessible_storage_servers.append(server)
+
+        LOG.info("All accessible_storage_servers: %s" %
+                 accessible_storage_servers)
+
+        return accessible_storage_servers
+
+    def get_multi_scg_accessible_storage_providers(self,
+                                                   scg_uuid_list,
+                                                   scg_name_list,
+                                                   detailed=True,
+                                                   search_opts=None):
+        """
+        Get accessible storage providers by the specified SCG UUID list
+        or SCG name list.
+        If both the SCG UUID list and the SCG name list are specified, the
+        UUID list takes precedence.
+        """
+        class WrapProvider():
+            def __init__(self, provider):
+                self.provider = provider
+
+            def __eq__(self, other):
+                if isinstance(other, WrapProvider):
+                    return self.provider.id == other.provider.id
+                else:
+                    return False
+
+            def __hash__(self):
+                return hash(self.provider.id)
+
+        wrap_providers = set()
+        if scg_uuid_list:
+            for scg_uuid in scg_uuid_list:
+                scg_providers = self.get_scg_accessible_storage_providers(
+                    scg_uuid, None, detailed, search_opts)
+                wrap_scg_providers = [WrapProvider(scg_provider)
+                                      for scg_provider in scg_providers]
+                wrap_providers.update(wrap_scg_providers)
+            return [wrap_provider.provider for wrap_provider in wrap_providers]
+
+        if not scg_name_list:
+            scg_name_list = CONF.powervc.storage_connectivity_group
+
+        if scg_name_list:
+            for scg_name in scg_name_list:
+                scg_providers = self.get_scg_accessible_storage_providers(
+                    None, scg_name, detailed, search_opts)
+                wrap_scg_providers = [WrapProvider(scg_provider)
+                                      for scg_provider in scg_providers]
+                wrap_providers.update(wrap_scg_providers)
+            return [wrap_provider.provider for wrap_provider in wrap_providers]
+
+    def get_scg_accessible_storage_providers(self, scgUUID=None, scgName=None,
+                                             detailed=True, search_opts=None):
+        """
+        Get accessible storage providers by the specified SCG UUID or scgName.
+        If both the SCG UUID and SCG name are specified, the UUID takes
+        precedence. If neither is specified, get the first SCG from PowerVC.
+        """
+        scg = None
+        # If no scgUUID specified.
+        if not scgUUID:
+            if scgName:
+                # If scgName specified, then search by scgName
+                scg = self.get_scg_by_scgName(scgName)
+            else:
+                # If scgName not specified, return None
+                scg = None
+        else:
+            LOG.debug(_("Specified scgUUID: '%s'" % scgUUID))
+            # retrieve scg by scgUUID
+            scg = self.scg_cache.by_id(scgUUID)
+
+        if not scg:
+            # If no scg, then it's an IVM based PowerVC,
+            # return all storage providers
+            return (self._cinderclient.storage_providers.
+                    list_all_providers(detailed, search_opts))
+
+        # accessible_storage_providers to return
+        accessible_storage_providers = []
+
+        # retrieve fc_storage_access
+        fc_storage_access = getattr(scg, 'fc_storage_access', False) or False
+        LOG.info(_("scg['fc_storage_access']: '%s'" % fc_storage_access))
+
+        # retrieve provider_id in vios_cluster
+        provider_id = None
+        vios_cluster = getattr(scg, 'vios_cluster', {})
+        if vios_cluster:
+            provider_id = vios_cluster.get('provider_id', '')
+        LOG.info(_("scg['vios_cluster']['provider_id']: '%s'" %
+                   (provider_id)))
+
+        # retrieve all the storage-providers
+        storage_providers = (self._cinderclient.storage_providers.
+                             list_all_providers(detailed, search_opts))
+        LOG.info(_("storage_providers: %s" % storage_providers))
+        # Loop over the storage providers; if the 'storage_hostname' matches
+        # SCG['vios_cluster']['provider_id'], or if SCG['fc_storage_access']
+        # is "True" AND the provider's storage_type is "fc", then add to list
+        for storage_provider in storage_providers:
+            storage_hostname = getattr(storage_provider,
+                                       'storage_hostname', '')
+            storage_type = getattr(storage_provider,
+                                   'storage_type', '')
+            LOG.info(_("storage_provider['storage_hostname']: '%s'" %
+                       (storage_hostname)))
+            if storage_hostname and storage_hostname == provider_id:
+                LOG.info(_("Add to accessible_storage_providers: %s" %
+                           (storage_provider)))
+                accessible_storage_providers.append(storage_provider)
+            elif fc_storage_access and (constants.SCG_SUPPORTED_STORAGE_TYPE ==
+                                        storage_type):
+                LOG.info(_("Add to accessible_storage_providers: %s" %
+                           (storage_provider)))
+                accessible_storage_providers.append(storage_provider)
+            # TODO: remove this fallback once provider_id and storage_type
+            # are implemented
+            else:
+                accessible_storage_providers.append(storage_provider)
+
+        LOG.info(_("All accessible_storage_providers: %s" %
+                   (accessible_storage_providers)))
+
+        return accessible_storage_providers
+
+    def get_multi_scg_accessible_storage_templates(self,
+                                                   scg_uuid_list,
+                                                   scg_name_list):
+        """
+        Get accessible storage templates by the specified SCG UUID list
+        or SCG name list.
+        If both the SCG UUID list and the SCG name list are specified, the
+        UUID list takes precedence.
+        """
+        class WrapType():
+            def __init__(self, volume_type):
+                self.type = volume_type
+
+            def __eq__(self, other):
+                if isinstance(other, WrapType):
+                    return self.type.id == other.type.id
+                else:
+                    return False
+
+            def __hash__(self):
+                return hash(self.type.id)
+
+        wrap_types = set()
+        if scg_uuid_list:
+            for scg_uuid in scg_uuid_list:
+                scg_types = self.get_scg_accessible_storage_templates(
+                    scg_uuid, None)
+                wrap_scg_types = [WrapType(scg_type) for scg_type in scg_types]
+                wrap_types.update(wrap_scg_types)
+            return [wrap_type.type for wrap_type in wrap_types]
+
+        if not scg_name_list:
+            scg_name_list = CONF.powervc.storage_connectivity_group
+
+        if scg_name_list:
+            for scg_name in scg_name_list:
+                scg_types = self.get_scg_accessible_storage_templates(
+                    None, scg_name)
+                wrap_scg_types = [WrapType(scg_type) for scg_type in scg_types]
+                wrap_types.update(wrap_scg_types)
+            return [wrap_type.type for wrap_type in wrap_types]
+
+    def get_scg_accessible_storage_templates(self, scgUUID=None, scgName=None):
+        """
+        Get accessible storage templates by the specified SCG UUID or scgName.
+        If both the SCG UUID and SCG name are specified, the UUID takes
+        precedence. If neither is specified, get the first SCG from PowerVC.
+        """
+        scg = None
+        # If no scgUUID specified.
+        if not scgUUID:
+            if scgName:
+                # If scgName specified, then search by scgName
+                scg = self.get_scg_by_scgName(scgName)
+            else:
+                # If scgName not specified, get the SCG from the value
+                # configured in powervc.conf
+                scg = self.get_configured_scg()
+        else:
+            LOG.debug(_("Specified scgUUID: '%s'" % scgUUID))
+            # retrieve scg by scgUUID
+            scg = self.scg_cache.by_id(scgUUID)
+        if not scg:
+            # If no scg, then it's an IVM based PowerVC,
+            # return all storage templates
+            return (self._cinderclient.volume_types.
+                    list_all_storage_templates())
+
+        # accessible_storage_templates to return
+        accessible_storage_templates = []
+        # filter out all the accessible storage template uuids
+        volume_types = scg.list_all_volume_types()
+        volume_type_ids = []
+        for vol_type in volume_types:
+            volume_type_ids.append(vol_type.__dict__.get("id"))
+        all_volume_types = \
+            self._cinderclient.volume_types.list_all_storage_templates()
+        for storage_template in all_volume_types:
+            if(storage_template.__dict__.get("id") in volume_type_ids):
+                accessible_storage_templates.append(storage_template)
+
+        LOG.info(_('accessible_storage_templates: %s' %
+                   (accessible_storage_templates)))
+        return accessible_storage_templates
+
+    def get_multi_scg_accessible_volumes(self,
+                                         scg_uuid_list,
+                                         scg_name_list,
+                                         detailed=True,
+                                         search_opts=None):
+        """
+        Get accessible volumes by the specified SCG UUID list
+        or SCG name list.
+        If both the SCG UUID list and the SCG name list are specified, the
+        UUID list takes precedence.
+        """
+        class WrapVolume():
+            def __init__(self, volume):
+                self.volume = volume
+
+            def __eq__(self, other):
+                if isinstance(other, WrapVolume):
+                    return self.volume.id == other.volume.id
+                else:
+                    return False
+
+            def __hash__(self):
+                return hash(self.volume.id)
+
+        wrap_volumes = set()
+        if scg_uuid_list:
+            for scg_uuid in scg_uuid_list:
+                scg_volumes = self.get_scg_accessible_volumes(scg_uuid,
+                                                              None,
+                                                              detailed,
+                                                              search_opts)
+                wrap_scg_volumes = [WrapVolume(scg_volume)
+                                    for scg_volume in scg_volumes]
+                wrap_volumes.update(wrap_scg_volumes)
+            return [wrap_volume.volume for wrap_volume in wrap_volumes]
+
+        if not scg_name_list:
+            scg_name_list = CONF.powervc.storage_connectivity_group
+
+        if scg_name_list:
+            for scg_name in scg_name_list:
+                scg_volumes = self.get_scg_accessible_volumes(None,
+                                                              scg_name,
+                                                              detailed,
+                                                              search_opts)
+                wrap_scg_volumes = [WrapVolume(scg_volume)
+                                    for scg_volume in scg_volumes]
+                wrap_volumes.update(wrap_scg_volumes)
+            return [wrap_volume.volume for wrap_volume in wrap_volumes]
+
+    def get_scg_accessible_volumes(self, scgUUID=None, scgName=None,
+                                   detailed=True, search_opts=None):
+        """
+        Get SCG accessible volumes by the specified SCG UUID or scgName.
+        If both the SCG UUID and SCG name are specified, the UUID takes
+        precedence. If neither is specified, get the first SCG from PowerVC.
+        """
+        scg = None
+        # If no scgUUID specified.
+ if not scgUUID: + if scgName: + # If scgName specified, then search by scgName + scg = self.get_scg_by_scgName(scgName) + else: + # If scgName not specified, get the SCG from the value + # configured in powervc.conf + scg = self.get_configured_scg() + else: + LOG.debug(_("Specified scgUUID: '%s'" % scgUUID)) + # retrieve scg by scgUUID + scg = self.scg_cache.by_id(scgUUID) + if not scg: + # If no scg, then it's a IVM based PowerVC, + # return all volumes + return (self._cinderclient.volumes.list_all_volumes()) + + # accessible_storage_volumes to return + accessible_storage_volumes = [] + volumes = scg.list_all_volumes() + volume_ids = [] + for vol in volumes: + volume_ids.append(vol.__dict__.get("id")) + all_volumes = \ + self._cinderclient.volumes.list_all_volumes(detailed, search_opts) + for storage_volume in all_volumes: + if(storage_volume.__dict__.get("id") in volume_ids): + metadata = storage_volume.__dict__.get("metadata") + if(metadata is not None): + is_boot_volume = metadata.get("is_boot_volume") + # Filter out the boot volumes + if(is_boot_volume != "True"): + accessible_storage_volumes.append(storage_volume) + else: + accessible_storage_volumes.append(storage_volume) + + LOG.info(_('accessible_storage_volumes: %s' % ( + accessible_storage_volumes))) + return accessible_storage_volumes + + def get_image_scgs(self, imageUUID): + """ + Get the Storage Connectivity Groups for the specified image. + + :param: imageUUID The UUID of the image + :returns: The Storage Connectivity Groups for the specified image or an + empty list if none are found. + """ + if imageUUID is not None: + return self._novaclient.storage_connectivity_groups.list_for_image( + imageUUID) + else: + return [] + + def get_scg_image_ids(self, scgUUID): + """ + Get the SCGImage identifiers for the specified Storage Connectivity + Group. + + :param: scgUUID The UUID of the StorageConnectvityGroup + :returns: The list of SCGImage identifiers for the specified Storage + Connectivity Group or an empty list if none are found. + """ + if scgUUID is not None: + return self._novaclient.scg_images.list_ids(scgUUID) + else: + return [] + + def get_local_staging_project_id(self): + """ + Get the local hosting OS staging project Id. If a staging + project name is not found, a exception.StagingProjectNotFound + exception will be raised. If no staging project is specified in + the conf, the default value will be used as specified in constants. + + :returns: The local hosting OS staging project Id + """ + ks_client = self._localkeystoneclient + stagingname = CONF.powervc.staging_project_name or \ + constants.DEFAULT_STAGING_PROJECT_NAME + try: + for tenant in ks_client.tenants.list(): + projectname = tenant.name + projectid = tenant.id + if projectname == stagingname: + LOG.debug(_('The staging_project_name %s has id %s'), + stagingname, projectid) + return projectid + except Exception as e: + LOG.debug(_('An error occurred getting the tenant list: %s.'), e) + LOG.debug(_('Unable to find staging project: %s'), stagingname) + raise exception.StagingProjectNotFound(name=stagingname) + + def get_local_staging_user_id(self): + """ + Get the local hosting OS staging user Id which defaults to + constants.DEFAULT_STAGING_USERNAME if not set in the conf. + If a staging user name is not found, a StagingUserNotFound + exception will be raised. 
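+        The user name is taken from CONF.powervc.staging_user when set.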
+ + :returns: The local hosting OS staging user Id + """ + ks_client = self._localkeystoneclient + staginguser = CONF.powervc.staging_user or \ + constants.DEFAULT_STAGING_USER_NAME + try: + for user in ks_client.users.list(): + username = user.name + userid = user.id + if staginguser == username: + LOG.debug(_('The staging_user %s has id %s'), + staginguser, userid) + return userid + except Exception as e: + LOG.debug(_('An error occurred getting the user list: %s'), e) + LOG.debug(_('Unable to find staging user: %s'), staginguser) + raise exception.StagingUserNotFound(name=staginguser) + + +def import_relative_module(relative_import_str, import_str): + """ + Imports a module relative to another. Can be used when more + than 1 module of the given name exists in the python path + to resolve any discrepency in multiple paths. + + :param relative_import_str: a module import string which + neighbors the actual import. for example 'glanceclient'. + :param import_str: the module import string. for example + 'tests.utils' + + example: + utils = import_relative_module('glanceclient', 'tests.utils') + fapi = utils.FakeAPI(...) + """ + mod = importutils.import_module(relative_import_str) + mpath = os.path.dirname(os.path.dirname(os.path.realpath(mod.__file__))) + if not sys.path[0] is mpath: + sys.path.insert(0, mpath) + return importutils.import_module(import_str) + + +class StagingCache(object): + """ + Provides a lazy cache around the local staging user and project. + Consumers can use the staging_user_and_project property to retrieve the + (user_id, project_id) pair for the staging user. These values are + lazily fetched at most once + """ + + def __init__(self): + super(StagingCache, self).__init__() + self.utils = get_utils() + self.staging_user = None + self.staging_project = None + + @property + def is_valid(self): + uid, pid = self.get_staging_user_and_project() + return uid is not None and pid is not None + + def get_staging_user_and_project(self, raise_on_invalid=False): + try: + if not self.staging_user: + self.staging_user = self.utils.get_local_staging_user_id() + if not self.staging_project: + self.staging_project = \ + self.utils.get_local_staging_project_id() + return (self.staging_user, self.staging_project) + except exception.StagingProjectNotFound as e: + if raise_on_invalid: + raise e + return (None, None) + except exception.StagingUserNotFound as e: + if raise_on_invalid: + raise e + return (None, None) diff --git a/common-powervc/run_tests.sh b/common-powervc/run_tests.sh new file mode 100755 index 0000000..8640b21 --- /dev/null +++ b/common-powervc/run_tests.sh @@ -0,0 +1,192 @@ +#!/bin/bash + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -eu + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run PowerVC Common test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. 
Run tests in local environment" + echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." + echo " -n, --no-recreate-db Don't recreate the test database." + echo " -x, --stop Stop running tests after the first error or failure." + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run flake8" + echo " -8, --8 Just run flake8, don't show PEP8 text for each error" + echo " -P, --no-pep8 Don't run flake8" + echo " -c, --coverage Generate coverage report" + echo " -h, --help Print this usage message" + echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" + echo " --standard-threads Don't do the eventlet threading monkeypatch." + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_flake8=1;; + -8|--8) short_flake8=1;; + -P|--no-pep8) no_flake8=1;; + -c|--coverage) coverage=1;; + --standard-threads) + export STANDARD_THREADS=1 + ;; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseargs= +noseopts= +wrapper="" +just_flake8=0 +short_flake8=0 +no_flake8=0 +coverage=0 +recreate_db=1 +update=0 + +for arg in "$@"; do + process_option $arg +done + +# If enabled, tell nose to collect coverage data +if [ $coverage -eq 1 ]; then + noseopts="$noseopts --with-coverage --cover-package=common-powervc" +fi + +function run_tests { + # Just run the test suites in current environment + ${wrapper} $NOSETESTS + # If we get some short import error right away, print the error log directly + RESULT=$? + if [ "$RESULT" -ne "0" ]; + then + ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` + if [ "$ERRSIZE" -lt "40" ]; + then + cat run_tests.log + fi + fi + return $RESULT +} + +function run_flake8 { + FLAGS=--show-pep8 + if [ $# -gt 0 ] && [ 'short' == ''$1 ] + then + FLAGS='' + fi + + + echo "Running flake8 ..." + # Just run flake8 in current environment + #echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + RESULT=${PIPESTATUS[0]} + return $RESULT +} + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? 
(Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + RESULT=$? + echo "RESULT $RESULT" + exit $RESULT +fi + +if [ $short_flake8 -eq 1 ]; then + run_flake8 short + RESULT=$? + exit $RESULT +fi + +run_tests +RESULT=$? + +# NOTE(sirp): we only want to run flake8 when we're running the full-test +# suite, not when we're running tests individually. To handle this, we need to +# distinguish between options (noseopts), which begin with a '-', and arguments +# (noseargs). +if [ -z "$noseargs" ]; then + if [ $no_flake8 -eq 0 ]; then + run_flake8 + TMP_RESULT=$? + RESULT=$(($TMP_RESULT + $RESULT)) + fi +fi + +if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + ${wrapper} coverage html -d covhtml -i +fi + +exit $RESULT \ No newline at end of file diff --git a/common-powervc/test/__init__.py b/common-powervc/test/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/test/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/test/common/__init__.py b/common-powervc/test/common/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/test/common/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/test/common/client/__init__.py b/common-powervc/test/common/client/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/common-powervc/test/common/client/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/common-powervc/test/common/client/test_cinder.py b/common-powervc/test/common/client/test_cinder.py new file mode 100644 index 0000000..03d11d2 --- /dev/null +++ b/common-powervc/test/common/client/test_cinder.py @@ -0,0 +1,459 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+
+from cinderclient.tests.v1 import fakes
+from cinderclient.tests.v1.test_volumes import VolumesTest
+from cinderclient.tests.v1.test_types import TypesTest
+from cinderclient.tests import utils
+from cinderclient.v1.volumes import Volume
+from cinderclient.v1.volume_types import VolumeType
+from cinderclient.v1.volume_types import VolumeTypeManager
+from powervc.common.client.extensions import cinder as ext_cinder
+from powervc.common.client import delegate
+from powervc.common import utils as commonutils
+
+import mock
+import sys
+
+
+"""
+    These classes similarly extend the current cinder client test cases.
+    Also provided are examples of how someone can override an existing
+    method in the event we need to test something unique to PowerVC.
+
+    The methods that are currently overridden expect the same results as
+    the base class test cases and are only provided as examples.
+
+    For the PowerVC-specific data model, just override the parent fake data
+    structure and the corresponding testcase method logic to verify
+    the functions.
+
+    To run the testcases, alternatively:
+    1. Right click TestCinderClient.py --> Run As --> Python unit-test
+    or
+    2. Refer to this link for detailed UT running information:
+    https://jazz04.rchland.ibm.com:9443/jazz/service/
+    + com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/
+    + itemName/com.ibm.team.workitem.Attachment/67843
+
+    All the testcases should run successfully.
+"""
+
+
+class PVCFakeClient(fakes.FakeClient):
+
+    """
+    This PVCFakeClient class extends the current cinder FakeClient
+    and pvccinderclient.CinderClient,
+    aiming to set the self.client variable to PVCFakeHTTPClient.
+    """
+
+    def __init__(self, *args, **kwargs):
+        fakes.FakeClient.__init__(self, *args, **kwargs)
+        self.client = PVCFakeHTTPClient(**kwargs)
+        sys.modules['powervc.common.client.factory'] = mock.MagicMock()
+
+
+class PVCFakeHTTPClient(fakes.FakeHTTPClient):
+
+    """
+    This PVCFakeHTTPClient class extends the current cinder FakeHTTPClient.
+    For all the HTTP requests in this class, it returns fake json data
+    as specified beforehand instead of requesting a real environment.
+    E.g., to test the json data from the PowerVC volume REST API:
+    1. Add the expected PowerVC volumes json raw data into the
+       get_volumes_detail() method
+    2. Add a get_volumes_{volume_id} method to return the volume
+    3. Add a post_volumes_{volume_id}_action method to handle post logic
+    4. Add a testcase, and the newly added methods will be called
+    """
+
+    def __init__(self, **kwargs):
+        fakes.FakeHTTPClient.__init__(self, **kwargs)
+
+    def get_volumes_pvcvolume(self, **kw):
+        r = {'volume': self.get_volumes_detail()[2]['volumes'][1]}
+        return (200, {}, r)
+
+    def get_volumes_detail(self, **kw):
+        """
+        Override the parent method to return a new PowerVC specified volume.
+        Here is the same structure as the OpenStack one for example.
+        """
+        return (200, {}, {"volumes": [
+            {'id': 1234,
+             'name': 'sample-volume for cinder',
+             'attachments': [{'server_id': 12234}]},
+            {'id': 'pvcvolume',
+             'name': 'pvc sample-volume for cinder',
+             'attachments': [{'server_id': 54321}]}
+        ]})
+
+    def post_volumes_pvcvolume_action(self, body, **kw):
+        """
+        Add this method to handle PowerVC volume post actions.
+        Here is the same logic as the OpenStack one for example.
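+
+        An illustrative 'os-attach' request body (values assumed; the
+        keys match the assertion below):
+
+            {'os-attach': {'instance_uuid': uuid,
+                           'mode': 'rw',
+                           'mountpoint': '/dev/vdc'}}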
+ """ + _body = None + resp = 202 + assert len(list(body.keys())) == 1 + action = list(body.keys())[0] + if action == 'os-attach': + assert sorted(list(body[action])) == ['instance_uuid', + 'mode', + 'mountpoint'] + elif action == 'os-detach': + assert body[action] is None + elif action == 'os-reserve': + assert body[action] is None + elif action == 'os-unreserve': + assert body[action] is None + elif action == 'os-initialize_connection': + assert list(body[action].keys()) == ['connector'] + return (202, {}, {'connection_info': 'foos'}) + elif action == 'os-terminate_connection': + assert list(body[action].keys()) == ['connector'] + elif action == 'os-begin_detaching': + assert body[action] is None + elif action == 'os-roll_detaching': + assert body[action] is None + elif action == 'os-reset_status': + assert 'status' in body[action] + else: + raise AssertionError("Unexpected action: %s" % action) + return (resp, {}, _body) + + def get_storage_providers_2(self, **kw): + """ + To get a fake detail storage_providers + """ + return (200, {}, {"storage_provider": + { + "backend_type": "svc", + "volume_count": "null", + "service": { + "host_display_name": "shared_v7000_1", + "host": "shared_v7000_1", + "id": 4 + }, + "backend_id": "00000200A0204C30", + "health_status": { + "health_value": "OK" + }, + "free_capacity_gb": 873.5, + "total_capacity_gb": 1115.5, + "storage_hostname": "shared_v7000_1", + "id": 2, + "backend_state": "running" + }}) + + def get_storage_providers_detail(self, **kw): + """ + To return a fake detail storage_providers + """ + return (200, {}, {"storage_providers": [ + { + "backend_type": "svc", + "volume_count": "null", + "service": { + "host_display_name": "shared_v7000_1", + "host": "shared_v7000_1", + "id": 4 + }, + "backend_id": "00000200A0204C30", + "health_status": { + "health_value": "OK" + }, + "free_capacity_gb": 873.5, + "total_capacity_gb": 1115.5, + "storage_hostname": "shared_v7000_1", + "id": 2, + "backend_state": "running" + }, + { + "backend_type": "fc", + "volume_count": "null", + "service": { + "host_display_name": "shared_v7000_1", + "host": "shared_v7000_1", + "id": 4 + }, + "backend_id": "00000200A0204C31", + "health_status": { + "health_value": "OK" + }, + "free_capacity_gb": 73.5, + "total_capacity_gb": 115.5, + "storage_hostname": "shared_v7000_2", + "id": 3, + "backend_state": "running" + } + ]}) + + def get_types(self, **kw): + return (200, {}, { + "volume_types": [ + { + "extra_specs": { + "drivers:storage_pool": "P-NGP01-pool", + "capabilities:volume_backend_name": "shared_v7000_1", + "drivers:rsize": "-1" + }, + "name": "shared_v7000_1-default", + "id": "6627888e-9f59-4996-8c22-5d528c3273f0" + }, + { + "extra_specs": {}, + "name": "dm-crypt", + "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb" + }, + { + "extra_specs": {}, + "name": "LUKS", + "id": "291f81a2-591b-4164-b2b2-829abc935573" + } + ] + }) + + +class PVCCinderVolumesTest(VolumesTest): + + """ + This PVCCinderVolumesTest class extends the current cinder + VolumesTest class to provide volume related UT cases. 
+ """ + + volume_list = [ + { + 'id': 1234, + 'name': 'sample-volume for cinder', + 'attachments': [{'server_id': 12234}]}, + { + 'id': 'pvcvolume', + 'name': 'pvc sample-volume for cinder', + 'attachments': [{'server_id': 54321}] + }] + + def setUp(self): + super(PVCCinderVolumesTest, self).setUp() + # get cinder client + cinder_fakeclient = PVCFakeClient('r', 'p') + # delegate to nova extension class + cinder_client = delegate.new_composite_deletgate( + [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient]) + self.cs = cinder_client + + def tearDown(self): + super(PVCCinderVolumesTest, self).tearDown() + + def test_pvcvolume_attach(self): + """ + Add this method to test if powerVC volume attach functions + Here is the same logic as OpenStack for example. + """ + v = self.cs.volumes.get('pvcvolume') + self.cs.volumes.attach(v, 1, '/dev/vdc') + self.cs.assert_called('POST', + '/volumes/pvcvolume/action') + + def test_list_all_volumes(self): + resluts = self.cs.volumes.list_all_volumes() + + self.cs.assert_called('GET', '/volumes/detail') + self.assertEqual(resluts[0].id, 1234) + self.assertEqual(resluts[1].name, 'pvc sample-volume for cinder') + + def test_list_volumes_1(self): + returnvalues = [Volume(self, res, loaded=True) + for res in self.volume_list if res] + commonutils.get_utils().get_multi_scg_accessible_volumes = \ + mock.MagicMock(return_value=returnvalues) + result = self.cs.volumes.list() + + self.assertEquals(result[0].id, 1234) + self.assertEquals(result[1].name, "pvc sample-volume for cinder") + + def test_list_volumes_2(self): + returnvalues = [Volume(self, res, loaded=True) + for res in self.volume_list if res] + commonutils.get_utils().get_scg_accessible_volumes = \ + mock.MagicMock(return_value=returnvalues) + + result = self.cs.volumes.list(True, None, 'SCGUUID', None) + self.assertEquals(result[0].name, "sample-volume for cinder") + + +class PVCCinderTypesTest(TypesTest): + + """ + This PVCCinderTypesTest class extends the current cinder + TypesTest class to provide volume Type related UT cases. 
+ """ + volumes_type_list = [ + { + "extra_specs": { + "drivers:storage_pool": "P-NGP01-pool", + "capabilities:volume_backend_name": "shared_v7000_1", + "drivers:rsize": "-1" + }, + "name": "shared_v7000_1-default", + "id": "6627888e-9f59-4996-8c22-5d528c3273f" + }, + { + "extra_specs": {}, + "name": "dm-crypt", + "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89b" + }, + { + "extra_specs": {}, + "name": "LUKS", + "id": "291f81a2-591b-4164-b2b2-829abc93557" + }] + + def setUp(self): + super(PVCCinderTypesTest, self).setUp() + # get cinder client + cinder_fakeclient = PVCFakeClient('r', 'p') + # delegate to nova extension class + cinder_client = delegate.new_composite_deletgate( + [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient]) + self.cs = cinder_client + + def tearDown(self): + super(PVCCinderTypesTest, self).tearDown() + + def test_list_all_storage_templates(self): + + reslut = self.cs.volume_types.list_all_storage_templates() + + self.assertEqual(reslut[0].name, "shared_v7000_1-default") + + def test_list_storage_templates_1(self): + returnvalues = [VolumeType(VolumeTypeManager, res, loaded=True) + for res in self.volumes_type_list if res] + + commonutils.get_utils().get_multi_scg_accessible_storage_templates = \ + mock.MagicMock(return_value=returnvalues) + result = self.cs.volume_types.list() + + self.assertEquals(result[0].id, "6627888e-9f59-4996-8c22-5d528c3273f") + self.assertEquals(result[1].name, "dm-crypt") + self.assertEquals(result[2].name, "LUKS") + + def test_list_storage_templates_2(self): + data = self.volumes_type_list[2] + returnvalues = [VolumeType(VolumeTypeManager, res, loaded=True) + for res in [data] if res] + + commonutils.get_utils().get_scg_accessible_storage_templates = \ + mock.MagicMock(return_value=returnvalues) + result = self.cs.volume_types.list("SCGUUID", None) + + self.assertEquals(result[0].name, "LUKS") + + +class PVCStorageProvidersTest(utils.TestCase): + + """ + Class PVCStorageProvidersTest is used to provide + Storage Providers related UT cases. 
+ """ + expected_sp = [ + dict( + backend_type="svc", + volume_count="null", + service=dict( + host_display_name="shared_v7000_1", + host="shared_v7000_1", + id=4), + backend_id="00000200A0204C30", + health_status=dict(health_value="OK"), + free_capacity_gb=873.5, + total_capacity_gb=1115.5, + storage_hostname="shared_v7000_1", + id=2, + backend_state="running", + storage_type="fc")] + + def setUp(self): + super(PVCStorageProvidersTest, self).setUp() + # get cinder client + cinder_fakeclient = PVCFakeClient('r', 'p') + # delegate to nova extension class + cinder_client = delegate.new_composite_deletgate( + [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient]) + self.cs = cinder_client + + def tearDown(self): + super(PVCStorageProvidersTest, self).tearDown() + + def compare_to_expected(self, expected, hyper): + for key, value in expected.items(): + self.assertEqual(getattr(hyper, key), value) + + def test_get_detail_SPs(self): + expected = [ + dict(id=2, + backend_type="svc", + backend_id="00000200A0204C30", + free_capacity_gb=873.5, + total_capacity_gb=1115.5, + storage_hostname="shared_v7000_1", + backend_state="running"), + dict(id=3, + backend_type="fc", + backend_id="00000200A0204C31", + free_capacity_gb=73.5, + total_capacity_gb=115.5, + storage_hostname="shared_v7000_2", + backend_state="running")] + + result = self.cs.storage_providers.list_all_providers() + self.cs.assert_called('GET', '/storage-providers/detail') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_get_storage_provider(self): + expected = dict(id=2, + backend_type="svc", + backend_id="00000200A0204C30", + free_capacity_gb=873.5, + total_capacity_gb=1115.5, + storage_hostname="shared_v7000_1", + backend_state="running") + + result = self.cs.storage_providers.get(2) + self.cs.assert_called('GET', + '/storage-providers/2') + + self.compare_to_expected(expected, result) + + def test_list_SP_1(self): + expected = self.expected_sp + returnvalue = [ext_cinder.StorageProvider(None, expected[0], True)] + + commonutils.get_utils().get_scg_accessible_storage_providers = \ + mock.MagicMock(return_value=returnvalue) + result = self.cs.storage_providers.list(True, None, "SCGUUID", None) + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_list_SP_2(self): + expected = self.expected_sp + returnvalue = [ext_cinder.StorageProvider(None, expected[0], True)] + + commonutils.get_utils().get_multi_scg_accessible_storage_providers = \ + mock.MagicMock(return_value=returnvalue) + result = self.cs.storage_providers.list() + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) diff --git a/common-powervc/test/common/client/test_delegate.py b/common-powervc/test/common/client/test_delegate.py new file mode 100644 index 0000000..8fb4fb8 --- /dev/null +++ b/common-powervc/test/common/client/test_delegate.py @@ -0,0 +1,53 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +import unittest +from powervc.common.client import delegate + + +class FakeDelegator1(object): + def x(self): + return 'x' + + +class FakeDelegator2(object): + def y(self): + return 'y' + + +class FakeContext(object): + def __init__(self): + self.auth_token = 'Context Auth Token' + self.project_id = 'Project Id' + + +class FakeKeyStone(object): + def __init__(self): + self.auth_token = 'KeyStone Auth Token' + + +class DelegateTest(unittest.TestCase): + + def test_new_composite_deletgate(self): + d1 = FakeDelegator1() + d2 = FakeDelegator2() + dele = delegate.new_composite_deletgate([d1, d2]) + self.assertEqual(dele.x(), 'x') + self.assertEqual(dele.y(), 'y') + + def test_context_dynamic_auth_token(self): + ctx = FakeContext() + keystone = FakeKeyStone() + dele_ctx_keystone = delegate.context_dynamic_auth_token(ctx, keystone) + self.assertEqual(dele_ctx_keystone.auth_token, 'KeyStone Auth Token') + self.assertEqual(dele_ctx_keystone.project_id, 'Project Id') + +if __name__ == "__main__": + unittest.main() diff --git a/common-powervc/test/common/client/test_glance.py b/common-powervc/test/common/client/test_glance.py new file mode 100644 index 0000000..8eb2776 --- /dev/null +++ b/common-powervc/test/common/client/test_glance.py @@ -0,0 +1,177 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" +import unittest +import mox +from mock import MagicMock +import glanceclient.v1.images as imagesV1 +import glanceclient.v1.image_members as membersV1 + +from glanceclient.openstack.common import gettextutils +gettextutils.install('common-glance-client-ut') + +import powervc.common.utils as common_utils + +utils = common_utils.import_relative_module('glanceclient', 'tests.utils') +test_images = common_utils.import_relative_module('glanceclient', + 'tests.v1.test_images') +test_image_members = common_utils.import_relative_module( + 'glanceclient', + 'tests.v1.test_image_members') + +from powervc.common.client.extensions.glance import Client as PVCGlanceClient + + +class FakeGlanceClient(object): + + """ + Fake client to populate the pvcglanceclient.Client + """ + + def __init__(self, images, members): + self.images = images + self.image_members = members + self.image_tags = MagicMock() + + +class TestPVCGlanceClient(unittest.TestCase): + + def setUp(self): + # prepare the fake api + images_api = utils.FakeAPI(test_images.fixtures) # @UndefinedVariable + images_manager = imagesV1.ImageManager(images_api) + + members_api = utils.FakeAPI( # @UndefinedVariable + test_image_members.fixtures + ) + members_manager = membersV1.ImageMemberManager(members_api) + + # create mock object + self.moxer = mox.Mox() + client = self.moxer.CreateMockAnything() + self.pvc_gc = PVCGlanceClient(client) + + # append the fake api to mock object + self.pvc_gc.client.images = images_manager + self.pvc_gc.client.image_members = members_manager + self.pvc_gc.client.image_tags = MagicMock() + + def test_listImages(self): + self.moxer.ReplayAll() + images = self.pvc_gc.listImages() + self.moxer.VerifyAll() + self.assertEqual(images[0].id, 'a') + self.assertEqual(images[0].name, 'image-1') + self.assertEqual(images[1].id, 'b') + self.assertEqual(images[1].name, 'image-2') + + def test_getImage(self): + self.moxer.ReplayAll() + 
image = self.pvc_gc.getImage('1') + self.moxer.VerifyAll() + self.assertEqual(image.id, '1') + self.assertEqual(image.name, 'image-1') + + def test_deleteImage(self): + self.moxer.ReplayAll() + self.pvc_gc.deleteImage('1') + expect = [ + ('DELETE', '/v1/images/1', {}, None), + ] + self.moxer.VerifyAll() + self.assertEqual(self.pvc_gc. + client. + images. + api.calls, + expect) + + def test_listImageMembers(self): + self.moxer.ReplayAll() + image_id = '1' + image_members = self.pvc_gc.listImageMembers(image_id) + self.moxer.VerifyAll() + self.assertEqual(image_members[0].image_id, '1') + self.assertEqual(image_members[0].member_id, '1') + + def test_deleteImageMember(self): + self.moxer.ReplayAll() + image_id = '1' + member_id = '1' + self.pvc_gc.deleteImageMember(image_id, member_id) + expect = [ + ('DELETE', + '/v1/images/{image}/members/{mem}'. + format(image='1', + mem='1'), + {}, + None)] + self.moxer.VerifyAll() + self.assertEqual(self.pvc_gc.client.image_members. + api.calls, + expect) + + def test_getImageFile(self): + self.pvc_gc.client.images.data = MagicMock(return_value='FILE') + ret = self.pvc_gc.getImageFile('image_id') + self.pvc_gc.client.images.data.assert_called_once_with('image_id') + self.assertEqual(ret, 'FILE') + + def test_updateImage(self): + self.pvc_gc.client.images.update = MagicMock(return_value='updated') + ret = self.pvc_gc.updateImage('image_id') + self.pvc_gc.client.images.update.assert_called_once_with('image_id') + self.assertEqual(ret, 'updated') + + def test_updateImageMember(self): + self.pvc_gc.client.image_members.update =\ + MagicMock(return_value='member updated') + ret = self.pvc_gc.updateImageMember('image_id', + 'member_id', + 'member_status') + self.pvc_gc.client.image_members.update.\ + assert_called_once_with('image_id', + 'member_id', + 'member_status') + self.assertEqual(ret, 'member updated') + + def test_createImageMember(self): + self.pvc_gc.client.image_members.create =\ + MagicMock(return_value='member created') + ret = self.pvc_gc.createImageMember('image_id', 'member_id') + self.pvc_gc.client.image_members.create.\ + assert_called_once_with('image_id', + 'member_id') + self.assertEqual(ret, 'member created') + + def test_updateImageTag(self): + self.pvc_gc.client.image_tags.update =\ + MagicMock(return_value='tag updated') + self.pvc_gc.client_version = 2 + ret = self.pvc_gc.updateImageTag('image_id', 'tag_value') + self.pvc_gc.client.image_tags.update.\ + assert_called_once_with('image_id', + 'tag_value') + self.assertEqual(ret, 'tag updated') + + def test_deleteImageTag(self): + self.pvc_gc.client.image_tags.delete =\ + MagicMock(return_value='tag deleted') + self.pvc_gc.client_version = 2 + ret = self.pvc_gc.deleteImageTag('image_id', 'tag_value') + self.pvc_gc.client.image_tags.delete.\ + assert_called_once_with('image_id', + 'tag_value') + self.assertEqual(ret, 'tag deleted') + + def tearDown(self): + pass + + +if __name__ == "__main__": + unittest.main() diff --git a/common-powervc/test/common/client/test_nova.py b/common-powervc/test/common/client/test_nova.py new file mode 100644 index 0000000..de0a2d4 --- /dev/null +++ b/common-powervc/test/common/client/test_nova.py @@ -0,0 +1,561 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+import unittest
+from mock import MagicMock
+from mock import patch
+import novaclient.tests.v1_1.test_servers as servers_testbox
+import novaclient.tests.v1_1.test_flavors as flavors_testbox
+import novaclient.tests.v1_1.test_hypervisors as hypervisors_testbox
+from novaclient.tests.v1_1 import fakes
+from novaclient.v1_1 import servers
+from novaclient.v1_1 import flavors
+from novaclient.tests import utils
+from powervc.common.client.extensions import nova as ext_nova
+from powervc.common.client import delegate
+from powervc.common import utils as comm_utils
+
+"""
+    These test classes extend the current nova client test cases; they also
+    provide examples of how to override an existing method in the event we
+    need to test something unique to PowerVC.
+
+    The methods that are overridden here expect the same results as the
+    base class test cases and are only provided as examples.
+
+    For a PowerVC-specific data model, override the parent's fake data
+    structures and the corresponding test case logic that verifies the
+    functions.
+
+    To run the test cases, alternatively:
+    1. Right click TestNovaClient.py --> Run As --> Python unit-test
+    or
+    2. Refer to this link for detailed UT running information:
+    https://jazz04.rchland.ibm.com:9443/jazz/service/
+    com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/
+    itemName/com.ibm.team.workitem.Attachment/67843
+
+    All the test cases should run successfully.
+"""
+
+
+class PVCFakeClient(fakes.FakeClient):
+    """
+    This PVCFakeClient class extends the current nova FakeClient,
+    setting its self.client variable to PVCFakeHTTPClient.
+    """
+    def __init__(self, *args, **kwargs):
+        fakes.FakeClient.__init__(self, *args, **kwargs)
+        self.client = PVCFakeHTTPClient(**kwargs)
+
+
+class PVCFakeHTTPClient(fakes.FakeHTTPClient):
+    """
+    This PVCFakeHTTPClient class extends the current nova FakeHTTPClient.
+    For all the HTTP requests in this class, it returns fake JSON data
+    as specified beforehand instead of requesting a real environment.
+    """
+    def __init__(self, **kwargs):
+        fakes.FakeHTTPClient.__init__(self, **kwargs)
+
+    def get_servers(self, **kw):
+        """
+        Override the parent method to return PowerVC-specific servers.
+        """
+        return (200, {}, {"servers": [
+            {'id': 1234, 'name': 'sample-server'},
+            {'id': 5678, 'name': 'powerVC sample-server'}
+        ]})
+
+    def get_servers_detail(self, **kw):
+        """
+        Override the parent method to return PowerVC-specific server
+        detail.
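+        The fixture mixes a plain OpenStack server (id 1234), a PowerVC
+        sample server (id 5678) and a server with an empty image reference
+        (id 9012) so that listing and field handling can be exercised.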
+ """ + return (200, {}, {"servers": [ + { + "id": 1234, + "name": "sample-server", + "image": { + "id": 2, + "name": "sample image", + }, + "flavor": { + "id": 1, + "name": "256 MB Server", + }, + "hostId": "e4d909c290d0fb1ca068ffaddf22cbd0", + "status": "BUILD", + "progress": 60, + "addresses": { + "public": [{ + "version": 4, + "addr": "1.2.3.4", + }, { + "version": 4, + "addr": "5.6.7.8", + }], + "private": [{ + "version": 4, + "addr": "10.11.12.13", + }], + }, + "metadata": { + "Server Label": "Web Head 1", + "Image Version": "2.1" + }, + "OS-EXT-SRV-ATTR:host": "computenode1", + "security_groups": [{ + 'id': 1, 'name': 'securitygroup1', + 'description': 'FAKE_SECURITY_GROUP', + 'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7' + }], + "OS-EXT-MOD:some_thing": "mod_some_thing_value"}, + { + "id": 5678, + "name": "powerVC sample-server", + "image": { + "id": 2, + "name": "sample image", + }, + "flavor": { + "id": 1, + "name": "256 MB Server", + }, + "hostId": "9e107d9d372bb6826bd81d3542a419d6", + "status": "ACTIVE", + "addresses": { + "public": [{ + "version": 4, + "addr": "4.5.6.7", + }, { + "version": 4, + "addr": "5.6.9.8", + }], + "private": [{ + "version": 4, + "addr": "10.13.12.13", + }], + }, + "metadata": { + "Server Label": "DB 1" + }, + "OS-EXT-SRV-ATTR:host": "computenode2", + }, + { + "id": 9012, + "name": "sample-server3", + "image": "", + "flavor": { + "id": 1, + "name": "256 MB Server", + }, + "hostId": "9e107d9d372bb6826bd81d3542a419d6", + "status": "ACTIVE", + "addresses": { + "public": [{ + "version": 4, + "addr": "4.5.6.7", + }, { + "version": 4, + "addr": "5.6.9.8", + }], + "private": [{ + "version": 4, + "addr": "10.13.12.13", + }], + }, + "metadata": { + "Server Label": "DB 1" + } + } + ]}) + + def get_flavors_detail(self, **kw): + """ + Override the parent method to specify powerVC specified flavors + detail. + """ + return (200, {}, {'flavors': [ + {'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10, + 'OS-FLV-EXT-DATA:ephemeral': 10, + 'os-flavor-access:is_public': True, + 'links': {}}, + {'id': 2, 'name': '128 MB Server', 'ram': 512, 'disk': 0, + 'OS-FLV-EXT-DATA:ephemeral': 20, + 'os-flavor-access:is_public': False, + 'links': {}}, + {'id': 'aa1', 'name': 'PowerVC 128 MB Server', 'ram': 5120, + 'disk': 5678, 'OS-FLV-EXT-DATA:ephemeral': 0, + 'os-flavor-access:is_public': True, + 'links': {}} + ]}) + + def get_os_hypervisors(self, **kw): + """ + Override the parent method to specify powerVC specified hypervisors + detail. 
+ """ + return (200, {}, {"hypervisors": [ + {'id': 1234, 'hypervisor_hostname': 'hyper1'}, + {'id': 5678, 'hypervisor_hostname': 'hyper2'}, + ]}) + + def get_storage_connectivity_groups_f4b541cb_f418_4b4b_83b9_a8148650d4e9( + self, **kw): + """ + To get a fake detail storage_connectivity_group + """ + return (200, {}, {"storage_connectivity_group": + { + "auto_add_vios": True, + "fc_storage_access": True, + "display_name": "Auto-SCG for Registered SAN", + "host_list": [ + { + "name": "ngp01_02_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F715A", + "id": "ngp01_02_vios_1##1" + } + ] + }, + { + "name": "ngp01_03_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F76CA", + "id": "ngp01_03_vios_1##1" + } + ] + } + ], + "created_at": "2013-08-23 14:56:11.787465", + "enabled": True, + "auto_defined": True, + "id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9" + }}) + + def get_storage_connectivity_groups(self, **kw): + """ + To return a fake storage_connectivity_groups + """ + return (200, {}, {"storage_connectivity_groups": [ + { + "display_name": "Auto-SCG for Registered SAN", + "id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9" + }, + { + "display_name": "SCG sample", + "id": "sdfb541cb-f418-4b4b-3129-a814865023fs" + }]}) + + def get_storage_connectivity_groups_detail(self, **kw): + """ + To return a fake detail storage_connectivity_groups + """ + return (200, {}, {"storage_connectivity_groups": [ + { + "auto_add_vios": True, + "fc_storage_access": True, + "display_name": "Auto-SCG for Registered SAN", + "host_list": [ + { + "name": "ngp01_02_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F715A", + "id": "ngp01_02_vios_1##1" + } + ] + }, + { + "name": "ngp01_03_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F76CA", + "id": "ngp01_03_vios_1##1" + } + ] + } + ], + "created_at": "2013-08-23 14:56:11.787465", + "enabled": True, + "auto_defined": True, + "id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"}, + { + "auto_add_vios": True, + "fc_storage_access": True, + "display_name": "SCG Sample", + "host_list": [ + { + "name": "ngp01_02_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F715A", + "id": "ngp01_02_vios_1##1" + } + ] + }, { + "name": "ngp01_03_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F76CA", + "id": "ngp01_03_vios_1##1" + } + ] + } + ], + "created_at": "2013-08-23 14:56:11.787465", + "enabled": True, + "auto_defined": True, + "id": "sdfb541cb-f418-4b4b-3129-a814865023fs" + } + ]}) + + +class PVCNovaServersTest(servers_testbox.ServersTest): + """ + This PVCNovaServersTest class extends the current nova + ServersTest class to provide servers related UT cases. + """ + + def setUp(self): + super(PVCNovaServersTest, self).setUp() + nova_fakeclient = PVCFakeClient('r', 'p', 's', + 'http://localhost:5000/') + # delegate to nova extension class + nova_client = delegate.new_composite_deletgate( + [ext_nova.Client(nova_fakeclient), nova_fakeclient]) + + self.cs = nova_client + + def tearDown(self): + super(PVCNovaServersTest, self).tearDown() + + def test_list(self): + comm_utils.get_utils = MagicMock() + comm_utils.get_utils().get_multi_scg_accessible_servers = MagicMock() + self.cs.manager.list() + comm_utils.get_utils().get_multi_scg_accessible_servers.\ + assert_called_once_with(None, None, True, None) + + def test_list_servers(self): + """ + Override this method to test listing powerVC server + Here is the same logic as OpenStack for example. 
+ """ + sl = self.cs.manager.list_all_servers() + print sl + self.cs.assert_called('GET', '/servers/detail') + [self.assertTrue(isinstance(s, servers.Server)) for s in sl] + + def test_list_instance_storage_viable_hosts(self): + with patch('novaclient.base.getid') as mock: + mock.return_value = 'server_id' + mock('server') + self.cs.manager.api.client.get = MagicMock( + return_value=('head', 'body')) + ret = self.cs.manager.list_instance_storage_viable_hosts('server') + self.cs.manager.api.client.get.assert_called_once_with( + '/storage-viable-hosts?instance_uuid=server_id') + self.assertEqual(ret, 'body') + + +class PVCNovaFlavorsTest(flavors_testbox.FlavorsTest): + """ + This PVCNovaFlavorsTest class extends the current nova + FlavorsTest class to provide flavors related UT cases. + """ + + def setUp(self): + super(PVCNovaFlavorsTest, self).setUp() + nova_fakeclient = PVCFakeClient('r', 'p', 's', + 'http://localhost:5000/') + # delegate to nova extension class + nova_client = delegate.new_composite_deletgate( + [ext_nova.Client(nova_fakeclient), nova_fakeclient]) + + self.cs = nova_client + + def tearDown(self): + super(PVCNovaFlavorsTest, self).tearDown() + + def test_get_flavor_details_alphanum_id(self): + """ + Override this method to test list specified powerVC + flavors. Here is the same logic as OpenStack for example. + """ + f = self.cs.flavors.get('aa1') + self.cs.assert_called('GET', '/flavors/aa1') + self.assertTrue(isinstance(f, flavors.Flavor)) + # Verify the preset value + self.assertEqual(f.ram, 5120) + self.assertEqual(f.disk, 5678) + self.assertEqual(f.ephemeral, 0) + self.assertEqual(f.is_public, True) + + +class PVCNovaHypervisorsTest(hypervisors_testbox.HypervisorsTest): + """ + This PVCNovaHypervisorsTest class extends the current nova + HypervisorsTest class to provide hypervisors related UT cases. + """ + + def setUp(self): + super(PVCNovaHypervisorsTest, self).setUp() + nova_fakeclient = PVCFakeClient('r', 'p', 's', + 'http://localhost:5000/') + # delegate to nova extension class + nova_client = delegate.new_composite_deletgate( + [ext_nova.Client(nova_fakeclient), nova_fakeclient]) + + self.cs = nova_client + + def tearDown(self): + super(PVCNovaHypervisorsTest, self).tearDown() + + def test_hypervisor_detail(self): + """ + Override this method to test if listing powerVC hypervisors + function works. + Here is the same logic as OpenStack for example. 
+ """ + expected = [ + dict(id=1234, + service=dict(id=1, host='compute1'), + vcpus=4, + memory_mb=10 * 1024, + local_gb=250, + vcpus_used=2, + memory_mb_used=5 * 1024, + local_gb_used=125, + hypervisor_type="xen", + hypervisor_version=3, + hypervisor_hostname="hyper1", + free_ram_mb=5 * 1024, + free_disk_gb=125, + current_workload=2, + running_vms=2, + cpu_info='cpu_info', + disk_available_least=100), + dict(id=2, + service=dict(id=2, host="compute2"), + vcpus=4, + memory_mb=10 * 1024, + local_gb=250, + vcpus_used=2, + memory_mb_used=5 * 1024, + local_gb_used=125, + hypervisor_type="xen", + hypervisor_version=3, + hypervisor_hostname="hyper2", + free_ram_mb=5 * 1024, + free_disk_gb=125, + current_workload=2, + running_vms=2, + cpu_info='cpu_info', + disk_available_least=100)] + + result = self.cs.hypervisors.list() + print result + self.cs.assert_called('GET', '/os-hypervisors/detail') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + +class PVCSCGTest(utils.TestCase): + def setUp(self): + super(PVCSCGTest, self).setUp() + nova_fakeclient = PVCFakeClient('r', 'p', 's', + 'http://localhost:5000/') + # delegate to nova extension class + nova_client = delegate.new_composite_deletgate( + [ext_nova.Client(nova_fakeclient), nova_fakeclient]) + + self.cs = nova_client + + def compare_to_expected(self, expected, hyper): + for key, value in expected.items(): + self.assertEqual(getattr(hyper, key), value) + + def test_get_detail_SCGs(self): + expected = [ + dict(id="f4b541cb-f418-4b4b-83b9-a8148650d4e9", + auto_add_vios=True, + fc_storage_access=True, + display_name="Auto-SCG for Registered SAN", + enabled=True, + auto_defined=True), + dict(id="sdfb541cb-f418-4b4b-3129-a814865023fs", + auto_add_vios=True, + fc_storage_access=True, + display_name="SCG Sample", + enabled=True, + auto_defined=True)] + + result = self.cs.storage_connectivity_groups.list() + self.cs.assert_called('GET', '/storage-connectivity-groups/detail') + + for idx, hyper in enumerate(result): + self.compare_to_expected(expected[idx], hyper) + + def test_get_SCGs(self): + expected = dict(id="f4b541cb-f418-4b4b-83b9-a8148650d4e9", + auto_add_vios=True, + fc_storage_access=True, + display_name="Auto-SCG for Registered SAN", + enabled=True, + auto_defined=True) + + result = self.cs.storage_connectivity_groups.\ + get('f4b541cb-f418-4b4b-83b9-a8148650d4e9') + self.cs.assert_called('GET', + '/storage-connectivity-groups/' + + 'f4b541cb-f418-4b4b-83b9-a8148650d4e9') + + self.compare_to_expected(expected, result) + + +class SCGImageManagerTest(unittest.TestCase): + def setUp(self): + super(SCGImageManagerTest, self).setUp() + nova_fakeclient = PVCFakeClient('r', 'p', 's', + 'http://localhost:5000/') + # delegate to nova extension class + nova_client = delegate.new_composite_deletgate( + [ext_nova.Client(nova_fakeclient), nova_fakeclient]) + + self.cs = nova_client + + def test_list(self): + with patch('novaclient.base.Manager._list') as mock: + mock.return_value = ['image1', 'image2', 'image3'] + ret = self.cs.scg_images.list('scgUUID') + mock.assert_called_once_with( + '/storage-connectivity-groups/scgUUID/images', 'images') + self.assertEqual(ret, ['image1', 'image2', 'image3']) + + def test_list_ids(self): + class FakeImage(object): + def __init__(self, image_id): + self.id = image_id + + self.cs.scg_images.list = MagicMock( + return_value=[FakeImage(1), FakeImage(2), FakeImage(3)]) + ret = self.cs.scg_images.list_ids('scgUUID') + self.assertEqual(ret, [1, 2, 3]) diff --git 
a/common-powervc/test/common/powervc_test_1.conf b/common-powervc/test/common/powervc_test_1.conf
new file mode 100644
index 0000000..495d4c1
--- /dev/null
+++ b/common-powervc/test/common/powervc_test_1.conf
@@ -0,0 +1,228 @@
+# This file contains configuration properties that affect how the powervc driver functions and how it
+# communicates with the PowerVC server. Most properties have default values based on a default
+# PowerVC configuration. However, some connection properties, such as the PowerVC host name,
+# do not have default values and must be configured prior to running the powervc driver. These
+# properties are marked with the text INPUT REQUIRED. You may also have to change other
+# properties depending on your environment and how your PowerVC server is configured. For
+# more information, see the Smart Cloud Entry Administration Guide.
+
+[DEFAULT]
+# The following group of properties needs to be configured
+# in order for the PowerVC Driver to be able to authenticate with keystone
+# and obtain information from it, that might be required to run background
+# tasks (such as discovering a new image), or simply to connect to a
+# secured Glance.
+# When running secured Glance, make sure the 'auth_strategy' property in
+# nova.conf is set to 'keystone'.
+
+# Log info messages
+verbose = true
+
+
+[openstack]
+# Authentication url to authenticate with keystone (string value)
+auth_url = http://localhost:5000/v2.0
+
+# v2.0 or v3
+keystone_version = v2.0
+
+# Tenant name for connecting to keystone in admin context (string value)
+admin_tenant_name = demo
+
+# Username for connecting to keystone in admin context (string value)
+admin_user = demo
+
+# Password for connecting to keystone in admin context (string value)
+admin_password = openstack
+
+# For local SSL connections, specify the path and filename of the cacert file
+#connection_cacert =
+
+http_insecure = True
+
+#
+# Qpid connection information
+#
+
+# Qpid broker hostname (string value)
+qpid_hostname = host
+
+# Qpid broker port (integer value)
+qpid_port = 5675
+
+# Username for qpid connection (string value)
+qpid_username =
+
+# Password for qpid connection (string value)
+qpid_password =
+
+# Transport to use, either 'tcp'(default) or 'ssl'
+qpid_protocol = tcp
+
+[powervc]
+
+# Full class name for the manager for PowerVC Manager Service (string value)
+powervc_manager = powervc.nova.driver.compute.manager.PowerVCCloudManager
+
+# Full class name for the driver for PowerVC Driver Service (string value)
+powervc_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver
+
+#
+# Connection information for PowerVC.
+#
+
+# Authentication url of the PowerVC to connect to
+# INPUT REQUIRED
+# Provide 'host' portion by updating it to the hostname of the PowerVC system
+#auth_url = https://host/powervc/openstack/identity/v3
+
+# v2.0 or v3
+keystone_version = v3
+
+# Username for PowerVC connection (string value)
+admin_user = root
+
+# Password for PowerVC connection (string value)
+admin_password = passw0rd
+
+# Tenant name for PowerVC connection (string value)
+admin_tenant_name = ibm-default
+
+# For PowerVC SSL connections, specify the path and filename of the cacert file
+# INPUT REQUIRED
+# Provide the cacert file by copying it from its install location on the
+# PowerVC host (e.g. /etc/pki/tls/certs/powervc.crt) to the local hosting
+# OpenStack system.
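+# Example (hypothetical local path after copying the file):
+#connection_cacert = /etc/pki/tls/certs/powervc.crt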
+#connection_cacert = + +# Value of insecure option for PowerVC connections (Default=True) +# INPUT REQUIRED +# Change to False when using a secure connection and providing a cacert file. +http_insecure = True + +# Value of authorization token expiration stale duration (Default=3600) +# INPUT REQUIRED +# Due to PowerVC requirement, all the REST API customers need to pre-refresh +# authorization token at least 1 hour before expiration +expiration_stale_duration = 3600 + +# The names of the storage connectivity groups supported by our driver +# INPUT REQUIRED +# Provide the PowerVC storage connectivity group (SCG) names by getting the name +# from the PowerVC system, or using the PowerVC default SCG of "Any host, all VIOS". +# If there are more than one SCG you want to specify, just add more SCG values with +# more storage_connectivity_group +# Note: The value of this property must exactly match the value as specified on the +# PowerVC server, including case, punctuation, and spaces. +storage_connectivity_group = Any host, all VIOS +#storage_connectivity_group = + +# +# Qpid connection information for PowerVC +# + +# Qpid broker hostname (string value) +# INPUT REQUIRED +# Change 'host' to the hostname of the PowerVC system +qpid_hostname = host + +# Qpid broker port (integer value) + +# uncomment following line for non-ssl +# qpid_port = 5672 +qpid_port = 5679 + +# Username for qpid connection (string value) +#qpid_username = powervc_qpid + +# Password for qpid connection (string value) +# INPUT REQUIRED +# Provide the qpid connection password from the PowerVC system +# by using the cat command on the pw.file in the directory where +# PowerVC is installed (e.g. cat /opt/ibm/powervc/data/pw.file) +qpid_password = + +# Transport to use, either 'tcp'(default) or 'ssl' + +# uncomment following line for non-ssl +# qpid_protocol = tcp +qpid_protocol = ssl + +# +# Sync variables +# + +# The name of the staging project (string value) +# If not set defaults to 'Public'. If set the named project should exist and +# be accessible by the staging_user. +staging_project_name = Public + +# The name of the staging user (string value) +# If not set defaults to 'admin'. If set the user should exist and +# have access to the project identified by staging_project_name. +staging_user = admin + +# The prefix that will be added to the flavor name from PowerVC +# and stored (string value). This should be unique for every +# connection to help distinguish the flavors +flavor_prefix = PVC- + +# This is a list of PowerVC flavor names that should be synced. +# If no flavor name is specified, then all flavors are synced. +flavor_white_list = + +# This is a list of PowerVC flavor names that should not be synced. +flavor_black_list = + +# The periodic flavor sync interval in seconds. +flavor_sync_interval = 300 + +# Instance periodic sync interval specified in seconds +instance_sync_interval = 20 + +# How many instance sync intervals between full instance syncs. Only instances +# known to be out of sync are synced on the interval except after this many +# intervals when all instances are synced. +full_instance_sync_frequency = 30 + +# Image periodic sync interval specified in seconds. This is the time from the end +# of one successful image periodic sync operation to the start of the next. 
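+# (For example, with the default of 300, a sync that finishes at t=100s
+# starts its next run at t=400s.)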
+image_periodic_sync_interval_in_seconds = 300
+
+# The time in seconds between image sync retry attempts if an error was
+# encountered during an image sync operation
+image_sync_retry_interval_time_in_seconds = 60
+
+# The maximum number of images to return. The default is 500 images. If your PowerVC
+# has more than 500 images, this limit should be increased to include all images.
+image_limit = 500
+
+# Volume periodic sync interval specified in seconds
+volume_sync_interval = 20
+
+# How many volume sync intervals between full volume syncs.
+# Only volumes known to be out of sync are synced on the interval
+# except after this many intervals when all volumes are synced.
+full_volume_sync_frequency = 30
+
+# Volume type periodic sync interval specified in seconds
+volume_type_sync_interval = 20
+
+# How many volume type sync intervals between full volume type syncs.
+# Only volume types known to be out of sync are synced on the interval
+# except after this many intervals when all volume types are synced.
+full_volume_type_sync_frequency = 30
+
+# Ignore delete errors so an exception is not thrown during a
+# delete. When set to true, this allows the volume to be deleted
+# on the hosting OS even if an exception occurs. When set to false,
+# exceptions during delete prevent the volume from being deleted
+# on the hosting OS.
+volume_driver_ignore_delete_error = False
+
+# The number of times to check whether attaching/detaching the volume
+# succeeded
+volume_max_try_times = 12
+
+# Minimum delay interval and initial delay seconds for long run tasks.
+longrun_loop_interval = 7
+longrun_initial_delay = 10
diff --git a/common-powervc/test/common/powervc_test_2.conf b/common-powervc/test/common/powervc_test_2.conf
new file mode 100644
index 0000000..990681c
--- /dev/null
+++ b/common-powervc/test/common/powervc_test_2.conf
@@ -0,0 +1,228 @@
+# This file contains configuration properties that affect how the powervc driver functions and how it
+# communicates with the PowerVC server. Most properties have default values based on a default
+# PowerVC configuration. However, some connection properties, such as the PowerVC host name,
+# do not have default values and must be configured prior to running the powervc driver. These
+# properties are marked with the text INPUT REQUIRED. You may also have to change other
+# properties depending on your environment and how your PowerVC server is configured. For
+# more information, see the Smart Cloud Entry Administration Guide.
+
+[DEFAULT]
+# The following group of properties needs to be configured
+# in order for the PowerVC Driver to be able to authenticate with keystone
+# and obtain information from it, that might be required to run background
+# tasks (such as discovering a new image), or simply to connect to a
+# secured Glance.
+# When running secured Glance, make sure the 'auth_strategy' property in
+# nova.conf is set to 'keystone'.
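+
+# NOTE: this second test file deliberately overrides a few values from
+# powervc_test_1.conf (for example qpid_username and qpid_port) so that
+# test_config.py can verify that a later config file extends an earlier one.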
+ +# Log info messages +verbose = true + + +[openstack] +# Authentication url to authenticate with keystone (string value) +auth_url = http://localhost:5000/v2.0 + +# v2.0 or v3 +keystone_version = v2.0 + +# Tenant name for connecting to keystone in admin context (string value) +admin_tenant_name = demo + +# Username for connecting to keystone in admin context (string value) +admin_user = demo + +# Password for connecting to keystone in admin context (string value) +admin_password = openstack + +# For local SSL connections, specify the path and filename of the cacert file +#connection_cacert = + +http_insecure = True + +# +# Qpid connection information +# + +# Qpid broker hostname (string value) +qpid_hostname = monica + +# Qpid broker port (integer value) +qpid_port = 5675 + +# Username for qpid connection (string value) +qpid_username = + +# Password for qpid connection (string value) +qpid_password = + +# Transport to use, either 'tcp'(default) or 'ssl' +qpid_protocol = tcp + +[powervc] + +# Full class name for the manager for PowerVC Manager Service (string value) +powervc_manager = powervc.nova.driver.compute.manager.PowerVCCloudManager + +# Full class name for the driver for PowerVC Driver Service (string value) +powervc_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver + +# +# Connection information for PowerVC. +# + +# Authentication url of the PowerVC to connect to +# INPUT REQUIRED +# Provide 'host' portion by updating it to the hostname of the PowerVC system +auth_url = https://host/powervc/openstack/identity/v3 + +# v2.0 or v3 +keystone_version = v3 + +# Username for PowerVC connection (string value) +admin_user = root + +# Password for PowerVC connection (string value) +admin_password = passw0rd + +# Tenant name for PowerVC connection (string value) +admin_tenant_name = ibm-default + +# For PowerVC SSL connections, specify the path and filename of the cacert file +# INPUT REQUIRED +# Provide the cacert file by copying it from its install location on the +# PowerVC host (e.g. /etc/pki/tls/certs/powervc.crt) to the local hosting +# Openstack system. +#connection_cacert = + +# Value of insecure option for PowerVC connections (Default=True) +# INPUT REQUIRED +# Change to False when using a secure connection and providing a cacert file. +http_insecure = True + +# Value of authorization token expiration stale duration (Default=3600) +# INPUT REQUIRED +# Due to PowerVC requirement, all the REST API customers need to pre-refresh +# authorization token at least 1 hour before expiration +expiration_stale_duration = 3600 + +# The names of the storage connectivity groups supported by our driver +# INPUT REQUIRED +# Provide the PowerVC storage connectivity group (SCG) names by getting the name +# from the PowerVC system, or using the PowerVC default SCG of "Any host, all VIOS". +# If there are more than one SCG you want to specify, just add more SCG values with +# more storage_connectivity_group +# Note: The value of this property must exactly match the value as specified on the +# PowerVC server, including case, punctuation, and spaces. 
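+# Example with two groups (the second name is hypothetical):
+#storage_connectivity_group = Any host, all VIOS
+#storage_connectivity_group = My second SCG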
+storage_connectivity_group = Any host, all VIOS +#storage_connectivity_group = + +# +# Qpid connection information for PowerVC +# + +# Qpid broker hostname (string value) +# INPUT REQUIRED +# Change 'host' to the hostname of the PowerVC system +qpid_hostname = host + +# Qpid broker port (integer value) + +# uncomment following line for non-ssl +# qpid_port = 5672 +qpid_port = 5678 + +# Username for qpid connection (string value) +qpid_username = powervc_qpid_2 + +# Password for qpid connection (string value) +# INPUT REQUIRED +# Provide the qpid connection password from the PowerVC system +# by using the cat command on the pw.file in the directory where +# PowerVC is installed (e.g. cat /opt/ibm/powervc/data/pw.file) +qpid_password = + +# Transport to use, either 'tcp'(default) or 'ssl' + +# uncomment following line for non-ssl +# qpid_protocol = tcp +qpid_protocol = ssl + +# +# Sync variables +# + +# The name of the staging project (string value) +# If not set defaults to 'Public'. If set the named project should exist and +# be accessible by the staging_user. +staging_project_name = Public + +# The name of the staging user (string value) +# If not set defaults to 'admin'. If set the user should exist and +# have access to the project identified by staging_project_name. +staging_user = admin + +# The prefix that will be added to the flavor name from PowerVC +# and stored (string value). This should be unique for every +# connection to help distinguish the flavors +flavor_prefix = PVC- + +# This is a list of PowerVC flavor names that should be synced. +# If no flavor name is specified, then all flavors are synced. +flavor_white_list = + +# This is a list of PowerVC flavor names that should not be synced. +flavor_black_list = + +# The periodic flavor sync interval in seconds. +flavor_sync_interval = 300 + +# Instance periodic sync interval specified in seconds +instance_sync_interval = 20 + +# How many instance sync intervals between full instance syncs. Only instances +# known to be out of sync are synced on the interval except after this many +# intervals when all instances are synced. +full_instance_sync_frequency = 30 + +# Image periodic sync interval specified in seconds. This is the time from the end +# of one successful image periodic sync operation to the start of the next. +image_periodic_sync_interval_in_seconds = 300 + +# The time in seconds between image sync retry attempts if an error was +# encountered during an image sync operation +image_sync_retry_interval_time_in_seconds = 60 + +# The maximum number of images to return. The default is 500 images. If your PowerVC +# has more than 500 images, this limit should be increased to include all images. +image_limit = 500 + +# Volume periodic sync interval specified in seconds +volume_sync_interval = 20 + +# How many volume sync intervals between full volume syncs. +# Only volumes known to be out of sync are synced on the interval +# except after this many intervals when all volumes are synced. +full_volume_sync_frequency = 30 + +# Volume type periodic sync interval specified in seconds +volume_type_sync_interval = 20 + +# How many volume type sync intervals between full volume type syncs. +# Only volumes known to be out of sync are synced on the interval +# except after this many intervals when all volumes are synced. +full_volume_type_sync_frequency = 30 + +# Ignore delete errors so an exception is not thrown during a +# delete. 
When set to true, this allows the volume to be deleted +# on the hosting OS even if an exception occurs. When set to false, +# exceptions during delete prevent the volume from being deleted +# on the hosting OS. +volume_driver_ignore_delete_error = False + +# The times to check whether attaching/detaching the volume succeed +volume_max_try_times = 12 + +# Minimum delay interval and initial delay seconds for long run tasks. +longrun_loop_interval = 7 +longrun_initial_delay = 10 diff --git a/common-powervc/test/common/test_config.py b/common-powervc/test/common/test_config.py new file mode 100644 index 0000000..190ae6c --- /dev/null +++ b/common-powervc/test/common/test_config.py @@ -0,0 +1,62 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + + +import mock +import testtools +import os + +from powervc.common import config + + +class PVCConfigTest(testtools.TestCase): + + def setUp(self): + super(PVCConfigTest, self).setUp() + + def tearDown(self): + super(PVCConfigTest, self).tearDown() + del config.parse_power_config.power_config_loaded + + def test_parse_config_1(self): + p1 = mock.patch('oslo.config.cfg.find_config_files', + new=mock.MagicMock(return_value=["%s%s%s" % + (os.path.dirname(__file__), + os.sep, + "powervc_test_1.conf")])) + try: + p1.start() + config.parse_power_config([], "powervc-baseproject", None) + # default value + self.assertEqual(config.CONF.powervc.auth_url, + "http://localhost:5000/v2.0/") + # value in file + self.assertEqual(config.CONF.powervc.qpid_port, 5679) + finally: + p1.stop() + + def test_parse_config_2(self): + p2 = mock.patch('oslo.config.cfg.find_config_files', + new=mock.MagicMock(side_effect=[["%s%s%s" % + (os.path.dirname(__file__), + os.sep, + "powervc_test_1.conf")], + ["%s%s%s" % + (os.path.dirname(__file__), + os.sep, + "powervc_test_2.conf")]])) + try: + p2.start() + config.parse_power_config([], "baseproject", None) + # extend value in second file + self.assertEqual(config.CONF.powervc.qpid_username, + "powervc_qpid_2") + finally: + p2.stop() diff --git a/common-powervc/test/common/test_messaging.py b/common-powervc/test/common/test_messaging.py new file mode 100644 index 0000000..530d9e1 --- /dev/null +++ b/common-powervc/test/common/test_messaging.py @@ -0,0 +1,64 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" +import unittest +from powervc.common.messaging import QpidConnection + + +class QpidTest(unittest.TestCase): + + def setUp(self): + super(QpidTest, self).setUp() + self.conn = QpidConnection(url='127.0.0.1:5989', + username='test_username', + password='test_passwd', + transport='tcp', + reconnection_interval=60, + reconnect_handler=None, + context=None, + log=None) + + def test_create_listener(self): + self.listener = self.conn.\ + create_listener('test_exchange', 'test_topic') + self.assertNotEqual(self.listener, None) + self.assertEqual([self.listener], self.conn._listeners) + + def test_register_handler(self): + def _fake_handler(): + pass + + if not hasattr(self, 'listener'): + self.listener = self.conn.\ + create_listener('test_exchange', 'test_topic') + + self.listener.register_handler('foo.bar.*', _fake_handler) + self.assertEqual(self.listener._handler_map['foo.bar.*'], + _fake_handler) + + def test_unregister_handler(self): + def _fake_handler(): + pass + + if not hasattr(self, 'listener'): + self.listener = self.conn.\ + create_listener('test_exchange', 'test_topic') + + self.listener.register_handler('foo.bar.*', _fake_handler) + self.assertEqual(self.listener._handler_map['foo.bar.*'], + _fake_handler) + self.listener.unregister_handler('foo.bar.*') + self.assertEqual(self.listener._handler_map, + {}) + + def tearDown(self): + unittest.TestCase.tearDown(self) + +if __name__ == "__main__": + unittest.main() diff --git a/common-powervc/test/common/test_netutils.py b/common-powervc/test/common/test_netutils.py new file mode 100644 index 0000000..f04d6b1 --- /dev/null +++ b/common-powervc/test/common/test_netutils.py @@ -0,0 +1,45 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + + +import testtools + +from powervc.common import netutils + + +class PVCNetUtilsTest(testtools.TestCase): + + def setUp(self): + super(PVCNetUtilsTest, self).setUp() + + def tearDown(self): + super(PVCNetUtilsTest, self).tearDown() + + def test_is_ipv4_address_1(self): + isipv4_address = netutils.is_ipv4_address("localhost") + self.assertFalse(isipv4_address) + + def test_is_ipv4_address_2(self): + isipv4_address = netutils.is_ipv4_address("127.0.0.1") + self.assertTrue(isipv4_address) + + def test_hostname_url_1(self): + url = netutils.hostname_url("http://127.0.0.1:5000/v2.0") + self.assertEqual(url, "http://127.0.0.1:5000/v2.0") + + def test_hostname_url_2(self): + url = netutils\ + .hostname_url("https://9.110.75.155/powervc/openstack/identity/v3") + self.assertEqual(url, + "https://9.110.75.155/powervc/openstack/identity/v3") + + def test_hostname_url_3(self): + url = netutils.hostname_url("http://random_host:5000/v2.0") + self.assertEqual(url, "http://random_host:5000/v2.0") diff --git a/common-powervc/test/common/test_utils.py b/common-powervc/test/common/test_utils.py new file mode 100644 index 0000000..f0bc2f7 --- /dev/null +++ b/common-powervc/test/common/test_utils.py @@ -0,0 +1,1045 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+
+import sys
+import eventlet
+import mock
+import testtools
+from novaclient.tests.v1_1 import fakes as novafakes
+from cinderclient.tests.v1 import fakes as cinderfakes
+from novaclient.tests import utils
+from powervc.common import utils as pvc_utils
+from powervc.common.client.extensions import nova as ext_nova
+from powervc.common.client.extensions import cinder as ext_cinder
+from powervc.common.client import delegate
+from powervc.common import config
+from powervc.common.utils import SCGCache
+from powervc.common.utils import VolumeCache
+
+sys.modules['powervc.common.config'] = mock.MagicMock()
+
+
+"""
+    These test classes extend the current nova and cinder client test cases
+    to provide PowerVC-specific storage connectivity group, storage template
+    and volume related test cases.
+
+    To run the test cases, alternatively:
+    1. Right click TestNovaClient.py --> Run As --> Python unit-test
+    or
+    2. Refer to this link for detailed UT running information:
+    https://jazz04.rchland.ibm.com:9443/jazz/service/
+    com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/
+    itemName/com.ibm.team.workitem.Attachment/67843
+
+    UtilsRealTest connects to a real PowerVC v1.2 and retrieves information;
+    those cases may fail when the real environment is unavailable. All other
+    fake test cases should run successfully.
+"""
+
+
+class PVCFakeNovaClient(novafakes.FakeClient):
+
+    """
+    This PVCFakeNovaClient class extends the current nova FakeClient,
+    setting its self.client variable to PVCFakeNovaHTTPClient.
+    """
+
+    def __init__(self, *args, **kwargs):
+        novafakes.FakeClient.__init__(self, *args, **kwargs)
+        self.client = PVCFakeNovaHTTPClient(**kwargs)
+
+
+class PVCFakeNovaHTTPClient(novafakes.FakeHTTPClient):
+
+    """
+    This PVCFakeNovaHTTPClient class extends the current nova
+    FakeHTTPClient. For all the HTTP requests in this class, it returns
+    fake JSON data as specified beforehand instead of requesting a real
+    environment.
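+    The get_servers_detail() payload below mimics a PowerVC /servers/detail
+    response, including PowerVC-specific attributes such as cpus, vcpu_mode,
+    health_status and compliance_status.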
+ """ + + def __init__(self, **kwargs): + novafakes.FakeHTTPClient.__init__(self, **kwargs) + + def get_servers_detail(self, **kw): + return (200, {}, { + "servers": [ + { + "OS-EXT-STS:task_state": "activating", + "addresses": { + "VLAN1": [ + { + "version": 4, + "addr": "10.4.11.113", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "image": { + "id": "fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d", + "links": [ + { + "href": "https://localhost/powervc/openstack/\ + compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\ + images/fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d", + "rel": "bookmark" + } + ] + }, + "ephemeral_gb": 1, + "cpus": "1", + "flavor": { + "id": "726544ff-9f0a-41ad-8b26-e6575bfe8146", + "links": [ + { + "href": "https://localhost/powervc/openstack/\ + compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\ + flavors/726544ff-9f0a-41ad-8b26-e6575bfe8146", + "rel": "bookmark" + } + ] + }, + "user_id": "8a326a8c5a774022a1ec49f5692bc316", + "vcpu_mode": "shared", + "desired_compatibility_mode": "default", + "updated": "2013-09-04T07:09:33Z", + "memory_mode": "dedicated", + "key_name": None, + "min_memory_mb": 512, + "name": "hc-22", + "min_vcpus": "0.10", + "vcpus": "0.50", + "max_memory_mb": 4096, + "min_cpus": "1", + "links": [ + { + "href": "https://localhost/powervc/openstack/\ + compute/\ + v2/2ec48b8ec30f4328bf95b8a5ad147c4b/servers/\ + 6e205d64-7651-42bf-9c8b-b0cb4208e813", + "rel": "self" + }, + { + "href": "https://localhost/powervc/openstack/\ + compute/\ + 2ec48b8ec30f4328bf95b8a5ad147c4b/servers/\ + 6e205d64-7651-42bf-9c8b-b0cb4208e813", + "rel": "bookmark" + } + ], + "max_vcpus": "16.00", + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": + "nova-z3-9-5-125-55-00000075", + "OS-EXT-SRV-ATTR:host": "ngp01_03_vios_1", + "id": "6e205d64-7651-42bf-9c8b-b0cb4208e813", + "security_groups": [ + { + "name": "default" + } + ], + "OS-DCF:diskConfig": "MANUAL", + "health_status": { + "health_value": "UNKNOWN", + "unknown_reason": + "Unable to get related hypervisor data" + }, + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "OS-EXT-AZ:availability_zone": "nova", + "metadata": {}, + "status": "ACTIVE", + "hostId": + "db8f3c353837a52c3782b4d04a767b33bd7dfa72983b4ab9aef91cb0", + "cpu_utilization": 0, + "compliance_status": [ + { + "status": "compliant", + "category": "resource.allocation" + } + ], + "current_compatibility_mode": "POWER7", + "root_gb": 4, + "OS-EXT-SRV-ATTR:hypervisor_hostname": + "ngp01-03-vios-1.rtp.stglabs.ibm.com", + "created": "2013-09-04T07:08:31Z", + "tenant_id": "2ec48b8ec30f4328bf95b8a5ad147c4b", + "memory_mb": 512, + "max_cpus": "16" + }, + { + "OS-EXT-STS:task_state": "activating", + "addresses": { + "VLAN1": [ + { + "version": 4, + "addr": "10.4.11.112", + "OS-EXT-IPS:type": "fixed" + } + ] + }, + "image": { + "id": "fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d", + "links": [ + { + "href": "https://localhost/powervc/openstack/\ + compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\ + images/\ + fd2a0fdc-fcda-45fc-b5dd-96b9d9e0aa4d", + "rel": "bookmark" + } + ] + }, + "ephemeral_gb": 1, + "cpus": "1", + "flavor": { + "id": "726544ff-9f0a-41ad-8b26-e6575bfe8146", + "links": [ + { + "href": "https://localhost/powervc/openstack/\ + compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\ + flavors/726544ff-9f0a-41ad-8b26-e6575bfe8146", + "rel": "bookmark" + } + ] + }, + "user_id": "8a326a8c5a774022a1ec49f5692bc316", + "vcpu_mode": "shared", + "desired_compatibility_mode": "default", + "updated": "2013-09-04T07:02:57Z", + "memory_mode": "dedicated", + "key_name": None, + 
"min_memory_mb": 512, + "name": "hc-11", + "min_vcpus": "0.10", + "vcpus": "0.50", + "max_memory_mb": 4096, + "min_cpus": "1", + "links": [ + { + "href": "https://localhost/powervc/openstack/\ + compute\ + /v2/2ec48b8ec30f4328bf95b8a5ad147c4b/servers/\ + 2eab7ee2-62eb-4f31-8628-20f8b06df86a", + "rel": "self" + }, + { + "href": "https://localhost/powervc/openstack/\ + compute/2ec48b8ec30f4328bf95b8a5ad147c4b/\ + servers/2eab7ee2-62eb-4f31-8628-20f8b06df86a", + "rel": "bookmark" + } + ], + "max_vcpus": "16.00", + "OS-EXT-STS:vm_state": "active", + "OS-EXT-SRV-ATTR:instance_name": + "nova-z3-9-5-125-55-00000074", + "OS-EXT-SRV-ATTR:host": "ngp01_02_vios_1", + "id": "2eab7ee2-62eb-4f31-8628-20f8b06df86a", + "security_groups": [ + { + "name": "default" + } + ], + "OS-DCF:diskConfig": "MANUAL", + "health_status": { + "health_value": "UNKNOWN", + "unknown_reason": + "Unable to get related hypervisor data" + }, + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "OS-EXT-AZ:availability_zone": "nova", + "metadata": {}, + "status": "ACTIVE", + "hostId": + "a67be7805b2dccafc012b2225f59cbad7504e8716c0fd4631bb6af73", + "cpu_utilization": 0.02, + "compliance_status": [ + { + "status": "compliant", + "category": "resource.allocation" + } + ], + "current_compatibility_mode": "POWER7", + "root_gb": 4, + "OS-EXT-SRV-ATTR:hypervisor_hostname": + "ngp01-02-vios-1.rtp.stglabs.ibm.com", + "created": "2013-09-04T07:01:10Z", + "tenant_id": "2ec48b8ec30f4328bf95b8a5ad147c4b", + "memory_mb": 512, + "max_cpus": "16" + } + ] + }) + + def get_storage_connectivity_groups_f4b541cb_f418_4b4b_83b9_a8148650d4e9( + self, **kw): + """ + To get a fake detail storage_connectivity_group + """ + return (200, {}, {"storage_connectivity_group": + { + "auto_add_vios": True, + "fc_storage_access": False, + "display_name": "Auto-SCG for Registered SAN", + "vios_cluster": + { + "provider_id": "shared_v7000_1" + }, + "host_list": [ + { + "name": "ngp01_02_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F715A", + "id": "ngp01_02_vios_1##1" + } + ] + }, + { + "name": "ngp01_03_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F76CA", + "id": "ngp01_03_vios_1##1" + } + ] + } + ], + "created_at": "2013-08-23 14:56:11.787465", + "enabled": True, + "auto_defined": True, + "id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9" + }}) + + def get_storage_connectivity_groups(self, **kw): + """ + To return a fake storage_connectivity_groups + """ + return (200, {}, {"storage_connectivity_groups": [ + { + "display_name": "Auto-SCG for Registered SAN", + "id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9" + }, + { + "display_name": "SCG sample", + "id": "sdfb541cb-f418-4b4b-3129-a814865023fs" + } + ]}) + + def get_storage_connectivity_groups_detail(self, **kw): + """ + To return a fake detail storage_connectivity_groups + """ + return (200, {}, {"storage_connectivity_groups": [ + { + "auto_add_vios": True, + "fc_storage_access": True, + "display_name": "Auto-SCG for Registered SAN", + "host_list": [ + { + "name": "ngp01_02_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F715A", + "id": "ngp01_02_vios_1##1" + } + ] + }, + { + "name": "ngp01_03_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F76CA", + "id": "ngp01_03_vios_1##1" + } + ] + } + ], + "created_at": "2013-08-23 14:56:11.787465", + "enabled": True, + "auto_defined": True, + "id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9" + }, + { + "auto_add_vios": True, + "fc_storage_access": True, + "display_name": "SCG Sample", + 
"host_list": [ + { + "name": "ngp01_02_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F715A", + "id": "ngp01_02_vios_1##1" + } + ] + }, + { + "name": "ngp01_03_vios_1", + "vios_list": [ + { + "lpar_id": 1, + "name": "10-F76CA", + "id": "ngp01_03_vios_1##1" + } + ] + } + ], + "created_at": "2013-08-23 14:56:11.787465", + "enabled": True, + "auto_defined": True, + "id": "sdfb541cb-f418-4b4b-3129-a814865023fs" + } + ]}) + + +class PVCFakeCinderClient(cinderfakes.FakeClient): + + """ + This PVCFakeClient class extends the current cinder FakeClient, + and pvccinderclient.CinderClient. + aiming to set the self client variable to PVCFakeHTTPClient + """ + + def __init__(self, *args, **kwargs): + cinderfakes.FakeClient.__init__(self, *args, **kwargs) + self.client = PVCFakeCinderHTTPClient(**kwargs) + + +class PVCFakeCinderHTTPClient(cinderfakes.FakeHTTPClient): + + """ + This PVCFakeHTTPClient class extends the current cinder FakeHTTPClient. + For all the HTTP requests in this class, it returns a fake json data + as specified beforehand instead of requesting to a real environment. + """ + + def __init__(self, **kwargs): + cinderfakes.FakeHTTPClient.__init__(self, **kwargs) + + # + # Volumes related + # + def get_volumes_2eab9958(self, **kw): + r = {'volume': self.get_volumes_detail()[2]['volumes'][0]} + return (200, {}, r) + + def get_volumes_detail(self, **kw): + """ + Override the parent method to a powerVC specified volume data. + """ + return (200, {}, { + "volumes": [ + { + "status": "available", + "display_name": "abcabc", + "attachments": [], + "availability_zone": "nova", + "bootable": False, + "created_at": "2013-08-30T07:22:20.729677", + "display_description": "None", + "volume_type": "shared_v7000_1-default", + "snapshot_id": "None", + "source_volid": "None", + "metadata": {}, + "id": "ab41ee79-0f84-4f0d-976e-0aa122c8b89d", + "size": 1 + }, + { + "status": "in-use", + "display_name": "", + "attachments": [ + { + "host_name": "None", + "device": "/dev/sda", + "server_id": + "103c1f3a-c2b2-4b90-80f8-cc2dd756b636", + "id": "2eab9958-16e1-4559-b3e6-e723360a4f27", + "volume_id": + "2eab9958-16e1-4559-b3e6-e723360a4f27" + } + ], + "availability_zone": "nova", + "bootable": False, + "created_at": "2013-08-30T03:33:06.272849", + "os-vol-tenant-attr:tenant_id": + "2ec48b8ec30f4328bf95b8a5ad147c4b", + "display_description": "", + "os-vol-host-attr:host": "shared_v7000_1", + "health_status": { + "health_value": "OK" + }, + "volume_type": "None", + "snapshot_id": "None", + "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5", + "metadata": { + "instance_uuid": + "103c1f3a-c2b2-4b90-80f8-cc2dd756b636", + "is_boot_volume": "True" + }, + "id": "2eab9958", + "size": 4 + }, + { + "status": "in-use", + "display_name": "", + "attachments": [ + { + "host_name": "None", + "device": "/dev/sda", + "server_id": + "6a81591c-1671-43d1-b8c2-e0eb09cdab84", + "id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf", + "volume_id": "6c21891a-ce09-4701-98d7-1c8d0c6872cf" + } + ], + "availability_zone": "nova", + "bootable": False, + "created_at": "2013-08-30T03:32:30.922320", + "os-vol-tenant-attr:tenant_id": + "2ec48b8ec30f4328bf95b8a5ad147c4b", + "display_description": "", + "os-vol-host-attr:host": "shared_v7000_1", + "health_status": { + "health_value": "OK" + }, + "volume_type": "None", + "snapshot_id": "None", + "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5", + "metadata": { + "instance_uuid": + "6a81591c-1671-43d1-b8c2-e0eb09cdab84", + "is_boot_volume": "True" + }, + "id": 
"6c21891a-ce09-4701-98d7-1c8d0c6872cf", + "size": 4 + }, + { + "status": "in-use", + "display_name": "", + "attachments": [ + { + "host_name": "None", + "device": "/dev/sda", + "server_id": + "57625362-279c-4e02-bc9c-c6035904b2f1", + "id": "ff681131-9eab-4723-8261-6a80f8e3518d", + "volume_id": "ff681131-9eab-4723-8261-6a80f8e3518d" + } + ], + "availability_zone": "nova", + "bootable": False, + "created_at": "2013-08-30T03:32:03.243339", + "os-vol-tenant-attr:tenant_id": + "2ec48b8ec30f4328bf95b8a5ad147c4b", + "display_description": "", + "os-vol-host-attr:host": "shared_v7000_1", + "health_status": { + "health_value": "OK" + }, + "volume_type": "None", + "snapshot_id": "None", + "source_volid": "5f7c7d0d-b4e1-4ebc-80d4-4f1e8734f7e5", + "metadata": { + "instance_uuid": + "57625362-279c-4e02-bc9c-c6035904b2f1", + "is_boot_volume": "True" + }, + "id": "ff681131-9eab-4723-8261-6a80f8e3518d", + "size": 4 + } + ] + }) + + def post_volumes_pvcvolume_action(self, body, **kw): + """ + Add this method to handle powerVC volume post actions + Here is the same logic as OpenStack one for example. + """ + _body = None + resp = 202 + assert len(list(body.keys())) == 1 + action = list(body.keys())[0] + if action == 'os-attach': + assert sorted(list(body[action])) == ['instance_uuid', + 'mode', + 'mountpoint'] + elif action == 'os-detach': + assert body[action] is None + elif action == 'os-reserve': + assert body[action] is None + elif action == 'os-unreserve': + assert body[action] is None + elif action == 'os-initialize_connection': + assert list(body[action].keys()) == ['connector'] + return (202, {}, {'connection_info': 'foos'}) + elif action == 'os-terminate_connection': + assert list(body[action].keys()) == ['connector'] + elif action == 'os-begin_detaching': + assert body[action] is None + elif action == 'os-roll_detaching': + assert body[action] is None + elif action == 'os-reset_status': + assert 'status' in body[action] + else: + raise AssertionError("Unexpected action: %s" % action) + return (resp, {}, _body) + + # + # volume type related + # + def get_types(self, **kw): + return (200, {}, { + "volume_types": [ + { + "extra_specs": { + "drivers:storage_pool": "P-NGP01-pool", + "capabilities:volume_backend_name": "shared_v7000_1", + "drivers:rsize": "-1" + }, + "name": "shared_v7000_1-default", + "id": "6627888e-9f59-4996-8c22-5d528c3273f0" + }, + { + "extra_specs": {}, + "name": "dm-crypt", + "id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb" + }, + { + "extra_specs": {}, + "name": "LUKS", + "id": "291f81a2-591b-4164-b2b2-829abc935573" + } + ] + }) + + def get_storage_providers_2(self, **kw): + """ + To get a fake detail storage_provider which id is 2 + """ + return (200, {}, {"storage_provider": + { + "backend_type": "svc", + "volume_count": "null", + "service": { + "host_display_name": "shared_v7000_1", + "host": "shared_v7000_1", + "id": 4 + }, + "backend_id": "00000200A0204C30", + "health_status": { + "health_value": "OK" + }, + "free_capacity_gb": 873.5, + "total_capacity_gb": 1115.5, + "storage_hostname": "shared_v7000_1", + "id": 2, + "backend_state": "running" + }}) + + def get_storage_providers_detail(self, **kw): + """ + To return a fake detail storage_providers + """ + return (200, {}, {"storage_providers": [ + { + "backend_type": "svc", + "volume_count": "null", + "service": { + "host_display_name": "shared_v7000_1", + "host": "shared_v7000_1", + "id": 4 + }, + "backend_id": "00000200A0204C30", + "health_status": { + "health_value": "OK" + }, + "free_capacity_gb": 873.5, + 
"total_capacity_gb": 1115.5, + "storage_hostname": "shared_v7000_1", + "id": 2, + "backend_state": "running", + "storage_type": "fc" + }, + { + "backend_type": "fc", + "volume_count": "null", + "service": { + "host_display_name": "shared_v7000_1", + "host": "shared_v7000_1", + "id": 4 + }, + "backend_id": "00000200A0204C31", + "health_status": { + "health_value": "OK" + }, + "free_capacity_gb": 73.5, + "total_capacity_gb": 115.5, + "storage_hostname": "shared_v7000_2", + "id": 3, + "backend_state": "running", + "storage_type": "fc" + } + ]}) + + +class FakeUtils(pvc_utils.Utils): + + def __init__(self): + self._novaclient = None + self._cinderclient = None + self.scg_cache = None + + +class UtilsFakeTest(utils.TestCase): + + """ + Testcases for utils.py in this class reads the storage connectivity + group, storage provider, storage template and volume from fake data. + All the cases in this class should be run successfully. + """ + + def setUp(self): + super(UtilsFakeTest, self).setUp() + config.parse_power_config(['/etc/powervc/powervc.conf'], 'cinder') + self.utils = FakeUtils() + # get nova_client + nova_fakeclient = PVCFakeNovaClient('r', 'p', 's', + 'http://localhost:5000/') + # delegate to nova extension class + nova_client = delegate.new_composite_deletgate( + [ext_nova.Client(nova_fakeclient), nova_fakeclient]) + + # get cinder client + cinder_fakeclient = PVCFakeCinderClient('r', 'p') + # delegate to nova extension class + cinder_client = delegate.new_composite_deletgate( + [ext_cinder.Client(cinder_fakeclient), cinder_fakeclient]) + + self.utils._novaclient = nova_client + self.utils._cinderclient = cinder_client + self.utils.scg_cache = SCGCache(nova_client) + + self.scg_id_list = ['sdfb541cb-f418-4b4b-3129-a814865023fs', + 'f4b541cb-f418-4b4b-83b9-a8148650d4e9'] + self.scg_name_list = ['Auto-SCG for Registered SAN', + 'SCG Sample'] + + def test_get_multi_scg_accessible_storage_providers_1(self): + accessible_storage_providers = \ + self.utils.get_multi_scg_accessible_storage_providers( + scg_uuid_list=self.scg_id_list, + scg_name_list=None) + self.assertEqual([provider.storage_hostname + for provider in accessible_storage_providers], + ['shared_v7000_1', 'shared_v7000_2']) + + def test_get_multi_scg_accessible_storage_providers_2(self): + accessible_storage_providers = \ + self.utils.get_multi_scg_accessible_storage_providers( + scg_uuid_list=None, + scg_name_list=self.scg_name_list) + self.assertEqual([provider.id + for provider in accessible_storage_providers], + [2, 3]) + + def test_get_scg_accessible_storage_providers_1(self): + accessible_storage_providers = \ + self.utils.get_scg_accessible_storage_providers( + "f4b541cb_f418_4b4b_83b9_a8148650d4e9") + self.assertEqual(accessible_storage_providers[0].storage_hostname, + "shared_v7000_1") + + def test_get_scg_accessible_storage_providers_2(self): + """ + Test when scg not specified + """ + accessible_storage_providers = \ + self.utils.get_scg_accessible_storage_providers() + self.assertEqual(accessible_storage_providers[0].storage_hostname, + "shared_v7000_1") + + def test_get_multi_scg_accessible_storage_templates_1(self): + accessible_storage_templates = \ + self.utils.get_multi_scg_accessible_storage_templates( + scg_uuid_list=self.scg_id_list, + scg_name_list=None) + # Shoud return the storage template which in the accessible + # storage providers + self.assertEqual([st.name for st in accessible_storage_templates], + ['dm-crypt', 'LUKS', 'shared_v7000_1-default']) + + def 
test_get_multi_scg_accessible_storage_templates_2(self): + accessible_storage_templates = \ + self.utils.get_multi_scg_accessible_storage_templates( + scg_uuid_list=None, + scg_name_list=self.scg_name_list) + # Shoud return the storage template which in the accessible + # storage providers + self.assertEqual([st.name for st in accessible_storage_templates], + ['dm-crypt', 'LUKS', 'shared_v7000_1-default']) + + def test_get_scg_accessible_storage_templates_1(self): + accessible_storage_templates = \ + self.utils.get_scg_accessible_storage_templates( + "f4b541cb_f418_4b4b_83b9_a8148650d4e9") + # Shoud return the storage template which in the accessible + # storage providers + self.assertEqual(accessible_storage_templates[0].name, + "shared_v7000_1-default") + + def test_get_multi_scg_accessible_volumes_1(self): + scg_accessible_volumes = \ + self.utils.get_multi_scg_accessible_volumes( + scg_uuid_list=self.scg_id_list, + scg_name_list=None) + # Shoud return the volume which in the accessible + # storage templates + self.assertEqual([volume.id for volume in scg_accessible_volumes], + ["ab41ee79-0f84-4f0d-976e-0aa122c8b89d"]) + + def test_get_scg_accessible_volumes_1(self): + scg_accessible_volumes = \ + self.utils.get_scg_accessible_volumes( + "f4b541cb_f418_4b4b_83b9_a8148650d4e9") + # Shoud return the volume which in the accessible + # storage templates + self.assertEqual(scg_accessible_volumes[0].id, + "ab41ee79-0f84-4f0d-976e-0aa122c8b89d") + + def test_get_multi_scg_accessible_volumes_2(self): + scg_accessible_volumes = \ + self.utils.get_multi_scg_accessible_volumes( + scg_uuid_list=None, + scg_name_list=self.scg_name_list) + # Shoud return the volume which in the accessible + # storage templates + self.assertEqual([volume.id for volume in scg_accessible_volumes], + ["ab41ee79-0f84-4f0d-976e-0aa122c8b89d"]) + + def test_get_scg_accessible_volumes_2(self): + scg_accessible_volumes = \ + self.utils.get_scg_accessible_volumes( + scgName="Auto-SCG for Registered SAN") + # Shoud return the volume which in the accessible + # storage templates + self.assertEqual(scg_accessible_volumes[0].id, + "ab41ee79-0f84-4f0d-976e-0aa122c8b89d") + + def test_get_scg_cache(self): + new_scg = self.utils.get_scg_cache(self.utils._novaclient) + self.assertNotEqual(new_scg, self.utils.scg_cache) + + def test_get_all_scgs(self): + scg_list = self.utils.get_all_scgs() + self.assertEqual([scg.id for scg in scg_list], + self.scg_id_list) + + def test_get_our_scg_list(self): + from powervc.common import config as cg + cg.CONF['powervc'].storage_connectivity_group = self.scg_name_list + scg_list = self.utils.get_our_scg_list() + self.assertIsNotNone(scg_list) + + def test_validate_scgs(self): + from powervc.common import config as cg + cg.CONF['powervc'].storage_connectivity_group = self.scg_name_list + ret = self.utils.validate_scgs() + self.assertTrue(ret) + + def test_get_scg_by_scgName_1(self): + scg = self.utils.get_scg_by_scgName("Auto-SCG for Registered SAN") + self.assertIsNotNone(scg) + + def test_get_scg_id_by_scgName_1(self): + scg_id = self.utils.\ + get_scg_id_by_scgName("Auto-SCG for Registered SAN") + self.assertEqual(scg_id, "f4b541cb-f418-4b4b-83b9-a8148650d4e9") + + def test_get_scg_id_by_scgName_2(self): + scg_id = self.utils.\ + get_scg_id_by_scgName("Auto-SCG for Registered SAN") + self.assertIsNotNone(scg_id) + + def test_get_scg_id_by_scgName_3(self): + scg_id = self.utils.\ + get_scg_id_by_scgName("NON-Auto-SCG for Registered SAN") + self.assertEqual(scg_id, "") + + def 
test_get_scg_accessible_storage_servers_1(self): + servers = self.utils.get_scg_accessible_servers() + self.assertIsNotNone(servers) + + def test_get_scg_accessible_storage_servers_2(self): + servers = self.utils.get_scg_accessible_servers( + scgName="Auto-SCG for Registered SAN") + self.assertIsNotNone(servers) + + def compare_to_expected(self, expected, hyper): + for key, value in expected.items(): + self.assertEqual(getattr(hyper, key), value) + + def test_get_image_scgs(self): + self.utils._novaclient = mock.MagicMock() + self.utils.get_image_scgs('imageUUID') + self.utils._novaclient.storage_connectivity_groups.\ + list_for_image.assert_called_with('imageUUID') + + scgs = self.utils.get_image_scgs(None) + self.assertEqual(scgs, []) + + def test_get_scg_image_ids(self): + self.utils._novaclient = mock.MagicMock() + self.utils.get_scg_image_ids('scgUUID') + self.utils._novaclient.scg_images.\ + list_ids.assert_called_with('scgUUID') + imgs = self.utils.get_image_scgs(None) + self.assertEqual(imgs, []) + + def test_get_local_staging_project_id(self): + class Tenant(object): + def __init__(self, name, tid): + self.name = name + self.id = tid + + self.utils._localkeystoneclient = mock.MagicMock() + self.utils._localkeystoneclient.tenants.list.return_value = \ + [Tenant('fake_tenant_name1', 1), Tenant('fake_tenant_name2', 2)] + from powervc.common import config as cg + cg.CONF.powervc.staging_project_name = 'fake_tenant_name1' + ret_id = self.utils.get_local_staging_project_id() + self.assertEqual(ret_id, 1) + + cg.CONF.powervc.staging_project_name = 'no_tenant_name' + from powervc.common.exception import StagingProjectNotFound + self.assertRaises(StagingProjectNotFound, + self.utils.get_local_staging_project_id) + + def test_get_local_staging_user_id(self): + class User(object): + def __init__(self, name, tid): + self.name = name + self.id = tid + + self.utils._localkeystoneclient = mock.MagicMock() + self.utils._localkeystoneclient.users.list.return_value = \ + [User('fake_user_name1', 1), User('fake_user_name2', 2)] + from powervc.common import config as cg + cg.CONF.powervc.staging_user = 'fake_user_name1' + ret_id = self.utils.get_local_staging_user_id() + self.assertEqual(ret_id, 1) + + cg.CONF.powervc.staging_user = 'no_user_name' + from powervc.common.exception import StagingUserNotFound + self.assertRaises(StagingUserNotFound, + self.utils.get_local_staging_user_id) + + def test_multi_thread_scgcache(self): + # Launch one thousand one tasks to test the scg cache. 
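+        # Note: 1001 tasks is one more than eventlet's default GreenPool
+        # size of 1000, which is presumably why 1001 was chosen: at least
+        # one task has to wait for a free greenthread slot, exercising the
+        # cache under pool contention.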
+        class FakeScg(object):
+            def __init__(self, scgid, name):
+                self.id = scgid
+                self.display_name = name
+
+        def fake_get_resource():
+            eventlet.greenthread.sleep(1)
+            data1 = {}
+            for i in range(1001):
+                data1[FakeScg(str(i), 'scg' + str(i))] = 'scg' + str(i)
+            return data1
+
+        self.utils.scg_cache._get_resources = fake_get_resource
+
+        def cache_task(key):
+            scg1 = self.utils.scg_cache.by_id(key)
+            self.assertEqual('scg' + key, scg1.display_name)
+            scg2 = self.utils.scg_cache.by_name('scg' + key)
+            self.assertEqual(key, scg2.id)
+            print eventlet.greenthread.getcurrent()
+
+        pool = eventlet.GreenPool()
+        pool.imap(cache_task, [str(i) for i in xrange(1001)])
+
+
+class FakeDriver(object):
+    def set_data(self, data):
+        self._data = data
+
+    def cache_volume_data(self):
+        return self._data
+
+
+class VolumeCacheTest(testtools.TestCase):
+    def setUp(self):
+        super(VolumeCacheTest, self).setUp()
+        self._driver = FakeDriver()
+
+    def tearDown(self):
+        super(VolumeCacheTest, self).tearDown()
+
+    def test_get_resources(self):
+        self._driver.set_data(None)
+        volume_cache = VolumeCache(self._driver)
+        self.assertEqual(None, volume_cache._get_resources())
+
+        data1 = {'p000': 'l000'}
+        self._driver.set_data(data1)
+        volume_cache = VolumeCache(self._driver)
+        self.assertEqual(data1, volume_cache._get_resources())
+
+    def test_get_by_id(self):
+        data1 = {'p000': 'l000'}
+        self._driver.set_data(data1)
+        volume_cache = VolumeCache(self._driver)
+        self.assertEqual('l000', volume_cache.get_by_id('p000'))
+        self.assertIsNone(volume_cache.get_by_id('p0001'))
+        self.assertNotEqual('l001', volume_cache.get_by_id('p000'))
+
+    def test_set_by_id(self):
+        data1 = {'p000': 'l000'}
+        self._driver.set_data(data1)
+        volume_cache = VolumeCache(self._driver, 10000000)
+        self.assertEqual('l000', volume_cache.get_by_id('p000'))
+        volume_cache.set_by_id('p001', 'l001')
+        self.assertEqual('l001', volume_cache.get_by_id('p001'))
+        self.assertEqual('l000', volume_cache.get_by_id('p000'))
+
+    def test_multi_thread(self):
+        # Launch one thousand one tasks to test the cache.
+        data1 = {}
+        for i in range(1001):
+            data1[str(i)] = 'value' + str(i)
+        self._driver.set_data(data1)
+        volume_cache = VolumeCache(self._driver, 10000000)
+
+        def cache_task(key):
+            str1 = volume_cache.get_by_id(key)
+            self.assertEqual('value' + key, str1)
+            volume_cache.set_by_id('country', 'china')
+            str2 = volume_cache.get_by_id('country')
+            self.assertEqual('china', str2)
+            return "%s-%s, %s" % (key, str1, str2)
+
+        pool = eventlet.GreenPool()
+        i = 0
+        for rtn in pool.imap(cache_task, data1.keys()):
+            print "Got return from ", str(i), ': ', rtn
+            i += 1
diff --git a/glance-powervc/.project b/glance-powervc/.project
new file mode 100644
index 0000000..6d59d51
--- /dev/null
+++ b/glance-powervc/.project
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>glance-powervc</name>
+	<comment></comment>
+	<projects>
+		<project>common-powervc</project>
+		<project>glance</project>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.python.pydev.PyDevBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.python.pydev.pythonNature</nature>
+	</natures>
+</projectDescription>
diff --git a/glance-powervc/.pydevproject b/glance-powervc/.pydevproject
new file mode 100644
index 0000000..8b7f087
--- /dev/null
+++ b/glance-powervc/.pydevproject
@@ -0,0 +1,11 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?><pydev_project>
+
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/glance-powervc</path>
+<path>/glance-powervc/bin</path>
+</pydev_pathproperty>
+
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+</pydev_project>
diff --git a/glance-powervc/bin/glance-powervc b/glance-powervc/bin/glance-powervc
new file mode 100644
index 0000000..5ccb6ae
--- /dev/null
+++ b/glance-powervc/bin/glance-powervc
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""Starter script for the PowerVC Driver ImageManager Service."""
+
+import eventlet
+import os
+import socket
+import sys
+import traceback
+
+eventlet.patcher.monkey_patch(os=False, socket=True, time=True)
+
+# FIXME: Is there a way to keep multiple instances from running at the same time?
+# FIXME: Haven't really looked too close at this yet. It may need more work.
+# If ../powervc/__init__.py exists, add ../ to Python search path, so that
+# it will override what happens to be installed in /usr/(local/)lib/python...
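+# For example, with this script at glance-powervc/bin/glance-powervc,
+# POSSIBLE_TOPDIR below resolves to glance-powervc/, which contains
+# powervc/__init__.py, so the in-tree powervc package is used.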
+POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) + +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +# TODO RYKAL +# This should go in the base __init__ folder I think +from glance.openstack.common import gettextutils +gettextutils.install('glance') + +from powervc.glance.common import config +from glance.openstack.common import log +from glance.openstack.common import service +from glance.openstack.common import importutils + +CONF = config.CONF + +LOG = log.getLogger(__name__) + +if __name__ == '__main__': + try: + # Obtain glance opts from glance-api.conf + config.parse_config(sys.argv, 'glance', 'glance-api') + log.setup('powervc') + LOG.info(_('Launching PowerVC Driver ImageManager service...')) + manager = importutils.import_object( + 'powervc.glance.manager.manager.PowerVCImageManager') + launcher = service.ServiceLauncher() + launcher.launch_service(manager) + LOG.info(_('PowerVC Driver ImageManager service started')) + launcher.wait() + LOG.info(_('PowerVC Driver ImageManager service ended')) + except Exception: + traceback.print_exc() + raise diff --git a/glance-powervc/init/openstack-glance-powervc b/glance-powervc/init/openstack-glance-powervc new file mode 100644 index 0000000..59471f6 --- /dev/null +++ b/glance-powervc/init/openstack-glance-powervc @@ -0,0 +1,102 @@ +#!/bin/sh +# +# openstack-glance-powervc OpenStack PowerVC Glance Driver +# +# chkconfig: - 98 02 +# description: Provides PowerVC manage-to support. + +### BEGIN INIT INFO +# Provides: +# Required-Start: $remote_fs $network $syslog +# Required-Stop: $remote_fs $syslog +# Default-Stop: 0 1 6 +# Short-Description: OpenStack PowerVC Glance Driver +# Description: +### END INIT INFO + +. /etc/rc.d/init.d/functions + +suffix=powervc +prog=openstack-glance-powervc +exec="/opt/ibm/openstack/powervc-driver/bin/glance-$suffix" +config="/etc/powervc/powervc.conf" +pidfile="/var/run/$suffix/glance-$suffix.pid" +logfile="/var/log/$suffix/glance-$suffix.log" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +start() { + [ -x $exec ] || exit 5 + [ -f $config ] || exit 6 + echo -n $"Starting $prog: " + daemon --user powervc --pidfile $pidfile "$exec --config-file $config --logfile $logfile &>/dev/null & echo \$! > $pidfile" + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $? 
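The starter script above follows the oslo-incubator service pattern: a
service.Service subclass is imported by name through importutils and handed
to a ServiceLauncher, which drives it until the process is stopped. A minimal
sketch of that pattern, assuming only the glance.openstack.common.service API
the script already imports (MyService is a hypothetical stand-in for
PowerVCImageManager):

from glance.openstack.common import service


class MyService(service.Service):
    """Hypothetical Service subclass standing in for a real manager."""

    def start(self):
        # Long-running work (timers, worker threads, event listeners)
        # is normally kicked off from start().
        super(MyService, self).start()


if __name__ == '__main__':
    launcher = service.ServiceLauncher()
    launcher.launch_service(MyService())
    launcher.wait()  # blocks until the service exits or is signaled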
diff --git a/glance-powervc/powervc/__init__.py b/glance-powervc/powervc/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/glance-powervc/powervc/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/glance-powervc/powervc/glance/__init__.py b/glance-powervc/powervc/glance/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/glance-powervc/powervc/glance/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/glance-powervc/powervc/glance/common/__init__.py b/glance-powervc/powervc/glance/common/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/glance-powervc/powervc/glance/common/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/glance-powervc/powervc/glance/common/config.py b/glance-powervc/powervc/glance/common/config.py new file mode 100644 index 0000000..516066a --- /dev/null +++ b/glance-powervc/powervc/glance/common/config.py @@ -0,0 +1,45 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" +PowerVC Driver ImageManager Configuration +""" + +from oslo.config import cfg +import powervc.common.config as common_config +from powervc.glance.common import constants + +CONF = common_config.CONF + +# PowerVC Driver ImageManager specific configuration +image_opts = [ + + # The image periodic sync interval in seconds. Default is 300. + cfg.IntOpt('image_periodic_sync_interval_in_seconds', + default=constants.IMAGE_PERIODIC_SYNC_INTERVAL_IN_SECONDS), + + # In case of error, the image sync retry interval time in seconds. Default + # is 60. + cfg.IntOpt('image_sync_retry_interval_time_in_seconds', + default=constants.IMAGE_SYNC_RETRY_INTERVAL_TIME_IN_SECONDS), + + # The maximum number of images to read for each query request. Default is + # 500. + cfg.IntOpt('image_limit', default=constants.IMAGE_LIMIT) +] + +CONF.register_opts(image_opts, group='powervc') + +# Import glance opts +CONF.import_opt('owner_is_tenant', 'glance.api.middleware.context') + + +def parse_config(*args, **kwargs): + common_config.parse_power_config(*args, **kwargs) diff --git a/glance-powervc/powervc/glance/common/constants.py b/glance-powervc/powervc/glance/common/constants.py new file mode 100644 index 0000000..196f1b0 --- /dev/null +++ b/glance-powervc/powervc/glance/common/constants.py @@ -0,0 +1,99 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+All PowerVC Driver ImageManager Constants
+"""
+
+import powervc.common.constants as consts
+
+# Maximum size of a property that can be handled by the v1 Image APIs
+MAX_HEADER_LEN_V1 = 8192
+
+# Interval in seconds between periodic image syncs
+IMAGE_PERIODIC_SYNC_INTERVAL_IN_SECONDS = 300
+
+# The maximum number of images to return with the v1 image list API call. The
+# default is 500 images. If the PowerVC has more than 500 images, this limit
+# should be increased to include all images.
+IMAGE_LIMIT = 500
+
+# The v2 URI path value
+V2_URI_PATH = 'v2.0'
+
+# The image client service type
+CLIENT_SERVICE_TYPE = 'image'
+
+# The image client endpoint type to use
+CLIENT_ENDPOINT_TYPE = 'publicURL'
+
+# Image location path
+IMAGE_LOCATION_PATH = 'v2/images/'
+
+# List of image create parameters to filter out
+IMAGE_CREATE_PARAMS_FILTER = ['id']
+
+# List of image update parameters to filter out
+IMAGE_UPDATE_PARAMS_FILTER = ['owner', 'location']
+
+# List of image properties which should have HTML/XML entities unescaped
+IMAGE_UNESCAPE_PROPERTIES = ['configuration_strategy']
+
+# List of v2 image update parameters to filter out
+v2IMAGE_UPDATE_PARAMS_FILTER = IMAGE_UPDATE_PARAMS_FILTER + ['deleted', 'size', 'checksum']
+
+# List of image properties to filter out during an update
+IMAGE_UPDATE_PROPERTIES_FILTER = [consts.POWERVC_UUID_KEY,
+                                  consts.LOCAL_UUID_KEY]
+
+# Timestamp format of image updated_at field
+IMAGE_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S'
+
+# The expiration period for image events in hours
+EVENT_TUPLE_EXPIRATION_PERIOD_IN_HOURS = 1
+
+# The number of seconds in an hour
+SECONDS_IN_HOUR = 3600
+
+# PowerVC identifier
+POWER_VC = 'pvc'
+
+# Local hosting OS identifier
+LOCAL = 'local'
+
+# Event queue event constants
+EVENT_TYPE = 'type'
+EVENT_CONTEXT = 'context'
+EVENT_MESSAGE = 'message'
+
+# Event queue event types
+LOCAL_IMAGE_EVENT = LOCAL
+PVC_IMAGE_EVENT = POWER_VC
+PERIODIC_SCAN_EVENT = 'periodic'
+STARTUP_SCAN_EVENT = 'startup'
+
+# Image notification event exchange
+IMAGE_EVENT_EXCHANGE = 'glance'
+
+# Image notification event topic
+IMAGE_EVENT_TOPIC = 'notifications.info'
+
+# Image notification event types
+IMAGE_EVENT_TYPE_ALL = 'image.*'
+IMAGE_EVENT_TYPE_ACTIVATE = 'image.activate'
+IMAGE_EVENT_TYPE_CREATE = 'image.create'
+IMAGE_EVENT_TYPE_UPDATE = 'image.update'
+IMAGE_EVENT_TYPE_DELETE = 'image.delete'
+
+# Constants used by the ImageSyncController
+SYNC_PASSED = 1
+SYNC_FAILED = -1
+IMAGE_SYNC_RETRY_INTERVAL_TIME_IN_SECONDS = 60
+IMAGE_SYNC_CHECK_INTERVAL_TIME_IN_SECONDS = 1
diff --git a/glance-powervc/powervc/glance/manager/__init__.py b/glance-powervc/powervc/glance/manager/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/glance-powervc/powervc/glance/manager/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp.
2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/glance-powervc/powervc/glance/manager/manager.py b/glance-powervc/powervc/glance/manager/manager.py
new file mode 100644
index 0000000..fbd001f
--- /dev/null
+++ b/glance-powervc/powervc/glance/manager/manager.py
@@ -0,0 +1,3815 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+PowerVC Driver ImageManager service
+"""
+
+import sys
+import time
+import hashlib
+import Queue
+import threading
+import itertools
+from operator import itemgetter
+import HTMLParser
+
+from glance.openstack.common import service
+from glance.openstack.common import log as logging
+from glance.openstack.common import timeutils
+from glanceclient.v1 import images as v1images
+from glanceclient.exc import CommunicationError
+from glanceclient.exc import HTTPNotFound
+
+from powervc.common import constants as consts
+from powervc.common import messaging
+from powervc.common.exception import StorageConnectivityGroupNotFound
+from powervc.common.gettextutils import _
+from powervc.common.client import factory as clients
+from powervc.glance.common import constants
+from powervc.glance.common import config
+from powervc.common import utils
+
+CONF = config.CONF
+
+LOG = logging.getLogger(__name__)
+
+
+class PowerVCImageManager(service.Service):
+    """
+    The PowerVCImageManager is responsible for initiating the task that
+    synchronizes the images between PowerVC and the hosting OS, both at
+    startup and periodically. It also starts the image notification event
+    handlers, which listen for image events from both PowerVC Glance and
+    the hosting OS Glance and keep changes synchronized between the two.
+    """
+
+    def __init__(self):
+        super(PowerVCImageManager, self).__init__()
+
+        # Our Storage Connectivity Group
+        self.our_scg_list = []
+
+        # See if our scg is specified. If not, terminate the ImageManager
+        # service.
+        self._check_scg_at_startup()
+
+        self._staging_cache = utils.StagingCache()
+
+        # The local, and PowerVC updated_at timestamp dicts, and the master
+        # image dict are very important. They are used to drive the periodic
+        # syncs. They should be kept up to date, but ONLY at the proper time.
+        # The updated_at and master_image values should only be set upon
+        # the successful completion of image creates, updates, or deletes.
+        # Both the PowerVC and local hostingOS changes should complete
+        # successfully before setting these values. Failure to do so will
+        # result in images out of sync. The keys for all of these dicts are
+        # PowerVC image UUIDs.
+        self.local_updated_at = {}
+        self.pvc_updated_at = {}
+        self.master_image = {}
+
+        # Flags set when the event handlers are up and running
+        self.local_event_handler_running = False
+        self.pvc_event_handler_running = False
+
+        # The cached local and PowerVC v1 and v2 glance clients
+        self.local_v1client = None
+        self.local_v2client = None
+        self.pvc_v1client = None
+        self.pvc_v2client = None
+
+        # Dicts of events to ignore. These are used to keep events from
+        # ping-ponging back and forth between the local hostingOS and
+        # PowerVC. The dict is made up of timestamp keys, and event tuple
+        # values. The timestamp key is the time that the event tuple is
+        # added to the dict.
+        # Some events may be missed so these dicts must be purged of
+        # expired event tuples periodically.
+        self.local_events_to_ignore_dict = {}
+        self.pvc_events_to_ignore_dict = {}
+
+        # Summary display counters displayed after syncing
+        self._clear_sync_summary_counters()
+
+        # The queue used to synchronize events, and the startup and periodic
+        # sync scans
+        self.event_queue = Queue.Queue()
+
+        # A dict used to map the PowerVC image UUIDs to local hostingOS
+        # image UUIDs
+        self.ids_dict = {}
+
+        # The ImageSyncController is used to manage when sync operations
+        # occur
+        self.image_sync_controller = ImageSyncController(self)
+
+    def start(self):
+        """
+        Start the PowerVC Driver ImageManager service.
+
+        This will start up image synchronization between PowerVC and the
+        hosting OS. The image synchronization will be a periodic task that
+        will run after a given interval.
+        """
+        self._start_image_sync_task()
+
+    def _start_image_sync_task(self):
+        """
+        Kick off the image sync task.
+
+        The image sync task will run every 300 seconds (default) and keep
+        the hosting OS images in sync with the PowerVC images.
+        """
+        LOG.debug(_(
+            'Starting image sync periodic task with %s second intervals...'),
+            CONF['powervc'].image_periodic_sync_interval_in_seconds)
+
+        # Start image synchronization. This will also start the periodic
+        # sync.
+        self.image_sync_controller.start()
+
+        # Start a thread here to process the event queue
+        t = threading.Thread(target=self._process_event_queue)
+        t.daemon = True
+        t.start()
+
+    def sync_images(self):
+        """
+        Synchronize the images between PowerVC and the hosting OS. This
+        method is typically run as a periodic task so that synchronization
+        is done continuously at some specified interval.
+
+        This method initially provides startup synchronization. PowerVC is
+        the master. When the synchronization task is done, the PowerVC
+        images will be reflected into the hosting OS Glance.
+
+        When the synchronization is complete the image notification
+        event handlers will be started if they are not already running.
+
+        After the startup synchronization runs successfully, subsequent
+        synchronizations are done using periodic 2-way synchronization.
+        """
+
+        if not self.image_sync_controller.is_startup_sync_done():
+
+            # Add an event to the event queue to start the startup scan.
+            self._add_startup_sync_to_queue()
+        else:
+
+            # Add an event to the event queue to start the periodic scan.
+            # This synchronizes the periodic scans with the image event
+            # processing.
+            self._add_periodic_sync_to_queue()
+
+    def startup_sync(self):
+        """
+        Perform the startup sync of images. PowerVC is the master. All
+        active images from PowerVC are reflected into the local hosting OS.
+        """
+        LOG.info(_('Performing startup image synchronization...'))
+
+        # Initialize the sync result value
+        sync_result = constants.SYNC_FAILED
+
+        # Save start time for elapsed time calculation
+        start_time = time.time()
+
+        # Build a dict of PowerVC images with the UUID as the key.
+        # NOTE: If holding all images in memory becomes a problem one
+        # option may be to rewrite the code to only get a full image
+        # when needed.
+        pvc_image_dict = {}
+
+        # Build a dict of hosting OS images that came from PowerVC with the
+        # PowerVC UUID as the key. Images from PowerVC will have the
+        # property 'powervc_uuid'. If that is not present, ignore it.
+        # NOTE: If holding all images in memory becomes a problem one
+        # option may be to rewrite the code to only get a full image
+        # when needed.
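+        # The reconciliation below runs in three passes: local images whose
+        # PowerVC counterpart is gone are deleted, images present on both
+        # sides are update-synced, and active PowerVC images with no local
+        # copy are added (or activated, for snapshot images).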
+        local_image_dict = {}
+
+        # Clear the updated_at timestamp dicts, master_image dict and the
+        # ids dict in case startup sync is called more than once. These are
+        # never cleared again.
+        self.local_updated_at.clear()
+        self.pvc_updated_at.clear()
+        self.master_image.clear()
+        self.ids_dict.clear()
+
+        # Initialize stats for summary display
+        self._clear_sync_summary_counters()
+
+        # Try catching all exceptions so we don't end our periodic task.
+        # If an error occurs during synchronization it is logged.
+        try:
+
+            # Get the images dict for the hosting OS
+            # NOTE: We are using the Glance v1 API here. The Glance v2 API
+            # will actually list partial/incomplete images. We may want to
+            # see which version nova uses when getting images so we don't
+            # disagree. If nova uses the v2 glance client it may list images
+            # which are not complete unless it filters those out. May need
+            # more investigation here regarding that.
+            local_v1client = self._get_local_v1_client()
+            v1local_images = local_v1client.images
+            local_image_dict = self._get_local_images_and_ids(v1local_images)
+
+            # Get the images dict from PowerVC. Only get images that are
+            # accessible from our Storage Connectivity Group. If the SCG is
+            # not found, an exception is raised and the sync will fail here.
+            pvc_v1client = self._get_pvc_v1_client()
+            v1pvc_images = pvc_v1client.images
+            pvc_image_dict = self._get_pvc_images(v1pvc_images)
+
+            # Dump the local image information
+            self._dump_image_info(local_image_dict, pvc_image_dict)
+
+            # If there are hostingOS images, check for deletes and updates
+            pvc_v2client = self._get_pvc_v2_client()
+            local_v2client = self._get_local_v2_client()
+            v2local_images = local_v2client.images
+
+            # When catching exceptions during sync operations we will look
+            # for CommunicationError, and raise those so we don't waste time
+            # trying to process all images when there is a connection
+            # failure. Other exceptions should be caught and logged during
+            # each operation so we can attempt to process each image before
+            # leaving.
+            for uuid in local_image_dict.keys():
+                local_image = local_image_dict[uuid]
+                name = local_image.name
+                if uuid not in pvc_image_dict.keys():
+
+                    # It may be possible to have an orphaned snapshot image
+                    # in the hostingOS if the PowerVC Driver services
+                    # were restarted shortly after an instance capture was
+                    # issued. That may appear as a PowerVC image in the
+                    # hostingOS which does not yet appear in PowerVC. If
+                    # this startup sync is run during that time we will
+                    # delete the orphaned snapshot image. If PowerVC ends
+                    # up finishing the capture we would then add the
+                    # snapshot image as we normally would, and it would have
+                    # its owner value set to the staging project or user id.
+                    # If this is a problem in the future consider not
+                    # deleting images in the hostingOS which have their
+                    # powervc_uuid property set and which have a queued
+                    # status if they do not yet exist in PowerVC. This would
+                    # leave the orphaned snapshot images in place. If they
+                    # are not then created in PowerVC they will continue to
+                    # be orphaned in the hostingOS until someone manually
+                    # deletes them. For now, we will delete them here.
+
+                    # Remove the image since it's not on PowerVC now.
+                    # Delete the image. Log exceptions here and keep going.
+                    LOG.info(_('Deleting hosting OS image \'%s\' for PowerVC '
+                               'UUID %s'), name, uuid)
+                    deleted_image = self._delete_local_image(uuid, local_image,
+                                                             v1local_images)
+                    if deleted_image is None:
+                        LOG.error(_('Local hosting OS image \'%s\' for PowerVC'
+                                    ' UUID %s was not deleted during startup '
+                                    'image synchronization.'), name, uuid)
+                    else:
+                        self.local_deleted_count += 1
+
+                    # Clean up the ids_dict
+                    if uuid in self.ids_dict.keys():
+                        self.ids_dict.pop(uuid)
+                else:
+
+                    # Update the image if it has changed. (Right now, always
+                    # update it, and update all fields). Update using the
+                    # Glance v1 API if possible. Then update other properties
+                    # using the Glance v2 PATCH API. Log exceptions here and
+                    # keep going.
+                    LOG.info(_('Updating hosting OS image \'%s\' for PowerVC '
+                               'UUID %s'), name, uuid)
+                    updated_image = self._update_local_image(
+                        uuid, pvc_image_dict[uuid], local_image,
+                        v1local_images, v2local_images)
+
+                    # Save updated_at timestamp for the local hostingOS image
+                    # to be used during subsequent periodic sync operations
+                    if updated_image is None:
+                        LOG.error(_('Local hosting OS image \'%s\' for PowerVC'
+                                    ' UUID %s was not updated during startup '
+                                    'image synchronization.'), name, uuid)
+                        self.local_updated_at[uuid] = local_image.updated_at
+                    else:
+                        self.local_updated_count += 1
+
+                        # Save updated_at timestamp for local hostingOS image
+                        # to be used during subsequent periodic sync
+                        # operations
+                        self.local_updated_at[uuid] = updated_image.updated_at
+
+                    # Save updated_at timestamp for PowerVC image to be used
+                    # during subsequent periodic sync operations. Also save
+                    # the PowerVC image as the master_image used for merging
+                    # changes during periodic scan image updates.
+                    self.pvc_updated_at[uuid] = pvc_image_dict[uuid].updated_at
+                    self.master_image[uuid] = pvc_image_dict[uuid]
+
+            # Add any new active PowerVC images to the hostingOS.
+            local_image_owner = self._get_local_staging_owner_id()
+            if local_image_owner is None:
+                LOG.warning(_("Invalid staging user or project."
+                              " Skipping new image sync."))
+            else:
+                for uuid in pvc_image_dict.keys():
+                    if uuid not in local_image_dict.keys():
+                        pvc_image = pvc_image_dict[uuid]
+                        status = pvc_image.status
+                        pvc_name = pvc_image.name
+
+                        # Only sync images from PowerVC that are 'active',
+                        # and that are accessible from our Storage
+                        # Connectivity Group
+                        if status and status == 'active':
+
+                            # Add or activate the local image
+                            self._add_or_activate_local_image(
+                                pvc_image, local_image_owner,
+                                pvc_v2client.http_client.endpoint,
+                                v1local_images, v2local_images)
+                        else:
+                            LOG.debug(_('Image \'%s\' with PowerVC UUID %s not'
+                                        ' created during startup image '
+                                        'synchronization because the image '
+                                        'status is %s'), pvc_name, uuid,
+                                      status)
+
+            # All done!
Set the startup sync result as passed so subsequent + # syncs will run the periodic sync + sync_result = constants.SYNC_PASSED + + # Start the image notification event handlers to process changes if + # they are not currently running + self._prepare_for_image_events() + + # Format results for summary display + stat_l = '{0:d}/{1:d}/{2:d}'.format(self.local_created_count, + self.local_updated_count, + self.local_deleted_count) + stat_p = '{0:d}/{1:d}/{2:d}'.format(self.pvc_created_count, + self.pvc_updated_count, + self.pvc_deleted_count) + stats = '(local:{0}, powervc:{1})'.format(stat_l, stat_p) + + # Calculate elapsed time + end_time = time.time() + elapsed_time = '{0:.4} seconds'.format(end_time - start_time) + LOG.info(_('Startup image synchronization is complete. Elapsed ' + 'time: %s %s'), elapsed_time, stats) + except Exception as e: + LOG.warning(_('An error occurred during startup image ' + 'synchronization: %s'), e) + LOG.info(_('Startup image synchronization did not complete ' + 'successfully. It will run again in %s seconds.'), + CONF['powervc'].image_sync_retry_interval_time_in_seconds) + finally: + + # Tell the ImageSyncController that startup sync has ended + self.image_sync_controller.set_startup_sync_result(sync_result) + + def periodic_sync(self): + """ + Do a periodic two way sync. First the event handlers need to be + stopped if they are running. Then the local hosting OS and PowerVC + event locks are grabbed so that the periodic sync will not start + until all pending events have been processed. + """ + try: + + # Perform the periodic sync + self._perform_periodic_sync() + finally: + + # Start all event handlers if not running + self._prepare_for_image_events() + + def _perform_periodic_sync(self): + """ + Perform the periodic sync of images. A periodic sync of the images is + done to ensure that any changes that may be missed by image + notification events processing are still synchronized. + + The PowerVC and local hosting OS images are inspected for changes. + Adds, deletes, and updates are determined for each server, and then + applied to the other server. When complete, the PowerVC images on + each server will be synchronized. + """ + LOG.info(_('Performing periodic image synchronization...')) + + # Initialize the sync result value + sync_result = constants.SYNC_FAILED + + # Save start time for elapsed time calculation + start_time = time.time() + + # Need to stop or disable image event notification processing + # here in the future. + + # Build a dict of PowerVC images with the UUID as the key + # NOTE: If holding all images in memory becomes a problem one + # option may be to rewrite the code to only get a full image + # when needed. + pvc_image_dict = {} + + # Build a dict of hosting OS images that came from PowerVC with the + # PowerVC UUID as the key. Images from PowerVC will have the property + # 'powervc_uuid'. If that is not present, ignore + # NOTE: If holding all images in memory becomes a problem one + # option may be to rewrite the code to only get a full image + # when needed. + local_image_dict = {} + + # Initialize stats for summary display + self._clear_sync_summary_counters() + + # Try catching all exceptions so we don't end our periodic task + # If an error occurs during synchronization it is logged + try: + + # Get the images dict for the hosting OS + local_v1client = self._get_local_v1_client() + v1local_images = local_v1client.images + local_image_dict = self._get_local_images_and_ids(v1local_images) + + # Get the images dict from PowerVC. 
Only get images that are + # accessible from our Storage Connectivity Group. If the SCG is + # not found, an exception is raised and the sync will fail here. + pvc_v1client = self._get_pvc_v1_client() + v1pvc_images = pvc_v1client.images + pvc_image_dict = self._get_pvc_images(v1pvc_images) + + # Dump the local image information + self._dump_image_info(local_image_dict, pvc_image_dict) + + # Get the images to work with for adds, deletes, and updates + # When catching exceptions during sync operations we will look for + # CommunicationError, and raise those so we don't waste time + # trying to process all images when there is a connection failure. + # Other exceptions should be caught and logged during each + # operation so we can attempt to process each image before leaving. + pvc_v2client = self._get_pvc_v2_client() + local_v2client = self._get_local_v2_client() + v2local_images = local_v2client.images + v2pvc_images = pvc_v2client.images + + # Get the image sets from the past run, and the current run + past_local_image_set = set(self.local_updated_at) + past_pvc_image_set = set(self.pvc_updated_at) + cur_local_image_set = set(local_image_dict) + cur_pvc_image_set = set(pvc_image_dict) + + # We only need to update sync images that are in both PowerVC and + # the local hosting OS. If an image is missing from either side + # it will be added or deleted, so no need to try to update it. + # Do the update syncing first followed by delete, and add syncing. + # Start the update sync by getting common images on both sides. + update_candidates = \ + cur_local_image_set.intersection(cur_pvc_image_set) + + # Only update sync images that were updated on either the local + # hosting OS or on PowerVC. If both images seem to be in sync + # based on their updated_at values, use the checksum to determine + # if they are the same, and if they are not, merge them. Also, + # check for the instance capture snapshot image condition. If the + # local image status is queued and the PowerVC image status is + # active force an update from the PowerVC to the local hostingOS to + # activate the local snapshot image. + for uuid in update_candidates: + local_image = local_image_dict[uuid] + pvc_image = pvc_image_dict[uuid] + local_updated = self._local_image_updated(uuid, local_image) + pvc_updated = self._pvc_image_updated(uuid, pvc_image) + local_checksum = \ + self._get_image_checksum(local_image.to_dict()) + pvc_checksum = self._get_image_checksum(pvc_image.to_dict()) + + # See if we need to activate a local queued snapshot image from + # an instance capture + if local_image.status == 'queued' and \ + pvc_image.status == 'active': + LOG.info(_('Performing update sync of snapshot image ' + '\'%s\' from PowerVC to the local hosting OS to' + ' activate the image.'), local_image.name) + + # Update sync PowerVC image to local snapshot image to + # activate it + updated_image = self._update_local_image(uuid, pvc_image, + local_image, + v1local_images, + v2local_images) + if updated_image is None: + LOG.error(_('Local hosting OS snapshot image \'%s\' ' + 'for PowerVC UUID %s was not activated ' + 'during periodic image synchronization. It' + ' will be activated again during the next ' + 'periodic image synchronization ' + 'operation.'), local_image.name, uuid) + else: + self.local_updated_count += 1 + + # Capture the current update times for use during the + # next periodic sync operation. The update times are + # stored in a dict with the PowerVC UUID as the keys + # and the updated_at image attribute as the values. 
+ self.local_updated_at[uuid] = updated_image.updated_at + self.pvc_updated_at[uuid] = pvc_image.updated_at + + # Save the PowerVC image as the master image + self.master_image[uuid] = pvc_image + elif local_updated and pvc_updated: + + # If the image was updated on the local hostingOS and + # on PowerVC since the last periodic scan, then the two + # images need to be merged, and both updated with the + # result. + updated_local_image, updated_pvc_image = \ + self._update_with_merged_images(uuid, local_image, + pvc_image, + v1local_images, + v2local_images, + v1pvc_images, + v2pvc_images) + if updated_local_image is not None: + self.local_updated_count += 1 + if updated_pvc_image is not None: + self.pvc_updated_count += 1 + elif local_updated: + LOG.info(_('Performing update sync of image \'%s\' from ' + 'the local hosting OS to PowerVC'), + local_image.name) + + # Update sync local image to PowerVC + updated_image = self._update_pvc_image(uuid, local_image, + pvc_image, + v1pvc_images, + v2pvc_images) + if updated_image is None: + LOG.error(_('PowerVC image \'%s\' with UUID %s was not' + ' updated during periodic image ' + 'synchronization. It will be updated again' + ' during the next periodic image ' + 'synchronization operation.'), + pvc_image.name, uuid) + else: + self.pvc_updated_count += 1 + + # Capture the current update times for use during the + # next periodic sync operation. The update times are + # stored in a dict with the PowerVC UUID as the keys + # and the updated_at image attribute as the values. + self.pvc_updated_at[uuid] = updated_image.updated_at + self.local_updated_at[uuid] = local_image.updated_at + + # Save the PowerVC image as the master image + self.master_image[uuid] = pvc_image + elif pvc_updated: + LOG.info(_('Performing update sync of image \'%s\' from ' + 'PowerVC to the local hosting OS'), + local_image.name) + + # Update sync PowerVC image to local + updated_image = self._update_local_image(uuid, pvc_image, + local_image, + v1local_images, + v2local_images) + if updated_image is None: + LOG.error(_('Local hosting OS image \'%s\' for PowerVC' + ' UUID %s was not updated during periodic ' + 'image synchronization. It will be updated' + ' again during the next periodic image ' + 'synchronization operation.'), + local_image.name, uuid) + else: + self.local_updated_count += 1 + + # Capture the current update times for use during the + # next periodic sync operation. The update times are + # stored in a dict with the PowerVC UUID as the keys + # and the updated_at image attribute as the values. + self.local_updated_at[uuid] = updated_image.updated_at + self.pvc_updated_at[uuid] = pvc_image.updated_at + + # Save the PowerVC image as the master image + self.master_image[uuid] = pvc_image + elif local_checksum != pvc_checksum: + + # This is a fail-safe check. This should not happen if the + # image updated_at values were handled properly. If we get + # here and the image checksum values are different, then + # merge the two images together to sync them up, and apply + # to both sides + LOG.info(_('Image \'%s\' is not in sync. 
The images from ' + 'the local hosting OS and PowerVC will be ' + 'merged to synchronize them.'), + local_image.name) + + updated_local_image, updated_pvc_image = \ + self._update_with_merged_images(uuid, local_image, + pvc_image, + v1local_images, + v2local_images, + v1pvc_images, + v2pvc_images) + if updated_local_image is not None: + self.local_updated_count += 1 + if updated_pvc_image is not None: + self.pvc_updated_count += 1 + else: + LOG.info(_('Image \'%s\' is in sync'), local_image.name) + + # Find local adds, and deletes + # Deletes are images in the past that are not in the current + local_deletes = \ + past_local_image_set.difference(cur_local_image_set) + + # Adds are images in the current that are not in the past + local_adds = cur_local_image_set.difference(past_local_image_set) + + # Process local adds, and deletes by applying to the PowerVC + # There should not be any adds from the hosting OS to + # PowerVC since that is not currently supported. If any are + # found, log it, and ignore + for uuid in local_deletes: + if uuid in pvc_image_dict.keys(): + pvc_image = pvc_image_dict[uuid] + LOG.info(_('Deleting PowerVC image \'%s\' for UUID %s'), + pvc_image.name, uuid) + deleted_image = self._delete_pvc_image(uuid, pvc_image, + v1pvc_images) + if deleted_image is None: + LOG.error(_('PowerVC image \'%s\' with UUID %s was not' + ' deleted during periodic image' + 'synchronization.'), pvc_image.name, + uuid) + else: + self.pvc_deleted_count += 1 + + # Clean up the updated_at time and master_image + if uuid in self.pvc_updated_at.keys(): + self.pvc_updated_at.pop(uuid) + if uuid in self.master_image.keys(): + self.master_image.pop(uuid) + + # Clean up the updated_at time. Only do this if the + # PowerVC image is also gone, else it won't be deleted + # during the next periodic sync. + if uuid in self.local_updated_at.keys(): + self.local_updated_at.pop(uuid) + + # Clean up the ids_dict + if uuid in self.ids_dict.keys(): + self.ids_dict.pop(uuid) + else: + + # Clean up the updated_at time. Only do this if the PowerVC + # image is also gone, else it won't be deleted during the + # next periodic sync. + if uuid in self.local_updated_at.keys(): + self.local_updated_at.pop(uuid) + + # This could happen if an instance capture was started on the + # hostingOS, which results in a snapshot image on the hostingOS, + # but there may not be a corresponding snapshot image on PowerVC + # yet. In that case, log it, and continue. Otherwise, this should + # not happen. If it does, log a warning, and ignore + for uuid in local_adds: + if uuid not in pvc_image_dict.keys(): + local_image = local_image_dict[uuid] + if local_image.status == 'queued': + + # It is possible that there are images on the hosting + # OS that are queued. These would be from instance + # captures that are in progress. We will go ahead and + # track those, and keep their updated_at timestamp so + # they are not treated as an add later on. + self.local_updated_at[uuid] = local_image.updated_at + self.ids_dict[uuid] = local_image.id + + # If there is no master_image for this UUID, create + # one now. It will be used to merge the PowerVC + # image with this one when one is available. 
+                        # If there is no master_image for this UUID, create
+                        # one now. It will be used to merge the PowerVC
+                        # image with this one when one is available.
+                        if uuid not in self.master_image.keys():
+                            self.master_image[uuid] = local_image
+                        LOG.debug(_('A new PowerVC snapshot image \'%s\' with '
+                                    'PowerVC UUID %s was detected on the local'
+                                    ' hosting OS, but it is not yet present '
+                                    'on PowerVC.'), local_image.name, uuid)
+                    else:
+                        LOG.warning(_('A new PowerVC image \'%s\' was detected'
+                                      ' on the local hosting OS. This is not '
+                                      'supported!'), local_image.name)
+
+            # Find PowerVC adds and deletes.
+            # Deletes are images in the past that are not in the current
+            pvc_deletes = past_pvc_image_set.difference(cur_pvc_image_set)
+
+            # Adds are images in the current that are not in the past
+            pvc_adds = cur_pvc_image_set.difference(past_pvc_image_set)
+
+            # Process PowerVC adds and deletes by applying them to the local
+            # hosting OS
+            for uuid in pvc_deletes:
+                if uuid in local_image_dict.keys():
+                    local_image = local_image_dict[uuid]
+                    LOG.info(_('Deleting local hosting OS image \'%s\' for '
+                               'PowerVC UUID %s'), local_image.name, uuid)
+                    deleted_image = self._delete_local_image(uuid, local_image,
+                                                             v1local_images)
+                    if deleted_image is None:
+                        LOG.error(_('Local hosting OS image \'%s\' for PowerVC'
+                                    ' UUID %s was not deleted during periodic '
+                                    'image synchronization.'),
+                                  local_image.name, uuid)
+                    else:
+                        self.local_deleted_count += 1
+
+                        # Clean up the updated_at time and master_image
+                        if uuid in self.local_updated_at.keys():
+                            self.local_updated_at.pop(uuid)
+                        if uuid in self.master_image.keys():
+                            self.master_image.pop(uuid)
+
+                        # Clean up the updated_at time. Only do this if the
+                        # local hostingOS image is also gone, else it won't be
+                        # deleted during the next periodic sync.
+                        if uuid in self.pvc_updated_at.keys():
+                            self.pvc_updated_at.pop(uuid)
+
+                        # Clean up the ids_dict
+                        if uuid in self.ids_dict.keys():
+                            self.ids_dict.pop(uuid)
+                else:
+
+                    # Clean up the updated_at time. Only do this if the local
+                    # hostingOS image is also gone, else it won't be deleted
+                    # during the next periodic sync.
+                    if uuid in self.pvc_updated_at.keys():
+                        self.pvc_updated_at.pop(uuid)
+
+            # Process PowerVC adds
+            local_image_owner = self._get_local_staging_owner_id()
+            if local_image_owner is None:
+                LOG.warning(_("Invalid staging user or project."
+                              " Skipping new image sync."))
+            else:
+                for uuid in pvc_adds:
+                    pvc_image = pvc_image_dict[uuid]
+                    if uuid not in local_image_dict.keys():
+                        status = pvc_image.status
+                        pvc_name = pvc_image.name
+
+                        # Only add images from PowerVC that are 'active', and
+                        # that are accessible on our Storage Connectivity
+                        # Group
+                        if status and status == 'active':
+
+                            # Add or activate the local image
+                            self._add_or_activate_local_image(
+                                pvc_image, local_image_owner,
+                                pvc_v2client.http_client.endpoint,
+                                v1local_images, v2local_images)
+                        else:
+
+                            # PowerVC images that are not in the active state
+                            # will not be tracked, and so their updated_at
+                            # timestamp will not be stored.
+                            LOG.debug(_('Image \'%s\' with UUID %s not created'
+                                        ' during periodic image '
+                                        'synchronization because the image '
+                                        'status is %s'), pvc_name, uuid,
+                                      status)
+            # All done! Set the periodic sync result as passed so subsequent
+            # periodic syncs will run at the specified interval.
+            sync_result = constants.SYNC_PASSED
+
+            # Format results for summary display
+            stat_l = '{0:d}/{1:d}/{2:d}'.format(self.local_created_count,
+                                                self.local_updated_count,
+                                                self.local_deleted_count)
+            stat_p = '{0:d}/{1:d}/{2:d}'.format(self.pvc_created_count,
+                                                self.pvc_updated_count,
+                                                self.pvc_deleted_count)
+            stats = '(local:{0}, powervc:{1})'.format(stat_l, stat_p)
+
+            # Calculate the elapsed time
+            end_time = time.time()
+            elapsed_time = '{0:.4} seconds'.format(end_time - start_time)
+            LOG.info(_('Periodic image synchronization is complete. Elapsed '
+                       'time: %s %s'), elapsed_time, stats)
+        except Exception as e:
+            LOG.exception(_('An error occurred during periodic image '
+                            'synchronization: %s'), e)
+            LOG.info(_('Periodic image synchronization did not complete '
+                       'successfully. It will be run again in %s seconds.'),
+                     CONF['powervc'].image_sync_retry_interval_time_in_seconds)
+        finally:
+
+            # Tell the ImageSyncController that periodic sync has ended
+            self.image_sync_controller.set_periodic_sync_result(sync_result)
+
+    def _add_or_activate_local_image(self, pvc_image, local_image_owner,
+                                     endpoint, v1local_images,
+                                     v2local_images):
+        """
+        Add or activate a local hosting OS image from a PowerVC image.
+
+        This is called when a new local image is to be added. The PowerVC
+        image is first checked for the local UUID property. If it exists, the
+        image is a snapshot image, and the local UUID property specifies the
+        local snapshot image that is queued and is awaiting activation.
+
+        :param: pvc_image The PowerVC image to add or activate on the local
+                hosting OS
+        :param: local_image_owner The local image owner id
+        :param: endpoint The PowerVC client endpoint to use for the image
+                location
+        :param: v1local_images The local hostingOS v1 image manager of the
+                image controller to use
+        :param: v2local_images The local hostingOS v2 image controller to use
+        """
+
+        # Check here for an existing local image. If one exists for this
+        # PowerVC image, just update it. This can happen if an instance
+        # capture was performed and a snapshot image was created, but no
+        # events were received for the newly created image yet, so the local
+        # image doesn't yet contain the powervc_uuid property.
+        local_image = None
+        pvc_id = pvc_image.id
+        pvc_name = pvc_image.name
+        props = self._get_image_properties(pvc_image.to_dict())
+        if props and consts.LOCAL_UUID_KEY in props.keys():
+
+            # Look for the LOCAL_UUID_KEY in the PowerVC image. If it is
+            # found, it will be used to get the local image. This should be
+            # set when an instance is captured and a snapshot image is
+            # created on PowerVC.
+            local_id = props.get(consts.LOCAL_UUID_KEY)
+            if self._local_image_exists(local_id, v1local_images):
+                local_image = self._get_image(pvc_id, local_id, pvc_name,
+                                              v1local_images, v2local_images)
+
+        # Update the image if it is in the local hosting OS, else add it
+        if local_image is not None:
+            LOG.info(_('The local hosting OS image \'%s\' with PowerVC UUID %s'
+                       ' already exists so it will be updated.'), pvc_name,
+                     pvc_id)
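+            # Illustrative sketch of the update-versus-create decision above
+            # (condensed from this method; props and the helpers are the ones
+            # used here):
+            #
+            #     local_id = props.get(consts.LOCAL_UUID_KEY)
+            #     if local_id and self._local_image_exists(local_id,
+            #                                              v1local_images):
+            #         ...update/activate the queued snapshot image...
+            #     else:
+            #         ...create a new local image...
+            #
+            # Updating instead of creating avoids duplicating the local
+            # snapshot image of an in-progress instance capture.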
+            # If this is a snapshot image, it may not have an entry in the
+            # ids dict, so add one here.
+            self.ids_dict[pvc_id] = local_image.id
+            LOG.info(_('Performing update sync of snapshot image \'%s\' from '
+                       'PowerVC to the local hosting OS to activate the '
+                       'image.'), local_image.name)
+
+            # Update sync the PowerVC image to the local snapshot image to
+            # activate it
+            updated_image = self._update_local_image(pvc_id, pvc_image,
+                                                     local_image,
+                                                     v1local_images,
+                                                     v2local_images)
+            if updated_image is None:
+                LOG.error(_('Local hosting OS snapshot image \'%s\' for '
+                            'PowerVC UUID %s was not activated during '
+                            'image synchronization. It will be activated again'
+                            ' during the next image synchronization '
+                            'operation.'), local_image.name, pvc_id)
+            else:
+                self.local_updated_count += 1
+
+                # Capture the current update times for use during the next
+                # periodic sync operation. The update times are stored in
+                # dicts with the PowerVC UUID as the key and the updated_at
+                # image attribute as the value.
+                self.local_updated_at[pvc_id] = updated_image.updated_at
+                self.pvc_updated_at[pvc_id] = pvc_image.updated_at
+
+                # Save the PowerVC image as the master image
+                self.master_image[pvc_id] = pvc_image
+        else:
+            LOG.info(_('Creating image \'%s\' on the local hosting OS'),
+                     pvc_name)
+            new_image = self._add_local_image(pvc_id, pvc_image,
+                                              local_image_owner, endpoint,
+                                              v1local_images, v2local_images)
+            if new_image is None:
+                LOG.error(_('Local hosting OS image \'%s\' for PowerVC UUID %s'
+                            ' was not created during image synchronization.'),
+                          pvc_name, pvc_id)
+            else:
+                self.local_created_count += 1
+
+                # Capture the current update times for use during the next
+                # periodic sync operation. The update times are stored in
+                # dicts with the PowerVC UUID as the key and the updated_at
+                # image attribute as the value.
+                self.pvc_updated_at[pvc_id] = pvc_image.updated_at
+                self.local_updated_at[pvc_id] = new_image.updated_at
+
+                # Save the PowerVC image as the master_image
+                self.master_image[pvc_id] = pvc_image
+
+                # Save the ids in the ids_dict
+                self.ids_dict[pvc_id] = new_image.id
+
+    def _update_with_merged_images(self, uuid, local_image, pvc_image,
+                                   v1local_images, v2local_images,
+                                   v1pvc_images, v2pvc_images):
+        """
+        Both the local hostingOS image and the PowerVC image have been
+        updated. Merge the two images with the master_image to come up
+        with the image that will be used to update both the local hostingOS
+        and PowerVC.
+
+        If an image first appears on PowerVC and the local hostingOS without
+        events, there will be no master_image set. In that case, use the
+        oldest image as the master_image, and then merge in the newest image.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: local_image The local hostingOS copy of the image
+        :param: pvc_image The PowerVC copy of the image
+        :param: v1local_images The local hostingOS v1 image manager of the
+                image controller to use
+        :param: v2local_images The local hostingOS v2 image controller to use
+        :param: v1pvc_images The PowerVC v1 image manager of the image
+                controller to use
+        :param: v2pvc_images The PowerVC v2 image controller to use
+        :returns: A tuple containing the updated local hostingOS image and the
+                  updated PowerVC image. If a problem was encountered
+                  updating either image, None is returned for that image.
+ """ + try: + local_updated_at = self._get_v1_datetime(local_image.updated_at) + pvc_updated_at = self._get_v1_datetime(pvc_image.updated_at) + LOG.debug(_('local_updated_at %s, pvc_updated_at %s'), + local_updated_at, pvc_updated_at) + except Exception as e: + LOG.exception(_('An error occurred determining image ' + 'update time for %s: %s'), local_image.name, e) + + # Updated images to return to the caller + updated_local_image = None + updated_pvc_image = None + if local_updated_at and pvc_updated_at: + LOG.info(_('Image \'%s\' for PowerVC UUID %s was updated on the ' + 'local hostingOS, and on PowerVC. Attempting to ' + 'merge the changes together and update both with ' + 'the result.'), local_image.name, uuid) + + # If we have a master copy of the image we can merge changes from + # the local hostingOS and PowerVC. If there is no master copy of + # the image, use the oldest image as the master copy to merge with. + if uuid not in self.master_image.keys(): + LOG.debug(_('A master copy of image \'%s\' for PowerVC UUID %s' + ' is not available. The oldest image will be the ' + 'master copy used to merge the newer changes' + 'with.'), local_image.name, uuid) + if (local_updated_at > pvc_updated_at): + LOG.debug(_('The PowerVC image \'%s\' with UUID %s will be' + ' the master copy to merge with.'), + pvc_image.name, uuid) + + # The PowerVC image will be the master copy for the merge. + # Get a copy of the PowerVC image to use as the master. + master_image = self._get_image(uuid, pvc_image.id, + pvc_image.name, + v1pvc_images, v2pvc_images) + else: + LOG.debug(_('The local hostingOS image \'%s\' for PowerVC ' + 'UUID %s will be the master copy to merge ' + 'with.'), local_image.name, uuid) + + # The local hostingOS image will be the master copy for the + # Get a copy of the local hostingOS image to use as the + # master. + master_image = self._get_image(uuid, local_image.id, + local_image.name, + v1local_images, + v2local_images) + else: + master_image = self.master_image[uuid] + + # Determine what has changed in the hostingOS and PowerVC images. + # This is done by first comparing the older image with the master + # copy of the image, and then the newer image with the master copy + # of the image. Then any changes are merged into the master copy of + # the image, and that is used to update sync both the hostingOS and + # PowerVC images. + attribute_changes = {} + property_changes = {} + deleted_property_keys = [] + if local_updated_at > pvc_updated_at: + self._get_image_changes(pvc_image, master_image, + attribute_changes, property_changes, + deleted_property_keys) + self._get_image_changes(local_image, master_image, + attribute_changes, property_changes, + deleted_property_keys) + else: + self._get_image_changes(local_image, master_image, + attribute_changes, property_changes, + deleted_property_keys) + self._get_image_changes(pvc_image, master_image, + attribute_changes, property_changes, + deleted_property_keys) + + # Merge the image attribute and property changes found with a copy + # of the master image and update sync the master image + # with the local hostingOS and PowerVC. + self._merge_image_changes(attribute_changes, property_changes, + deleted_property_keys, master_image) + + # Update both PowerVC and the local hostingOS images with the + # master copy. The same rule applies here as elsewhere. The + # updated_at timestamp dicts, and the master_image will not be + # reset until both updates are successful. 
That way, if one fails, + # the merge will be tried again in the next periodic scan. An + # attempt will first be made to update the local hostingOS image + # since it is customer facing. If that is successful, the + # PowerVC image is updated. + LOG.info(_('Performing update sync of image \'%s\' from merged ' + 'master image to the local hosting OS for PowerVC UUID ' + '%s'), master_image.name, uuid) + + # Update sync master image to local hostingOS. This merge could be + # of a PowerVC active snapshot image to a hostingOS queued snapshot + # image. In that case, the master_image status must be set to + # active for the hostingOS update to work properly. Modify the + # image by setting the attribute first, and then the _info dict. + if pvc_image.status == 'active': + setattr(master_image, 'status', pvc_image.status) + master_image._info['status'] = pvc_image.status + LOG.debug(_('Master image for local: %s'), str(master_image)) + updated_local_image = self._update_local_image(uuid, master_image, + local_image, + v1local_images, + v2local_images) + if updated_local_image is None: + LOG.error(_('Local hosting OS image \'%s\' for PowerVC UUID %s' + ' was not updated. The PowerVC image was also not ' + 'updated. An attempt to synchronize both will be ' + 'tried again during the next periodic image ' + 'synchronization operation.'), local_image.name, + uuid) + else: + LOG.info(_('Performing update sync of image \'%s\' from the ' + 'merged master image to PowerVC for PowerVC UUID ' + '%s'), master_image.name, uuid) + + # Update sync master image to PowerVC + LOG.debug(_('Master image for pvc: %s'), str(master_image)) + updated_pvc_image = self._update_pvc_image(uuid, master_image, + pvc_image, + v1pvc_images, + v2pvc_images) + if updated_pvc_image is None: + LOG.error(_('PowerVC image \'%s\' with UUID %s was not ' + 'updated, however, the corresponding local ' + 'hostingOS image was updated. An attempt to ' + 'synchronize both will be tried again during ' + 'the next periodic image synchronization ' + 'operation.'), pvc_image.name, uuid) + else: + + # Capture the current update times for use during the next + # periodic sync operation. The update times are stored in + # dicts with the PowerVC UUID as the key and the updated_at + # image attribute as the values. + self.local_updated_at[uuid] = \ + updated_local_image.updated_at + self.pvc_updated_at[uuid] = updated_pvc_image.updated_at + + # Save the PowerVC image as the master_image + self.master_image[uuid] = updated_pvc_image + else: + + # There was an error getting the updated_at time for an image. + # This should not happen, but if it does, sync the PowerVC image + # to the local hosting OS + LOG.info(_('Performing update sync of image \'%s\' from PowerVC to' + ' the local hosting OS'), local_image.name) + + # Update sync PowerVC image to local hostingOS + updated_local_image = self._update_local_image(uuid, pvc_image, + local_image, + v1local_images, + v2local_images) + if updated_local_image is None: + LOG.error(_('Local hosting OS image \'%s\' for PowerVC UUID %s' + ' was not updated during periodic ' + 'synchronization.'), local_image.name, uuid) + else: + + # Capture the current update times for use during the next + # periodic sync operation. The update times are stored in dicts + # with the PowerVC UUID as the keys and the updated_at image + # attribute as the values. 
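+                # As in the merge path above, the bookkeeping is only written
+                # on success. A sketch of the pattern (helper names are
+                # placeholders, not module code):
+                #
+                #     updated = do_update_sync(src_image, tgt_image)
+                #     if updated is not None:
+                #         remember(updated.updated_at, src_image.updated_at)
+                #
+                # If an update fails, the stale timestamps make the image
+                # look changed on the next scan, so the sync is retried.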
+ self.local_updated_at[uuid] = updated_local_image.updated_at + self.pvc_updated_at[uuid] = pvc_image.updated_at + + # Save the PowerVC image as the master_image + self.master_image[uuid] = pvc_image + + # return the updated images to the caller + return updated_local_image, updated_pvc_image + + def _get_image_changes(self, updated_image, master_image, + attribute_changes, property_changes, + deleted_property_keys): + """ + Compare the updated image with the master copy of the image. Look at + the UPDATE_PARAMS and properties for any changes. Image attributes + can only be added or updated. Image properties can only be added, + updated, or deleted. Any image attribute or property changed is + added to the dict of image changes. Any deleted properties are added + to the dict of deleted properties. + + This method is first called for the side that is oldest, and then the + more recent side. If a property is deleted on one side, it will be + kept if it was updated on the more recent side. The most recent changes + are used over the older ones. + + When looking for image changes, filter out the appropriate attributes + and properties using the update filters. + + :param: updated_image The updated image to check for changes against + the master copy of the image + :param: master_image The master copy of the image to compare to + :param: attribute_changes The dict of image attribute changes. + :param: property_changes The dict of image property changes. + :param: deleted_property_keys The list of deleted image property keys. + """ + updated_image_dict = updated_image.to_dict() + master_image_dict = master_image.to_dict() + + # Process the image attributes we care about + for imagekey in updated_image_dict.keys(): + + # Only update attributes in UPDATE_PARAMS if they are not in the + # update param filter list. Also, skip over the properties + # attribute and process those separately. + if imagekey in v1images.UPDATE_PARAMS and \ + imagekey not in constants.IMAGE_UPDATE_PARAMS_FILTER and \ + imagekey != 'properties': + field_value = updated_image_dict.get(imagekey) + if field_value is not None: + + # If the key is not in the master image, add it. If it + # is in the master_image, see if it has changed. + if imagekey not in master_image_dict.keys(): + attribute_changes[imagekey] = field_value + elif field_value != master_image_dict.get(imagekey): + attribute_changes[imagekey] = field_value + + # Process the image properties + updated_props = self._get_image_properties(updated_image_dict, {}) + master_props = self._get_image_properties(master_image_dict, {}) + for propkey in updated_props.keys(): + if propkey not in constants.IMAGE_UPDATE_PROPERTIES_FILTER: + prop_value = updated_props.get(propkey) + if prop_value is not None: + + # If the property is not in the master image, add it. If it + # is in the master_image, see if it has changed. + if propkey not in master_props.keys(): + property_changes[propkey] = prop_value + elif prop_value != master_props.get(propkey): + + # The property has changed. If this property + # is in the deleted_properties dict, from a + # previous call, remove it. It has been updated + # on the other server so keep it for now. + property_changes[propkey] = prop_value + if propkey in deleted_property_keys: + deleted_property_keys.remove(propkey) + + # Detect any deleted properties. Those are properties that are in the + # master image, but no longer available in the updated image. The + # filtered properties will not be looked at. 
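+        # In dict terms, the deletion check below is key-set subtraction
+        # restricted by the property filter; an equivalent sketch:
+        #
+        #     deleted = [k for k in master_props
+        #                if k not in updated_props and
+        #                k not in constants.IMAGE_UPDATE_PROPERTIES_FILTER]
+        #
+        # A property deleted on the older side but updated on the newer side
+        # was already removed from deleted_property_keys in the loop above.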
+ for propkey in master_props.keys(): + if propkey not in constants.IMAGE_UPDATE_PROPERTIES_FILTER and \ + propkey not in updated_props.keys(): + deleted_property_keys.append(propkey) + + def _merge_image_changes(self, attribute_changes, property_changes, + deleted_property_keys, master_image): + """ + Go through all of the image attribute and property changes, and + apply them to the master copy of the image. + + :param: attribute_changes The dict of image attribute changes. + :param: property_changes The dict of image property changes. + :param: deleted_property_keys The list of deleted image property keys. + :param: master_image The master copy of the image to merge + changes into + """ + # Merge the changes into the master_image which is a v1 Image. A v1 + # Image has both attributes and a Resource _info dict. To modify a v1 + # Image we must first set the attribute, followed by the _info dict. + # The _info dict is important here. It is what is used when updating + # the image. We will try to update both to be complete, but testing has + # shown that the setattr does not work as expected here. + LOG.debug(_('attribute changes: %s'), str(attribute_changes)) + LOG.debug(_('property changes: %s'), str(property_changes)) + LOG.debug(_('deleted properties: %s'), str(deleted_property_keys)) + for key in attribute_changes.keys(): + if key in master_image._info.keys() and hasattr(master_image, key): + setattr(master_image, key, attribute_changes.get(key)) + master_image._info[key] = attribute_changes.get(key) + else: + + # This is unexpected so log a warning + LOG.warning(_('Image attribute \'%s\' was not updated for ' + 'image \'%s\'.'), key, master_image.name) + + # Process image properties + master_props = self._get_image_properties(master_image._info, {}) + + # Process property adds and updates + for prop_key in property_changes.keys(): + master_props[prop_key] = property_changes.get(prop_key) + + # Process property deletes + for prop_key in deleted_property_keys: + if prop_key in master_props.keys(): + master_props.pop(prop_key) + + # Reset the image properties + master_image.properties = master_props + LOG.debug(_('Master image for merge: %s'), str(master_image)) + + def _get_image(self, uuid, image_id, image_name, v1images, v2images): + """ + Get the specified image using the v1 API. If the image has one or more + large properties, get the v2 image, and fixup the properties of the v1 + image. + + :param: uuid The PowerVC UUID of the image + :param: image_id The identifier of the image to get + :param: image_name The name of the image to get. This is optional. It + is used for logging. + :param: v1images The image manager of the image controller to use + :param: v2images The image controller to use + :returns: The v1 image specified, or None if the image could not be + obtained + """ + try: + v1image = v1images.get(image_id) + props = self._get_image_properties(v1image.to_dict(), {}) + large_props = {} + for propkey in props.keys(): + propval = props.get(propkey) + + # If the property value is large, read it in with the v2 GET + # API to make sure we get the whole thing. Setting a limit of + # MAX_HEADER_LEN_V1/2 seems to work well. 
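+                # Sketch of the recovery step that follows (the threshold and
+                # names match this method): a value whose string form is at
+                # least half of MAX_HEADER_LEN_V1 is re-read with the v2 API,
+                # which returns the image as a dict and is not subject to the
+                # v1 header-length limit:
+                #
+                #     if len(str(propval)) >= constants.MAX_HEADER_LEN_V1 / 2:
+                #         v2image = v2images.get(image_id)
+                #         if propkey in v2image.keys():
+                #             propval = v2image[propkey]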
+                if propval is not None and len(str(propval)) >= \
+                        constants.MAX_HEADER_LEN_V1 / 2:
+                    large_props[propkey] = propval
+            if large_props:
+                v2image = v2images.get(image_id)
+                for propkey in large_props.keys():
+                    if propkey in v2image.keys():
+                        props[propkey] = v2image[propkey]
+                self._unescape(props)
+                v1image.properties = props
+                v1image._info['properties'] = props
+            return v1image
+        except CommunicationError as e:
+            raise e
+        except Exception as e:
+            LOG.exception(_('An error occurred getting image \'%s\' for '
+                            'PowerVC UUID %s: %s'), image_name, uuid, e)
+            return None
+
+    def _delete_local_image(self, uuid, image, v1images):
+        """
+        Delete the specified local image using the v1 API.
+
+        Also, set to ignore any image delete events that may be generated by
+        the image delete operation here.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: image The v1 image to delete
+        :param: v1images The image manager of the image controller to use
+        :returns: The deleted v1 image if the delete was successful, else None
+        """
+        deleted_image = self._delete_image(uuid, image, v1images)
+        if deleted_image is not None:
+            self._ignore_local_event(constants.IMAGE_EVENT_TYPE_DELETE,
+                                     deleted_image.to_dict())
+        return deleted_image
+
+    def _delete_pvc_image(self, uuid, image, v1images):
+        """
+        Delete the specified PowerVC image using the v1 API.
+
+        Also, set to ignore any image delete events that may be generated by
+        the image delete operation here.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: image The v1 image to delete
+        :param: v1images The image manager of the image controller to use
+        :returns: The deleted v1 image if the delete was successful, else None
+        """
+        deleted_image = self._delete_image(uuid, image, v1images)
+        if deleted_image is not None:
+            self._ignore_pvc_event(constants.IMAGE_EVENT_TYPE_DELETE,
+                                   deleted_image.to_dict())
+        return deleted_image
+
+    def _delete_image(self, uuid, image, v1images):
+        """
+        Delete the specified image using the v1 API.
+
+        This method should not be called directly. It should only be called
+        by _delete_local_image and _delete_pvc_image.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: image The v1 image to delete
+        :param: v1images The image manager of the image controller to use
+        :returns: The deleted v1 image if the delete was successful, else None
+        """
+        try:
+            deleted_image = image
+            v1images.delete(image)
+            return deleted_image
+        except CommunicationError as e:
+            raise e
+        except HTTPNotFound:
+            LOG.info(_('An attempt was made to delete image \'%s\' for PowerVC'
+                       ' UUID %s, but the image was not found.'), image.name,
+                     uuid)
+            return deleted_image
+        except Exception as e:
+            LOG.exception(_('An error occurred deleting image '
+                            '%s for PowerVC UUID %s: %s'),
+                          image.name, uuid, e)
+            return None
+
+    def _add_local_image(self, uuid, src_image, image_owner, image_endpoint,
+                         v1images, v2images):
+        """
+        Add the image represented by the source image using the v1
+        and v2 APIs. The local hostingOS image is returned to the caller.
+
+        We currently only add images to the local hosting OS.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: src_image The source v1 image to add
+        :param: image_owner The id of the image owner
+        :param: image_endpoint The endpoint to use for the image location
+        :param: v1images The v1 image manager to use for creating
+        :param: v2images The v2 image controller to use for patching
+        :returns: The added v1 image. If a v2 PATCH update was performed
+                  after the create, the refreshed image from that update is
+                  returned, else the image from the v1 create is returned.
+                  None is returned if the image could not be added.
+        """
+        image1, image2 = self._add_image(uuid, src_image, image_owner,
+                                         image_endpoint, v1images, v2images)
+
+        # FIXME - Should we also ignore the activate event?
+        # FIXME - Do we get an update event for a create/activate?
+        # Set to ignore any update events generated by adding the image
+        create_event_type = constants.IMAGE_EVENT_TYPE_CREATE
+        activate_event_type = constants.IMAGE_EVENT_TYPE_ACTIVATE
+        update_event_type = constants.IMAGE_EVENT_TYPE_UPDATE
+        if image1 is not None:
+            self._ignore_local_event(create_event_type, image1.to_dict())
+            self._ignore_local_event(activate_event_type, image1.to_dict())
+            self._ignore_local_event(update_event_type, image1.to_dict())
+        if image2 is not None:
+            self._ignore_local_event(update_event_type, image2.to_dict())
+        return image1 if image2 is None else image2
+
+    def _add_image(self, uuid, src_image, image_owner, image_endpoint,
+                   v1images, v2images):
+        """
+        Add the image represented by the source image using the v1
+        and v2 APIs. The local hostingOS images that result are returned to
+        the caller.
+
+        We currently only add images to the local hosting OS.
+
+        This method should not be called directly. It should only be called
+        by _add_local_image.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: src_image The source v1 image to add
+        :param: image_owner The id of the image owner
+        :param: image_endpoint The endpoint to use for the image location
+        :param: v1images The v1 image manager to use for creating
+        :param: v2images The v2 image controller to use for patching
+        :returns: A tuple containing the added v1 images. The first image
+                  returned is from the v1 image create, and the second
+                  image returned is from the v2 image PATCH update if any.
+        """
+        try:
+            field_dict, update_field_dict = self._get_v1image_create_fields(
+                src_image, image_owner, image_endpoint)
+
+            # The community fix requires that the 'checksum' property be set
+            field_dict['checksum'] = self._get_image_checksum(
+                src_image.to_dict())
+            new_image = v1images.create(**field_dict)
+            updated_image = None
+            if len(update_field_dict) > 0:
+
+                # After creating the image, update it with the
+                # remaining attributes and metadata. The v2 API
+                # PATCH update will figure out what to add,
+                # or replace. Deletes are not possible.
+                v2images.update(new_image.id, **update_field_dict)
+
+                # Refresh the v1 image to return after the update
+                updated_image = self._get_image(uuid, new_image.id,
+                                                new_image.name, v1images,
+                                                v2images)
+            return new_image, updated_image
+        except CommunicationError as e:
+            raise e
+        except Exception as e:
+            LOG.exception(_('An error occurred creating image \'%s\' for '
+                            'PowerVC UUID %s: %s'), src_image.name, uuid, e)
+            return None, None
+
+    def _update_local_image(self, uuid, src_image, tgt_image, v1images,
+                            v2images):
+        """
+        Update the local hostingOS target image with the source image
+        attributes and properties. If the update is being used to activate
+        the image, or if the image size is changing, the v1 Glance client is
+        used, else the v2 Glance client is used.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: src_image The source PowerVC v1 image to use for the update
+        :param: tgt_image The target local hostingOS v1 image to update
+        :param: v1images The v1 image manager to use for updating
+        :param: v2images The v2 image controller to use for patching
+        :returns: The updated v1 image, or None if the update was not
+                  successful.
+ """ + if ((src_image.status == 'active' and tgt_image.status == 'queued') or + (src_image.size != tgt_image.size)): + return self._v1update_local_image(uuid, src_image, tgt_image, + v1images, v2images) + else: + return self._v2update_local_image(uuid, src_image, tgt_image, + v1images, v2images) + + def _update_pvc_image(self, uuid, src_image, tgt_image, v1images, + v2images): + """ + Update the PowerVC target image with the source image attributes and + properties. If image size is changing, the v1 Glance client is used, + else the v2 Glance client is used. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source local hostingOS v1 image to use for the + update + :param: tgt_image The target PowerVC image to update + :param: v1images The v1 image manager to use for updating. + :param: v2images The v2 image controller to use for patching + :returns: The updated v1 image, or None if the update was not + successful. + """ + if src_image.size != tgt_image.size: + return self._v1update_pvc_image(uuid, src_image, tgt_image, + v1images, v2images) + else: + return self._v2update_pvc_image(uuid, src_image, tgt_image, + v1images, v2images) + + def _v1update_local_image(self, uuid, src_image, tgt_image, v1images, + v2images): + """ + Update the local hostingOS target image with the source image + attributes and properties using the v1 and v2 Glance clients. + + Also, set to ignore any image update events that may be generated by + the image update operation here. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source PowerVC v1 image to use for the update + :param: tgt_image The target local hostingOS v1 image to update + :param: v1images The v1 image manager to use for updating + :param: v2images The v2 image controller to use for patching + :returns: The updated v1 image, or None if the update was not + successful. + """ + image1, image2 = self._v1update_image(uuid, src_image, tgt_image, + v1images, v2images, + constants.LOCAL) + + # Set to ignore any update events generated by updating the image + add_event_type = constants.IMAGE_EVENT_TYPE_ACTIVATE + update_event_type = constants.IMAGE_EVENT_TYPE_UPDATE + if image1 is not None: + + # If this is going to activate an instance capture on the local + # hostingOS set to ignore the activate, and the update that comes + # along with every activate. + if src_image.status == 'active' and tgt_image.status == 'queued': + self._ignore_local_event(add_event_type, image1.to_dict()) + self._ignore_local_event(update_event_type, image1.to_dict()) + self._ignore_local_event(update_event_type, image1.to_dict()) + if image2 is not None: + self._ignore_local_event(update_event_type, image2.to_dict()) + return image1 if image2 is None else image2 + + def _v1update_pvc_image(self, uuid, src_image, tgt_image, v1images, + v2images): + """ + Update the PowerVC target image with the source image attributes and + properties using the v1 and v2 Glance clients. + + Also, set to ignore any image update events that may be generated by + the image update operation here. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source local hostingOS v1 image to use for the + update + :param: tgt_image The target PowerVC image to update + :param: v1images The v1 image manager to use for updating + :param: v2images The v2 image controller to use for patching + :returns: The updated v1 image, or None if the update was not + successful. 
+ """ + image1, image2 = self._v1update_image(uuid, src_image, tgt_image, + v1images, v2images, + constants.POWER_VC) + + # Set to ignore any update events generated by updating the image + event_type = constants.IMAGE_EVENT_TYPE_UPDATE + if image1 is not None: + self._ignore_pvc_event(event_type, image1.to_dict()) + if image2 is not None: + self._ignore_pvc_event(event_type, image2.to_dict()) + return image1 if image2 is None else image2 + + def _v1update_image(self, uuid, src_image, tgt_image, v1images, v2images, + target_type): + """ + Update the target image with the source image attributes and + properties using the v1 and v2 Glance clients. + + All image properties will only be updated using the v2 glance client. + Using the v1 glance client to update properties would result in any + image properties with null values being removed since those properties + are not synced. + + The v1 glance client must be used to update an image size attribute. + The v2 glance client does not support updating the image size. + + This is also called to finalize the snapshot image creation process. + When an instance is captured on the hostingOS, a snapshot image is + created on the hostingOS in the queued state, with a powervc_uuid + value set. When that snapshot image becomes active on PowerVC, the + hostingOS image is updated with the latest image attributes and + properties and it's location is set which cause the image's status to + go active. + + This method should not be called directly. It should only be called by + _v1update_local_image and _v1update_pvc_image. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source v1 image to use for the update + :param: tgt_image The target v1 image to update + :param: v1images The v1 image manager to use for updating + :param: v2images The v2 image controller to use for patching + :param: target_type The target image type (pvc or local) + :returns: A tuple containing the updated v1 images. The first image + returned is from the v1 image update, and the second + image returned is from the v2 image PATCH update if any. + """ + try: + field_dict, patch_dict, remove_list = \ + self._get_v1image_update_fields(src_image, tgt_image) + + # If the target image is on the hostingOS, and it's status is + # queued, and the source PowerVC image's status is active, write + # the location to the target image so that it's status will go + # active. This will take care of finalizing the snapshot image + # creation process. + if target_type == constants.LOCAL and \ + src_image.status == 'active' and \ + tgt_image.status == 'queued': + pvc_v2client = self._get_pvc_v2_client() + field_dict['location'] = self._get_image_location( + pvc_v2client.http_client.endpoint, src_image) + image1 = v1images.update(tgt_image, **field_dict) + image2 = None + if len(patch_dict) > 0: + + # Update the properties, and any large image attributes + v2images.update(tgt_image.id, remove_props=remove_list, + **patch_dict) + + # refresh the v1 image to return after the udpate + image2 = self._get_image(uuid, image1.id, image1.name, + v1images, v2images) + return image1, image2 + except CommunicationError as e: + raise e + except Exception as e: + LOG.exception(_('An error occurred updating image \'%s\' for ' + 'PowerVC UUID %s: %s'), tgt_image.name, uuid, e) + return None, None + + def _get_image_location(self, endpoint, v1image): + """ + Return the image location for the specified image and endpoint. 
+
+        :param: endpoint The v2 glance http client endpoint
+        :param: v1image The v1 image
+        :returns: The image location url
+        """
+        location = endpoint
+        if not location.endswith('/'):
+            location += '/'
+        location += constants.IMAGE_LOCATION_PATH
+        location += v1image.id
+        return location
+
+    def _get_v1image_update_fields(self, v1src_image, v1tgt_image):
+        """
+        Get the attributes and properties for an image update. Filter out
+        attributes and properties specified with filter constants.
+
+        All image properties will be separated from the image attributes
+        being updated. Image properties should not be updated using the v1
+        glance client. Doing so could remove any image properties with NULL
+        values since those properties are not synced.
+
+        :param: v1src_image The v1 image to pull attributes and properties
+                from, to be used for v1 and v2 image update operations.
+        :param: v1tgt_image The v1 image that is being updated.
+        :returns: A tuple containing the dict of image attribute fields to
+                  update using the v1 image update operation, the dict of the
+                  image properties to update using the v2 Image PATCH API,
+                  and the list of image properties to remove.
+        """
+        field_dict = {}
+        patch_dict = {}
+        remove_list = None
+        image_dict = v1src_image.to_dict()
+        src_props = self._get_image_properties(image_dict)
+        if src_props is not None:
+            tgt_image_dict = v1tgt_image.to_dict()
+            tgt_props = self._get_image_properties(tgt_image_dict, {})
+
+            # Add image properties to be patched after filtering out
+            # specified properties. Properties with NULL values have already
+            # been filtered by _get_image_properties(). Also, find image
+            # properties that need to be removed.
+            filtered_src_props = self._filter_v1image_properties(src_props)
+            filtered_tgt_props = self._filter_v1image_properties(tgt_props)
+
+            # Get the image property key sets
+            src_prop_set = set(filtered_src_props)
+            tgt_prop_set = set(filtered_tgt_props)
+
+            # Find the added/updated properties, and the removed properties.
+            # Updates are keys in both the source and the target
+            updates = src_prop_set.intersection(tgt_prop_set)
+
+            # Adds are keys in the source that are not in the target
+            adds = src_prop_set.difference(tgt_prop_set)
+
+            # Deletes are keys in the target that are not in the source.
+            deletes = tgt_prop_set.difference(src_prop_set)
+
+            # Get the adds and updates
+            for key in adds:
+                patch_dict[key] = filtered_src_props[key]
+
+            # Add all update keys, whether or not their values changed
+            for key in updates:
+                patch_dict[key] = filtered_src_props[key]
+
+            # Find the deletes. If there are none, the remove list stays None
+            if deletes:
+                remove_list = []
+                for key in deletes:
+                    remove_list.append(key)
+
+            # Set to not purge the properties in the image when processing
+            # the field_dict with the v1 glance client update. That will
+            # leave the properties there after the v1 update. The v2 image
+            # update will add, update, or remove the properties we care
+            # about.
+            field_dict['purge_props'] = False
+        else:
+
+            # If there are no properties in the source image, force the v1
+            # image update to purge all properties.
+            field_dict['purge_props'] = True
+        for imagekey in image_dict.keys():
+
+            # Only update attributes in UPDATE_PARAMS if they are not in the
+            # update param filter list.
Also, skip over the properties + # attribute since all properties were already added to patch_dict + if imagekey in v1images.UPDATE_PARAMS and \ + imagekey not in constants.IMAGE_UPDATE_PARAMS_FILTER and \ + imagekey != 'properties': + field_value = image_dict.get(imagekey) + if field_value is not None: + if len(str(field_value)) < constants.MAX_HEADER_LEN_V1: + field_dict[imagekey] = field_value + else: + patch_dict[imagekey] = field_value + return field_dict, patch_dict, remove_list + + def _filter_v1image_properties(self, props): + """ + Filter the v1 image properties. Only update properties that are not + None, and are not in the image update properties filter list. + + :param: props The image properties dict to filter + :returns: Filtered image properties dict + """ + filtered_props = {} + if props is not None: + for propkey in props.keys(): + propvalue = props[propkey] + if (propkey not in constants.IMAGE_UPDATE_PROPERTIES_FILTER and + propvalue is not None): + filtered_props[propkey] = propvalue + return filtered_props + + def _get_v1image_create_fields(self, v1image, owner, pvc_endpoint): + """ + Get the properties for an image create. + + This only works one way right now. Creating an image is only + done on the local hostingOS. If that changes in the future, this + method may need some changes. + + :param: image The v1image to copy + :param: owner The hosting OS image owner. This should be the + staging project or user Id + :param: pvc_endpoint The PowerVC endpoint to use for the image + location + :returns: The create_field_dict which is a dict of properties to use + with the v1 create function, and an update_field_dict + which is a dict of the properties to use with a + subsequent update of the newly created image. + """ + create_field_dict = {} + update_field_dict = {} + + # Remove large properties before processing. They will be added + # using a v2 update + image_dict = v1image.to_dict() + props = self._get_image_properties(image_dict) + if props is not None: + update_field_dict = self._remove_large_properties(props) + image_dict['properties'] = props + for imagekey in image_dict.keys(): + field_value = image_dict.get(imagekey) + if field_value is not None: + if imagekey in v1images.CREATE_PARAMS and \ + imagekey not in constants.IMAGE_CREATE_PARAMS_FILTER: + + # Set the hosting OS image owner to the staging project Id + if imagekey == 'owner': + field_value = owner + if len(str(field_value)) < constants.MAX_HEADER_LEN_V1: + create_field_dict[imagekey] = field_value + else: + update_field_dict[imagekey] = field_value + + # We require a 'location' with no actual image data, or the image will + # remain in the 'queued' state. There may be another way to do this. + if 'location' not in create_field_dict: + create_field_dict['location'] = self._get_image_location( + pvc_endpoint, v1image) + + # Add the PowerVC UUID property + props = create_field_dict.get('properties', {}) + props[consts.POWERVC_UUID_KEY] = v1image.id + create_field_dict['properties'] = props + return create_field_dict, update_field_dict + + def _remove_large_properties(self, properties): + """ + Remove any properties that are too large to be processed by the v1 APIs + and return them in a dict to the caller. After removing the single + properties that are too large the total size of the remaining + properties are examined. If the total properties size is too large + to be processed by the v1 APIs, the largest properties are removed + until the total properties size is within the size allowed. 
The + properties passed in are also modified. + + :param: properties. The properties dict to remove large properties + from. Large properties are removed from the original + properties dict + :returns: A dict containing properties that are too large to + be processed by v1 Image APIs + """ + too_large_properties = {} + property_size = {} + if properties is not None: + for propkey in properties.keys(): + propvalue = properties.get(propkey) + if propvalue is not None: + if len(str(propvalue)) >= \ + constants.MAX_HEADER_LEN_V1: + too_large_properties[propkey] = properties.pop(propkey) + else: + property_size[propkey] = len(str(propvalue)) + + # The properties that are too large for the v1 API have been + # removed, but it is still possible that the resulting properties + # are too large. If that is the case, remove the largest properties + # until the total properties size is less than the + # MAX_HEADER_LEN_V1 value. + if len(str(properties)) >= constants.MAX_HEADER_LEN_V1: + smaller_props = {} + for propkey, propsize in sorted(property_size.iteritems(), + key=itemgetter(1)): + if propsize and properties.get(propkey) is not None: + smaller_props[propkey] = properties.get(propkey) + if len(str(smaller_props)) >= \ + constants.MAX_HEADER_LEN_V1: + too_large_properties[propkey] = \ + properties.pop(propkey) + return too_large_properties + + def _v2update_local_image(self, uuid, src_image, tgt_image, v1images, + v2images): + """ + Update the local hostingOS target image with the source image + attributes and properties using the v2 Glance client. + + Also, set to ignore any image update events that may be generated by + the image update operation here. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source PowerVC v1 image to use for the update + :param: tgt_image The target local hostingOS v1 image to update + :param: v1images The v1 image manager to use for getting image + :param: v2images The v2 image controller to use for updating + :returns: The updated v1 image, or None if the update was not + successful. + """ + v1image = self._v2update_image(uuid, src_image, tgt_image, v1images, + v2images, constants.LOCAL) + + # Set to ignore any update events generated by updating the image + if v1image is not None: + self._ignore_local_event(constants.IMAGE_EVENT_TYPE_UPDATE, + v1image.to_dict()) + return v1image + + def _v2update_pvc_image(self, uuid, src_image, tgt_image, v1images, + v2images): + """ + Update the PowerVC target image with the source image attributes and + properties using the v2 Glance client. + + Also, set to ignore any image update events that may be generated by + the image update operation here. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source local hostingOS v1 image to use for the + update + :param: tgt_image The target PowerVC image to update + :param: v1images The v1 image manager to use for getting image + :param: v2images The v2 image controller to use for updating + :returns: The updated v1 image, or None if the update was not + successful. 
+ """ + v1image = self._v2update_image(uuid, src_image, tgt_image, v1images, + v2images, constants.POWER_VC) + + # Set to ignore any update events generated by updating the image + if v1image is not None: + self._ignore_pvc_event(constants.IMAGE_EVENT_TYPE_UPDATE, + v1image.to_dict()) + return v1image + + def _v2update_image(self, uuid, src_image, tgt_image, v1images, v2images, + target_type): + """ + Update the target image with the source image attributes and properties + using the v2 Glance client. + + This cannot be called to finalize the snapshot image creation process + Do not use this v2 update to activate an image. Use the v1 update to + activate images. + + This method should not be called directly. It should only be called by + _v2update_local_image and _v2update_pvc_image. + + :param: uuid The PowerVC UUID of the image + :param: src_image The source v1 image to use for the update + :param: tgt_image The target v1 image to update + :param: v1images The v1 image manager to use for getting image + :param: v2images The v2 image controller to use for updating + :param: target_type The target image type (pvc or local) + :returns: The updated v1 image, or None if the update was not + successful. + """ + try: + attr_dict, remove_list = self._get_v2image_update_fields(src_image, + tgt_image) + image = v2images.update(tgt_image.id, remove_props=remove_list, + **attr_dict) + + # Get the v1 image to return after the update + v1image = self._get_image(uuid, image['id'], image['name'], + v1images, v2images) + return v1image + except CommunicationError as e: + raise e + except Exception as e: + LOG.exception(_('An error occurred updating image \'%s\' for ' + 'PowerVC UUID %s: %s'), tgt_image.name, uuid, e) + return None + + def _get_v2image_update_fields(self, src_image, tgt_image): + """ + Get the attributes and properties for a v2 image update. Filter + out attributes and properties specified with filter constants. Also + flatten out the properties, converting them into v2 image attibutes. + + :param: src_image The v1 image to pull properties from to be used + for a v2 image update operation. + :param: tgt_image The v1 image to derived removed properties from to be + used for a v2 image update operation. + :returns: A tuple containing with the dict containing the properties + that are added or modified, and the list of the property + names that are to be removed during the v2 image update + operation. If no properties are to be deleted, the + remove list will be None + """ + + # Filter out any attributes that should not be updated + v1src_image_dict = \ + self._filter_v1image_for_v2_update(src_image.to_dict()) + v1tgt_image_dict = \ + self._filter_v1image_for_v2_update(tgt_image.to_dict()) + + # Convert v1 image to v2 image + v2src_image_dict = self._convert_v1_to_v2(v1src_image_dict) + v2tgt_image_dict = self._convert_v1_to_v2(v1tgt_image_dict) + + # Get the image key sets + src_image_set = set(v2src_image_dict) + tgt_image_set = set(v2tgt_image_dict) + + # Find the added/update attributes, and the removed attributes + # Updates are keys in both the source and the target + updates = src_image_set.intersection(tgt_image_set) + + # Adds are keys in the source that are not in the target + adds = src_image_set.difference(tgt_image_set) + + # Deletes are keys in the target that are not in the source. 
+ deletes = tgt_image_set.difference(src_image_set) + + # Get the adds and updates + add_update_dict = {} + for key in adds: + add_update_dict[key] = v2src_image_dict[key] + + # Add all update keys if their values are the same or different + for key in updates: + add_update_dict[key] = v2src_image_dict[key] + + # Find the deletes. If there are none, return None + if deletes: + remove_list = [] + for key in deletes: + remove_list.append(key) + else: + remove_list = None + + return add_update_dict, remove_list + + def _filter_v1image_for_v2_update(self, v1image_dict): + """ + Filter the v1 image dict. for a v2 update. Only update properties that + are not None, and are in UPDATE_PARAMS, and that are not in the v2 + image params filter list, or the image update properties filter list. + + :param: v1image_dict The v1 image dict to filter + :returns: A filtered v1 image dict + """ + filtered_image = {} + + # Process the image attributes we care about + for imagekey in v1image_dict.keys(): + + # Only update attributes in UPDATE_PARAMS if they are not in the + # update param filter list. Also, skip over the properties + # attribute and process those separately. + if imagekey in v1images.UPDATE_PARAMS and \ + imagekey not in constants.v2IMAGE_UPDATE_PARAMS_FILTER and \ + imagekey != 'properties': + field_value = v1image_dict.get(imagekey) + if field_value is not None: + filtered_image[imagekey] = field_value + + # Process the image properties + props = self._get_image_properties(v1image_dict) + if props is not None: + for propkey in props.keys(): + if propkey in constants.IMAGE_UPDATE_PROPERTIES_FILTER or \ + props[propkey] is None: + props.pop(propkey) + filtered_image['properties'] = props + return filtered_image + + def _convert_v1_to_v2(self, v1image_dict): + """ + Convert a v1 image update dict to a v2 image update dict. No attribute + or property filtering is done. + + :returns: The v2 image dict representation of the specified v1 image + to be used for a v2 image update + """ + v2image_dict = {} + for imagekey in v1image_dict.keys(): + + # The v1 is_public attribute should be converted to the v2 + # visibility attribute, and image properties are converted to image + # attributes + field_value = v1image_dict.get(imagekey) + if imagekey == 'is_public': + v2image_dict['visibility'] = \ + 'public' if field_value else 'private' + elif imagekey == 'properties': + props = field_value + if props is not None: + for prop_key in props.keys(): + v2image_dict[prop_key] = props[prop_key] + else: + v2image_dict[imagekey] = field_value + return v2image_dict + + def _get_local_images_and_ids(self, v1images): + """ + Get the local hosting OS v1 images, and return in a dict with the + PowerVC UUIDs as the keys. + + Also populate the ids_dict which is a map of the PowerVC image UUIDs to + the local hosting OS image UUIDs. + + :param: v1images The image manager used to obtain images from the + local hosting OS v1 glance client + :returns: A dict of the local hosting OS images with the PowerVC UUID + as the key and the image as the value + """ + local_images = {} + + # The v1 API on the hosting OS filters the images with is_public = True + # Get the public and non-public images. 
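+        # Schematically, that means one filtered listing per visibility,
+        # chained into a single pass (filter helpers as used below):
+        #
+        #     public = v1images.list(**self._get_limit_filter_params())
+        #     params = self._get_not_ispublic_filter_params(
+        #         self._get_limit_filter_params())
+        #     non_public = v1images.list(**params)
+        #     for image in itertools.chain(public, non_public):
+        #         ...
+        #
+        # Without the second listing, the v1 list() default of is_public =
+        # True would silently hide non-public images from the sync.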
+        params1 = self._get_limit_filter_params()
+        params2 = self._get_limit_filter_params()
+        params2 = self._get_not_ispublic_filter_params(params2)
+        for image in itertools.chain(v1images.list(**params1),
+                                     v1images.list(**params2)):
+
+            # Save the image in the dict if it is from PowerVC
+            if consts.POWERVC_UUID_KEY in image.properties.keys():
+
+                # If the image status is not active, only save the image if
+                # its pvc_id is not already known. Some snapshot images
+                # that are not 'active' can contain an incorrect pvc_id
+                # value. Don't add those images to the list if the image for
+                # that pvc_id was already found.
+                pvc_id = image.properties[consts.POWERVC_UUID_KEY]
+                if image.status != 'active' and pvc_id in local_images.keys():
+                    continue
+                local_images[pvc_id] = image
+                self.ids_dict[pvc_id] = image.id
+        return local_images
+
+    def _get_pvc_images(self, v1images):
+        """
+        Get the PowerVC v1 images, and return them in a dict with the PowerVC
+        UUIDs as the keys.
+
+        Only the images associated with our Storage Connectivity Group will
+        be returned.
+
+        If our Storage Connectivity Group cannot be found at this time, a
+        StorageConnectivityGroupNotFound exception is raised.
+
+        :param: v1images The image manager used to obtain images from the
+                PowerVC v1 glance client
+        :returns: A dict with the PowerVC UUID as the key and the image as
+                  the value. Only images for our Storage Connectivity Group
+                  will be returned
+        """
+        pvc_images = {}
+
+        # Get our SCG if specified, or None
+        try:
+            self.our_scg_list = self._get_our_scg_list()
+        except StorageConnectivityGroupNotFound as e:
+
+            # If our Storage Connectivity Group is not found on PowerVC,
+            # log the error, and raise the exception to end the startup or
+            # periodic sync operation. The startup or periodic sync will go
+            # into error retry mode managed by the ImageSyncController until
+            # the Storage Connectivity Group is found. If the Storage
+            # Connectivity Group goes away during a periodic sync, update and
+            # delete event processing will continue to work, but periodic
+            # sync will not work again until the Storage Connectivity Group
+            # is present. If the Storage Connectivity Group cannot be found
+            # during the startup sync, events will not be processed since the
+            # startup sync did not finish successfully.
+            LOG.error(_('The specified PowerVC Storage Connectivity Group was '
+                        'not found. No PowerVC images are available.'))
+            raise e
+
+        # We allow testing with our_scg set to None. We just have to comment
+        # out the check in __init__() and we can run with no SCG specified
+        # for testing purposes. In that case, work with all PowerVC images.
+        multi_scg_image_ids = set()
+        for scg in self.our_scg_list:
+            if scg is not None:
+                LOG.info(_('Getting accessible PowerVC images for Storage '
+                           'Connectivity Group \'%s\'...'),
+                         scg.display_name)
+
+                # Get all of the images for our SCG. If an error occurs, an
+                # exception will be raised, the image sync operation will
+                # fail, and the sync operation will be retried later.
+                scg_image_ids = \
+                    utils.get_utils().get_scg_image_ids(scg.id)
+
+                # If no SCG image ids were found, there are no images to
+                # retrieve for this Storage Connectivity Group.
+                if not scg_image_ids:
+                    LOG.warning(_('The specified PowerVC Storage Connectivity '
+                                  'Group \'%s\' has no images. No PowerVC '
+                                  'images are available.'), scg.display_name)
+                                  'images are available.'),
+                                scg.display_name)
+                else:
+                    multi_scg_image_ids.update(scg_image_ids)
+                    LOG.info(_('Found %s images for Storage Connectivity '
+                               'Group \'%s\''), str(len(scg_image_ids)),
+                             scg.display_name)
+
+        # The v1 API on PowerVC does not filter the images with is_public =
+        # True at this time. Get the public and non-public images. This does
+        # not seem to be required for PowerVC, but that could change.
+        params1 = self._get_limit_filter_params()
+        params2 = self._get_limit_filter_params()
+        params2 = self._get_not_ispublic_filter_params(params2)
+        for image in itertools.chain(v1images.list(**params1),
+                                     v1images.list(**params2)):
+
+            # If this image is accessible, add it to the dict
+            if not multi_scg_image_ids:
+                pvc_images[image.id] = image
+            else:
+                if image.id in multi_scg_image_ids:
+                    pvc_images[image.id] = image
+                else:
+
+                    # If we knew about the image before this, and it is now
+                    # being removed due to it not being in the SCG, we should
+                    # log a warning so the user knows why we are deleting the
+                    # image from the hosting OS. If we knew about the image
+                    # before, its UUID would be in the updated_at dict keys.
+                    if image.id in self.pvc_updated_at.keys():
+                        LOG.warning(_('Image \'%s\' is no longer accessible on'
+                                      ' Storage Connectivity Group. It '
+                                      'will be removed from the hosting OS.'),
+                                    image.name)
+                    else:
+                        LOG.debug(_('Image \'%s\' is not accessible on Storage'
+                                    ' Connectivity Group'), image.name)
+        return pvc_images
+
+    def _dump_image_info(self, local_images, pvc_images):
+        """
+        Dump out the current image information.
+
+        :param: local_images A dict of the local hostingOS images
+        :param: pvc_images A dict of the PowerVC images
+        """
+        # Dump the hostingOS image dict
+        LOG.debug(_('Local hosting OS image dict: %s'), str(local_images))
+        # Dump the PowerVC image dict
+        LOG.debug(_('PowerVC image dict: %s'), str(pvc_images))
+        # Dump the image ids dict
+        LOG.debug(_('Image ids dict: %s'), str(self.ids_dict))
+        # Dump the local updated_at dict
+        LOG.debug(_('Local hosting OS updated_at dict: %s'),
+                  str(self.local_updated_at))
+        # Dump the PowerVC updated_at dict
+        LOG.debug(_('PowerVC updated_at dict: %s'), str(self.pvc_updated_at))
+
+    def _local_image_updated(self, uuid, v1image):
+        """
+        Test whether the local hosting OS image has been updated.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: v1image The v1 representation of the image
+        :returns: True if the image has been updated or if there was a
+                  problem making the determination.
+        """
+        if uuid not in self.local_updated_at.keys():
+            return True
+        past = self.local_updated_at[uuid]
+        cur = v1image.updated_at
+        if past and cur:
+            try:
+                past_updated_datetime = self._get_v1_datetime(past)
+                cur_updated_datetime = self._get_v1_datetime(cur)
+                return past_updated_datetime != cur_updated_datetime
+            except Exception as e:
+                LOG.exception(_('An error occurred determining image update '
+                                'status for %s: %s'), v1image.name, e)
+                return True
+        else:
+            return True
+
+    def _pvc_image_updated(self, uuid, v1image):
+        """
+        Test whether the PowerVC image has been updated.
+
+        :param: uuid The PowerVC UUID of the image
+        :param: v1image The v1 representation of the image
+        :returns: True if the image has been updated or if there was a
+                  problem making the determination.
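+
+        For example (hypothetical timestamps), a remembered updated_at value
+        of '2014-05-08T17:01:18' compared against a current value of
+        '2014-05-08T17:05:00' is treated as an update; equal timestamps are
+        not.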
+        """
+        if uuid not in self.pvc_updated_at.keys():
+            return True
+        past = self.pvc_updated_at[uuid]
+        cur = v1image.updated_at
+        if past and cur:
+            try:
+                past_updated_datetime = self._get_v1_datetime(past)
+                cur_updated_datetime = self._get_v1_datetime(cur)
+                return past_updated_datetime != cur_updated_datetime
+            except Exception as e:
+                LOG.exception(_('An error occurred determining image update '
+                                'status for %s: %s'), v1image.name, e)
+                return True
+        else:
+            return True
+
+    def _get_v1_datetime(self, v1timestamp):
+        """
+        Get the datetime for a v1 timestamp formatted string. If the timestamp
+        has decimal seconds, truncate it.
+
+        :param: v1timestamp The v1 formatted timestamp string
+        """
+        if '.' in v1timestamp:
+            v1timestamp = v1timestamp.split('.')[0]
+        return timeutils.parse_strtime(v1timestamp,
+                                       constants.IMAGE_TIMESTAMP_FORMAT)
+
+    def _add_startup_sync_to_queue(self):
+        """
+        Add an event to the event queue to start the startup sync operation.
+        """
+        event = {}
+        event[constants.EVENT_TYPE] = constants.STARTUP_SCAN_EVENT
+        LOG.debug(_('Adding startup sync event to event queue: %s'),
+                  str(event))
+        self.event_queue.put(event)
+
+    def _add_periodic_sync_to_queue(self):
+        """
+        Add an event to the event queue to start the periodic sync operation.
+        This synchronizes the periodic scans with the image event processing.
+        """
+        event = {}
+        event[constants.EVENT_TYPE] = constants.PERIODIC_SCAN_EVENT
+        LOG.debug(_('Adding periodic sync event to event queue: %s'),
+                  str(event))
+        self.event_queue.put(event)
+
+    def _prepare_for_image_events(self):
+        """
+        Prepare for image event processing. This should be called after the
+        startup sync is successful, and then after every periodic sync
+        completes to make sure the image event handlers are running.
+
+        Expired event tuples are also cleared from the local and PowerVC
+        events to ignore lists.
+        """
+
+        # Remove expired event tuples from the event to ignore dicts
+        self._purge_expired_local_events_to_ignore()
+        self._purge_expired_pvc_events_to_ignore()
+
+        # Start the image notification event handlers to process changes if
+        # they are not currently running
+        self._start_local_event_handler()
+        self._start_pvc_event_handler()
+
+    def _start_local_event_handler(self):
+        """
+        Start the local hosting OS image notification event handler if it's
+        not already running.
+
+        The event handler is not started if the qpid_hostname is not specified
+        in the configuration.
+        """
+
+        # If already running, exit
+        if self.local_event_handler_running:
+            return
+
+        def local_event_reconnect_handler():
+            """
+            The reconnect handler will start a periodic scan operation.
+            """
+            LOG.info(_("Processing local event handler reconnection..."))
+            self._add_periodic_sync_to_queue()
+
+        try:
+
+            # See if the host is specified. If not, do not attempt to connect
+            # and register the event handler
+            host = CONF['openstack'].qpid_hostname
+            if host:
+                local_conn = messaging.LocalConnection(
+                    reconnect_handler=local_event_reconnect_handler,
+                    log=logging)
+                local_listener = local_conn.create_listener(
+                    constants.IMAGE_EVENT_EXCHANGE,
+                    constants.IMAGE_EVENT_TOPIC)
+
+                # Register the handler to begin processing messages
+                local_listener.register_handler(
+                    constants.IMAGE_EVENT_TYPE_ALL,
+                    self._local_image_notifications)
+                local_conn.start()
+                LOG.info(_('Monitoring local hosting OS for Image '
+                           'notification events...'))
+                self.local_event_handler_running = True
+            else:
+                LOG.warning(_('Local hosting OS image event handling could '
+                              'not be started because the qpid_hostname was '
+                              'not specified in the configuration file.'))
+        except Exception as e:
+            LOG.exception(_('An error occurred starting the local hosting OS '
+                            'image notification event handler: %s'), e)
+
+    def _start_pvc_event_handler(self):
+        """
+        Start the PowerVC image notification event handler if not already
+        running.
+
+        The event handler is not started if the powervc_qpid_hostname is
+        not specified in the configuration.
+        """
+
+        # If already running, exit
+        if self.pvc_event_handler_running:
+            return
+
+        def pvc_event_reconnect_handler():
+            """
+            The reconnect handler will start a periodic scan operation.
+            """
+            LOG.info(_("Processing PowerVC event handler reconnection..."))
+            self._add_periodic_sync_to_queue()
+
+        try:
+
+            # See if the host is specified. If not, do not attempt to connect
+            # and register the event handler
+            host = CONF['powervc'].qpid_hostname
+            if host:
+                pvc_conn = messaging.PowerVCConnection(
+                    reconnect_handler=pvc_event_reconnect_handler, log=logging)
+                pvc_listener = pvc_conn.create_listener(
+                    constants.IMAGE_EVENT_EXCHANGE,
+                    constants.IMAGE_EVENT_TOPIC)
+
+                # Register the handler to begin processing messages
+                pvc_listener.register_handler(
+                    constants.IMAGE_EVENT_TYPE_ALL,
+                    self._pvc_image_notifications)
+                pvc_conn.start()
+                LOG.info(_('Monitoring PowerVC for Image notification '
+                           'events...'))
+                self.pvc_event_handler_running = True
+            else:
+                LOG.warning(_('PowerVC image event handling could not be '
+                              'started because the powervc_qpid_hostname was '
+                              'not specified in the configuration file.'))
+
+        except Exception as e:
+            LOG.exception(_('An error occurred starting the PowerVC image '
+                            'notification event handler: %s'), e)
+
+    def _process_event_queue(self):
+        """
+        Process the event queue. When the image notification event handlers
+        are called, they place the image events on the event queue to be
+        processed synchronously here. When the sync_images method is called
+        periodically, it too places an event on the event queue for running
+        the periodic scan. This provides synchronization between the event
+        processing and the periodic scan.
+
+        The event queue events are a dict made up of the event type, the
+        context, and the message.
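+
+        An event on the queue looks like this (illustrative placeholders):
+
+            {constants.EVENT_TYPE: constants.LOCAL_IMAGE_EVENT,
+             constants.EVENT_CONTEXT: <security context>,
+             constants.EVENT_MESSAGE: <notification message>}
+
+        The startup and periodic scan events carry only the EVENT_TYPE key.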
+        """
+        while True:
+            event = self.event_queue.get()
+            try:
+                LOG.debug(_('local events to ignore: %s'),
+                          str(self.local_events_to_ignore_dict))
+                LOG.debug(_('pvc events to ignore: %s'),
+                          str(self.pvc_events_to_ignore_dict))
+                event_type = event.get(constants.EVENT_TYPE)
+                context = event.get(constants.EVENT_CONTEXT)
+                message = event.get(constants.EVENT_MESSAGE)
+                if event_type == constants.LOCAL_IMAGE_EVENT:
+                    LOG.debug(_('Processing a local hostingOS image event on '
+                                'the event queue: %s'), str(event))
+                    self._handle_local_image_notifications(context, message)
+                elif event_type == constants.PVC_IMAGE_EVENT:
+                    LOG.debug(_('Processing a PowerVC image event on '
+                                'the event queue: %s'), str(event))
+                    self._handle_pvc_image_notifications(context, message)
+                elif event_type == constants.PERIODIC_SCAN_EVENT:
+                    LOG.debug(_('Processing a periodic sync event on '
+                                'the event queue: %s'), str(event))
+                    self.periodic_sync()
+                elif event_type == constants.STARTUP_SCAN_EVENT:
+                    LOG.debug(_('Processing a startup sync event on '
+                                'the event queue: %s'), str(event))
+                    self.startup_sync()
+                else:
+                    LOG.debug(_('An unknown event type was found on the event '
+                                'queue: %s'), str(event))
+            except Exception as e:
+                LOG.exception(_('An error occurred processing the image event '
+                                'from the event queue: %s'), e)
+            finally:
+                self.event_queue.task_done()
+
+    def _local_image_notifications(self, context, message):
+        """
+        Place the local image event on the event queue for processing.
+
+        :param: context The event security context
+        :param: message The event message
+        """
+        event = {}
+        event[constants.EVENT_TYPE] = constants.LOCAL_IMAGE_EVENT
+        event[constants.EVENT_CONTEXT] = context
+        event[constants.EVENT_MESSAGE] = message
+        LOG.debug(_('Adding local image event to event queue: %s'), str(event))
+        self.event_queue.put(event)
+
+    def _handle_local_image_notifications(self, context, message):
+        """
+        Handle image notification events received from the local hosting OS.
+        Only the update, delete, activate, and create event types are
+        handled. The activate and create events are processed only to add the
+        new image to the updated_at dict and the ids_dict.
+
+        There is a scheme in place to keep events from ping-ponging back
+        and forth. If we are processing an event, we add the expected
+        event from PowerVC to the ignore list. Then when that event arrives
+        from PowerVC because of this update we will ignore it.
+
+        :param: context The event security context
+        :param: message The event message
+        """
+        if message is None:
+            LOG.debug(_('The local image event notification had no message!'))
+            return
+        event_type = message.get('event_type')
+        v1image_dict = message.get('payload')
+        if event_type == constants.IMAGE_EVENT_TYPE_UPDATE:
+            self._process_local_image_update_event(v1image_dict)
+        elif event_type == constants.IMAGE_EVENT_TYPE_DELETE:
+            self._process_local_image_delete_event(v1image_dict)
+        elif event_type == constants.IMAGE_EVENT_TYPE_ACTIVATE:
+            self._process_local_image_activate_event(v1image_dict)
+        elif event_type == constants.IMAGE_EVENT_TYPE_CREATE:
+            self._process_local_image_create_event(v1image_dict)
+        else:
+            LOG.debug(_('Did not process event: %s'), str(message))
+
+    def _process_local_image_update_event(self, v1image_dict):
+        """
+        Process a local hostingOS image update event.
+
+        :param: v1image_dict The updated v1 image dict
+        """
+        LOG.debug(_('Local hosting OS update event received: %s'),
+                  str(v1image_dict))
+
+        # Only process PowerVC images
+        event_type = constants.IMAGE_EVENT_TYPE_UPDATE
+        local_id = v1image_dict.get('id')
+        local_name = v1image_dict.get('name')
+        props = self._get_image_properties(v1image_dict)
+        if props and consts.POWERVC_UUID_KEY in props.keys():
+
+            # Determine if we should ignore this event
+            evt = self._get_event(constants.LOCAL, event_type, v1image_dict)
+            if self._get_local_event_to_ignore(evt) is not None:
+                LOG.debug(_('Ignoring event %s for %s'), str(evt), local_name)
+                return
+            else:
+                LOG.debug(_('Processing event %s for %s'), str(evt),
+                          local_name)
+
+            # Also ignore all image update events for images that are not
+            # active. Those would most likely be 'queued' images created
+            # during the instance capture process. There should be no
+            # corresponding image to process on the PowerVC yet.
+            if v1image_dict.get('status') != 'active':
+                LOG.debug(_('Ignoring image update event for \'%s\' because '
+                            'the image is not active.'), local_name)
+                return
+
+            # Process the event
+            pvc_id = props.get(consts.POWERVC_UUID_KEY)
+            try:
+                local_v1client = self._get_local_v1_client()
+                v1local_images = local_v1client.images
+                local_v2client = self._get_local_v2_client()
+                v2local_images = local_v2client.images
+                local_image = self._get_image(pvc_id, local_id, local_name,
+                                              v1local_images, v2local_images)
+                if local_image is None:
+                    LOG.debug(_('The local image \'%s\' with PowerVC UUID %s '
+                                'was not update synchronized because it could '
+                                'not be found.'), local_name, pvc_id)
+                    return
+
+                # Try processing the local image update
+                LOG.info(_('Performing update sync of image \'%s\' from the '
+                           'local hosting OS to PowerVC after an image update '
+                           'event'), local_image.name)
+
+                # Update sync local image to PowerVC
+                pvc_v1client = self._get_pvc_v1_client()
+                v1pvc_images = pvc_v1client.images
+                pvc_v2client = self._get_pvc_v2_client()
+                v2pvc_images = pvc_v2client.images
+                pvc_image = self._get_image(pvc_id, pvc_id, local_name,
+                                            v1pvc_images, v2pvc_images)
+
+                # Update the image if it is in PowerVC
+                if pvc_image is None:
+                    LOG.info(_('The PowerVC image \'%s\' with UUID %s was not '
+                               'updated because it could not be found.'),
+                             local_image.name, pvc_id)
+                    return
+
+                # If the PowerVC image has changed, do not update it. This
+                # only happens if we lost an event. In that case we need to
+                # wait for the periodic scan to merge changes.
+                if self._pvc_image_updated(pvc_id, pvc_image):
+                    LOG.info(_('The PowerVC image \'%s\' for PowerVC UUID %s '
+                               'has changed. Changes between the local '
+                               'hostingOS and the PowerVC image will be '
+                               'merged during the next periodic scan.'),
+                             pvc_image.name, pvc_id)
+                    return
+
+                # Perform the image update to PowerVC
+                image = self._update_pvc_image(pvc_id, local_image, pvc_image,
+                                               v1pvc_images, v2pvc_images)
+                if image is None:
+                    LOG.error(_('PowerVC image \'%s\' with UUID %s was not '
+                                'updated after an image update event.'),
+                              pvc_image.name, pvc_id)
+                    return
+
+                # NOTE: Do not reset the updated_at values until after both
+                # the local hostingOS image and PowerVC image are successfully
+                # updated.
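+                # (An assumed consequence of that ordering: had the update
+                # above failed, the entries below would be left stale and
+                # the next periodic scan would detect the difference and
+                # merge the changes.)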
+
+                # Since the hostingOS image was updated, update the entry
+                # in the updated_at dict so the change isn't processed
+                # during a periodic scan
+                if pvc_id in self.local_updated_at.keys():
+                    self.local_updated_at[pvc_id] = local_image.updated_at
+
+                # Attempt to update the entry for this image in the PowerVC
+                # updated_at dict so that it is not processed during a
+                # periodic sync due to this update.
+                if pvc_id in self.pvc_updated_at.keys():
+                    self.pvc_updated_at[pvc_id] = image.updated_at
+
+                # Set the new master image
+                self.master_image[pvc_id] = image
+                LOG.info(_('Completed update sync of image \'%s\' from the '
+                           'local hosting OS to PowerVC after an image update '
+                           'event'), local_image.name)
+            except Exception as e:
+                LOG.exception(_('An error occurred processing the local '
+                                'hosting OS image update event: %s'), e)
+
+    def _process_local_image_delete_event(self, v1image_dict):
+        """
+        Process a local hostingOS image delete event.
+
+        :param: v1image_dict The deleted v1 image dict
+        """
+        LOG.debug(_('Local hosting OS delete event received: %s'),
+                  str(v1image_dict))
+
+        def clean_up(uuid):
+            """
+            Clean up the updated_at and master_image copy for the deleted
+            image with the specified identifier. Also clean up the ids_dict.
+
+            :param: uuid The PowerVC UUID of the deleted image
+            """
+            if uuid in self.pvc_updated_at.keys():
+                self.pvc_updated_at.pop(uuid)
+            if uuid in self.master_image.keys():
+                self.master_image.pop(uuid)
+            if uuid in self.ids_dict.keys():
+                self.ids_dict.pop(uuid)
+
+            # Since the hostingOS image was deleted, remove the entry from
+            # the updated_at dict so the change isn't processed during a
+            # periodic scan. Only do this if the PowerVC image is also
+            # deleted, or the PowerVC image will not be deleted during
+            # the next periodic scan.
+            if uuid in self.local_updated_at.keys():
+                self.local_updated_at.pop(uuid)
+
+        # Only process PowerVC images
+        event_type = constants.IMAGE_EVENT_TYPE_DELETE
+        local_name = v1image_dict.get('name')
+        props = self._get_image_properties(v1image_dict)
+        if props and consts.POWERVC_UUID_KEY in props.keys():
+
+            # Determine if we should ignore this event
+            evt = self._get_event(constants.LOCAL, event_type, v1image_dict)
+            if self._get_local_event_to_ignore(evt) is not None:
+                LOG.debug(_('Ignoring event %s for %s'), str(evt), local_name)
+                return
+            else:
+                LOG.debug(_('Processing event %s for %s'), str(evt),
+                          local_name)
+
+            # Also ignore all image delete events for images that are not
+            # active. Those would most likely be 'queued' images created
+            # during the instance capture process. There should be no
+            # corresponding image to process on the PowerVC yet.
+            if v1image_dict.get('status') != 'active':
+                LOG.debug(_('Ignoring image delete event for \'%s\' because '
+                            'the image is not active.'), local_name)
+                return
+
+            # Process the event
+            pvc_id = props.get(consts.POWERVC_UUID_KEY)
+            try:
+
+                # Try processing the local image delete
+                LOG.info(_('Performing delete sync of image \'%s\' from the '
+                           'local hosting OS to PowerVC after an image delete '
+                           'event'), local_name)
+
+                # Delete sync local image to PowerVC
+                pvc_v1client = self._get_pvc_v1_client()
+                v1pvc_images = pvc_v1client.images
+                pvc_v2client = self._get_pvc_v2_client()
+                v2pvc_images = pvc_v2client.images
+                pvc_image = self._get_image(pvc_id, pvc_id, local_name,
+                                            v1pvc_images, v2pvc_images)
+
+                # Delete the image if it is in PowerVC
+                if pvc_image is None:
+                    LOG.info(_('The PowerVC image \'%s\' with UUID %s was not '
+                               'deleted because it could not be found.'),
+                             local_name, pvc_id)
+
+                    # Since the PowerVC image was deleted, remove the entry
+                    # from the updated_at dict so the change isn't processed
+                    # during a periodic scan. Also delete the master_image
+                    # copy.
+                    clean_up(pvc_id)
+                    return
+
+                # Perform the image delete to PowerVC
+                image = self._delete_pvc_image(pvc_id, pvc_image, v1pvc_images)
+                if image is None:
+                    LOG.error(_('PowerVC image \'%s\' with UUID %s could not '
+                                'be deleted after an image delete event.'),
+                              pvc_image.name, pvc_id)
+                    return
+
+                # Add the delete to the event ignore list so we don't process
+                # it again and try to delete the local hosting OS image again.
+                # Only do this if event handling is running.
+                self._ignore_pvc_event(event_type, image.to_dict())
+
+                # Since the PowerVC image was deleted, remove the entry
+                # from the updated_at dict so the change isn't processed
+                # during a periodic scan. Also delete the master_image
+                # copy.
+                clean_up(pvc_id)
+                LOG.info(_('Completed delete sync of image \'%s\' from the '
+                           'local hosting OS to PowerVC after an image delete '
+                           'event'), local_name)
+            except Exception as e:
+                LOG.exception(_('An error occurred processing the local '
+                                'hosting OS image delete event: %s'), e)
+
+    def _process_local_image_activate_event(self, v1image_dict):
+        """
+        Process a local hostingOS image activate event. All that is required
+        is to add the new image to the updated_at dict and make sure an entry
+        is in the ids_dict to map the image UUIDs.
+
+        :param: v1image_dict The activated v1 image dict
+        """
+        LOG.debug(_('Local hosting OS activate event received: %s'),
+                  str(v1image_dict))
+
+        # Only process PowerVC images
+        local_name = v1image_dict.get('name')
+        props = self._get_image_properties(v1image_dict)
+        if props and consts.POWERVC_UUID_KEY in props.keys():
+
+            # Determine if we should ignore this event
+            evt = self._get_event(constants.LOCAL,
+                                  constants.IMAGE_EVENT_TYPE_ACTIVATE,
+                                  v1image_dict)
+            if self._get_local_event_to_ignore(evt) is not None:
+                LOG.debug(_('Ignoring event %s for %s'), str(evt), local_name)
+                return
+            else:
+                LOG.debug(_('Processing event %s for %s'), str(evt),
+                          local_name)
+
+            # Add the new image to the updated_at dict so this add isn't
+            # processed during a periodic sync. This may already be there,
+            # but go ahead and update it anyway. The only way these can
+            # occur is for a new image that was created by a sync operation,
+            # or by an update of a snapshot image, setting the location
+            # value to activate it. In both cases, the PowerVC image is
+            # already there. There is no real add to process here.
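+
+            # (Illustrative, with hypothetical UUIDs: after this event the
+            # bookkeeping below leaves ids_dict mapping the PowerVC UUID,
+            # e.g. 'pvc-123', to the local image UUID, e.g. 'local-456',
+            # and records the local updated_at timestamp so the periodic
+            # scan does not treat this activate as a change.)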
+            pvc_id = props.get(consts.POWERVC_UUID_KEY)
+            self.local_updated_at[pvc_id] = v1image_dict.get('updated_at')
+
+            # Add an entry into the ids_dict
+            self.ids_dict[pvc_id] = v1image_dict.get('id')
+            LOG.debug(_('Completed processing of image activate event for '
+                        'image \'%s\' for PowerVC UUID %s'), local_name,
+                      pvc_id)
+
+    def _process_local_image_create_event(self, v1image_dict):
+        """
+        Process a local hostingOS image create event. All that is required
+        is to add the new image to the updated_at dict and make sure an entry
+        is in the ids_dict to map the image UUIDs. We will get this event on
+        the local hostingOS during an instance capture.
+
+        :param: v1image_dict The created v1 image dict
+        """
+        LOG.debug(_('Local hosting OS create event received: %s'),
+                  str(v1image_dict))
+
+        # Only process PowerVC images
+        local_name = v1image_dict.get('name')
+        props = self._get_image_properties(v1image_dict)
+        if props and consts.POWERVC_UUID_KEY in props.keys():
+
+            # Determine if we should ignore this event
+            evt = self._get_event(constants.LOCAL,
+                                  constants.IMAGE_EVENT_TYPE_CREATE,
+                                  v1image_dict)
+            if self._get_local_event_to_ignore(evt) is not None:
+                LOG.debug(_('Ignoring event %s for %s'), str(evt), local_name)
+                return
+            else:
+                LOG.debug(_('Processing event %s for %s'), str(evt),
+                          local_name)
+
+            # Add the new image to the updated_at dict so this add isn't
+            # processed during a periodic sync. This may already be there,
+            # but go ahead and update it anyway. The only way these can
+            # occur is for a new image that was created by a sync operation,
+            # or by an update of a snapshot image, setting the location
+            # value to activate it. In both cases, the PowerVC image is
+            # already there. There is no real add to process here.
+            pvc_id = props.get(consts.POWERVC_UUID_KEY)
+
+            # If the pvc_id is already known, this is probably the initial
+            # snapshot image from an instance capture. It will contain the
+            # pvc_id from the original image used to create the instance
+            # being captured. In that case, don't do the rest of the
+            # processing here.
+            if pvc_id not in self.local_updated_at.keys():
+                self.local_updated_at[pvc_id] = v1image_dict.get('updated_at')
+
+                # Add an entry into the ids_dict
+                self.ids_dict[pvc_id] = v1image_dict.get('id')
+                LOG.debug(_('Completed processing of image create event for '
+                            'image %s for PowerVC UUID %s'), local_name,
+                          pvc_id)
+        else:
+            LOG.debug(_('Did not process image create event for image '
+                        '\'%s\'. The PowerVC UUID is not known.'),
+                      local_name)
+
+    def _pvc_image_notifications(self, context, message):
+        """
+        Place the PowerVC image event on the event queue for processing.
+
+        :param: context The event security context
+        :param: message The event message
+        """
+        event = {}
+        event[constants.EVENT_TYPE] = constants.PVC_IMAGE_EVENT
+        event[constants.EVENT_CONTEXT] = context
+        event[constants.EVENT_MESSAGE] = message
+        LOG.debug(_('Adding PowerVC image event to event queue: %s'),
+                  str(event))
+        self.event_queue.put(event)
+
+    def _handle_pvc_image_notifications(self, context, message):
+        """
+        Handle image notification events received from PowerVC.
+        Only the activate, update, and delete event types are handled.
+
+        There is a scheme in place to keep events from ping-ponging back
+        and forth. If we are processing an event, we add the expected
+        event from the local hosting OS to the ignore list. Then when
+        that event arrives from the hosting OS because of this update we
+        will ignore it.
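+
+        An entry in the ignore list is keyed by the time it was added and
+        holds an event tuple of the form (side, event type, image UUID,
+        image checksum), e.g. (constants.LOCAL,
+        constants.IMAGE_EVENT_TYPE_UPDATE, <image UUID>, <md5 checksum>)
+        with illustrative placeholders. See _get_event() and
+        _get_image_checksum().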
+
+        :param: context The event security context
+        :param: message The event message
+        """
+        if message is None:
+            LOG.debug(_('The PowerVC image event notification had no '
+                        'message!'))
+            return
+        event_type = message.get('event_type')
+        v1image_dict = message.get('payload')
+        if event_type == constants.IMAGE_EVENT_TYPE_UPDATE:
+            self._process_pvc_image_update_event(v1image_dict)
+        elif event_type == constants.IMAGE_EVENT_TYPE_DELETE:
+            self._process_pvc_image_delete_event(v1image_dict)
+        elif event_type == constants.IMAGE_EVENT_TYPE_ACTIVATE:
+            self._process_pvc_image_activate_event(v1image_dict)
+        else:
+            LOG.debug(_('Did not process event: %s'), str(message))
+
+    def _process_pvc_image_update_event(self, v1image_dict):
+        """
+        Process a PowerVC image update event.
+
+        :param: v1image_dict The updated v1 image dict
+        """
+        LOG.debug(_('PowerVC update event received: %s'), str(v1image_dict))
+        event_type = constants.IMAGE_EVENT_TYPE_UPDATE
+        pvc_id = v1image_dict.get('id')
+        pvc_name = v1image_dict.get('name')
+
+        # Determine if we should ignore this event
+        evt = self._get_event(constants.POWER_VC, event_type, v1image_dict)
+        if self._get_pvc_event_to_ignore(evt) is not None:
+            LOG.debug(_('Ignoring event %s for %s'), str(evt), pvc_name)
+            return
+        else:
+            LOG.debug(_('Processing event %s for %s'), str(evt), pvc_name)
+
+        # Process the event
+        try:
+            pvc_v1client = self._get_pvc_v1_client()
+            v1pvc_images = pvc_v1client.images
+            pvc_v2client = self._get_pvc_v2_client()
+            v2pvc_images = pvc_v2client.images
+            pvc_image = self._get_image(pvc_id, pvc_id, pvc_name,
+                                        v1pvc_images, v2pvc_images)
+            if pvc_image is None:
+                LOG.debug(_('The PowerVC image \'%s\' with UUID %s was not '
+                            'update synchronized because it could not be '
+                            'found.'), pvc_name, pvc_id)
+                return
+
+            # Try processing the PowerVC image update
+            LOG.info(_('Performing update sync of image \'%s\' from PowerVC to'
+                       ' the local hosting OS after an image update event'),
+                     pvc_image.name)
+
+            # Update sync PowerVC image to the local hosting OS
+            local_v1client = self._get_local_v1_client()
+            v1local_images = local_v1client.images
+            local_v2client = self._get_local_v2_client()
+            v2local_images = local_v2client.images
+            local_image = self._get_local_image_from_pvc_id(pvc_id, pvc_name,
+                                                            v1local_images,
+                                                            v2local_images)
+
+            # Update the image if it is in the local hosting OS
+            if local_image is None:
+                LOG.info(_('The local hosting OS image \'%s\' with PowerVC '
+                           'UUID %s was not updated because it could not be '
+                           'found.'), pvc_image.name, pvc_id)
+                return
+
+            # If the local hosting OS image has changed, do not update it.
+            # This only happens if we lost an event. In that case we need to
+            # wait for the periodic scan to merge changes.
+            if self._local_image_updated(pvc_id, local_image):
+                LOG.info(_('The local hostingOS image \'%s\' for PowerVC UUID '
+                           '%s has changed. Changes between the local '
+                           'hostingOS and the PowerVC image will be merged '
+                           'during the next periodic scan.'), local_image.name,
+                         pvc_id)
+                return
+
+            # Perform the image update to the local hosting OS
+            image = self._update_local_image(pvc_id, pvc_image, local_image,
+                                             v1local_images, v2local_images)
+            if image is None:
+                LOG.error(_('Local hosting OS image \'%s\' for PowerVC UUID %s'
+                            ' was not updated after an image update event.'),
+                          local_image.name, pvc_id)
+                return
+
+            # NOTE: Do not reset the updated_at values until after both the
+            # local hostingOS image and PowerVC image are successfully
+            # updated.
+
+            # Since the PowerVC image was updated, update the entry in the
+            # updated_at dict so the change isn't processed during a periodic
+            # scan
+            if pvc_id in self.pvc_updated_at.keys():
+                self.pvc_updated_at[pvc_id] = pvc_image.updated_at
+
+            # Attempt to update the entry for this image in the local
+            # updated_at dict so that it is not processed during a periodic
+            # sync due to this update.
+            if pvc_id in self.local_updated_at.keys():
+                self.local_updated_at[pvc_id] = image.updated_at
+
+            # Set the new master image
+            self.master_image[pvc_id] = pvc_image
+            LOG.info(_('Completed update sync of image \'%s\' from PowerVC to '
+                       'the local hosting OS after an image update event'),
+                     pvc_image.name)
+        except Exception as e:
+            LOG.exception(_('An error occurred processing the PowerVC image '
+                            'update event: %s'), e)
+
+    def _process_pvc_image_delete_event(self, v1image_dict):
+        """
+        Process a PowerVC image delete event.
+
+        :param: v1image_dict The deleted v1 image dict
+        """
+        LOG.debug(_('PowerVC delete event received: %s'), str(v1image_dict))
+
+        def clean_up(uuid):
+            """
+            Clean up the updated_at and master_image copy for the deleted
+            image with the specified identifier. Also clean up the ids_dict.
+
+            :param: uuid The PowerVC UUID of the deleted image
+            """
+            if uuid in self.local_updated_at.keys():
+                self.local_updated_at.pop(uuid)
+            if uuid in self.master_image.keys():
+                self.master_image.pop(uuid)
+            if uuid in self.ids_dict.keys():
+                self.ids_dict.pop(uuid)
+
+            # Since the PowerVC image was deleted, remove the entry from the
+            # updated_at dict so the change isn't processed during a periodic
+            # scan. Only do this if the local hostingOS image was also
+            # deleted, or it will not be deleted during the next periodic
+            # scan.
+            if uuid in self.pvc_updated_at.keys():
+                self.pvc_updated_at.pop(uuid)
+
+        event_type = constants.IMAGE_EVENT_TYPE_DELETE
+        pvc_id = v1image_dict.get('id')
+        pvc_name = v1image_dict.get('name')
+
+        # Determine if we should ignore this event
+        evt = self._get_event(constants.POWER_VC, event_type, v1image_dict)
+        if self._get_pvc_event_to_ignore(evt) is not None:
+            LOG.debug(_('Ignoring event %s for %s'), str(evt), pvc_name)
+            return
+        else:
+            LOG.debug(_('Processing event %s for %s'), str(evt), pvc_name)
+
+        # Process the event
+        try:
+
+            # Try processing the local hosting OS image delete
+            LOG.info(_('Performing delete sync of image \'%s\' from PowerVC to'
+                       ' the local hosting OS after an image delete event'),
+                     pvc_name)
+
+            # Delete sync PowerVC image to the local hosting OS
+            local_v1client = self._get_local_v1_client()
+            v1local_images = local_v1client.images
+            local_v2client = self._get_local_v2_client()
+            v2local_images = local_v2client.images
+            local_image = self._get_local_image_from_pvc_id(pvc_id, pvc_name,
+                                                            v1local_images,
+                                                            v2local_images)
+
+            # Delete the image if it is in the local hosting OS
+            if local_image is None:
+                LOG.info(_('The local hosting OS image \'%s\' with PowerVC '
+                           'UUID %s was not deleted because it could not be '
+                           'found.'), pvc_name, pvc_id)
+
+                # Since the local hostingOS image was deleted, remove the
+                # entry from the updated_at dict so the change isn't processed
+                # during a periodic scan. Also delete the master_image copy.
+                clean_up(pvc_id)
+                return
+
+            # Perform the image delete to the local hosting OS
+            image = self._delete_local_image(pvc_id, local_image,
+                                             v1local_images)
+            if image is None:
+                LOG.error(_('Local hosting OS image \'%s\' for PowerVC UUID %s'
+                            ' could not be deleted after an image delete '
+                            'event.'), local_image.name, pvc_id)
+                return
+
+            # Add the delete to the event ignore list so we don't process it
+            # again and try to delete the local hosting OS image again. Only
+            # do this if event handling is running.
+            self._ignore_local_event(event_type, image.to_dict())
+
+            # Since the local hostingOS image was deleted, remove the entry
+            # from the updated_at dict so the change isn't processed during a
+            # periodic scan. Also delete the master_image copy
+            clean_up(pvc_id)
+            LOG.info(_('Completed delete sync of image \'%s\' from PowerVC to '
+                       'the local hosting OS after an image delete event'),
+                     pvc_name)
+        except Exception as e:
+            LOG.exception(_('An error occurred processing the PowerVC image '
+                            'delete event: %s'), e)
+
+    def _process_pvc_image_activate_event(self, v1image_dict):
+        """
+        Process a PowerVC image activate event.
+
+        :param: v1image_dict The activated v1 image dict
+        """
+        LOG.debug(_('PowerVC activate event received: %s'),
+                  str(v1image_dict))
+        pvc_id = v1image_dict.get('id')
+        pvc_name = v1image_dict.get('name')
+
+        # Process the event
+        try:
+            pvc_v1client = self._get_pvc_v1_client()
+            v1pvc_images = pvc_v1client.images
+            pvc_v2client = self._get_pvc_v2_client()
+            v2pvc_images = pvc_v2client.images
+            pvc_image = self._get_image(pvc_id, pvc_id, pvc_name,
+                                        v1pvc_images, v2pvc_images)
+
+            # Nothing to do if the image was not found
+            if pvc_image is None:
+                LOG.debug(_('The PowerVC image \'%s\' with UUID %s was not '
+                            'add synchronized because it could not be found.'),
+                          pvc_name, pvc_id)
+                return
+
+            # The first image update event after an activate will not have
+            # the config strategy if the image has one. That is written after
+            # the image is created using the glance v2 PATCH API. We do not
+            # want to process the first update event after the create if it
+            # has the same checksum value as the activate event. If that
+            # update event is not added to the ignore list, the result could
+            # be the event ping-pong effect. Image update events with and
+            # without the config strategy will go back and forth between
+            # the local hostingOS and PowerVC.
+            self._ignore_pvc_event(constants.IMAGE_EVENT_TYPE_UPDATE,
+                                   pvc_image.to_dict())
+
+            # Nothing to do if the image is not accessible
+            if not self._image_is_accessible(pvc_image):
+                LOG.debug(_('The PowerVC image \'%s\' with UUID %s was not '
+                            'add synchronized because it is not accessible.'),
+                          pvc_name, pvc_id)
+                return
+
+            # Try processing the PowerVC image add
+            LOG.info(_('Performing add sync of image \'%s\' from PowerVC to '
+                       'the local hosting OS after an image activate event'),
+                     pvc_image.name)
+
+            # Add sync PowerVC image to the local hosting OS
+            local_v1client = self._get_local_v1_client()
+            v1local_images = local_v1client.images
+            local_v2client = self._get_local_v2_client()
+            v2local_images = local_v2client.images
+
+            # No need to add the ACTIVATE event to the event ignore list
+            # since the local hosting OS does not process them. This could
+            # change in a future release.
+
+            # Check to see if this PowerVC image is already in the local
+            # hostingOS. This would be the case if an instance capture was
+            # initiated on the local hostingOS, and a queued snapshot image was
+            # created. If the image is already on the local hostingOS, simply
+            # update it.
+            props = self._get_image_properties(v1image_dict)
+            if props and consts.LOCAL_UUID_KEY in props.keys():
+
+                # Look for the LOCAL_UUID_KEY in the PowerVC image. If it is
+                # found it will be used to get the local image. This should be
+                # set when an instance is captured, and a snapshot image is
+                # created on the PowerVC.
+                local_id = props.get(consts.LOCAL_UUID_KEY)
+                if self._local_image_exists(local_id, v1local_images):
+                    local_image = self._get_image(pvc_id, local_id, pvc_name,
+                                                  v1local_images,
+                                                  v2local_images)
+                else:
+                    local_image = None
+            else:
+
+                # If the LOCAL_UUID_KEY is missing, check for a local image
+                # with the PowerVC UUID of the image event.
+                local_image = self._get_local_image_from_pvc_id(pvc_id,
+                                                                pvc_name,
+                                                                v1local_images,
+                                                                v2local_images)
+
+            # Update the image if it is in the local hosting OS, else add it
+            if local_image is not None:
+                LOG.info(_('The local hosting OS image \'%s\' with PowerVC '
+                           'UUID %s already exists so it will be updated.'),
+                         pvc_image.name, pvc_id)
+
+                # If this is a snapshot image, it may not have an entry in the
+                # ids_dict so add one here.
+                self.ids_dict[pvc_id] = local_image.id
+
+                # If the local hosting OS image has changed, do not update it.
+                # This only happens if we lost an event. In that case we need
+                # to wait for the periodic scan to merge changes. If the image
+                # is queued, it should be updated anyway since this is the
+                # local hostingOS snapshot image of an instance capture.
+                if local_image.status != 'queued' and \
+                        self._local_image_updated(pvc_id, local_image):
+                    LOG.info(_('The local hostingOS image \'%s\' for PowerVC '
+                               'UUID %s has changed. Changes between the local'
+                               ' hostingOS and the PowerVC image will be '
+                               'merged during the next periodic scan.'),
+                             local_image.name, pvc_id)
+                    return
+
+                # Perform the image update to the local hosting OS
+                image = self._update_local_image(pvc_id, pvc_image,
+                                                 local_image, v1local_images,
+                                                 v2local_images)
+                if image is None:
+                    LOG.error(_('Local hosting OS image \'%s\' for PowerVC '
+                                'UUID %s could not be updated after an image '
+                                'activate event.'), local_image.name, pvc_id)
+                    return
+
+                # NOTE: Do not reset the updated_at values until after both
+                # the local hostingOS image and PowerVC image are successfully
+                # updated.
+
+                # Update the entry for this image in the local updated_at dict
+                # so that it is not processed during a periodic sync due to
+                # this update.
+                self.local_updated_at[pvc_id] = image.updated_at
+            else:
+
+                # Perform the image add to the local hosting OS
+                local_image_owner = self._get_local_staging_owner_id()
+                if local_image_owner is None:
+                    LOG.warning(_("Invalid staging user or project."
+                                  " Skipping new image sync."))
+                    return
+                else:
+                    pvc_v2client = self._get_pvc_v2_client()
+                    image = self._add_local_image(
+                        pvc_id, pvc_image, local_image_owner,
+                        pvc_v2client.http_client.endpoint, v1local_images,
+                        v2local_images)
+                    if image is None:
+                        LOG.error(_('Local hosting OS image \'%s\' for PowerVC'
+                                    ' UUID %s could not be created after an '
+                                    'image activate event.'), pvc_image.name,
+                                  pvc_id)
+                        return
+
+                    # NOTE: Do not set the updated_at values until after both
+                    # the local hostingOS image and PowerVC image are
+                    # successfully added.
+
+                    # Add the new local image to the updated_at dict so this
+                    # add isn't processed as an add during a periodic sync
+                    self.local_updated_at[pvc_id] = image.updated_at
+
+                    # Add an entry into the ids_dict
+                    self.ids_dict[pvc_id] = image.id
+
+            # Add the new image to the updated_at dict so this add isn't
+            # processed as an add during a periodic sync
+            self.pvc_updated_at[pvc_id] = v1image_dict.get('updated_at')
+
+            # A new image was added. Add that image to the master_image dict
+            # for use in the periodic scan later. It is OK to do it here and
+            # not wait for an ACTIVATE event in the local hostingOS. It will
+            # only be used if there is an image for the UUID on both
+            # servers.
+            self.master_image[pvc_id] = pvc_image
+            LOG.info(_('Completed add sync of image \'%s\' from PowerVC to the'
+                       ' local hosting OS after an image activate event'),
+                     pvc_image.name)
+        except Exception as e:
+            LOG.exception(_('An error occurred processing the PowerVC image '
+                            'activate event: %s'), e)
+
+    def _ignore_local_event(self, event_type, v1image_dict):
+        """
+        Set to ignore a local image event.
+
+        Whenever we perform an add, update, or delete operation on an image,
+        that operation should prepare the event handlers to ignore any
+        events generated by that operation. This will prevent image events
+        from ping-ponging between sides.
+
+        :param: event_type: The type of event to ignore
+        :param: v1image_dict: The v1 image dict of the image the event will be
+                generated for.
+        """
+        if self.local_event_handler_running:
+            evt = self._get_event(constants.LOCAL, event_type, v1image_dict)
+            self.local_events_to_ignore_dict[time.time()] = evt
+            LOG.debug(_('Set to ignore event %s for %s'), str(evt),
+                      v1image_dict.get('name'))
+
+    def _ignore_pvc_event(self, event_type, v1image_dict):
+        """
+        Set to ignore a PowerVC image event.
+
+        Whenever we perform an add, update, or delete operation on an image,
+        that operation should prepare the event handlers to ignore any
+        events generated by that operation. This will prevent image events
+        from ping-ponging between sides.
+
+        :param: event_type: The type of event to ignore
+        :param: v1image_dict: The v1 image dict of the image the event will be
+                generated for.
+        """
+        if self.pvc_event_handler_running:
+            evt = self._get_event(constants.POWER_VC, event_type, v1image_dict)
+            self.pvc_events_to_ignore_dict[time.time()] = evt
+            LOG.debug(_('Set to ignore event %s for %s'), str(evt),
+                      v1image_dict.get('name'))
+
+    def _get_event(self, side, event_type, v1image_dict):
+        """
+        Get an image event for the image and event type.
+
+        :param: side The side to ignore the event on. This is either LOCAL or
+                POWER_VC.
+        :param: event_type: The type of event to ignore
+        :param: v1image_dict: The v1 image dict of the image the event will be
+                generated for.
+        :returns: The image event representation
+        """
+        checksum = self._get_image_checksum(v1image_dict)
+        return (side, event_type, v1image_dict.get('id'), checksum)
+
+    def _get_image_checksum(self, v1image_dict):
+        """
+        Calculate and return the md5 checksum of the parts of the specified
+        v1image that can be updated.
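+
+        For example (hypothetical image), for a v1 image dict whose only
+        unfiltered attribute is a name of 'rhel65' and whose only unfiltered
+        property is an os_distro of 'rhel', the result is the md5 digest of
+        'rhel65' followed by 'rhel', taking attributes and then properties
+        in sorted key order.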
+
+        :param: v1image_dict The dict of the v1 image
+        :returns: The calculated md5 checksum value for the image
+        """
+        md5 = hashlib.md5()
+
+        # Process the UPDATE_PARAMS attributes that are not filtered
+        for attr in sorted(v1image_dict.keys()):
+            if attr in v1images.UPDATE_PARAMS and \
+                    attr not in constants.IMAGE_UPDATE_PARAMS_FILTER and \
+                    attr != 'properties':
+                value = v1image_dict.get(attr)
+                if value is not None:
+                    md5.update(str(value))
+
+        # Process the properties that are not filtered
+        props = self._get_image_properties(v1image_dict, {})
+        for propkey in sorted(props.keys()):
+            if propkey not in constants.IMAGE_UPDATE_PROPERTIES_FILTER:
+                prop_value = props.get(propkey)
+                if prop_value is not None:
+                    md5.update(str(prop_value))
+
+        # Return the md5 checksum value of the image attributes and properties
+        return md5.hexdigest()
+
+    def _get_local_staging_owner_id(self):
+        """
+        If the local staging owner id has not been obtained, get it and store
+        it for use later.
+
+        An image owner can be either a tenant id or a user id depending on the
+        configuration value owner_is_tenant. If owner_is_tenant is True, get
+        the staging project id and use that as the owner. If owner_is_tenant
+        is False, get the staging user id and use that as the owner.
+
+        :returns: The local hostingOS staging owner id, or None if the staging
+                  user or project has been incorrectly configured or is
+                  unavailable.
+        """
+        if not self._staging_cache.is_valid:
+            LOG.warning(_("Invalid staging user or project"))
+            return None
+
+        user_id, project_id = \
+            self._staging_cache.get_staging_user_and_project()
+
+        if CONF.owner_is_tenant:
+            return project_id
+        else:
+            return user_id
+
+    def _local_image_exists(self, uuid, v1local_images):
+        """
+        Determine if a local image with the specified uuid exists without
+        raising an error if it does not.
+
+        :param: uuid The local image UUID
+        :param: v1local_images The image manager of the image controller to use
+        :returns: True if the local image exists, else False
+        """
+        if uuid is None:
+            return False
+        if uuid in self.ids_dict.values():
+            return True
+        params1 = self._get_limit_filter_params()
+        params2 = self._get_limit_filter_params()
+        params2 = self._get_not_ispublic_filter_params(params2)
+        v1images = itertools.chain(v1local_images.list(**params1),
+                                   v1local_images.list(**params2))
+        for image in v1images:
+            if image is not None and image.id == uuid:
+                return True
+        return False
+
+    def _get_local_image_from_pvc_id(self, pvc_id, pvc_name, v1local_images,
+                                     v2local_images):
+        """
+        Find the local hostingOS v1 image with the given PowerVC UUID.
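+
+        The cached ids_dict is consulted first; if the id is not cached, or
+        the cached image can no longer be fetched, all local images are
+        scanned for one whose PowerVC UUID property matches (see
+        _get_v1image_from_pvc_id), and any match is cached for next time.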
+
+        :param: pvc_id The PowerVC UUID
+        :param: pvc_name The image name
+        :param: v1local_images The image manager of the image controller to use
+        :param: v2local_images The image controller to use
+        """
+        if pvc_id is None:
+            return None
+        local_image = None
+        if pvc_id in self.ids_dict.keys():
+            local_id = self.ids_dict[pvc_id]
+            if local_id is not None:
+                local_image = self._get_image(pvc_id, local_id, pvc_name,
+                                              v1local_images, v2local_images)
+
+        # If the image id was not known or it was not found, look again
+        # through all local hostingOS images
+        if local_image is None:
+            params1 = self._get_limit_filter_params()
+            params2 = self._get_limit_filter_params()
+            params2 = self._get_not_ispublic_filter_params(params2)
+            local_image = \
+                self._get_v1image_from_pvc_id(pvc_id, itertools.chain(
+                    v1local_images.list(**params1),
+                    v1local_images.list(**params2)))
+
+            # Save for next time
+            if local_image is not None:
+                self.ids_dict[pvc_id] = local_image.id
+        return local_image
+
+    def _get_v1image_from_pvc_id(self, pvc_id, v1images):
+        """
+        Look through all v1 local hostingOS images for the image that has the
+        given PowerVC image UUID.
+
+        :param: pvc_id The PowerVC image id
+        :param: v1images The image manager used to obtain images from the v1
+                glance client
+        :returns: The image for the specified PowerVC id or None if not found.
+        """
+        for image in v1images:
+            if image is not None:
+                props = image.properties
+                if props and consts.POWERVC_UUID_KEY in props.keys():
+                    uuid = props.get(consts.POWERVC_UUID_KEY)
+                    if uuid == pvc_id:
+                        return image
+        return None
+
+    def _get_local_v1_client(self):
+        """
+        Get a local v1 glance client if not already created.
+
+        :returns: The glance v1 client for the local hostingOS
+        """
+        if self.local_v1client is None:
+            self.local_v1client = clients.LOCAL.get_client(
+                str(consts.SERVICE_TYPES.image), 'v1')
+        return self.local_v1client
+
+    def _get_local_v2_client(self):
+        """
+        Get a local v2 glance client if not already created.
+
+        :returns: The glance v2 client for the local hostingOS
+        """
+        if self.local_v2client is None:
+            self.local_v2client = clients.LOCAL.get_client(
+                str(consts.SERVICE_TYPES.image), 'v2')
+        return self.local_v2client
+
+    def _get_pvc_v1_client(self):
+        """
+        Get a PowerVC v1 glance client if not already created.
+
+        :returns: The glance v1 client for PowerVC
+        """
+        if self.pvc_v1client is None:
+            self.pvc_v1client = clients.POWERVC.get_client(
+                str(consts.SERVICE_TYPES.image), 'v1')
+        return self.pvc_v1client
+
+    def _get_pvc_v2_client(self):
+        """
+        Get a PowerVC v2 glance client if not already created.
+
+        :returns: The glance v2 client for PowerVC
+        """
+        if self.pvc_v2client is None:
+            self.pvc_v2client = clients.POWERVC.get_client(
+                str(consts.SERVICE_TYPES.image), 'v2')
+        return self.pvc_v2client
+
+    def _get_limit_filter_params(self, params=None):
+        """
+        Build up the image manager list filter params to set the glance image
+        limit if it is specified. This is used for the v1 API to work around
+        a bug that glance has with DB2. This may not be necessary on all
+        versions of OpenStack.
+
+        :param: params The existing parameters if any. The default is None
+        :returns: The image manager list filter params dict for setting
+                  the glance limit argument
+        """
+        if params is None:
+            params = {}
+            filters = {}
+        else:
+            filters = params.get('filters', {})
+        filters['limit'] = CONF['powervc'].image_limit
+        params['filters'] = filters
+        return params
+
+    def _get_not_ispublic_filter_params(self, params=None):
+        """
+        Build up the image manager list filter params to set
+        is_public=False. This is used for the v1 API to get the non-public
+        images. This may not be necessary on all versions of OpenStack.
+
+        :param: params The existing parameters if any. The default is None
+        :returns: The image manager list filter params dict for setting
+                  is_public=False
+        """
+        if params is None:
+            params = {}
+            filters = {}
+        else:
+            filters = params.get('filters', {})
+        filters['is_public'] = False
+        params['filters'] = filters
+        return params
+
+    def _check_scg_at_startup(self):
+        """
+        If the Storage Connectivity Groups are not specified, terminate the
+        ImageManager service here. If the Storage Connectivity Group is not
+        found at startup, keep running. It may appear later.
+        """
+        scg_not_found = False
+        try:
+
+            # Cache the scg if it is specified, and found on PowerVC
+            self.our_scg_list = utils.get_utils().get_our_scg_list()
+        except StorageConnectivityGroupNotFound:
+
+            # If we get this exception, our_scg will be None, but we know
+            # the scg was specified because it was not found on PowerVC.
+            # That is acceptable.
+            scg_not_found = True
+
+        # If our_scg is None and we didn't get a
+        # StorageConnectivityGroupNotFound exception, then the SCG is not
+        # specified so the ImageManager service must terminate.
+        if not self.our_scg_list and not scg_not_found:
+            LOG.error(_('Glance-powervc service terminated. No Storage '
+                        'Connectivity Group specified.'))
+            sys.exit(1)
+
+    def _get_our_scg_list(self):
+        """
+        If an SCG name or id is specified in our configuration, see if the scg
+        exists. If it does not exist an exception is raised. If it exists, the
+        scg for the name or id specified is returned. If no SCG name or id is
+        specified, None is returned for the scg.
+
+        :returns: The StorageConnectivityGroup object if found, else None. If
+                  a specified scg is not found, a
+                  StorageConnectivityGroupNotFound exception is raised.
+        """
+        our_scg_list = utils.get_utils().get_our_scg_list()
+        if our_scg_list:
+            LOG.debug(_('Only images found in the PowerVC Storage Connectivity'
+                        ' Group \'%s\' will be used.'),
+                      str([scg.display_name for scg in our_scg_list]))
+        else:
+            LOG.debug(_('No Storage Connectivity Group is specified in the '
+                        'configuration settings, so all PowerVC images will '
+                        'be used.'))
+        return our_scg_list
+
+    def _image_is_accessible(self, image):
+        """
+        Determine whether the specified image is accessible. To be accessible,
+        the image must belong to our storage connectivity group.
+
+        If our_scg was found, the image must belong to that scg. If the scg
+        was not specified, then the image is considered accessible.
+
+        If an error occurs while getting the SCGs for an image, an exception
+        is raised. The caller should expect that an exception may occur.
+
+        :param: image The v1 image
+        :returns: True if the specified image is accessible
+        """
+        if image is None:
+            return False
+        if self.our_scg_list is not None:
+            our_scg_id_list = [our_scg.id for our_scg in self.our_scg_list]
+
+            # Get all of the SCGs for the image. If an error occurs, an
+            # exception will be raised, and the current operation will fail.
+            # The caller should catch the exception and continue.
+            scgs = utils.get_utils().get_image_scgs(image.id)
+            LOG.debug(_('Image \'%s\': Storage Connectivity Groups: %s'),
+                      image.name, str(scgs))
+            for scg in scgs:
+                if scg.id in our_scg_id_list:
+                    return True
+            LOG.debug(_('Image \'%s\' is not accessible on Storage '
+                        'Connectivity Group \'%s\''), image.name,
+                      str([our_scg.display_name
+                           for our_scg in self.our_scg_list]))
+            return False
+        else:
+            return True
+
+    def _get_local_event_to_ignore(self, evt):
+        """
+        Get the specified local event tuple to ignore from the
+        local_events_to_ignore_dict. If the event tuple is found in the dict,
+        remove it, and return it to the caller, else return None.
+
+        :param: evt The event tuple to get from the
+                local_events_to_ignore_dict
+        :returns: The event tuple if found, else None
+        """
+        for evt_time in sorted(self.local_events_to_ignore_dict.keys()):
+            if evt == self.local_events_to_ignore_dict[evt_time]:
+                return self.local_events_to_ignore_dict.pop(evt_time)
+
+    def _get_pvc_event_to_ignore(self, evt):
+        """
+        Get the specified PowerVC event tuple to ignore from the
+        pvc_events_to_ignore_dict. If the event tuple is found in the dict,
+        remove it, and return it to the caller, else return None.
+
+        :param: evt The event tuple to get from the pvc_events_to_ignore_dict
+        :returns: The event tuple if found, else None
+        """
+        for evt_time in sorted(self.pvc_events_to_ignore_dict.keys()):
+            if evt == self.pvc_events_to_ignore_dict[evt_time]:
+                return self.pvc_events_to_ignore_dict.pop(evt_time)
+
+    def _purge_expired_local_events_to_ignore(self):
+        """
+        Remove expired local hostingOS event tuples from the
+        local_events_to_ignore_dict. The event tuple expiration time is
+        defined by the constant EVENT_TUPLE_EXPIRATION_PERIOD_IN_HOURS.
+        """
+        cur_time = time.time()
+        for evt_time in sorted(self.local_events_to_ignore_dict.keys()):
+            if cur_time - evt_time >= (
+                    constants.EVENT_TUPLE_EXPIRATION_PERIOD_IN_HOURS *
+                    constants.SECONDS_IN_HOUR):
+                self.local_events_to_ignore_dict.pop(evt_time)
+            else:
+                break
+
+    def _purge_expired_pvc_events_to_ignore(self):
+        """
+        Remove expired PowerVC event tuples from the pvc_events_to_ignore_dict.
+        The event tuple expiration time is defined by the constant
+        EVENT_TUPLE_EXPIRATION_PERIOD_IN_HOURS.
+        """
+        cur_time = time.time()
+        for evt_time in sorted(self.pvc_events_to_ignore_dict.keys()):
+            if cur_time - evt_time >= (
+                    constants.EVENT_TUPLE_EXPIRATION_PERIOD_IN_HOURS *
+                    constants.SECONDS_IN_HOUR):
+                self.pvc_events_to_ignore_dict.pop(evt_time)
+            else:
+                break
+
+    def _clear_sync_summary_counters(self):
+        """
+        Clear the counters used for the sync summary display
+        """
+        self.local_created_count = 0
+        self.local_updated_count = 0
+        self.local_deleted_count = 0
+        self.pvc_created_count = 0
+        self.pvc_updated_count = 0
+        self.pvc_deleted_count = 0
+
+    def _unescape(self, props):
+        """
+        Unescape any HTML/XML entities in certain image properties.
+
+        :param: props The image properties
+        """
+        if props is not None:
+            for key in props.keys():
+                if key in constants.IMAGE_UNESCAPE_PROPERTIES:
+                    props[key] = HTMLParser.HTMLParser().unescape(props[key])
+
+    def _get_image_properties(self, v1image_dict, default_props=None):
+        """
+        Get the image properties from a v1 image dict. The properties may
+        contain HTML/XML escaped entities so unescape any we suspect could
+        be there before returning. Any properties with null values are also
+        filtered. There is no need to process/sync any properties that have
+        a null value. Having a null value should mean the same thing as the
+        property not existing.
+
+        This method should always be called to get the properties from a v1
+        image before modifying them or using them to perform an image update.
+
+        :param: v1image_dict A v1 image dict
+        :param: default_props The default value to use for the properties if
+                they are not found in the v1 image dict. The
+                default is None
+        :returns: The properties from the v1 image with certain properties
+                  unescaped if found. Returns None if no properties are found
+        """
+        filtered_props = None
+        if v1image_dict is not None:
+            props = v1image_dict.get('properties', default_props)
+            if props is not None:
+                filtered_props = {}
+                for prop in props.keys():
+                    if props[prop] is not None:
+                        filtered_props[prop] = props[prop]
+                self._unescape(filtered_props)
+        return filtered_props
+
+
+class ImageSyncController():
+    """
+    The ImageSyncController starts the next image startup or periodic sync
+    when appropriate. Startup sync will run first. It will run every minute
+    by default until it completes successfully. After that, it will run the
+    periodic sync every five minutes by default. If the periodic sync does
+    not complete successfully, it is run every minute by default until it
+    completes successfully, and then it resumes running every five minutes.
+    This allows retries due to communications errors to occur more
+    frequently.
+
+    The elapsed time to wait from the end of one sync to the start of another
+    is determined by whether the previous sync operation passed or failed. If
+    it failed, the time to wait is specified by retry_interval_in_seconds. If
+    the previous sync operation passed, the time to wait is specified by
+    image_periodic_sync_interval_in_seconds. Those values default to 60
+    seconds and 300 seconds, respectively, but can be set by the user in the
+    powervc.conf file.
+    """
+
+    def __init__(self, image_manager):
+        self.image_manager = image_manager
+        self.started = False
+        self.sync_running = False
+        self.startup_sync_completed = False
+        self.startup_sync_result = constants.SYNC_FAILED
+        self.periodic_sync_result = constants.SYNC_FAILED
+        self.elapsed_time_in_seconds = 0
+        self.next_sync_time_in_seconds = 0
+        self.periodic_sync_interval_in_seconds = \
+            CONF['powervc'].image_periodic_sync_interval_in_seconds
+        self.retry_interval_in_seconds = \
+            CONF['powervc'].image_sync_retry_interval_time_in_seconds
+        self.sync_check_interval_in_seconds = \
+            constants.IMAGE_SYNC_CHECK_INTERVAL_TIME_IN_SECONDS
+
+    def start(self):
+        """
+        Start the ImageSyncController. This will start the Startup Sync
+        operation, and then start a timer which will call an internal method
+        used to determine when the next sync operation should begin.
+
+        Startup sync will repeat with a delay in between until it completes
+        successfully. After that, periodic sync will run at the configured
+        interval. If a periodic sync fails due to a communications error, it
+        will repeat with a delay in between runs until it completes
+        successfully.
+        """
+        if not self.started:
+            self.started = True
+
+            # Start by doing a startup sync
+            self.sync_running = True
+            self.image_manager.sync_images()
+
+            # Start a threadgroup timer here to wake up the
+            # ImageSyncController every second by default, and call
+            # _sync_images(). That method will determine when the next sync
+            # should be run.
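+
+            # (Illustrative timeline under the default settings: a sync
+            # that fails is retried about 60 seconds after it ends, and
+            # once a sync passes, the next periodic sync starts about 300
+            # seconds after the previous one ends.)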
+            self.image_manager.tg.add_timer(
+                self.sync_check_interval_in_seconds, self._sync_images)
+
+    def set_startup_sync_result(self, result):
+        """
+        This should be called when startup sync ends to set its result.
+
+        :param: result The startup sync result code of SYNC_PASSED or
+                       SYNC_FAILED
+        """
+        self.startup_sync_result = result
+        if self.startup_sync_result == constants.SYNC_PASSED:
+            self.startup_sync_completed = True
+            self.next_sync_time_in_seconds = \
+                self.periodic_sync_interval_in_seconds
+        else:
+            self.next_sync_time_in_seconds = self.retry_interval_in_seconds
+        self.sync_running = False
+
+    def is_startup_sync_done(self):
+        """
+        Determine if startup sync has completed successfully.
+
+        :returns: True if startup sync has completed successfully, else False.
+        """
+        return self.startup_sync_completed
+
+    def set_periodic_sync_result(self, result):
+        """
+        This should be called when periodic sync ends to set its result.
+
+        :param: result The periodic sync result code of SYNC_PASSED or
+                       SYNC_FAILED
+        """
+        self.periodic_sync_result = result
+        if self.periodic_sync_result == constants.SYNC_PASSED:
+            self.next_sync_time_in_seconds = \
+                self.periodic_sync_interval_in_seconds
+        else:
+            self.next_sync_time_in_seconds = self.retry_interval_in_seconds
+        self.sync_running = False
+
+    def _sync_images(self):
+        """
+        This is called by the timer every second by default. If a sync
+        operation is currently running, it will do nothing and return. If a
+        sync operation is not running, it will determine the elapsed time
+        since the end of the last sync operation. If that elapsed time has
+        reached the predetermined amount, a sync operation will be initiated.
+        """
+        # If a sync operation is running, do nothing
+        if self.sync_running:
+            return
+
+        # If the time is right, call image_manager.sync_images()
+        self.elapsed_time_in_seconds += self.sync_check_interval_in_seconds
+        if self.elapsed_time_in_seconds >= self.next_sync_time_in_seconds:
+            self.elapsed_time_in_seconds = 0
+            self.sync_running = True
+            self.image_manager.sync_images()
diff --git a/glance-powervc/run_tests.sh b/glance-powervc/run_tests.sh
new file mode 100755
index 0000000..2f69516
--- /dev/null
+++ b/glance-powervc/run_tests.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+set -eu
+
+function usage {
+  echo "Usage: $0 [OPTION]..."
+  echo "Run PowerVC Glance test suite(s)"
+  echo ""
+  echo "  -V, --virtual-env     Always use virtualenv. Install automatically if not present"
+  echo "  -N, --no-virtual-env  Don't use virtualenv. Run tests in local environment"
+  echo "  -r, --recreate-db     Recreate the test database (deprecated, as this is now the default)."
+  echo "  -n, --no-recreate-db  Don't recreate the test database."
+  echo "  -x, --stop            Stop running tests after the first error or failure."
+  echo "  -f, --force           Force a clean re-build of the virtual environment. Useful when dependencies have been added."
+ echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run flake8" + echo " -8, --8 Just run flake8, don't show PEP8 text for each error" + echo " -P, --no-pep8 Don't run flake8" + echo " -c, --coverage Generate coverage report" + echo " -h, --help Print this usage message" + echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" + echo " --standard-threads Don't do the eventlet threading monkeypatch." + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_flake8=1;; + -8|--8) short_flake8=1;; + -P|--no-pep8) no_flake8=1;; + -c|--coverage) coverage=1;; + --standard-threads) + export STANDARD_THREADS=1 + ;; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseargs= +noseopts= +wrapper="" +just_flake8=0 +short_flake8=0 +no_flake8=0 +coverage=0 +recreate_db=1 +update=0 + +for arg in "$@"; do + process_option $arg +done + +# If enabled, tell nose to collect coverage data +if [ $coverage -eq 1 ]; then + noseopts="$noseopts --with-coverage --cover-package=glance-powervc" +fi + +function run_tests { + # Just run the test suites in current environment + ${wrapper} $NOSETESTS + # If we get some short import error right away, print the error log directly + RESULT=$? + if [ "$RESULT" -ne "0" ]; + then + ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` + if [ "$ERRSIZE" -lt "40" ]; + then + cat run_tests.log + fi + fi + return $RESULT +} + +function run_flake8 { + FLAGS=--show-pep8 + if [ $# -gt 0 ] && [ 'short' == ''$1 ] + then + FLAGS='' + fi + + + echo "Running flake8 ..." + # Just run flake8 in current environment + #echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + RESULT=${PIPESTATUS[0]} + return $RESULT +} + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + RESULT=$? + echo "RESULT $RESULT" + exit $RESULT +fi + +if [ $short_flake8 -eq 1 ]; then + run_flake8 short + RESULT=$? 
+  exit $RESULT
+fi
+
+run_tests
+RESULT=$?
+
+# NOTE(sirp): we only want to run flake8 when we're running the full-test
+# suite, not when we're running tests individually. To handle this, we need to
+# distinguish between options (noseopts), which begin with a '-', and arguments
+# (noseargs).
+if [ -z "$noseargs" ]; then
+  if [ $no_flake8 -eq 0 ]; then
+    run_flake8
+    TMP_RESULT=$?
+    RESULT=$(($TMP_RESULT + $RESULT))
+  fi
+fi
+
+if [ $coverage -eq 1 ]; then
+  echo "Generating coverage report in covhtml/"
+  ${wrapper} coverage html -d covhtml -i
+fi
+
+exit $RESULT
\ No newline at end of file
diff --git a/glance-powervc/test/__init__.py b/glance-powervc/test/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/glance-powervc/test/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/glance-powervc/test/test_glance_client.py.fails b/glance-powervc/test/test_glance_client.py.fails
new file mode 100644
index 0000000..c835d80
--- /dev/null
+++ b/glance-powervc/test/test_glance_client.py.fails
@@ -0,0 +1,55 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+Simple Glance client test. This should be more like real unit tests
+someday. For now it's just some random tests.
+"""
+
+from keystoneclient.v3 import client as v3client
+from keystoneclient.v2_0 import client
+from glanceclient.v2 import client as gc
+
+keystone_client = client.Client(username='root', password='passw0rd',
+                                tenant_name='ibm-default', insecure=True,
+                                auth_url='https://9.5.125.55/powervc/open'
+                                'stack/identity/v2.0')
+
+keystonev3_client = v3client.Client(username='root', password='passw0rd',
+                                    project_name='ibm-default', insecure=True,
+                                    auth_url='https://9.5.125.55/powervc/'
+                                    'openstack/identity/v3')
+
+token = keystone_client.auth_token
+tokenv3 = keystonev3_client.auth_token  # Keystone v3 test, but not used
+print token
+
+glanceUrl = None
+if keystone_client.auth_ref:
+    if keystone_client.auth_ref.service_catalog:
+        glanceUrl_ret = keystone_client.\
+            auth_ref.service_catalog.\
+            get_urls(service_type='image', endpoint_type='publicURL')
+        if glanceUrl_ret:
+            glanceUrl = glanceUrl_ret[0]
+
+print glanceUrl
+
+glance_client = gc.Client(endpoint=glanceUrl, token=token, insecure=True)
+###print glance_client.images.list()
+
+print '=' * 8, 'PowerVC Images using the v2 Glance API', '=' * 8
+v2pvc_images = glance_client.images
+for v2image in v2pvc_images.list():
+    print '-' * 40
+    print 'Image ', v2image.get('name')
+    for v2imagekey in v2image.keys():
+        print v2imagekey, '=', v2image.get(v2imagekey)
+    print '-' * 40
diff --git a/glance-powervc/test/test_messaging.py.fails b/glance-powervc/test/test_messaging.py.fails
new file mode 100644
index 0000000..0340f74
--- /dev/null
+++ b/glance-powervc/test/test_messaging.py.fails
@@ -0,0 +1,254 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" + +import sys +import itertools +import time +import traceback + +from oslo.config import cfg +from glance.openstack.common import gettextutils +gettextutils.install('glance') +import glance.openstack.common.log as logging +from glance.common import config as logging_config +from glanceclient.v1 import images as v1images + +from powervc.common import config +from powervc.glance.common import constants + +# PowerVC Driver ImageManager specific configuration +image_opts = [ + + # The image period sync interval in seconds + cfg.IntOpt('image_periodic_sync_interval_in_seconds', + default=constants.IMAGE_PERIODIC_SYNC_INTERVAL_IN_SECONDS) +] + +CONF = config.CONF +CONF.register_opts(image_opts, 'powervc') + +LOG = logging.getLogger(__name__) + +config.parse_power_config(sys.argv, 'glance') + +from powervc.common import messaging +from powervc.common import constants as consts +import powervc.common.client.factory as clients + + +def test_image_events(wait_forever=True): + + def local_reconnect(): + LOG.debug(_('Re-established connection to local hosting OS ' + 'Qpid broker')) + + local_conn = messaging.LocalConnection(log=logging, + reconnect_handler=local_reconnect) +# local_conn = messaging.QpidConnection('localhost:5672', \ +# 'admin', 'ICA1NTQxNzI5ODgK') +# conn = messaging.QpidConnection('localhost:5672', 'admin', 'openstack1') + local_listener = local_conn.create_listener('glance', 'notifications.info') + local_listener.register_handler('image.*', + handle_local_image_notifications) + local_conn.start() + +# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \ +# 'anonymous', '') + + def pvc_reconnect(): + LOG.debug(_('Re-established connection to PowerVC Qpid broker')) + + pvc_conn = messaging.PowerVCConnection(log=logging, + reconnect_handler=pvc_reconnect) + +# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \ +# 'root', 'passw0rd') + pvc_listener = pvc_conn.create_listener('glance', 'notifications.info') + pvc_listener.register_handler('image.*', + handle_pvc_image_notifications) + pvc_conn.start() + + print 'Monitoring hosting OS and PowerVC for Image notifications...' + while wait_forever: + time.sleep(5) + + +def test_pvc_image_events(wait_forever=True): + +# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \ +# 'anonymous', '') + + def pvc_reconnect(): + LOG.debug(_('Re-established connection to PowerVC Qpid broker')) + + pvc_conn = messaging.PowerVCConnection(log=logging, + reconnect_handler=pvc_reconnect) + +# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \ +# 'root', 'passw0rd') + pvc_listener = pvc_conn.create_listener('glance', 'notifications.info') + pvc_listener.register_handler('image.*', + handle_pvc_image_notifications) + pvc_conn.start() + + print 'Monitoring PowerVC for Image notifications...' 
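+    # Keep the process alive; the Qpid listeners deliver image notifications
+    # asynchronously, so this loop just prevents the script from exiting.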
+    while wait_forever:
+        time.sleep(5)
+
+
+def handle_local_image_notifications(context, message):
+    print '=' * 80
+    print 'LOCAL:', str(context)
+    print 'LOCAL:', str(message)
+    image = message.get('payload')  # should be the v1 image as a dict
+    dump_image(image)
+    print '=' * 80
+
+
+def handle_pvc_image_notifications(context, message):
+    print '=' * 80
+    print 'PVC:', str(context)
+    print 'PVC:', str(message)
+    image = message.get('payload')  # should be the v1 image as a dict
+    dump_image(image)
+    print '=' * 80
+
+
+def dump_image(image_dict):
+    for v1imagekey in image_dict.keys():
+        print v1imagekey, '=', image_dict.get(v1imagekey)
+    props = image_dict.get('properties')
+    if props:
+        for v1imageprop in props.keys():
+            print 'property: ', v1imageprop, '=',\
+                props.get(v1imageprop)
+
+
+def test_update_local_image(image_id):
+    params = {}
+    filters = {}
+    filters['is_public'] = False
+    params['filters'] = filters
+    local_v1client = \
+        clients.LOCAL.get_client(str(consts.SERVICE_TYPES.image), 'v1')
+    v1local_images = local_v1client.images
+    image = \
+        get_v1image_from_id(image_id, itertools.chain(
+            v1local_images.list(), v1local_images.list(**params)))
+    if image:
+        field_dict, patch_dict = get_v1image_update_fields(image)
+        if 'is_public' in field_dict.keys():
+            public = field_dict['is_public']
+            field_dict['is_public'] = not public
+        v1local_images.update(image, **field_dict)
+        if len(patch_dict) > 0:
+            local_v2client = \
+                clients.LOCAL.get_client(str(consts.SERVICE_TYPES.image), 'v2')
+            v2local_images = local_v2client.images
+            v2local_images.update(image.id, **patch_dict)
+        print 'Image', image.name, 'updated.'
+    else:
+        print 'Image', image_id, 'not found!'
+
+
+def get_v1image_update_fields(image):
+    """
+    Get the properties for an image update
+
+    :param: image The image to pull properties from to be used
+                  for an image update operation.
+    :returns: A tuple containing the dict of properties to use for an
+              image update operation, and the dict of properties that
+              are too large to be processed by the v1 Image APIs. Those
+              properties should be updated using the v2 Image PATCH API.
+    """
+    field_dict = {}
+    patch_dict = {}
+    props = image.properties
+    if props:
+        patch_dict = remove_large_properties(props)
+        image.properties = props
+    image_dict = image.to_dict()
+    for imagekey in image_dict.keys():
+        if imagekey in v1images.UPDATE_PARAMS and \
+                imagekey not in constants.IMAGE_UPDATE_PARAMS_FILTER:
+            field_value = image_dict.get(imagekey)
+            if field_value is not None:
+                if len(str(field_value)) < constants.MAX_HEADER_LEN_V1:
+                    field_dict[imagekey] = field_value
+                else:
+                    patch_dict[imagekey] = field_value
+    return field_dict, patch_dict
+
+
+def remove_large_properties(properties):
+    """
+    Remove any properties that are too large to be processed by
+    the v1 APIs and return them in a dict to the caller. The properties
+    passed in are also modified.
+
+    :param: properties The properties dict to remove large properties
+                       from. Large properties are removed from the
+                       original properties dict.
+    :returns: A dict containing properties that are too large to
+              be processed by the v1 Image APIs
+    """
+    too_large_properties = {}
+    if properties:
+        for propkey in properties.keys():
+            propvalue = properties.get(propkey)
+            if propvalue and (len(str(propvalue)) >=
+                              constants.MAX_HEADER_LEN_V1):
+                too_large_properties[propkey] = properties.pop(propkey)
+    return too_large_properties
+
+
+def test_delete_local_image(image_id):
+    pass
+
+
+def get_v1image_from_id(image_id, v1images):
+    """
+    Get a v1 Image from an image id.
+
+    :param: image_id The image id
+    :param: v1images An iterable of v1 images, e.g. obtained from the
+                     v1 glance client
+    :returns: The image for the specified id or None if not found.
+    """
+    for image in v1images:
+        if image and image.id == image_id:
+            return image
+    return None
+
+"""
+Main test entry point
+"""
+if __name__ == '__main__':
+    try:
+        # turn off debug logging
+#        CONF.debug = False
+        logging_config.setup_logging()
+        logging.setup('powervc')
+
+        # run the image event tests
+#        test_image_events(wait_forever=True)
+        test_pvc_image_events(wait_forever=True)
+#        image_id = '3060d198-c951-4693-9b1d-6314ac0539bf'
+#        test_update_local_image(image_id)
+#        test_delete_local_image(image_id)
+
+        print 'Tests done!'
+    except Exception:
+        traceback.print_exc()
+        raise
diff --git a/neutron-powervc/.project b/neutron-powervc/.project
new file mode 100644
index 0000000..64fd475
--- /dev/null
+++ b/neutron-powervc/.project
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>neutron-powervc</name>
+	<comment></comment>
+	<projects>
+		<project>common-powervc</project>
+		<project>neutron</project>
+		<project>neutron-client</project>
+		<project>oslo</project>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.python.pydev.PyDevBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.python.pydev.pythonNature</nature>
+	</natures>
+</projectDescription>
diff --git a/neutron-powervc/.pydevproject b/neutron-powervc/.pydevproject
new file mode 100644
index 0000000..1e2badb
--- /dev/null
+++ b/neutron-powervc/.pydevproject
@@ -0,0 +1,10 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?><pydev_project>
+
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/neutron-powervc</path>
+</pydev_pathproperty>
+
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+</pydev_project>
diff --git a/neutron-powervc/bin/neutron-powervc b/neutron-powervc/bin/neutron-powervc
new file mode 100755
index 0000000..87fe2db
--- /dev/null
+++ b/neutron-powervc/bin/neutron-powervc
@@ -0,0 +1,120 @@
+#!/usr/bin/env python
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import sys
+import traceback
+import os
+
+if ('eventlet' in sys.modules and
+        os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
+    raise ImportError('eventlet imported before neutron agent '
+                      '(env var set to %s)'
+                      % os.environ.get('EVENTLET_NO_GREENDNS'))
+
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+import eventlet
+eventlet.patcher.monkey_patch(os=False, thread=False)
+
+from oslo.config import cfg
+
+from neutron import context
+from neutron.common import config as logging_config
+from neutron.openstack.common import log as logging
+
+from powervc.common import config
+from powervc.neutron.api.client_rpc import RpcClient
+
+LOG = logging.getLogger(__name__)
+VALID_COMMANDS = ['net-list', 'subnet-list', 'port-list',
+                  'net-show', 'subnet-show', 'port-show',
+                  'get-local-net', 'get-pvc-net']
+
+
+def usage():
+    print 'Usage: neutron-powervc'
+    print ''
+    print 'Commands:'
+    print '    net-list         List networks mapped in the DB'
+    print '    net-show         Retrieve a specific network mapping'
+    print ''
+    print '    subnet-list      List subnets mapped in the DB'
+    print '    subnet-show      Retrieve a specific subnet mapping'
+    print ''
+    print '    port-list        List ports mapped in the DB'
+    print '    port-show        Retrieve a specific port mapping'
+    print ''
+    print '    get-local-net    Retrieve local id for PVC network id'
+    print '    get-pvc-net      Retrieve PVC id for local network id'
+    print ''
+    sys.exit(0)
+
+
+def main():
+    try:
+        if len(sys.argv) == 1:
+            usage()
+        cmd = sys.argv[1]
+        opt = None
+        if len(sys.argv) == 3:
+            opt = sys.argv[2]
+        if cmd not in VALID_COMMANDS:
+            print 'Invalid request:', cmd
+            usage()
+
+        argv = [sys.argv[0]]
+        config.parse_power_config(argv, 'powervc-neutron')
+        # logging_config.setup_logging(cfg.CONF)
+
+        LOG.debug(_('Create RPC interface'))
+        ctx = context.get_admin_context_without_session()
+        rpc = RpcClient(ctx)
+
+        if cmd == 'net-list':
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_networks()
+        elif cmd == 'subnet-list':
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_subnets()
+        elif cmd == 'port-list':
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_ports()
+        elif cmd == 'net-show':
+            if not opt:
+                usage()
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_network(opt)
+        elif cmd == 'subnet-show':
+            if not opt:
+                usage()
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_subnet(opt)
+        elif cmd == 'port-show':
+            if not opt:
+                usage()
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_port(opt)
+        elif cmd == 'get-local-net':
+            if not opt:
+                usage()
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_local_network_uuid(opt)
+        elif cmd == 'get-pvc-net':
+            if not opt:
+                usage()
+            LOG.debug(_('Calling RPC method'))
+            rpc.get_pvc_network_uuid(opt)
+        else:
+            usage()
+
+    except Exception:
+        traceback.print_exc()
+        raise
+
+
+if __name__ == "__main__":
+    main()
diff --git a/neutron-powervc/bin/neutron-powervc-agent b/neutron-powervc/bin/neutron-powervc-agent
new file mode 100755
index 0000000..518e2f1
--- /dev/null
+++ b/neutron-powervc/bin/neutron-powervc-agent
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" + +import os +import sys + +# NOTE(mikal): All of this is because if dnspython is present in your +# environment then eventlet monkeypatches socket.getaddrinfo() with an +# implementation which doesn't work for IPv6. What we're checking here is +# that the magic environment variable was set when the import happened. +if ('eventlet' in sys.modules and + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + raise ImportError('eventlet imported before neutron agent ' + '(env var set to %s)' + % os.environ.get('EVENTLET_NO_GREENDNS')) + +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' + +import eventlet +eventlet.patcher.monkey_patch(os=False, thread=False) + +# If ../powervc/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python. +POSSIBLE_TOPDIR = os.path.normpath(os.path.join( + os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) + +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +# TODO RYKAL +# This should go in the base __init__ folder I think +from neutron.openstack.common import gettextutils +gettextutils.install('neutron') + +from neutron.common import config as logging_config +from powervc.common import config + +config.parse_power_config(sys.argv, 'powervc-neutron') +logging_config.setup_logging(config.CONF) + +from powervc.neutron.agent.neutron_powervc_agent import main + +main() diff --git a/neutron-powervc/etc/powervc-neutron.conf b/neutron-powervc/etc/powervc-neutron.conf new file mode 100644 index 0000000..36f6a8a --- /dev/null +++ b/neutron-powervc/etc/powervc-neutron.conf @@ -0,0 +1,58 @@ +[DEFAULT] +debug = False +verbose = True + +# The messaging module to use, defaults to kombu. +# rpc_backend = neutron.openstack.common.rpc.impl_kombu +# AMQP password +# rabbit_password = openstack1 +# AMQP host +# rabbit_host = localhost +# Size of RPC thread pool +# rpc_thread_pool_size = 64 +# Size of RPC connection pool +# rpc_conn_pool_size = 30 +# Seconds to wait for a response from call or multicall +# rpc_response_timeout = 60 +# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq. +# rpc_cast_timeout = 30 +# Modules of exceptions that are permitted to be recreated +# upon receiving exception data from an rpc call. +# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception +# AMQP exchange to connect to if using RabbitMQ or QPID +control_exchange = nova + +# QPID +# rpc_backend=neutron.openstack.common.rpc.impl_qpid +# Qpid broker hostname +# qpid_hostname = localhost +# Qpid broker port +# qpid_port = 5672 +# Username for qpid connection +# qpid_username = qpidclient +# Password for qpid connection +# qpid_password = openstack1 +# Space separated list of SASL mechanisms to use for auth +# qpid_sasl_mechanisms = '' +# Seconds between connection keepalive heartbeats +# qpid_heartbeat = 60 +# Transport to use, either 'tcp' or 'ssl' +# qpid_protocol = tcp +# Disable Nagle algorithm +# qpid_tcp_nodelay = True + +[AGENT] +# Agent's polling interval in seconds +polling_interval = 60 + +# (ListOpt) Comma-separated list of PowerVC network names to be mapped to +# local OS as networks. If the network does not exist in the local OS, it +# will be automatically created. PowerVC networks listed here do not have +# to exist prior to starting up the powervc_neutron_agent. 
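+# For example (hypothetical names): map_powervc_networks = net1,net2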
Wildcard +# characters ('*') can be specified. By default, all PowerVC networks will +# be available in the local OS. +# map_powervc_networks = * + +[DATABASE] +# Database where agent will store mapping data +connection = mysql://root:openstack1@localhost/powervc?charset=utf8 diff --git a/neutron-powervc/init/openstack-neutron-powervc b/neutron-powervc/init/openstack-neutron-powervc new file mode 100644 index 0000000..67f9625 --- /dev/null +++ b/neutron-powervc/init/openstack-neutron-powervc @@ -0,0 +1,104 @@ +#!/bin/sh +# +# openstack-neutron-powervc OpenStack PowerVC Neutron Driver +# +# chkconfig: - 98 02 +# description: Provides PowerVC manage-to support. + +### BEGIN INIT INFO +# Provides: +# Required-Start: $remote_fs $network $syslog +# Required-Stop: $remote_fs $syslog +# Default-Stop: 0 1 6 +# Short-Description: OpenStack PowerVC Neutron Driver +# Description: +### END INIT INFO + +. /etc/rc.d/init.d/functions + +suffix=powervc +prog=openstack-neutron-powervc +exec="/opt/ibm/openstack/powervc-driver/bin/neutron-$suffix-agent" +config="/etc/$suffix/$suffix-neutron.conf" +powervcconf="/etc/$suffix/$suffix.conf" +pidfile="/var/run/$suffix/neutron-$suffix.pid" +logfile="/var/log/$suffix/neutron-$suffix.log" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +start() { + [ -x $exec ] || exit 5 + [ -f $config ] || exit 6 + [ -f $powervcconf ] || exit 6 + echo -n $"Starting $prog: " + daemon --user powervc --pidfile $pidfile "$exec --config-file $config --config-file $powervcconf --logfile $logfile &>/dev/null & echo \$! > $pidfile" + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $? diff --git a/neutron-powervc/powervc/__init__.py b/neutron-powervc/powervc/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/powervc/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/neutron-powervc/powervc/neutron/__init__.py b/neutron-powervc/powervc/neutron/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/powervc/neutron/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/neutron-powervc/powervc/neutron/agent/__init__.py b/neutron-powervc/powervc/neutron/agent/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/neutron-powervc/powervc/neutron/agent/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/neutron-powervc/powervc/neutron/agent/neutron_powervc_agent.py b/neutron-powervc/powervc/neutron/agent/neutron_powervc_agent.py
new file mode 100644
index 0000000..2d2792c
--- /dev/null
+++ b/neutron-powervc/powervc/neutron/agent/neutron_powervc_agent.py
@@ -0,0 +1,1422 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+Handles all of the Neutron logic necessary for the PowerVC driver.
+
+The :py:class:`PowerVCNeutronAgent` class is a Neutron agent.
+"""
+
+'''
+Created on Jul 30, 2013
+
+@author: John Kasperski
+'''
+
+import Queue
+import threading
+import os
+import time
+from exceptions import KeyboardInterrupt
+
+from neutron.openstack.common import rpc
+from neutron.openstack.common import log as logging
+
+from oslo.config import cfg
+
+from powervc.common.constants import LOCAL_OS
+from powervc.common.constants import POWERVC_OS
+from powervc.common.constants import PVC_TOPIC
+from powervc.common.constants import SERVICE_TYPES
+from powervc.common.client import factory
+from powervc.common.gettextutils import _
+from powervc.neutron.api import powervc_rpc
+from powervc.neutron.client import local_os_bindings
+from powervc.neutron.client import powervc_bindings
+from powervc.neutron.common import constants
+from powervc.neutron.common import utils
+from powervc.neutron.db import powervc_db_v2
+
+LOG = logging.getLogger(__name__)
+
+agent_opts = [
+    cfg.ListOpt('map_powervc_networks',
+                default=['*'],
+                help=_('List of PowerVC network names '
+                       'to be mapped to the local OS')),
+    cfg.IntOpt('polling_interval',
+               default=60,
+               help=_("The number of seconds the agent will wait between "
+                      "polling for network changes.")),
+]
+
+CONF = cfg.CONF
+CONF.register_opts(agent_opts, "AGENT")
+
+
+class PowerVCNeutronAgent(object):
+    """This is the main PowerVC Neutron agent class"""
+
+    def __init__(self):
+        self.end_thread = False
+        self.polling_interval = CONF.AGENT.polling_interval
+        self.retry_sync = time.time() + self.polling_interval
+        self.db = powervc_db_v2.PowerVCAgentDB()
+        self.event_q = Queue.Queue()
+        self.handlers = {}
+        self._register_handler(LOCAL_OS, constants.EVENT_NETWORK_CREATE,
+                               self._handle_local_network_create)
+        self._register_handler(LOCAL_OS, constants.EVENT_NETWORK_UPDATE,
+                               self._handle_local_network_update)
+        self._register_handler(LOCAL_OS, constants.EVENT_NETWORK_DELETE,
+                               self._handle_local_network_delete)
+        self._register_handler(LOCAL_OS, constants.EVENT_SUBNET_CREATE,
+                               self._handle_local_subnet_create)
+        self._register_handler(LOCAL_OS, constants.EVENT_SUBNET_UPDATE,
+                               self._handle_local_subnet_update)
+        self._register_handler(LOCAL_OS, constants.EVENT_SUBNET_DELETE,
+                               self._handle_local_subnet_delete)
+        self._register_handler(LOCAL_OS, constants.EVENT_PORT_CREATE,
+                               self._handle_local_port_create)
+        self._register_handler(LOCAL_OS, constants.EVENT_PORT_UPDATE,
+                               self._handle_local_port_update)
+        self._register_handler(LOCAL_OS, constants.EVENT_PORT_DELETE,
+                               self._handle_local_port_delete)
+        self._register_handler(POWERVC_OS, constants.EVENT_NETWORK_CREATE,
+                               self._handle_pvc_network_create)
+        self._register_handler(POWERVC_OS, constants.EVENT_NETWORK_UPDATE,
+                               self._handle_pvc_network_update)
+        self._register_handler(POWERVC_OS, constants.EVENT_NETWORK_DELETE,
+                               self._handle_pvc_network_delete)
+        self._register_handler(POWERVC_OS, constants.EVENT_SUBNET_CREATE,
+                               self._handle_pvc_subnet_create)
+        self._register_handler(POWERVC_OS, constants.EVENT_SUBNET_UPDATE,
+                               self._handle_pvc_subnet_update)
+        self._register_handler(POWERVC_OS, constants.EVENT_SUBNET_DELETE,
+                               self._handle_pvc_subnet_delete)
+        self._register_handler(POWERVC_OS, constants.EVENT_PORT_CREATE,
+                               self._handle_pvc_port_create)
+        self._register_handler(POWERVC_OS, constants.EVENT_PORT_UPDATE,
+                               self._handle_pvc_port_update)
+        self._register_handler(POWERVC_OS, constants.EVENT_PORT_DELETE,
+                               self._handle_pvc_port_delete)
+        self.pvc = factory.POWERVC.new_client(str(SERVICE_TYPES.network),
+                                              powervc_bindings.Client,
+                                              self)
+        self.local = factory.LOCAL.new_client(str(SERVICE_TYPES.network),
+                                              local_os_bindings.Client,
+                                              self)
+        self._setup_rpc()
+
+#==============================================================================
+# Generate DB stats string
+#==============================================================================
+    def _generate_db_stats(self):
+        net_creating, net_active, net_deleting = self.db.get_network_stats()
+        sub_creating, sub_active, sub_deleting = self.db.get_subnet_stats()
+        port_creating, port_active, port_deleting = self.db.get_port_stats()
+        stat_n = '{0:d}/{1:d}/{2:d}'.format(net_creating,
+                                            net_active,
+                                            net_deleting)
+        stat_s = '{0:d}/{1:d}/{2:d}'.format(sub_creating,
+                                            sub_active,
+                                            sub_deleting)
+        stat_p = '{0:d}/{1:d}/{2:d}'.format(port_creating,
+                                            port_active,
+                                            port_deleting)
+        return '(n:{0}, s:{1}, p:{2})'.format(stat_n, stat_s, stat_p)
+
+#==============================================================================
+# Handle network create
+#==============================================================================
+
+    def _handle_local_network_create(self, network):
+        net_id = network.get('id')
+        db_net = self.db.get_network(local_id=net_id)
+        if db_net:
+            LOG.info(_("DB entry for local network %s already exists"), net_id)
+            return
+        # If the local network has no subnet, do not handle it.
+        if not utils.network_has_subnet(network):
+            # No subnet, but maybe one was created while this event was
+            # queued up waiting to be processed.
+            # Refresh with the current network state on the local OS.
+            local_net = self.local.get_network(net_id)
+            if not local_net:
+                LOG.info(_("Local network %s might have been deleted"),
+                         net_id)
+                return
+            if not utils.network_has_subnet(local_net):
+                LOG.info(_("Local network %s has no subnet"),
+                         local_net.get('name'))
+                return
+            if not utils.network_has_mappable_subnet(self.local, local_net):
+                LOG.info(_("Local network %s has no mappable subnet"),
+                         local_net.get('name'))
+                return
+
+        sync_key = utils.gen_network_sync_key(network)
+        db_net = self.db.get_network(sync_key=sync_key)
+        if db_net:
+            self.db.set_network_local_id(db_net, net_id)
+        else:
+            db_net = self.db.create_network(network, sync_key,
+                                            local_id=net_id)
+            new_net = self.pvc.create_network(network)
+            if new_net:
+                self.db.set_network_pvc_id(db_net, new_net.get('id'))
+
+    def _handle_pvc_network_create(self, network):
+        net_id = network.get('id')
+        db_net = self.db.get_network(pvc_id=net_id)
+        if db_net:
+            LOG.info(_("DB entry for PowerVC network %s already exists"),
+                     net_id)
+            return
+        # Verify that the PVC network has a subnet (most likely it won't)
+        if not utils.network_has_subnet(network):
+            # No subnet, but maybe one was created when this event was queued
+            # up waiting to be processed. Refresh with the current network
+            # that is actually on PowerVC
+            network = self.pvc.get_network(net_id)
+            if not network:
+                LOG.warning(_("Unable to retrieve PowerVC network %s. "
+                              "Network may have been deleted."), net_id)
+                return
+            # Check to see if the network has a subnet now (it might)
+            if not utils.network_has_subnet(network):
+                LOG.info(_("PowerVC network has no subnets: %s"),
+                         network.get('name'))
+                return
+        sync_key = utils.gen_network_sync_key(network)
+        db_net = self.db.get_network(sync_key=sync_key)
+        if db_net:
+            self.db.set_network_pvc_id(db_net, net_id)
+        else:
+            # Create at local only if the name is in the white list.
+            if utils.is_network_in_white_list(network):
+                db_net = self.db.create_network(network, sync_key,
+                                                pvc_id=net_id)
+                new_net = self.local.create_network(network)
+                if new_net:
+                    self.db.set_network_local_id(db_net, new_net.get('id'))
+            else:
+                LOG.info(_("PowerVC network is not allowed: %s"),
+                         network.get('name'))
+
+#==============================================================================
+# Handle network update
+#==============================================================================
+
+    def _handle_local_network_update(self, network):
+        net_id = network.get('id')
+        db_net = self.db.get_network(local_id=net_id)
+        if not db_net:
+            LOG.info(_("DB entry for local network %s does not exist"), net_id)
+            return
+        pvc_id = db_net.get('pvc_id')
+        if not pvc_id:
+            LOG.info(_("No PowerVC network for local network %s"), net_id)
+            return
+        pvc_net = self.pvc.get_network(pvc_id)
+        if not pvc_net:
+            LOG.warning(_("Unable to retrieve PowerVC network %s. 
" + "Network may have been deleted."), pvc_id) + return + if not utils.equal_networks(pvc_net, network): + self.pvc.update_network(pvc_net, network) + update_data = utils.gen_network_update_data(network) + self.db.set_network_update_data(db_net, update_data) + else: + LOG.info(_("Network changes do not need to be updated")) + + def _handle_pvc_network_update(self, network): + net_id = network.get('id') + db_net = self.db.get_network(pvc_id=net_id) + if not db_net: + LOG.info(_("DB entry for PowerVC network %s does not exist"), + net_id) + return + local_id = db_net.get('local_id') + if not local_id: + LOG.info(_("No local network for PowerVC network %s"), net_id) + return + local_net = self.local.get_network(local_id) + if not local_net: + LOG.warning(_("Unable to retrieve local network %s. " + "Network may have been deleted."), local_id) + return + if not utils.equal_networks(local_net, network): + self.local.update_network(local_net, network) + update_data = utils.gen_network_update_data(network) + self.db.set_network_update_data(db_net, update_data) + else: + LOG.info(_("Network changes do not need to be updated")) + +#============================================================================== +# Handle network delete +#============================================================================== + + def _handle_local_network_delete(self, net_id): + db_net = self.db.get_network(local_id=net_id) + if not db_net: + LOG.info(_("DB entry for local network %s does not exist"), net_id) + return + pvc_id = db_net.get('pvc_id') + self.db.set_network_local_id(db_net, None) + if pvc_id: + port_list = self.pvc.get_ports_on_network(pvc_id) + if len(port_list) > 0: + LOG.info(_("Ports still defined on PowerVC network %s"), + pvc_id) + return + self.pvc.delete_network(pvc_id) + network = self.pvc.get_network(pvc_id) + if network: + return + self.db.delete_network(db_net) + + def _handle_pvc_network_delete(self, net_id): + db_net = self.db.get_network(pvc_id=net_id) + if not db_net: + LOG.info(_("DB entry for PowerVC network %s does not exist"), + net_id) + return + local_id = db_net.get('local_id') + self.db.set_network_pvc_id(db_net, None) + if local_id: + port_list = self.local.get_ports_on_network(local_id) + if len(port_list) > 0: + LOG.info(_("Ports still defined on local network %s"), + local_id) + return + self.local.delete_network(local_id) + network = self.local.get_network(local_id) + if network: + return + self.db.delete_network(db_net) + +#============================================================================== +# Handle subnet create +#============================================================================== + + def _handle_local_subnet_create(self, subnet): + local_id = subnet.get('id') + db_sub = self.db.get_subnet(local_id=local_id) + if db_sub: + LOG.info(_("DB entry for local subnet %s already exists"), + local_id) + return + net_id = subnet.get('network_id') + db_net = self.db.get_network(local_id=net_id) + if not db_net: + # No database entry for the network. This may be the first subnet + # created on the network -or- the network may be not "mappable". + # Retrieve the network and pass it into the handler routine if + # it is valid. 
+ network = self.local.get_network(net_id) + if network and utils.is_network_mappable(network): + self._handle_local_network_create(network) + db_net = self.db.get_network(local_id=net_id) + if not db_net: + LOG.info(_("Unable to find DB entry for local network %s"), + net_id) + return + if db_net.get('status') == constants.STATUS_DELETING: + LOG.info(_("Network %s is currently being deleted"), net_id) + return + sync_key = utils.gen_subnet_sync_key(subnet, db_net) + db_sub = self.db.get_subnet(sync_key=sync_key) + if db_sub: + self.db.set_subnet_local_id(db_sub, local_id) + else: + db_sub = self.db.create_subnet(subnet, sync_key, local_id=local_id) + new_sub = self.pvc.create_subnet(subnet) + if new_sub: + self.db.set_subnet_pvc_id(db_sub, new_sub.get('id')) + + def _handle_pvc_subnet_create(self, subnet): + pvc_id = subnet.get('id') + db_sub = self.db.get_subnet(pvc_id=pvc_id) + if db_sub: + LOG.info(_("DB entry for PowerVC subnet %s already exists"), + pvc_id) + return + net_id = subnet.get('network_id') + db_net = self.db.get_network(pvc_id=net_id) + if not db_net: + # No database entry for the network. This may be the first subnet + # created on the network -or- the network may be not "mappable". + # Retrieve the network and pass it into the handler routine if + # it is valid. + pvc_net = self.pvc.get_network(net_id) + if pvc_net and utils.is_network_mappable(pvc_net): + self._handle_pvc_network_create(pvc_net) + # Database entry for the network should exist now + db_net = self.db.get_network(pvc_id=net_id) + if not db_net: + LOG.info(_("Unable to find DB entry for PowerVC network %s"), + net_id) + return + if db_net.get('status') == constants.STATUS_DELETING: + LOG.info(_("Network %s is currently being deleted"), net_id) + return + sync_key = utils.gen_subnet_sync_key(subnet, db_net) + db_sub = self.db.get_subnet(sync_key=sync_key) + if db_sub: + self.db.set_subnet_pvc_id(db_sub, pvc_id) + else: + db_sub = self.db.create_subnet(subnet, sync_key, pvc_id=pvc_id) + new_sub = self.local.create_subnet(subnet) + if new_sub: + self.db.set_subnet_local_id(db_sub, new_sub.get('id')) + +#============================================================================== +# Handle subnet update +#============================================================================== + + def _handle_local_subnet_update(self, subnet): + local_id = subnet.get('id') + db_sub = self.db.get_subnet(local_id=local_id) + if not db_sub: + LOG.info(_("DB entry for local subnet %s does not exist"), + local_id) + return + pvc_id = db_sub.get('pvc_id') + if not pvc_id: + LOG.info(_("No PowerVC subnet for local subnet %s"), local_id) + return + pvc_sub = self.pvc.get_subnet(pvc_id) + if not pvc_sub: + LOG.warning(_("Unable to retrieve PowerVC subnet %s. " + "Subnet may have been deleted."), pvc_id) + return + if not utils.equal_subnets(pvc_sub, subnet): + self.pvc.update_subnet(pvc_sub, subnet) + update_data = utils.gen_subnet_update_data(subnet) + self.db.set_subnet_update_data(db_sub, update_data) + else: + LOG.info(_("Subnet changes do not need to be updated")) + + def _handle_pvc_subnet_update(self, subnet): + pvc_id = subnet.get('id') + db_sub = self.db.get_subnet(pvc_id=pvc_id) + if not db_sub: + LOG.info(_("DB entry for PowerVC subnet %s does not exist"), + pvc_id) + return + local_id = db_sub.get('local_id') + if not local_id: + LOG.info(_("No local subnet for PowerVC subnet %s"), pvc_id) + return + local_sub = self.local.get_subnet(local_id) + if not local_sub: + LOG.warning(_("Unable to retrieve local subnet %s. 
" + "Subnet may have been deleted."), local_id) + return + if not utils.equal_subnets(local_sub, subnet): + self.local.update_subnet(local_sub, subnet) + update_data = utils.gen_subnet_update_data(subnet) + self.db.set_subnet_update_data(db_sub, update_data) + else: + LOG.info(_("Subnet changes do not need to be updated")) + +#============================================================================== +# Handle subnet delete +#============================================================================== + + def _handle_local_subnet_delete(self, sub_id): + db_sub = self.db.get_subnet(local_id=sub_id) + if not db_sub: + LOG.info(_("DB entry for local subnet %s does not exist"), sub_id) + return + pvc_id = db_sub.get('pvc_id') + self.db.set_subnet_local_id(db_sub, None) + if not pvc_id: + # Other half of database object has already been cleaned up + return + subnet = self.pvc.get_subnet(pvc_id) + if not subnet: + LOG.warning(_("Unable to retrieve PowerVC subnet %s. " + "Subnet may have been deleted."), pvc_id) + self.db.delete_subnet(db_sub) + return + net_id = subnet.get('network_id') + port_list = self.pvc.get_ports_on_subnet(net_id, pvc_id) + if len(port_list) > 0: + LOG.info(_("Ports still defined on PowerVC subnet %s"), pvc_id) + return + self.pvc.delete_subnet(pvc_id) + subnet = self.pvc.get_subnet(pvc_id) + if subnet: + return + self.db.delete_subnet(db_sub) + + def _handle_pvc_subnet_delete(self, sub_id): + db_sub = self.db.get_subnet(pvc_id=sub_id) + if not db_sub: + LOG.info(_("DB entry for PowerVC subnet %s does not exist"), + sub_id) + return + local_id = db_sub.get('local_id') + self.db.set_subnet_pvc_id(db_sub, None) + if not local_id: + # Other half of database object has already been cleaned up + return + subnet = self.local.get_subnet(local_id) + if not subnet: + LOG.warning(_("Unable to retrieve local subnet %s. " + "Subnet may have been deleted."), local_id) + self.db.delete_subnet(db_sub) + return + net_id = subnet.get('network_id') + port_list = self.local.get_ports_on_subnet(net_id, local_id) + + if len(port_list) > 0: + if (self._ports_valid(port_list)): + LOG.info(_("Ports still defined on local subnet %s"), local_id) + return + # no local ports left, delete the subnet + self.local.delete_subnet(local_id) + subnet = self.local.get_subnet(local_id) + if subnet: + return + self.db.delete_subnet(db_sub) + + def _ports_valid(self, port_list): + """ + Check if these ports are still valid + :returns: True, if any of the ports is still valid; + False if none of them is valid. + """ + # handle case: + # local port is created and pvc port is not created; + # local port status will be 'Creating', delete such port. 
+ deleted = 0 + for local_port in port_list: + local_port_id = local_port.get('id') + db_port = self.db.get_port(local_id=local_port_id) + if db_port and db_port.get('status') == constants.STATUS_CREATING: + # delete this local port + self.local.delete_port(local_port_id) + # if it is really deleted + local_port = self.local.get_port(local_port_id) + if not local_port: + self.db.delete_port(db_port) + deleted += 1 + # still some ports left there + if (deleted != len(port_list)): + return True + # No port left + return False + +#============================================================================== +# Handle port create +#============================================================================== + + def _handle_local_port_create(self, port): + local_id = port.get('id') + db_port = self.db.get_port(local_id=local_id) + if db_port: + LOG.info(_("DB entry for local port %s already exists"), local_id) + return + net_id = port.get('network_id') + db_net = self.db.get_network(local_id=net_id) + if not db_net: + LOG.info(_("Unable to find DB entry for local network %s"), net_id) + return + if db_net.get('status') == constants.STATUS_DELETING: + LOG.info(_("Network %s is currently being deleted"), net_id) + return + valid_subnet = False + subnet_ids = utils.extract_subnets_from_port(port) + for local_sub_id in subnet_ids: + db_sub = self.db.get_subnet(local_id=local_sub_id) + if db_sub: + valid_subnet = True + break + if not valid_subnet: + LOG.info(_("Unable to map local port %s. The subnet %s " + "is not mapped."), local_id, subnet_ids) + return + sync_key = utils.gen_port_sync_key(port, db_net) + db_port = self.db.get_port(sync_key=sync_key) + if db_port: + self.db.set_port_local_id(db_port, local_id) + return + # Create the database entry for this new port + db_port = self.db.create_port(port, sync_key, local_id=local_id) + # Determine which instance owns this port + device_id = port.get('device_id') + # Determine if the instance is (HyperV / KVM) or PowerVC + # if PowerVC, return. + # If HyperV/KVM, reserve IP address in PowerVC + if not self.local.is_instance_on_power(device_id): + new_port = self.pvc.create_port(port) + if new_port: + self.db.set_port_pvc_id(db_port, new_port.get('id')) + + def _handle_pvc_port_create(self, port): + pvc_id = port.get('id') + db_port = self.db.get_port(pvc_id=pvc_id) + if db_port: + LOG.info(_("DB entry for PowerVC port %s already exists"), pvc_id) + return + net_id = port.get('network_id') + db_net = self.db.get_network(pvc_id=net_id) + if not db_net: + LOG.info(_("Unable to find DB entry for PowerVC network %s"), + net_id) + return + if db_net.get('status') == constants.STATUS_DELETING: + LOG.info(_("Network %s is currently being deleted"), net_id) + return + valid_subnet = False + subnet_ids = utils.extract_subnets_from_port(port) + for pvc_sub_id in subnet_ids: + db_sub = self.db.get_subnet(pvc_id=pvc_sub_id) + if db_sub: + valid_subnet = True + break + if not valid_subnet: + LOG.info(_("Unable to map PowerVC port %s. 
The subnet %s " + "is not mapped."), pvc_id, subnet_ids) + return + sync_key = utils.gen_port_sync_key(port, db_net) + db_port = self.db.get_port(sync_key=sync_key) + if db_port: + self.db.set_port_pvc_id(db_port, pvc_id) + return + db_port = self.db.create_port(port, sync_key, pvc_id=pvc_id) + new_port = self.local.create_port(port) + if new_port: + self.db.set_port_local_id(db_port, new_port.get('id')) + +#============================================================================== +# Handle port update +#============================================================================== + + def _handle_local_port_update(self, port): + local_id = port.get('id') + db_port = self.db.get_port(local_id=local_id) + if not db_port: + LOG.info(_("DB entry for local port %s does not exist"), local_id) + return + pvc_id = db_port.get('pvc_id') + if not pvc_id: + LOG.info(_("No PowerVC port for local port %s"), local_id) + return + pvc_port = self.pvc.get_port(pvc_id) + if not pvc_port: + LOG.warning(_("Unable to retrieve PowerVC port %s. " + "Port may have been deleted."), pvc_id) + return + if not utils.equal_ports(pvc_port, port): + self.pvc.update_port(pvc_port, port) + update_data = utils.gen_port_update_data(port) + self.db.set_port_update_data(db_port, update_data) + else: + LOG.info(_("Port changes do not need to be updated")) + + def _handle_pvc_port_update(self, port): + pvc_id = port.get('id') + db_port = self.db.get_port(pvc_id=pvc_id) + if not db_port: + LOG.info(_("DB entry for PowerVC port %s does not exist"), pvc_id) + return + local_id = db_port.get('local_id') + if not local_id: + LOG.info(_("No local port for PowerVC port %s"), pvc_id) + return + local_port = self.local.get_port(local_id) + if not local_port: + LOG.warning(_("Unable to retrieve local port %s. " + "Port may have been deleted."), local_id) + return + if not utils.equal_ports(local_port, port): + self.local.update_port(local_port, port) + update_data = utils.gen_port_update_data(port) + self.db.set_port_update_data(db_port, update_data) + else: + LOG.info(_("Port changes do not need to be updated")) + +#============================================================================== +# Handle port delete +#============================================================================== + + def _handle_local_port_delete(self, port_id): + db_port = self.db.get_port(local_id=port_id) + if not db_port: + LOG.info(_("DB entry for local port %s does not exist"), port_id) + return + pvc_id = db_port.get('pvc_id') + self.db.set_port_local_id(db_port, None) + if not pvc_id: + # Other half of database object has already been cleaned up + return + pvc_port = self.pvc.get_port(pvc_id) + if not pvc_port: + LOG.warning(_("Unable to retrieve PowerVC port %s. " + "Port may have been deleted."), pvc_id) + self.db.delete_port(db_port) + return + device_id = pvc_port.get('device_id') + if device_id and len(device_id) > 0: + LOG.info(_("PowerVC port %s can not be deleted. 
Port is in-use " + "by VM %s."), pvc_id, device_id) + LOG.info(_("Recreate the local port to prevent this IP " + "address from being used by another instance.")) + new_port = self.local.create_port(pvc_port) + if new_port: + # Update the database entry with new port uuid + self.db.set_port_local_id(db_port, new_port.get('id')) + return + self.pvc.delete_port(pvc_id) + pvc_port = self.pvc.get_port(pvc_id) + if pvc_port: + return + self.db.delete_port(db_port) + + def _handle_pvc_port_delete(self, port_id): + db_port = self.db.get_port(pvc_id=port_id) + if not db_port: + LOG.info(_("DB entry for PowerVC port %s does not exist"), port_id) + return + local_id = db_port.get('local_id') + self.db.set_port_pvc_id(db_port, None) + if not local_id: + # Other half of database object has already been cleaned up + return + local_port = self.local.get_port(local_id) + if not local_port: + LOG.warning(_("Unable to retrieve local port %s. " + "Port may have been deleted."), local_id) + self.db.delete_port(db_port) + return + self._delete_local_port(local_port, db_port) + + def _delete_local_port(self, local_port, db_port): + # complex logic here on how to handle it + # some possible cases for this local port: + # 1) device_id = None occurs when lock IP address done using SCE UI. + # Delete the local port + # 2) device_owner = network:router_interface (see issue 173350). + # re-create the PVC port + # 3) device_id = instance that no longer exists. + # Delete the local port + # 4) device_id = HyperV/KVM instance. + # Re-create the PVC port + # 5) device_id = PowerVC deployed instance. + # Delete the local port + # + local_id = db_port.get('local_id') + # case 2 + device_owner = local_port.get('device_owner') + if device_owner and (device_owner == "network:router_interface"): + LOG.info(_("Local port %s can not be deleted. Port is in-use " + "by device_owner %s."), local_id, device_owner) + new_port = self.pvc.create_port(local_port) + if new_port: + self.db.set_port_pvc_id(db_port, new_port.get('id')) + return + + device_id = local_port.get('device_id') + if device_id and self.local.is_instance_valid(device_id): + if not self.local.is_instance_on_power(device_id): + # case 4) + LOG.info(_("Local port %s can not be deleted. 
Port is in-use " + "by VM %s."), local_id, device_id) + new_port = self.pvc.create_port(local_port) + if new_port: + self.db.set_port_pvc_id(db_port, new_port.get('id')) + return + # for case 1) 3) 5) + self.local.delete_port(local_id) + local_port = self.local.get_port(local_id) + if local_port: + return + self.db.delete_port(db_port) + +#============================================================================== +# Register handlers routines +#============================================================================== + + def _register_handler(self, event_os, event_type, handler): + key = event_type + if event_os: + key = event_os + ':' + event_type + self.handlers[key] = handler + +#============================================================================== +# Handle event +#============================================================================== + + def _handle_event(self, event): + event_os = event.get(constants.EVENT_OS) + event_type = event.get(constants.EVENT_TYPE) + event_obj = event.get(constants.EVENT_OBJECT) + if event_type == constants.EVENT_END_THREAD: + return + elif event_type == constants.EVENT_FULL_SYNC: + self._synchronize(event_os) + return + key = event_type + if event_os: + key = event_os + ':' + event_type + handler = self.handlers.get(key) + if not handler: + LOG.error(_("No handler found for: %s"), key) + return + return handler(event_obj) + +#============================================================================== +# Queue event for procesing by the daemon loop +#============================================================================== + + def queue_event(self, event_os, event_type, event_obj): + event = {} + event[constants.EVENT_OS] = event_os + event[constants.EVENT_TYPE] = event_type + event[constants.EVENT_OBJECT] = event_obj + self.event_q.put(event) + +#============================================================================== +# Setup RPC routine +#============================================================================== + + def _setup_rpc(self): + """ + set up RPC support + """ + self.topic = PVC_TOPIC + self.conn = rpc.create_connection(new=True) + self.callbacks = powervc_rpc.PVCRpcCallbacks(self) + self.dispatcher = self.callbacks.create_rpc_dispatcher() + self.conn.create_consumer(self.topic, self.dispatcher, fanout=False) + self.conn.consume_in_thread() + LOG.info(_("RPC listener created")) + +#============================================================================== +# Synchronize all Neutron objects +#============================================================================== + + def _synchronize(self, default_target=LOCAL_OS): + """Main synchronize routine""" + start = time.time() + LOG.info(_("Synchronizing all networks/subnets/ports...")) + self._synchronize_networks(default_target) + self._synchronize_subnets(default_target) + self._synchronize_ports(default_target) + db_stats = self._generate_db_stats() + end = time.time() + elapsed = '{0:.4} seconds'.format(end - start) + LOG.info(_("Full sync elapsed time: %s %s"), elapsed, db_stats) + self.retry_sync = time.time() + self.polling_interval + +#============================================================================== +# Synchronize networks +#============================================================================== + + def _synchronize_networks(self, target=LOCAL_OS): + pvc_nets = self.pvc.get_networks() + local_nets = self.local.get_networks() + self._sync_deleted_nets(pvc_nets, local_nets) + self._sync_new_pvc_nets(pvc_nets) + 
self._sync_new_local_nets(local_nets) + self._sync_updated_nets(pvc_nets, local_nets, target) + self._sync_deleting_nets() + self._sync_creating_nets(pvc_nets, local_nets) + + def _sync_deleted_nets(self, pvc_nets, local_nets): + db_networks = self.db.get_networks() + for db_net in db_networks: + pvc_id, local_id = utils.extract_ids_from_entry(db_net) + if pvc_id and pvc_id not in pvc_nets.keys(): + self.db.set_network_pvc_id(db_net, None) + if local_id and local_id not in local_nets.keys(): + self.db.set_network_local_id(db_net, None) + + def _sync_new_pvc_nets(self, pvc_nets): + for pvc_net in pvc_nets.values(): + pvc_id = pvc_net.get('id') + db_net = self.db.get_network(pvc_id=pvc_id) + if db_net: + # DB entry for this PVC network already exists + continue + # Verify that the PVC network has a subnet. A network without + # a subnet is considered a DHCP network by PowerVC. We do not + # support DHCP networks + if not utils.network_has_subnet(pvc_net): + LOG.info(_("PowerVC network has no subnets: %s"), + pvc_net.get('name')) + continue + sync_key = utils.gen_network_sync_key(pvc_net) + db_net = self.db.get_network(sync_key=sync_key) + if db_net: + self.db.set_network_pvc_id(db_net, pvc_id) + else: + # Check if the pvc network is allowed to sync. + if utils.is_network_in_white_list(pvc_net): + self.db.create_network(pvc_net, sync_key, pvc_id=pvc_id) + else: + LOG.info(_("PowerVC network is not allowed: %s"), + pvc_net.get('name')) + + def _sync_new_local_nets(self, local_nets): + for local_net in local_nets.values(): + local_id = local_net.get('id') + db_net = self.db.get_network(local_id=local_id) + if db_net: + # DB entry for this local network already exists + continue + #if local network has no subnet, not handle it. + if not utils.network_has_subnet(local_net): + LOG.info(_("Local network %s has no subnet"), + local_net.get('name')) + continue + #if local network has subnet, verify if the subnet is mappable + if not utils.network_has_mappable_subnet(self.local, + local_net): + LOG.info(_("Local network %s has no mappable subnet"), + local_net.get('name')) + continue + sync_key = utils.gen_network_sync_key(local_net) + db_net = self.db.get_network(sync_key=sync_key) + if db_net: + self.db.set_network_local_id(db_net, local_id) + else: + self.db.create_network(local_net, sync_key, local_id=local_id) + + def _sync_updated_nets(self, pvc_nets, local_nets, target): + db_active_list = self.db.get_networks(constants.STATUS_ACTIVE) + for db_net in db_active_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_net) + pvc_net = pvc_nets.get(pvc_id) + local_net = local_nets.get(local_id) + result = utils.compare_networks(local_net, pvc_net, db_net, target) + if result: + if result == LOCAL_OS: + self.local.update_network(local_net, pvc_net) + update_data = utils.gen_network_update_data(pvc_net) + else: + self.pvc.update_network(pvc_net, local_net) + update_data = utils.gen_network_update_data(local_net) + self.db.set_network_update_data(db_net, update_data) + + def _sync_deleting_nets(self): + db_delete_list = self.db.get_networks(constants.STATUS_DELETING) + for db_net in db_delete_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_net) + if pvc_id and local_id: + self.db.fix_incorrect_state(db_net) + continue + if pvc_id: + pvc_ports = self.pvc.get_ports_on_network(pvc_id) + if len(pvc_ports) > 0: + LOG.info(_("Ports are still defined on PowerVC network " + "%s. 
Network can not be deleted."), pvc_id) + continue + self.pvc.delete_network(pvc_id) + pvc_net = self.pvc.get_network(pvc_id) + if pvc_net: + continue + if local_id: + local_ports = self.local.get_ports_on_network(local_id) + if len(local_ports) > 0: + LOG.info(_("Ports are still defined on local network " + "%s. Network can not be deleted."), local_id) + continue + self.local.delete_network(local_id) + local_net = self.local.get_network(local_id) + if local_net: + continue + self.db.delete_network(db_net) + + def _sync_creating_nets(self, pvc_nets, local_nets): + db_create_list = self.db.get_networks(constants.STATUS_CREATING) + for db_net in db_create_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_net) + if pvc_id: + pvc_net = pvc_nets.get(pvc_id) + local_net = self.local.create_network(pvc_net) + if local_net: + local_id = local_net.get('id') + self.db.set_network_local_id(db_net, local_id) + continue + if local_id: + local_net = local_nets.get(local_id) + pvc_net = self.pvc.create_network(local_net) + if pvc_net: + pvc_id = pvc_net.get('id') + self.db.set_network_pvc_id(db_net, pvc_id) + continue + +#============================================================================== +# Synchronize subnets +#============================================================================== + + def _synchronize_subnets(self, target=LOCAL_OS): + pvc_subnets = self.pvc.get_subnets() + local_subnets = self.local.get_subnets() + self._sync_deleted_subnets(pvc_subnets, local_subnets) + self._sync_new_pvc_subnets(pvc_subnets) + self._sync_new_local_subnets(local_subnets) + self._sync_updated_subnets(pvc_subnets, local_subnets, target) + self._sync_deleting_subnets(pvc_subnets, local_subnets) + self._sync_creating_subnets(pvc_subnets, local_subnets) + + def _sync_deleted_subnets(self, pvc_subnets, local_subnets): + db_subnets = self.db.get_subnets() + for db_sub in db_subnets: + pvc_id, local_id = utils.extract_ids_from_entry(db_sub) + if pvc_id and pvc_id not in pvc_subnets.keys(): + self.db.set_subnet_pvc_id(db_sub, None) + if local_id and local_id not in local_subnets.keys(): + self.db.set_subnet_local_id(db_sub, None) + + def _sync_new_pvc_subnets(self, pvc_subnets): + for pvc_sub in pvc_subnets.values(): + pvc_id = pvc_sub.get('id') + db_sub = self.db.get_subnet(pvc_id=pvc_id) + if db_sub: + # DB entry for this PVC subnet already exists + continue + pvc_net_id = pvc_sub.get('network_id') + db_net = self.db.get_network(pvc_id=pvc_net_id) + if not db_net: + # Subnet is associated with a network that is not mapped + continue + if db_net.get('status') == constants.STATUS_DELETING: + # Do not create new subnet if network is being deleted + continue + sync_key = utils.gen_subnet_sync_key(pvc_sub, db_net) + db_sub = self.db.get_subnet(sync_key=sync_key) + if db_sub: + self.db.set_subnet_pvc_id(db_sub, pvc_id) + else: + self.db.create_subnet(pvc_sub, sync_key, pvc_id=pvc_id) + + def _sync_new_local_subnets(self, local_subnets): + for local_sub in local_subnets.values(): + local_id = local_sub.get('id') + db_sub = self.db.get_subnet(local_id=local_id) + if db_sub: + # DB entry for this local subnet already exists + continue + local_net_id = local_sub.get('network_id') + db_net = self.db.get_network(local_id=local_net_id) + if not db_net: + # Subnet is associated with a network that is not mapped + continue + if db_net.get('status') == constants.STATUS_DELETING: + # Do not create new subnet if network is being deleted + continue + sync_key = utils.gen_subnet_sync_key(local_sub, db_net) + db_sub = 
self.db.get_subnet(sync_key=sync_key) + if db_sub: + self.db.set_subnet_local_id(db_sub, local_id) + else: + self.db.create_subnet(local_sub, sync_key, local_id=local_id) + + def _sync_updated_subnets(self, pvc_subnets, local_subnets, target): + db_active_list = self.db.get_subnets(constants.STATUS_ACTIVE) + for db_sub in db_active_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_sub) + pvc_sub = pvc_subnets.get(pvc_id) + local_sub = local_subnets.get(local_id) + result = utils.compare_subnets(local_sub, pvc_sub, db_sub, target) + if result: + if result == LOCAL_OS: + self.local.update_subnet(local_sub, pvc_sub) + update_data = utils.gen_subnet_update_data(pvc_sub) + else: + self.pvc.update_subnet(pvc_sub, local_sub) + update_data = utils.gen_subnet_update_data(local_sub) + self.db.set_subnet_update_data(db_sub, update_data) + + def _sync_deleting_subnets(self, pvc_subnets, local_subnets): + db_delete_list = self.db.get_subnets(constants.STATUS_DELETING) + for db_sub in db_delete_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_sub) + if pvc_id and local_id: + self.db.fix_incorrect_state(db_sub) + continue + if pvc_id: + pvc_sub = pvc_subnets.get(pvc_id) + pvc_net_id = pvc_sub.get('network_id') + pvc_ports = self.pvc.get_ports_on_subnet(pvc_net_id, + pvc_id) + if len(pvc_ports) > 0: + LOG.info(_("Ports are still defined on PowerVC subnet " + "%s. Subnet can not be deleted."), pvc_id) + continue + self.pvc.delete_subnet(pvc_id) + pvc_sub = self.pvc.get_subnet(pvc_id) + if pvc_sub: + continue + if local_id: + local_sub = local_subnets.get(local_id) + local_net_id = local_sub.get('network_id') + local_ports = self.local.get_ports_on_subnet(local_net_id, + local_id) + if len(local_ports) > 0: + if (self._ports_valid(local_ports)): + LOG.info(_("Ports are still defined on local OS" + " subnet %s. 
Subnet can not be deleted."), + local_id) + continue + self.local.delete_subnet(local_id) + local_sub = self.local.get_subnet(local_id) + if local_sub: + continue + self.db.delete_subnet(db_sub) + + def _sync_creating_subnets(self, pvc_subnets, local_subnets): + db_create_list = self.db.get_subnets(constants.STATUS_CREATING) + for db_sub in db_create_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_sub) + if pvc_id: + pvc_sub = pvc_subnets.get(pvc_id) + local_sub = self.local.create_subnet(pvc_sub) + if local_sub: + local_id = local_sub.get('id') + self.db.set_subnet_local_id(db_sub, local_id) + continue + if local_id: + local_sub = local_subnets.get(local_id) + pvc_sub = self.pvc.create_subnet(local_sub) + if pvc_sub: + pvc_id = pvc_sub.get('id') + self.db.set_subnet_pvc_id(db_sub, pvc_id) + continue + +#============================================================================== +# Synchronize ports +#============================================================================== + + def _synchronize_ports(self, target=LOCAL_OS): + pvc_ports = self.pvc.get_ports() + local_ports = self.local.get_ports() + self._sync_deleted_ports(pvc_ports, local_ports) + self._sync_new_pvc_ports(pvc_ports) + self._sync_new_local_ports(local_ports) + self._sync_updated_ports(pvc_ports, local_ports, target) + self._sync_deleting_ports() + self._sync_creating_ports(pvc_ports, local_ports) + + def _sync_deleted_ports(self, pvc_ports, local_ports): + db_ports = self.db.get_ports() + for db_port in db_ports: + pvc_id, local_id = utils.extract_ids_from_entry(db_port) + if pvc_id and pvc_id not in pvc_ports.keys(): + self.db.set_port_pvc_id(db_port, None) + if local_id and local_id not in local_ports.keys(): + self.db.set_port_local_id(db_port, None) + + def _sync_new_pvc_ports(self, pvc_ports): + for pvc_port in pvc_ports.values(): + pvc_id = pvc_port.get('id') + db_port = self.db.get_port(pvc_id=pvc_id) + if db_port: + # DB entry for this PVC port already exists + continue + pvc_net_id = pvc_port.get('network_id') + db_net = self.db.get_network(pvc_id=pvc_net_id) + if not db_net: + # Port is associated with a network that is not mapped + continue + if db_net.get('status') == constants.STATUS_DELETING: + # Do not create new port if network is being deleted + continue + valid_subnet = False + subnet_ids = utils.extract_subnets_from_port(pvc_port) + for pvc_sub_id in subnet_ids: + db_sub = self.db.get_subnet(pvc_id=pvc_sub_id) + if db_sub: + valid_subnet = True + break + if not valid_subnet: + LOG.info(_("Unable to map PowerVC port %s. 
The subnet %s " + "is not mapped."), pvc_id, subnet_ids) + continue + sync_key = utils.gen_port_sync_key(pvc_port, db_net) + db_port = self.db.get_port(sync_key=sync_key) + if db_port: + self.db.set_port_pvc_id(db_port, pvc_id) + else: + self.db.create_port(pvc_port, sync_key, pvc_id=pvc_id) + + def _sync_new_local_ports(self, local_ports): + for local_port in local_ports.values(): + local_id = local_port.get('id') + db_port = self.db.get_port(local_id=local_id) + if db_port: + # DB entry for this local port already exists + continue + local_net_id = local_port.get('network_id') + db_net = self.db.get_network(local_id=local_net_id) + if not db_net: + # Port is associated with a network that is not mapped + continue + if db_net.get('status') == constants.STATUS_DELETING: + # Do not create new port if network is being deleted + continue + if not db_net.get('pvc_id'): + # The PowerVC network no longer exists + continue + valid_subnet = False + subnet_ids = utils.extract_subnets_from_port(local_port) + for local_sub_id in subnet_ids: + db_sub = self.db.get_subnet(local_id=local_sub_id) + if db_sub: + valid_subnet = True + break + if not valid_subnet: + LOG.info(_("Unable to map local port %s. The subnet %s " + "is not mapped."), local_id, subnet_ids) + continue + sync_key = utils.gen_port_sync_key(local_port, db_net) + db_port = self.db.get_port(sync_key=sync_key) + if db_port: + self.db.set_port_local_id(db_port, local_id) + else: + self.db.create_port(local_port, sync_key, local_id=local_id) + + def _sync_updated_ports(self, pvc_ports, local_ports, target): + db_active_list = self.db.get_ports(constants.STATUS_ACTIVE) + vm_map = None + for db_port in db_active_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_port) + pvc_port = pvc_ports.get(pvc_id) + local_port = local_ports.get(local_id) + if not pvc_port or not local_port: + continue + # Fix up device id in local port (if necessary) + pvc_device = pvc_port.get('device_id') + local_device = local_port.get('device_id') + if (not local_device or len(local_device) == 0 or + local_device.startswith(constants.RSVD_PORT_PREFIX)): + if pvc_device and len(pvc_device) > 0: + if vm_map is None: + LOG.info(_("Retrieving PowerVC to local VM mappings")) + vm_map = self.local.get_power_vm_mapping() + if pvc_device in vm_map: + local_device_id = vm_map[pvc_device] + LOG.info(_("Update local port %s with device id %s"), + local_id, local_device_id) + self.local.set_port_device_id(local_port, + local_device_id) + else: + LOG.info(_("Unable to update local port %s. 
Local " + "instance for PowerVC %s can not be found"), + local_id, pvc_device) + # Do any of the other fields in the ports need to be updated + result = utils.compare_ports(local_port, pvc_port, db_port, target) + if result: + if result == LOCAL_OS: + self.local.update_port(local_port, pvc_port) + update_data = utils.gen_port_update_data(pvc_port) + else: + self.pvc.update_port(pvc_port, local_port) + update_data = utils.gen_port_update_data(local_port) + self.db.set_port_update_data(db_port, update_data) + + def _sync_deleting_ports(self): + db_delete_list = self.db.get_ports(constants.STATUS_DELETING) + for db_port in db_delete_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_port) + if pvc_id and local_id: + self.db.fix_incorrect_state(db_port) + continue + if pvc_id: + pvc_port = self.pvc.get_port(pvc_id) + if not pvc_port: + self.db.delete_port(db_port) + continue + device_id = pvc_port.get('device_id') + if device_id and len(device_id) > 0: + LOG.info(_("PVC port %s can not be deleted. Port is " + "in-use by VM %s."), pvc_id, device_id) + LOG.info(_("Recreate the local port to prevent this IP " + "address from being used by another instance.")) + new_port = self.local.create_port(pvc_port) + if new_port: + # Update the database entry with new port uuid + self.db.set_port_local_id(db_port, new_port.get('id')) + continue + self.pvc.delete_port(pvc_id) + pvc_port = self.pvc.get_port(pvc_id) + if pvc_port: + continue + self.db.delete_port(db_port) + if local_id: + local_port = self.local.get_port(local_id) + if not local_port: + self.db.delete_port(db_port) + continue + self._delete_local_port(local_port, db_port) + continue + + def _sync_creating_ports(self, pvc_ports, local_ports): + db_create_list = self.db.get_ports(constants.STATUS_CREATING) + for db_port in db_create_list: + pvc_id, local_id = utils.extract_ids_from_entry(db_port) + if pvc_id: + pvc_port = pvc_ports.get(pvc_id) + local_port = self.local.create_port(pvc_port) + if local_port: + local_id = local_port.get('id') + self.db.set_port_local_id(db_port, local_id) + continue + if local_id: + local_port = local_ports.get(local_id) + # Determine which instance owns this port + device_id = local_port.get('device_id') + if not self.local.is_instance_on_power(device_id): + # Create a port on PVC if this is a local instance, + # so PVC won't use its IP address. + pvc_port = self.pvc.create_port(local_port) + if pvc_port: + pvc_id = pvc_port.get('id') + self.db.set_port_pvc_id(db_port, pvc_id) + continue + +#============================================================================== +# RPC methods +#============================================================================== + + def set_device_id_on_port_by_pvc_instance_uuid(self, + db_api, + device_id, + pvc_ins_uuid): + """ + Query the ports by pvc instance uuid, and set its + local instance id(device_id). + """ + local_ids = [] + pvc_ports = self.pvc.get_ports_by_instance_uuid(pvc_ins_uuid) + if pvc_ports and len(pvc_ports) > 0: + for pvc_port in pvc_ports: + pvc_id = pvc_port.get('id') + # Can't use self.db because of thread sync. issue, + # so passed in one from the caller. 
+                db_port = db_api.get_port(pvc_id=pvc_id)
+                if not db_port:
+                    LOG.debug(_("No db_port found: %s"), pvc_id)
+                    continue
+                local_id = db_port.get('local_id')
+                if not local_id:
+                    LOG.debug(_("No local_port_id found: %s"), pvc_id)
+                    continue
+                local_port = self.local.get_port(local_id)
+                if not local_port:
+                    LOG.debug(_("No local_port found: %s"), pvc_id)
+                    continue
+                self.local.set_port_device_id(local_port, device_id)
+                local_ids.append(local_id)
+                LOG.debug(_("Set device_id for %s with %s"), pvc_id, device_id)
+        return local_ids
+
+#==============================================================================
+# Event processing loop of the agent
+#==============================================================================
+
+    def _process_event_queue(self):
+        """Process queued events and run periodic full synchronizations"""
+        while not self.end_thread:
+            try:
+                # Perform a full synchronization of all neutron objects
+                self._synchronize()
+            except Exception as e:
+                LOG.exception(_("Error during synchronize: %s"), e)
+                # We don't want to kill the agent on a sync error. Continue
+                # running and retry the operation when the polling interval
+                # has elapsed.
+                self.retry_sync = time.time() + self.polling_interval
+
+            # Process events while waiting out the polling interval
+            while (time.time() < self.retry_sync or not self.event_q.empty()):
+                event = None
+                try:
+                    wait = self.retry_sync - time.time()
+                    if wait <= 0:
+                        wait = 1
+                    event = self.event_q.get(True, wait)
+                except Queue.Empty:
+                    LOG.info(_("No events posted"))
+                except Exception as e:
+                    LOG.exception(_("Error while waiting for event: %s"), e)
+                    return
+                if self.end_thread:
+                    LOG.info(_("Event thread signaled to end"))
+                    return
+                if event:
+                    try:
+                        self.event_q.task_done()
+                        LOG.info(_("Event received: %s"), event)
+                        self._handle_event(event)
+                    except Exception as e:
+                        LOG.exception(_("Error handling event: %s"), e)
+                        # We don't want to kill the agent if an error occurs
+                        # while handling an event
+
+#==============================================================================
+# Main loop of the agent
+#==============================================================================
+
+    def daemon_loop(self):
+        # Start a thread here to process the event queue. If the event queue
+        # were processed from the main thread, incoming RPC requests would be
+        # delayed until the full sync is done. We could have dropped the
+        # event queue wait time and added a small sleep() to the Queue.Empty
+        # exception handler, but that would delay RPC events until the sleep
+        # completes.
+        t = threading.Thread(target=self._process_event_queue)
+        t.setDaemon(True)
+        t.start()
+
+        # While the worker thread is alive, sleep
+        while t.isAlive():
+            try:
+                time.sleep(self.polling_interval)
+            except KeyboardInterrupt:
+                LOG.info(_("Waiting for worker thread to end"))
+                self.end_thread = True
+                event = {}
+                event[constants.EVENT_TYPE] = constants.EVENT_END_THREAD
+                self.event_q.put(event)
+                t.join(self.polling_interval)
+        LOG.info(_("Worker thread is dead. Exiting"))
+
+
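The comment in daemon_loop describes a common worker-thread pattern: a daemon thread drains the event queue with a timed get() while the main thread sleeps and watches for KeyboardInterrupt. A minimal standalone sketch of that pattern, in the same Python 2 idiom (names, timeouts, and the print are illustrative only):

    # Minimal sketch of the daemon_loop/_process_event_queue pattern.
    import Queue
    import threading
    import time

    def worker(q, state):
        # Drain events until signaled to end, like _process_event_queue.
        while not state['end']:
            try:
                event = q.get(True, 5)
            except Queue.Empty:
                continue
            if event.get('type') == 'thread.end':
                return
            print 'handling', event

    q = Queue.Queue()
    state = {'end': False}
    t = threading.Thread(target=worker, args=(q, state))
    t.setDaemon(True)   # do not block interpreter exit
    t.start()
    try:
        while t.isAlive():
            time.sleep(1)
    except KeyboardInterrupt:
        state['end'] = True
        q.put({'type': 'thread.end'})  # wake the worker so it exits promptly
        t.join(5)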
")) + agent.daemon_loop() + + except Exception as e: + LOG.exception(_("Exception occurred in agent: %s"), e) + + finally: + # Use hard exit here so that QPID threads will be killed + LOG.info(_("Agent exiting...")) + os._exit(os.EX_OK) diff --git a/neutron-powervc/powervc/neutron/api/__init__.py b/neutron-powervc/powervc/neutron/api/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/powervc/neutron/api/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/neutron-powervc/powervc/neutron/api/client_rpc.py b/neutron-powervc/powervc/neutron/api/client_rpc.py new file mode 100644 index 0000000..5cae3ef --- /dev/null +++ b/neutron-powervc/powervc/neutron/api/client_rpc.py @@ -0,0 +1,117 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +from oslo.config import cfg +from prettytable import PrettyTable + +from neutron.openstack.common.rpc import proxy +from neutron.openstack.common import log as logging + +from powervc.common.gettextutils import _ + +LOG = logging.getLogger(__name__) +LIST_COLUMNS = ['status', 'local_id', 'pvc_id', 'sync_key'] + + +#============================================================================== +# RPC client +#============================================================================== + +class RpcClient(proxy.RpcProxy): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, context): + LOG.debug(_('__init__')) + self.topic = 'powervcrpc' + self.context = context + self.host = cfg.CONF.host + super(RpcClient, self).__init__( + topic=self.topic, default_version=self.BASE_RPC_API_VERSION) + + def _print_table(self, result): + if result and len(result) > 0: + pt = PrettyTable(LIST_COLUMNS) + for obj in result: + row = [] + for col in LIST_COLUMNS: + row.append(obj.get(col)) + pt.add_row(row) + print pt + + def _print_object(self, result): + if result: + pt = PrettyTable(['Field', 'Value']) + pt.align['Field'] = 'l' + pt.align['Value'] = 'l' + for field in result.keys(): + row = [field, result.get(field)] + pt.add_row(row) + print pt + else: + print 'Result from RPC call: ', result + + def get_local_network_uuid(self, network_id): + LOG.debug(_('get_local_network_uuid')) + result = self.call(self.context, + self.make_msg('get_local_network_uuid', + network_id=network_id), + topic=self.topic) + print 'Result from RPC call:', result + + def get_pvc_network_uuid(self, network_id): + LOG.debug(_('get_pvc_network_uuid')) + result = self.call(self.context, + self.make_msg('get_pvc_network_uuid', + network_id=network_id), + topic=self.topic) + print 'Result from RPC call:', result + + def get_network(self, opt): + LOG.debug(_('get_network: %s'), opt) + result = self.call(self.context, + self.make_msg('get_network', sync_key=opt), + topic=self.topic) + self._print_object(result) + + def get_networks(self): + LOG.debug(_('get_networks')) + result = self.call(self.context, + self.make_msg('get_networks'), + topic=self.topic) + self._print_table(result) + + def get_subnet(self, opt): + LOG.debug(_('get_subnet: %s'), opt) + result = self.call(self.context, + 
self.make_msg('get_subnet', sync_key=opt), + topic=self.topic) + self._print_object(result) + + def get_subnets(self): + LOG.debug(_('get_subnets')) + result = self.call(self.context, + self.make_msg('get_subnets'), + topic=self.topic) + self._print_table(result) + + def get_port(self, opt): + LOG.debug(_('get_port: %s'), opt) + result = self.call(self.context, + self.make_msg('get_port', sync_key=opt), + topic=self.topic) + self._print_object(result) + + def get_ports(self): + LOG.debug(_('get_ports')) + result = self.call(self.context, + self.make_msg('get_ports'), + topic=self.topic) + self._print_table(result) diff --git a/neutron-powervc/powervc/neutron/api/powervc_rpc.py b/neutron-powervc/powervc/neutron/api/powervc_rpc.py new file mode 100644 index 0000000..4e0cc3c --- /dev/null +++ b/neutron-powervc/powervc/neutron/api/powervc_rpc.py @@ -0,0 +1,115 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +from neutron.openstack.common import log as logging +from neutron.openstack.common.rpc import dispatcher + +from powervc.common.constants import LOCAL_OS +from powervc.common.constants import POWERVC_OS +from powervc.common.gettextutils import _ +from powervc.neutron.common import utils +from powervc.neutron.db import powervc_db_v2 + +LOG = logging.getLogger(__name__) + + +#============================================================================== +# RPC callback +#============================================================================== + +class PVCRpcCallbacks(object): + """ + RPC callbacks for nova driver calling this agent. + MUST set topic=powervc at both sides. + """ + + # Set RPC API version to 1.0 by default. 
+ RPC_API_VERSION = '1.0' + + def __init__(self, neutron_agent): + + super(PVCRpcCallbacks, self).__init__() + self.agent = neutron_agent + self.db = powervc_db_v2.PowerVCAgentDB() + + def create_rpc_dispatcher(self): + return dispatcher.RpcDispatcher([self]) + + def get_local_network_uuid(self, context, network_id): + LOG.info(_("Neutron Agent RPC: get_local_network_uuid:")) + LOG.info(_("- pvc_net_id: %s"), network_id) + local_net_id = utils.translate_net_id(self.db, network_id, LOCAL_OS) + LOG.info(_("- local_net_id: %s"), local_net_id) + return local_net_id + + def get_pvc_network_uuid(self, context, network_id): + LOG.info(_("Neutron Agent RPC: get_pvc_network_uuid:")) + LOG.info(_("- local_net_id: %s"), network_id) + pvc_net_id = utils.translate_net_id(self.db, network_id, POWERVC_OS) + LOG.info(_("- pvc_net_id: %s"), pvc_net_id) + return pvc_net_id + + def get_network(self, context, sync_key): + LOG.info(_("Neutron Agent RPC: get_network:")) + LOG.info(_("- sync_key: %s"), sync_key) + net = self.db.get_network(sync_key=sync_key) + LOG.info(_("- net: %s"), net) + return net + + def get_networks(self, context): + LOG.info(_("Neutron Agent RPC: get_networks:")) + nets = self.db.get_networks() + LOG.info(_("- nets: %s"), nets) + return nets + + def get_subnet(self, context, sync_key): + LOG.info(_("Neutron Agent RPC: get_subnet:")) + LOG.info(_("- sync_key: %s"), sync_key) + subnet = self.db.get_subnet(sync_key=sync_key) + LOG.info(_("- subnet: %s"), subnet) + return subnet + + def get_subnets(self, context): + LOG.info(_("Neutron Agent RPC: get_subnets:")) + subnets = self.db.get_subnets() + LOG.info(_("- subnets: %s"), subnets) + return subnets + + def get_port(self, context, sync_key): + LOG.info(_("Neutron Agent RPC: get_port:")) + LOG.info(_("- sync_key: %s"), sync_key) + port = self.db.get_port(sync_key=sync_key) + LOG.info(_("- port: %s"), port) + return port + + def get_ports(self, context): + LOG.info(_("Neutron Agent RPC: get_ports:")) + ports = self.db.get_ports() + LOG.info(_("- ports: %s"), ports) + return ports + + def set_device_id_on_port_by_pvc_instance_uuid(self, + context, + device_id, + pvc_ins_uuid): + """ + Query the ports by pvc instance uuid, and set its + local instance id(device_id). + """ + LOG.info(_("Neutron Agent RPC: " + "set_device_id_on_port_by_pvc_instance_uuid:")) + LOG.info(_("- device_id: %s"), device_id) + LOG.info(_("- pvc_ins_uuid: %s"), pvc_ins_uuid) + local_ids = self.agent.\ + set_device_id_on_port_by_pvc_instance_uuid(self.db, + device_id, + pvc_ins_uuid) + LOG.info(_("- local_ids: %s"), local_ids) + return local_ids diff --git a/neutron-powervc/powervc/neutron/client/__init__.py b/neutron-powervc/powervc/neutron/client/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/powervc/neutron/client/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" diff --git a/neutron-powervc/powervc/neutron/client/local_os_bindings.py b/neutron-powervc/powervc/neutron/client/local_os_bindings.py new file mode 100644 index 0000000..495e805 --- /dev/null +++ b/neutron-powervc/powervc/neutron/client/local_os_bindings.py @@ -0,0 +1,253 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" +Module to contain all of the local OS routines +""" + +''' +Created on Aug 1, 2013 + +@author: John Kasperski +''' + +from neutron.openstack.common import log as logging + +from powervc.common import messaging +from powervc.common.client import factory +from powervc.common.constants import SERVICE_TYPES +from powervc.common.constants import LOCAL_OS +from powervc.common.gettextutils import _ +from powervc.neutron.client import neutron_client_bindings +from powervc.neutron.common import constants +from powervc.neutron.common import utils +from powervc.neutron.db import powervc_db_v2 + +LOG = logging.getLogger(__name__) + + +class Client(neutron_client_bindings.Client): + """Local OS access methods""" + + def __init__(self, client, agent): + if not client: + return + self.os = LOCAL_OS + self.db = powervc_db_v2.PowerVCAgentDB() + self.agent = agent + super(Client, self).__init__(client, self.os) + self._create_amqp_listeners() + # A cache to save image uuids on power. + self.power_image_cache = [] + # Save nova/glance client + self.nova = None + self.glance = None + + def _create_amqp_listeners(self): + """Listen for AMQP messages from the local OS""" + LOG.debug(_('Creating AMQP listeners')) + + def reconnect(): + LOG.info(_('Re-established connection to local OS Qpid broker')) + self.agent.queue_event(self.os, constants.EVENT_FULL_SYNC, None) + + connection = messaging.LocalConnection(log=logging, + reconnect_handler=reconnect) + listener = connection.create_listener(constants.QPID_EXCHANGE, + constants.QPID_TOPIC) + listener.register_handler(constants.EVENT_NETWORK_CREATE, + self._handle_network_create) + listener.register_handler(constants.EVENT_NETWORK_UPDATE, + self._handle_network_update) + listener.register_handler(constants.EVENT_NETWORK_DELETE, + self._handle_network_delete) + listener.register_handler(constants.EVENT_SUBNET_CREATE, + self._handle_subnet_create) + listener.register_handler(constants.EVENT_SUBNET_UPDATE, + self._handle_subnet_update) + listener.register_handler(constants.EVENT_SUBNET_DELETE, + self._handle_subnet_delete) + listener.register_handler(constants.EVENT_PORT_CREATE, + self._handle_port_create) + listener.register_handler(constants.EVENT_PORT_UPDATE, + self._handle_port_update) + listener.register_handler(constants.EVENT_PORT_DELETE, + self._handle_port_delete) + connection.start() + + def _handle_network_create(self, context, message): + event, payload = self._extact_event_payload(message) + network = payload.get('network') + network_id = network.get('id') + if not utils.is_network_mappable(network): + LOG.info(_("Network %s is not mappable"), network_id) + return + db_net = self.db.get_network(local_id=network_id) + if db_net: + LOG.info(_("DB entry for network %s already exists"), network_id) + return + self.agent.queue_event(self.os, event, network) + + def _handle_network_update(self, context, message): + event, payload = 
self._extact_event_payload(message) + network = payload.get('network') + self.agent.queue_event(self.os, event, network) + + def _handle_network_delete(self, context, message): + event, payload = self._extact_event_payload(message) + network_id = payload.get('network_id') + self.agent.queue_event(self.os, event, network_id) + + def _handle_subnet_create(self, context, message): + event, payload = self._extact_event_payload(message) + subnet = payload.get('subnet') + subnet_id = subnet.get('id') + if not utils.is_subnet_mappable(subnet): + LOG.info(_("Subnet %s is not mappable"), subnet_id) + return + db_sub = self.db.get_subnet(local_id=subnet_id) + if db_sub: + LOG.info(_("DB entry for subnet %s already exists"), subnet_id) + return + self.agent.queue_event(self.os, event, subnet) + + def _handle_subnet_update(self, context, message): + event, payload = self._extact_event_payload(message) + subnet = payload.get('subnet') + self.agent.queue_event(self.os, event, subnet) + + def _handle_subnet_delete(self, context, message): + event, payload = self._extact_event_payload(message) + subnet_id = payload.get('subnet_id') + self.agent.queue_event(self.os, event, subnet_id) + + def _handle_port_create(self, context, message): + event, payload = self._extact_event_payload(message) + port = payload.get('port') + port_id = port.get('id') + if not utils.is_port_mappable(port): + LOG.info(_("Port %s is not mappable"), port_id) + return + db_port = self.db.get_port(local_id=port_id) + if db_port: + LOG.info(_("DB entry for port %s already exists"), port_id) + return + self.agent.queue_event(self.os, event, port) + + def _handle_port_update(self, context, message): + event, payload = self._extact_event_payload(message) + port = payload.get('port') + self.agent.queue_event(self.os, event, port) + + def _handle_port_delete(self, context, message): + event, payload = self._extact_event_payload(message) + port_id = payload.get('port_id') + self.agent.queue_event(self.os, event, port_id) + +#============================================================================== +# Local OS - Utility routines using other clients (Nova, Glance) +#============================================================================== + + def get_power_vm_mapping(self): + """ + Return dict with PowerVC to local instance uuid mappings + """ + vm_map = {} + if not self.nova: + self.nova = factory.LOCAL.get_client(str(SERVICE_TYPES.compute)) + try: + local_instances = self.nova.manager.list_all_servers() + except Exception as e: + LOG.exception(_("Exception occurred getting servers: %s"), e) + return vm_map + for inst in local_instances: + metadata = inst._info.get(constants.METADATA) + if metadata: + pvc_id = metadata.get(constants.PVC_ID) + if pvc_id: + vm_map[pvc_id] = inst._info.get('id') + return vm_map + + def is_instance_valid(self, uuid): + """ + Check if this VM instance is still valid. Call nova client + to retrieve the VM information. + """ + # Verify uuid is valid + if not uuid or len(uuid) == 0: + return False + # Check to see if this is a reserved port that we created while we + # are waiting for the PowerVC side to go away + if uuid.startswith(constants.RSVD_PORT_PREFIX): + return False + + if not self.nova: + self.nova = factory.LOCAL.get_client(str(SERVICE_TYPES.compute)) + try: + inst = self.nova.manager.get(uuid) + except Exception as e: + """ + If the instance can not be found, exception will be thrown. These + exceptions should be caught and not break the agent. 
+ """ + LOG.exception(_("Exception occurred getting server %s: %s"), + uuid, e) + return False + if inst: + return True + return False + + def is_instance_on_power(self, uuid): + """ + Return True if an instance is hosted on power. + """ + # Verify uuid is valid + if not uuid or len(uuid) == 0: + return False + + if not self.nova: + self.nova = factory.LOCAL.get_client(str(SERVICE_TYPES.compute)) + try: + inst = self.nova.manager.get(uuid) + except Exception as e: + """ + If the instance can not be found, exception will be thrown. These + exceptions should be caught and not break the agent. + """ + LOG.exception(_("Exception occurred getting server %s: %s"), + uuid, e) + return False + if inst: + metadata = inst._info[constants.METADATA] + if constants.PVC_ID in metadata: + # Return true if we have pvc_id for this instance. + return True + else: + img_uuid = inst.image.get('id', '') + if img_uuid in self.power_image_cache: + return True + else: + # Check if the image is hosted on power. + if not self.glance: + self.glance = factory.LOCAL.\ + get_client(str(SERVICE_TYPES.image)) + try: + img = self.glance.getImage(img_uuid) + except Exception as e: + LOG.exception(_("Exception occurred getting image " + "%s: %s"), img_uuid, e) + return False + if constants.POWERVM == img.get(constants.HYPERVISOR_TYPE, + ''): + self.power_image_cache.append(img_uuid) + return True + return False + # Return false if we can't find this instance locally. + return False diff --git a/neutron-powervc/powervc/neutron/client/neutron_client_bindings.py b/neutron-powervc/powervc/neutron/client/neutron_client_bindings.py new file mode 100644 index 0000000..7f109c9 --- /dev/null +++ b/neutron-powervc/powervc/neutron/client/neutron_client_bindings.py @@ -0,0 +1,328 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +""" +Module to contain all of the base Neutron client interfaces +""" + +''' +Created on Aug 1, 2013 + +@author: John Kasperski +''' + +from neutron.openstack.common import log as logging +from neutronclient.common import exceptions + +import powervc.common.client.extensions.base as base +from powervc.common.constants import POWERVC_OS +from powervc.common.gettextutils import _ +from powervc.neutron.common import constants +from powervc.neutron.common import utils +from powervc.neutron.db import powervc_db_v2 + +LOG = logging.getLogger(__name__) + + +class Client(base.ClientExtension): + """Neutron Client access methods""" + + def __init__(self, client, os): + super(Client, self).__init__(client) + self.os = os + self.db = powervc_db_v2.PowerVCAgentDB() + self.client = client + + def _extact_event_payload(self, message): + event = message.get('event_type') + payload = message.get('payload') + LOG.info(_("Handling AMQP message from %s: %s"), self.os, event) + return (event, payload) + + def create_network(self, net): + body = {} + for field in constants.NETWORK_CREATE_FIELDS: + if field in net: + body[field] = net[field] + request = {} + request['network'] = body + try: + LOG.info(_("Create %s network: %s"), self.os, body) + response = self.client.create_network(request) + if response and 'network' in response: + return response.get('network') + return None + except exceptions.NeutronClientException as e: + LOG.exception(_("Error creating network: %s\nError message: %s"), + body, e) + return None + + def create_subnet(self, sub): + net_id = utils.translate_net_id(self.db, sub.get('network_id'), + self.os) + if not net_id: + return None + body = {} + body['network_id'] = net_id + for field in constants.SUBNET_CREATE_FIELDS: + if field in sub: + body[field] = sub[field] + request = {} + request['subnet'] = body + try: + LOG.info(_("Create %s subnet: %s"), self.os, body) + response = self.client.create_subnet(request) + if response and 'subnet' in response: + return response.get('subnet') + return None + except exceptions.NeutronClientException as e: + LOG.exception(_("Error creating subnet: %s\nError message: %s"), + body, e) + return None + + def create_port(self, port): + net_id = utils.translate_net_id(self.db, port.get('network_id'), + self.os) + if not net_id: + return None + body = {} + body['network_id'] = net_id + body['fixed_ips'] = [] + for field in constants.PORT_CREATE_FIELDS: + if field in port: + body[field] = port[field] + if self.os == POWERVC_OS: + body['device_owner'] = constants.POWERVC_DEVICE_OWNER + elif port.get('device_id'): + # If we are creating a local port and the PowerVC port has a + # device id, then set the device id of the new local port to be + # "pvc:" + PowerVC device id. 
+ body['device_id'] = constants.RSVD_PORT_PREFIX + port['device_id'] + fixed_ips = port.get('fixed_ips') + if not fixed_ips: + return None + for ip in fixed_ips: + ip_addr = ip.get('ip_address') + if not ip_addr or ':' in ip_addr: + continue + sub_id = utils.translate_subnet_id(self.db, ip.get('subnet_id'), + self.os) + if not sub_id: + LOG.warning(_("%s subnet does not exist for: %s"), + self.os, ip_addr) + continue + new_ip = {} + new_ip['ip_address'] = ip_addr + new_ip['subnet_id'] = sub_id + body['fixed_ips'].append(new_ip) + if len(body['fixed_ips']) == 0: + return None + request = {} + request['port'] = body + try: + LOG.info(_("Create %s port: %s"), self.os, body) + response = self.client.create_port(request) + if response and 'port' in response: + return response.get('port') + return None + except exceptions.NeutronClientException as e: + LOG.exception(_("Error creating port: %s\nError message: %s"), + body, e) + return None + + def delete_network(self, net_id): + try: + LOG.info(_("Delete %s network: %s"), self.os, net_id) + return self.client.delete_network(net_id) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error deleting network: %s"), e) + return e + + def delete_subnet(self, sub_id): + try: + LOG.info(_("Delete %s subnet: %s"), self.os, sub_id) + return self.client.delete_subnet(sub_id) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error deleting subnet: %s"), e) + return e + + def delete_port(self, port_id): + try: + LOG.info(_("Delete %s port: %s"), self.os, port_id) + return self.client.delete_port(port_id) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error deleting port: %s"), e) + return e + + def get_networks(self): + response = self.client.list_networks() + if 'networks' in response: + net_list = response['networks'] + networks = {} + for net in net_list: + if utils.is_network_mappable(net): + net_id = net['id'] + networks[net_id] = net + return networks + return {} + + def get_subnets(self): + response = self.client.list_subnets() + if 'subnets' in response: + sub_list = response['subnets'] + subnets = {} + for sub in sub_list: + if utils.is_subnet_mappable(sub): + sub_id = sub['id'] + subnets[sub_id] = sub + return subnets + return {} + + def get_ports(self): + response = self.client.list_ports() + if 'ports' in response: + port_list = response['ports'] + ports = {} + for port in port_list: + if utils.is_port_mappable(port): + port_id = port['id'] + ports[port_id] = port + return ports + return {} + + def get_ports_on_network(self, net_id): + response = self.client.list_ports(network_id=net_id) + if 'ports' in response: + return response['ports'] + return [] + + def get_ports_on_subnet(self, net_id, subnet_id): + port_list = self.get_ports_on_network(net_id) + if len(port_list) == 0: + return [] + ports = [] + for port in port_list: + fixed_ips = port.get('fixed_ips') + if not fixed_ips: + continue + for ip in fixed_ips: + if ip.get('subnet_id') == subnet_id: + ports.append(port) + break + return ports + + def get_network(self, net_id, log_error=False): + try: + response = self.client.show_network(net_id) + if 'network' in response: + return response['network'] + return None + except exceptions.NeutronClientException as e: + if log_error: + LOG.exception(_("Error retrieving network: %s"), e) + return None + + def get_subnet(self, sub_id, log_error=False): + try: + response = self.client.show_subnet(sub_id) + if 'subnet' in response: + return response['subnet'] + return None + except 
exceptions.NeutronClientException as e: + if log_error: + LOG.exception(_("Error retrieving subnet: %s"), e) + return None + + def get_port(self, port_id, log_error=False): + try: + response = self.client.show_port(port_id) + if 'port' in response: + return response['port'] + return None + except exceptions.NeutronClientException as e: + if log_error: + LOG.exception(_("Error retrieving port: %s"), e) + return None + + def set_port_device_id(self, port, device_id): + body = {} + body['device_id'] = device_id + request = {} + request['port'] = body + try: + LOG.info(_("Update %s port: %s"), self.os, body) + return self.client.update_port(port['id'], request) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error updating port: %s"), e) + return None + return None + + def update_network(self, net_dest, net_src): + body = {} + request = None + for field in constants.NETWORK_UPDATE_FIELDS: + if net_src[field] != net_dest[field]: + body[field] = net_src[field] + if not request: + request = {} + request['network'] = body + if request: + try: + LOG.info(_("Update %s network: %s"), self.os, body) + return self.client.update_network(net_dest['id'], request) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error updating network: %s"), e) + return None + return None + + def update_subnet(self, sub_dest, sub_src): + body = {} + request = None + for field in constants.SUBNET_UPDATE_FIELDS: + if sub_src[field] != sub_dest[field]: + body[field] = sub_src[field] + if not request: + request = {} + request['subnet'] = body + if request: + try: + LOG.info(_("Update %s subnet: %s"), self.os, body) + return self.client.update_subnet(sub_dest['id'], request) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error updating subnet: %s"), e) + return None + return None + + def update_port(self, port_dest, port_src): + body = {} + request = None + for field in constants.PORT_UPDATE_FIELDS: + if port_src[field] != port_dest[field]: + body[field] = port_src[field] + if not request: + request = {} + request['port'] = body + if request: + try: + LOG.info(_("Update %s port: %s"), self.os, body) + return self.client.update_port(port_dest['id'], request) + except exceptions.NeutronClientException as e: + LOG.exception(_("Error updating port: %s"), e) + return None + return None + + def get_ports_by_instance_uuid(self, ins_id): + """ + Query all network ports by an instance id. + """ + response = self.client.list_ports(device_id=ins_id) + if 'ports' in response: + return response['ports'] + return [] diff --git a/neutron-powervc/powervc/neutron/client/powervc_bindings.py b/neutron-powervc/powervc/neutron/client/powervc_bindings.py new file mode 100644 index 0000000..d3a2f8e --- /dev/null +++ b/neutron-powervc/powervc/neutron/client/powervc_bindings.py @@ -0,0 +1,145 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +""" +Module to contain all of the PowerVC routines +""" + +''' +Created on Aug 1, 2013 + +@author: John Kasperski +''' + +from neutron.openstack.common import log as logging + +from powervc.common import messaging +from powervc.common.constants import POWERVC_OS +from powervc.common.gettextutils import _ +from powervc.neutron.client import neutron_client_bindings +from powervc.neutron.common import constants +from powervc.neutron.common import utils +from powervc.neutron.db import powervc_db_v2 + +LOG = logging.getLogger(__name__) + + +class Client(neutron_client_bindings.Client): + """PowerVC access methods""" + + def __init__(self, client, agent): + if not client: + return + self.os = POWERVC_OS + self.db = powervc_db_v2.PowerVCAgentDB() + self.agent = agent + super(Client, self).__init__(client, self.os) + self._create_amqp_listeners() + + def _create_amqp_listeners(self): + """Listen for AMQP messages from PowerVC""" + LOG.debug(_('Creating AMQP listeners')) + + def reconnect(): + LOG.info(_('Re-established connection to PowerVC Qpid broker')) + self.agent.queue_event(self.os, constants.EVENT_FULL_SYNC, None) + + connection = messaging.PowerVCConnection(log=logging, + reconnect_handler=reconnect) + listener = connection.create_listener(constants.QPID_EXCHANGE, + constants.QPID_TOPIC) + listener.register_handler(constants.EVENT_NETWORK_CREATE, + self._handle_network_create) + listener.register_handler(constants.EVENT_NETWORK_UPDATE, + self._handle_network_update) + listener.register_handler(constants.EVENT_NETWORK_DELETE, + self._handle_network_delete) + listener.register_handler(constants.EVENT_SUBNET_CREATE, + self._handle_subnet_create) + listener.register_handler(constants.EVENT_SUBNET_UPDATE, + self._handle_subnet_update) + listener.register_handler(constants.EVENT_SUBNET_DELETE, + self._handle_subnet_delete) + listener.register_handler(constants.EVENT_PORT_CREATE, + self._handle_port_create) + listener.register_handler(constants.EVENT_PORT_UPDATE, + self._handle_port_update) + listener.register_handler(constants.EVENT_PORT_DELETE, + self._handle_port_delete) + connection.start() + + def _handle_network_create(self, context, message): + event, payload = self._extact_event_payload(message) + network = payload.get('network') + network_id = network.get('id') + if not utils.is_network_mappable(network): + LOG.info(_("Network %s is not mappable"), network_id) + return + db_net = self.db.get_network(pvc_id=network_id) + if db_net: + LOG.info(_("DB entry for network %s already exists"), network_id) + return + self.agent.queue_event(self.os, event, network) + + def _handle_network_update(self, context, message): + event, payload = self._extact_event_payload(message) + network = payload.get('network') + self.agent.queue_event(self.os, event, network) + + def _handle_network_delete(self, context, message): + event, payload = self._extact_event_payload(message) + network_id = payload.get('network_id') + self.agent.queue_event(self.os, event, network_id) + + def _handle_subnet_create(self, context, message): + event, payload = self._extact_event_payload(message) + subnet = payload.get('subnet') + subnet_id = subnet.get('id') + if not utils.is_subnet_mappable(subnet): + LOG.info(_("Subnet %s is not mappable"), subnet_id) + return + db_sub = self.db.get_subnet(pvc_id=subnet_id) + if db_sub: + LOG.info(_("DB entry for subnet %s already exists"), subnet_id) + return + self.agent.queue_event(self.os, 
event, subnet) + + def _handle_subnet_update(self, context, message): + event, payload = self._extact_event_payload(message) + subnet = payload.get('subnet') + self.agent.queue_event(self.os, event, subnet) + + def _handle_subnet_delete(self, context, message): + event, payload = self._extact_event_payload(message) + subnet_id = payload.get('subnet_id') + self.agent.queue_event(self.os, event, subnet_id) + + def _handle_port_create(self, context, message): + event, payload = self._extact_event_payload(message) + port = payload.get('port') + port_id = port.get('id') + if not utils.is_port_mappable(port): + LOG.info(_("Port %s is not mappable"), port_id) + return + db_port = self.db.get_port(pvc_id=port_id) + if db_port: + LOG.info(_("DB entry for port %s already exists"), port_id) + return + self.agent.queue_event(self.os, event, port) + + def _handle_port_update(self, context, message): + event, payload = self._extact_event_payload(message) + port = payload.get('port') + self.agent.queue_event(self.os, event, port) + + def _handle_port_delete(self, context, message): + event, payload = self._extact_event_payload(message) + port_id = payload.get('port_id') + self.agent.queue_event(self.os, event, port_id) diff --git a/neutron-powervc/powervc/neutron/common/__init__.py b/neutron-powervc/powervc/neutron/common/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/powervc/neutron/common/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/neutron-powervc/powervc/neutron/common/constants.py b/neutron-powervc/powervc/neutron/common/constants.py new file mode 100644 index 0000000..85597df --- /dev/null +++ b/neutron-powervc/powervc/neutron/common/constants.py @@ -0,0 +1,107 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +''' +Created on Aug 2, 2013 + +@author: John Kasperski +''' + +#============================================================================== +# Device owner value for Neutron ports we create +#============================================================================== +POWERVC_DEVICE_OWNER = 'network:IBM SmartCloud' +RSVD_PORT_PREFIX = 'pvc:' + +#============================================================================== +# Mapping enum values +#============================================================================== + +OBJ_TYPE_NETWORK = 'Network' +OBJ_TYPE_SUBNET = 'Subnet' +OBJ_TYPE_PORT = 'Port' + +STATUS_CREATING = 'Creating' +STATUS_ACTIVE = 'Active' +STATUS_DELETING = 'Deleting' + +MAX_UPDATE_DATA_LENGTH = 512 + +#============================================================================== +# Neutron network fields (that we care about) +#============================================================================== + +NETWORK_CREATE_FIELDS = ['name', + 'shared', + 'provider:network_type', + 'provider:segmentation_id', + 'provider:physical_network'] +NETWORK_UPDATE_FIELDS = ['name', + 'shared'] + +#============================================================================== +# Neutron subnet fields (that we care about) +#============================================================================== + +SUBNET_CREATE_FIELDS = ['name', + 'ip_version', + 'cidr', + 'gateway_ip', + 'dns_nameservers', + 'allocation_pools', + 'enable_dhcp'] +SUBNET_UPDATE_FIELDS = ['name', + 'gateway_ip', + 'dns_nameservers', + 'enable_dhcp'] + +#============================================================================== +# Neutron port fields (that we care about) +#============================================================================== + +PORT_CREATE_FIELDS = ['name', + 'mac_address', + 'device_owner'] +PORT_UPDATE_FIELDS = ['name'] + +#============================================================================== +# Qpid message handling +#============================================================================== + +QPID_EXCHANGE = 'neutron' +QPID_TOPIC = 'notifications.info' + +EVENT_END_THREAD = 'thread.end' +EVENT_FULL_SYNC = 'full.sync' + +EVENT_NETWORK_CREATE = 'network.create.end' +EVENT_NETWORK_UPDATE = 'network.update.end' +EVENT_NETWORK_DELETE = 'network.delete.end' + +EVENT_SUBNET_CREATE = 'subnet.create.end' +EVENT_SUBNET_UPDATE = 'subnet.update.end' +EVENT_SUBNET_DELETE = 'subnet.delete.end' + +EVENT_PORT_CREATE = 'port.create.end' +EVENT_PORT_UPDATE = 'port.update.end' +EVENT_PORT_DELETE = 'port.delete.end' + +# Event queue event constants +EVENT_OS = 'os' +EVENT_TYPE = 'type' +EVENT_OBJECT = 'obj' + +# metadata key for pvc uuid +METADATA = 'metadata' +PVC_ID = 'pvc_id' + +# power image hypervisor type +POWERVM = 'powervm' +HYPERVISOR_TYPE = 'hypervisor_type' diff --git a/neutron-powervc/powervc/neutron/common/utils.py b/neutron-powervc/powervc/neutron/common/utils.py new file mode 100644 index 0000000..e086d26 --- /dev/null +++ b/neutron-powervc/powervc/neutron/common/utils.py @@ -0,0 +1,278 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
+
+'''
+Created on Aug 2, 2013
+
+@author: John Kasperski
+'''
+import fnmatch
+
+from powervc.common.constants import LOCAL_OS
+from powervc.common.constants import POWERVC_OS
+from powervc.neutron.common import constants
+from oslo.config import cfg
+import json
+
+from neutron.openstack.common import log as logging
+LOG = logging.getLogger(__name__)
+
+CONF = cfg.CONF
+
+
+#==============================================================================
+# Utility routines
+#==============================================================================
+
+def _compare_objects(local_obj, pvc_obj, db_obj,
+                     update_fields, default_target):
+    for field in update_fields:
+        if pvc_obj.get(field) != local_obj.get(field):
+            update_data = db_obj.get('update_data')
+            if not update_data or len(update_data) == 0:
+                return default_target
+            try:
+                update_dict = json.loads(update_data)
+            except ValueError:
+                update_dict = None
+            if not update_dict:
+                return default_target
+            db_field = update_dict.get(field)
+            if db_field != pvc_obj.get(field):
+                return LOCAL_OS
+            else:
+                return POWERVC_OS
+    return None
+
+
+def compare_networks(local_net, pvc_net, db_net, default_target):
+    return _compare_objects(local_net, pvc_net, db_net,
+                            constants.NETWORK_UPDATE_FIELDS, default_target)
+
+
+def compare_subnets(local_sub, pvc_sub, db_sub, default_target):
+    return _compare_objects(local_sub, pvc_sub, db_sub,
+                            constants.SUBNET_UPDATE_FIELDS, default_target)
+
+
+def compare_ports(local_port, pvc_port, db_port, default_target):
+    return _compare_objects(local_port, pvc_port, db_port,
+                            constants.PORT_UPDATE_FIELDS, default_target)
+
+
+def _equal_objects(obj1, obj2, update_fields):
+    for field in update_fields:
+        if obj1.get(field) != obj2.get(field):
+            return False
+    return True
+
+
+def equal_networks(net1, net2):
+    return _equal_objects(net1, net2, constants.NETWORK_UPDATE_FIELDS)
+
+
+def equal_subnets(sub1, sub2):
+    return _equal_objects(sub1, sub2, constants.SUBNET_UPDATE_FIELDS)
+
+
+def equal_ports(port1, port2):
+    return _equal_objects(port1, port2, constants.PORT_UPDATE_FIELDS)
+
+
+def extract_ids_from_entry(obj):
+    pvc_id = obj.get('pvc_id')
+    local_id = obj.get('local_id')
+    return (pvc_id, local_id)
+
+
+def extract_subnets_from_port(port):
+    subnets = []
+    fixed_ips = port.get('fixed_ips')
+    if not fixed_ips:
+        return []
+    for ip in fixed_ips:
+        subnet = ip.get('subnet_id')
+        if subnet and len(subnet) > 0:
+            subnets.append(subnet)
+    return subnets
+
+
+def gen_network_sync_key(net):
+    result = ''
+    if 'provider:network_type' in net:
+        result += net['provider:network_type']
+    if 'provider:segmentation_id' in net:
+        if net['provider:segmentation_id']:
+            result += '_' + str(net['provider:segmentation_id'])
+    if 'provider:physical_network' in net:
+        if net['provider:physical_network']:
+            result += '_' + net['provider:physical_network']
+    return result
+
+
+def gen_subnet_sync_key(sub, db_net):
+    return sub['cidr'] + '_' + db_net['pvc_id']
+
+
+def gen_port_sync_key(port, db_net):
+    result = ''
+    fixed_ips = port.get('fixed_ips')
+    if not fixed_ips:
+        return None
+    for ip in fixed_ips:
+        ipaddr = ip.get('ip_address')
+        if ipaddr and '.' in ipaddr:
+            if len(result) == 0:
+                result += ipaddr
+            else:
+                result += '_' + ipaddr
+    return result + '_' + db_net['pvc_id']
+
+
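These generators give both halves of a mapping a stable identity: objects on the two hosts match when their sync keys are equal. A quick sketch of the key formats, evaluated against the functions above with hypothetical values:

    # Hypothetical inputs, shown only to illustrate the key formats.
    net = {'provider:network_type': 'vlan',
           'provider:segmentation_id': 71,
           'provider:physical_network': 'default'}
    db_net = {'pvc_id': 'abc'}
    sub = {'cidr': '192.168.1.0/24'}
    port = {'fixed_ips': [{'ip_address': '192.168.1.5'},
                          {'ip_address': '192.168.1.6'}]}

    assert gen_network_sync_key(net) == 'vlan_71_default'
    assert gen_subnet_sync_key(sub, db_net) == '192.168.1.0/24_abc'
    assert gen_port_sync_key(port, db_net) == '192.168.1.5_192.168.1.6_abc'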
+def _gen_object_update_data(obj, update_fields):
+    data = {}
+    for field in update_fields:
+        data[field] = obj.get(field)
+    result = json.dumps(data)
+    if len(result) > constants.MAX_UPDATE_DATA_LENGTH:
+        return None
+    return result
+
+
+def gen_network_update_data(net):
+    return _gen_object_update_data(net, constants.NETWORK_UPDATE_FIELDS)
+
+
+def gen_subnet_update_data(sub):
+    return _gen_object_update_data(sub, constants.SUBNET_UPDATE_FIELDS)
+
+
+def gen_port_update_data(port):
+    return _gen_object_update_data(port, constants.PORT_UPDATE_FIELDS)
+
+
+def _get_map_white_list():
+    """
+    Return the PowerVC network white list. Kept in its own function so
+    that tests can easily mock it.
+    """
+    return CONF.AGENT.map_powervc_networks
+
+
+def network_has_subnet(net):
+    """
+    Check if a network has a subnet. PowerVC networks that do not have
+    a subnet are considered DHCP networks. DHCP networks are not
+    supported.
+    """
+    subnets = net.get('subnets')
+    if not subnets or len(subnets) == 0:
+        return False
+    return True
+
+
+def is_network_mappable(net):
+    """
+    Check if the network can be synced. Only VLAN networks on the
+    'default' physical network are mappable.
+    """
+    if 'provider:network_type' in net:
+        network_type = net['provider:network_type']
+        if network_type != 'vlan':
+            return False
+    if 'provider:physical_network' in net:
+        physical_network = net['provider:physical_network']
+        if physical_network != 'default':
+            return False
+    return True
+
+
+def network_has_mappable_subnet(client, net):
+    """
+    Check if a network has a mappable subnet, as defined by
+    is_subnet_mappable().
+    """
+    subnets_id = net.get('subnets')
+    if subnets_id:
+        for sub_id in subnets_id:
+            subnet = client.get_subnet(sub_id)
+            if subnet and is_subnet_mappable(subnet):
+                return True
+    return False
+
+
+def is_network_in_white_list(net):
+    """
+    Check if a network's name is in the white list.
+    """
+    whitelist = _get_map_white_list()
+    if whitelist:
+        """
+        The following wildcards are allowed when
+        the network name matches a pattern in the white list
+        (see the documentation for fnmatch):
+        *      matches everything
+        ?      matches any single character
+        [seq]  matches any character in seq
+        [!seq] matches any character not in seq
+        """
+        for pat in whitelist:
+            if pat == '*':
+                return True
+            elif net.get('name') and fnmatch.fnmatch(net.get('name'), pat):
+                return True
+        # No match found.
+        return False
+    else:
+        # An empty white list means no network is allowed to sync.
+        return False
+
+
+def is_subnet_mappable(sub):
+    if 'ip_version' in sub:
+        if sub['ip_version'] == 6:
+            return False
+    if 'enable_dhcp' in sub:
+        if sub['enable_dhcp']:
+            return False
+    return True
+
+
+def is_port_mappable(port):
+    fixed_ips = port.get('fixed_ips')
+    if not fixed_ips:
+        return False
+    for ip in fixed_ips:
+        ipaddr = ip.get('ip_address')
+        if ipaddr and '.'
in ipaddr: + return True + return False + + +def translate_net_id(db, net_id, target_os): + if target_os == LOCAL_OS: + db_net = db.get_network(pvc_id=net_id) + if db_net: + return db_net.get('local_id') + else: + db_net = db.get_network(local_id=net_id) + if db_net: + return db_net.get('pvc_id') + return None + + +def translate_subnet_id(db, sub_id, target_os): + if target_os == LOCAL_OS: + db_sub = db.get_subnet(pvc_id=sub_id) + if db_sub: + return db_sub.get('local_id') + else: + db_sub = db.get_subnet(local_id=sub_id) + if db_sub: + return db_sub.get('pvc_id') + return None diff --git a/neutron-powervc/powervc/neutron/db/__init__.py b/neutron-powervc/powervc/neutron/db/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/powervc/neutron/db/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/neutron-powervc/powervc/neutron/db/powervc_db_v2.py b/neutron-powervc/powervc/neutron/db/powervc_db_v2.py new file mode 100644 index 0000000..14851bb --- /dev/null +++ b/neutron-powervc/powervc/neutron/db/powervc_db_v2.py @@ -0,0 +1,343 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +from sqlalchemy.orm import exc + +import neutron.db.api as db_api +from neutron.openstack.common import log as logging + +from powervc.common.gettextutils import _ +from powervc.neutron.common import constants +from powervc.neutron.common import utils +from powervc.neutron.db import powervc_models_v2 as model + +LOG = logging.getLogger(__name__) + + +class PowerVCAgentDB(object): + """PowerVC Agent DB access methods""" + +#============================================================================== +# Internal "object" methods +#============================================================================== + + def __init__(self): + self.session = db_api.get_session() + db_api.configure_db() + + def _create_object(self, obj_type, sync_key, update_data=None, + local_id=None, pvc_id=None): + """Create mapping entry for a Neutron object""" + with self.session.begin(subtransactions=True): + obj = model.PowerVCMapping(obj_type, sync_key) + if local_id: + obj.local_id = local_id + if pvc_id: + obj.pvc_id = pvc_id + if local_id and pvc_id: + obj.status = constants.STATUS_ACTIVE + if update_data: + obj.update_data = update_data + self.session.add(obj) + LOG.info(_("Created %(obj_type)s %(sync_key)s for " + "local id %(local_id)s and pvc id %(pvc_id)s"), + {'obj_type': obj_type, + 'sync_key': obj.sync_key, + 'local_id': obj.local_id, + 'pvc_id': obj.pvc_id}) + return obj + + def _delete_object(self, obj): + """Delete a mapping object""" + if not obj: + return + try: + obj_id = obj['id'] + existing = (self.session.query(model.PowerVCMapping). 
+                        filter_by(id=obj_id).one())
+        except exc.NoResultFound:
+            existing = None
+            LOG.warning(_("Object not found in DB: %(object)s"),
+                        {'object': obj})
+        if existing:
+            with self.session.begin(subtransactions=True):
+                self.session.delete(existing)
+            LOG.info(_("Deleted %(obj_type)s %(sync_key)s for "
+                       "local id %(local_id)s and pvc id %(pvc_id)s"),
+                     {'obj_type': existing.obj_type,
+                      'sync_key': existing.sync_key,
+                      'local_id': existing.local_id,
+                      'pvc_id': existing.pvc_id})
+
+    def _get_objects(self, obj_type, status=None):
+        """Retrieve all mappings for a given object type and status"""
+        try:
+            if status:
+                objects = (self.session.query(model.PowerVCMapping).
+                           filter_by(obj_type=obj_type,
+                                     status=status).all())
+            else:
+                objects = (self.session.query(model.PowerVCMapping).
+                           filter_by(obj_type=obj_type).all())
+        except exc.NoResultFound:
+            objects = None
+        return objects
+
+    def _get_object(self, obj_type, obj_id=None, local_id=None, pvc_id=None,
+                    sync_key=None):
+        """Retrieve the object with the specified type and id"""
+        try:
+            if obj_id:
+                obj = (self.session.query(model.PowerVCMapping).
+                       filter_by(obj_type=obj_type, id=obj_id).one())
+            elif local_id:
+                obj = (self.session.query(model.PowerVCMapping).
+                       filter_by(obj_type=obj_type, local_id=local_id).one())
+            elif pvc_id:
+                obj = (self.session.query(model.PowerVCMapping).
+                       filter_by(obj_type=obj_type, pvc_id=pvc_id).one())
+            elif sync_key:
+                obj = (self.session.query(model.PowerVCMapping).
+                       filter_by(obj_type=obj_type, sync_key=sync_key).one())
+            else:
+                obj = None
+        except exc.NoResultFound:
+            obj = None
+        return obj
+
+    def _get_object_stats(self, obj_type):
+        """Retrieve counts for the specified object type"""
+        try:
+            creating = (self.session.query(model.PowerVCMapping).
+                        filter_by(obj_type=obj_type,
+                                  status=constants.STATUS_CREATING).count())
+            active = (self.session.query(model.PowerVCMapping).
+                      filter_by(obj_type=obj_type,
+                                status=constants.STATUS_ACTIVE).count())
+            deleting = (self.session.query(model.PowerVCMapping).
+                        filter_by(obj_type=obj_type,
+                                  status=constants.STATUS_DELETING).count())
+        except exc.NoResultFound:
+            return (0, 0, 0)
+        return (creating, active, deleting)
+
+    def _set_object_pvc_id(self, obj, pvc_id):
+        """Update the object's pvc_id field"""
+        if not obj:
+            return
+        try:
+            obj_id = obj['id']
+            obj = (self.session.query(model.PowerVCMapping).
+                   filter_by(id=obj_id).one())
+            if pvc_id and obj['pvc_id']:
+                LOG.warning(_("Field in database entry is already set. "
+                              "Unable to set pvc id %s into database "
+                              "entry %s"), pvc_id, obj)
+                return
+            obj['pvc_id'] = pvc_id
+            if pvc_id:
+                if obj['local_id']:
+                    obj['status'] = constants.STATUS_ACTIVE
+                else:
+                    obj['status'] = constants.STATUS_CREATING
+            else:
+                if obj['local_id']:
+                    obj['status'] = constants.STATUS_DELETING
+                else:
+                    self._delete_object(obj)
+                    return
+            self.session.merge(obj)
+            self.session.flush()
+            LOG.info(_("Updated %(obj_type)s %(sync_key)s for "
+                       "local id %(local_id)s and pvc id %(pvc_id)s"),
+                     {'obj_type': obj.obj_type,
+                      'sync_key': obj.sync_key,
+                      'local_id': obj.local_id,
+                      'pvc_id': obj.pvc_id})
+            return
+        except exc.NoResultFound:
+            LOG.warning(_("Object not found"))
+            return
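+    # _set_object_pvc_id above and _set_object_local_id below implement the
+    # same small state machine: setting one id while the other is already
+    # present makes the mapping Active; setting it while the other is absent
+    # leaves it Creating; clearing one id while the other remains moves the
+    # mapping to Deleting; and clearing the last remaining id deletes the
+    # mapping row outright.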
" + "Unable to set local id %s into database " + "entry %s"), local_id, obj) + return + obj['local_id'] = local_id + if local_id: + if obj['pvc_id']: + obj['status'] = constants.STATUS_ACTIVE + else: + obj['status'] = constants.STATUS_CREATING + else: + if obj['pvc_id']: + obj['status'] = constants.STATUS_DELETING + else: + self._delete_object(obj) + return + self.session.merge(obj) + self.session.flush + LOG.info(_("Updated %(obj_type)s %(sync_key)s for " + "local id %(local_id)s and pvc id %(pvc_id)s"), + {'obj_type': obj.obj_type, + 'sync_key': obj.sync_key, + 'local_id': obj.local_id, + 'pvc_id': obj.pvc_id}) + return + except exc.NoResultFound: + LOG.warning(_("Object not found")) + return + + def _set_object_update_data(self, obj, update_data): + """Update object with the specific fields""" + if not obj: + return + try: + obj_id = obj['id'] + obj = (self.session.query(model.PowerVCMapping). + filter_by(id=obj_id).one()) + obj['update_data'] = update_data + self.session.merge(obj) + self.session.flush + LOG.info(_("Updated %(obj_type)s %(sync_key)s with new " + "update data %(update_data)s"), + {'obj_type': obj.obj_type, + 'sync_key': obj.sync_key, + 'update_data': obj.update_data}) + return obj + except exc.NoResultFound: + LOG.warning(_("Object not found")) + return None + + def fix_incorrect_state(self, obj): + """Correct state error on the database entry""" + LOG.warning(_("DB entry is not in correct state: %s"), obj) + if not obj: + return + try: + obj_id = obj['id'] + obj = (self.session.query(model.PowerVCMapping). + filter_by(id=obj_id).one()) + if obj['pvc_id'] and obj['local_id']: + obj['status'] = constants.STATUS_ACTIVE + LOG.info(_("Updated DB entry state: %s"), obj) + self.session.merge(obj) + self.session.flush + except exc.NoResultFound: + LOG.warning(_("Object not found")) + return None + +#============================================================================== +# Network methods +#============================================================================== + + def create_network(self, net, sync_key, local_id=None, pvc_id=None): + return self._create_object(constants.OBJ_TYPE_NETWORK, sync_key, + utils.gen_network_update_data(net), + local_id, pvc_id) + + def delete_network(self, obj): + return self._delete_object(obj) + + def get_networks(self, status=None): + return self._get_objects(constants.OBJ_TYPE_NETWORK, status) + + def get_network(self, obj_id=None, local_id=None, pvc_id=None, + sync_key=None): + return self._get_object(constants.OBJ_TYPE_NETWORK, obj_id=obj_id, + local_id=local_id, pvc_id=pvc_id, + sync_key=sync_key) + + def get_network_stats(self): + return self._get_object_stats(constants.OBJ_TYPE_NETWORK) + + def set_network_pvc_id(self, obj, pvc_id): + return self._set_object_pvc_id(obj, pvc_id) + + def set_network_local_id(self, obj, local_id): + return self._set_object_local_id(obj, local_id) + + def set_network_update_data(self, obj, update_data): + return self._set_object_update_data(obj, update_data) + +#============================================================================== +# Subnet methods +#============================================================================== + + def create_subnet(self, sub, sync_key, local_id=None, pvc_id=None): + return self._create_object(constants.OBJ_TYPE_SUBNET, sync_key, + utils.gen_subnet_update_data(sub), + local_id, pvc_id) + + def delete_subnet(self, obj): + return self._delete_object(obj) + + def get_subnets(self, status=None): + return self._get_objects(constants.OBJ_TYPE_SUBNET, status) + 
+ def get_subnet(self, obj_id=None, local_id=None, pvc_id=None, + sync_key=None): + return self._get_object(constants.OBJ_TYPE_SUBNET, obj_id=obj_id, + local_id=local_id, pvc_id=pvc_id, + sync_key=sync_key) + + def get_subnet_stats(self): + return self._get_object_stats(constants.OBJ_TYPE_SUBNET) + + def set_subnet_pvc_id(self, obj, pvc_id): + return self._set_object_pvc_id(obj, pvc_id) + + def set_subnet_local_id(self, obj, local_id): + return self._set_object_local_id(obj, local_id) + + def set_subnet_update_data(self, obj, update_data): + return self._set_object_update_data(obj, update_data) + +#============================================================================== +# Port methods +#============================================================================== + + def create_port(self, port, sync_key, local_id=None, pvc_id=None): + return self._create_object(constants.OBJ_TYPE_PORT, sync_key, + utils.gen_port_update_data(port), + local_id, pvc_id) + + def delete_port(self, obj): + return self._delete_object(obj) + + def get_ports(self, status=None): + return self._get_objects(constants.OBJ_TYPE_PORT, status) + + def get_port(self, obj_id=None, local_id=None, pvc_id=None, + sync_key=None): + return self._get_object(constants.OBJ_TYPE_PORT, obj_id=obj_id, + local_id=local_id, pvc_id=pvc_id, + sync_key=sync_key) + + def get_port_stats(self): + return self._get_object_stats(constants.OBJ_TYPE_PORT) + + def set_port_pvc_id(self, obj, pvc_id): + return self._set_object_pvc_id(obj, pvc_id) + + def set_port_local_id(self, obj, local_id): + return self._set_object_local_id(obj, local_id) + + def set_port_update_data(self, obj, update_data): + return self._set_object_update_data(obj, update_data) diff --git a/neutron-powervc/powervc/neutron/db/powervc_models_v2.py b/neutron-powervc/powervc/neutron/db/powervc_models_v2.py new file mode 100644 index 0000000..7fff2cb --- /dev/null +++ b/neutron-powervc/powervc/neutron/db/powervc_models_v2.py @@ -0,0 +1,41 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +import sqlalchemy as sa + +from neutron.db import model_base +from neutron.openstack.common import uuidutils +from powervc.neutron.common import constants + + +class PowerVCMapping(model_base.BASEV2): + """Represents mapping between local OS and PowerVC Neutron object""" + id = sa.Column(sa.String(36), + primary_key=True, + default=uuidutils.generate_uuid) + obj_type = sa.Column(sa.Enum(constants.OBJ_TYPE_NETWORK, + constants.OBJ_TYPE_SUBNET, + constants.OBJ_TYPE_PORT, + name='mapping_object_type'), + nullable=False) + status = sa.Column(sa.Enum(constants.STATUS_CREATING, + constants.STATUS_ACTIVE, + constants.STATUS_DELETING, + name='mapping_state'), + nullable=False) + sync_key = sa.Column(sa.String(255), nullable=False) + local_id = sa.Column(sa.String(36)) + pvc_id = sa.Column(sa.String(36)) + update_data = sa.Column(sa.String(512)) + + def __init__(self, obj_type, sync_key): + self.obj_type = obj_type + self.status = constants.STATUS_CREATING + self.sync_key = sync_key diff --git a/neutron-powervc/run_tests.sh b/neutron-powervc/run_tests.sh new file mode 100755 index 0000000..c1aa4d9 --- /dev/null +++ b/neutron-powervc/run_tests.sh @@ -0,0 +1,192 @@ +#!/bin/bash + +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +set -eu + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run PowerVC Neutron test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." + echo " -n, --no-recreate-db Don't recreate the test database." + echo " -x, --stop Stop running tests after the first error or failure." + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run flake8" + echo " -8, --8 Just run flake8, don't show PEP8 text for each error" + echo " -P, --no-pep8 Don't run flake8" + echo " -c, --coverage Generate coverage report" + echo " -h, --help Print this usage message" + echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" + echo " --standard-threads Don't do the eventlet threading monkeypatch." + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." 
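+  # A couple of concrete invocations, using only the options listed above:
+  echo ""
+  echo "Examples: '$0 -V -c' runs the suite in a virtualenv and generates"
+  echo "          a coverage report; '$0 -p' just runs the flake8 checks."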
+ exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_flake8=1;; + -8|--8) short_flake8=1;; + -P|--no-pep8) no_flake8=1;; + -c|--coverage) coverage=1;; + --standard-threads) + export STANDARD_THREADS=1 + ;; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseargs= +noseopts= +wrapper="" +just_flake8=0 +short_flake8=0 +no_flake8=0 +coverage=0 +recreate_db=1 +update=0 + +for arg in "$@"; do + process_option $arg +done + +# If enabled, tell nose to collect coverage data +if [ $coverage -eq 1 ]; then + noseopts="$noseopts --with-coverage --cover-package=neutron-powervc" +fi + +function run_tests { + # Just run the test suites in current environment + ${wrapper} $NOSETESTS + # If we get some short import error right away, print the error log directly + RESULT=$? + if [ "$RESULT" -ne "0" ]; + then + ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` + if [ "$ERRSIZE" -lt "40" ]; + then + cat run_tests.log + fi + fi + return $RESULT +} + +function run_flake8 { + FLAGS=--show-pep8 + if [ $# -gt 0 ] && [ 'short' == ''$1 ] + then + FLAGS='' + fi + + + echo "Running flake8 ..." + # Just run flake8 in current environment + #echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + RESULT=${PIPESTATUS[0]} + return $RESULT +} + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." + python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + RESULT=$? + echo "RESULT $RESULT" + exit $RESULT +fi + +if [ $short_flake8 -eq 1 ]; then + run_flake8 short + RESULT=$? + exit $RESULT +fi + +run_tests +RESULT=$? + +# NOTE(sirp): we only want to run flake8 when we're running the full-test +# suite, not when we're running tests individually. To handle this, we need to +# distinguish between options (noseopts), which begin with a '-', and arguments +# (noseargs). +if [ -z "$noseargs" ]; then + if [ $no_flake8 -eq 0 ]; then + run_flake8 + TMP_RESULT=$? 
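+    # Fold the flake8 status into the overall exit code so the run fails
+    # when either the tests or the style checks fail.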
+ RESULT=$(($TMP_RESULT + $RESULT)) + fi +fi + +if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + ${wrapper} coverage html -d covhtml -i +fi + +exit $RESULT \ No newline at end of file diff --git a/neutron-powervc/test/__init__.py b/neutron-powervc/test/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/neutron-powervc/test/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/neutron-powervc/test/fake_os_network.py b/neutron-powervc/test/fake_os_network.py new file mode 100644 index 0000000..e67cc5b --- /dev/null +++ b/neutron-powervc/test/fake_os_network.py @@ -0,0 +1,33 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" + The class FakeOSNetwork is used to represent the OpenStack Network instance +""" + + +class FakeOSNetwork(): + + fakeOSNetworkInstance = dict() + + def __init__(self): + + self.fakeOSNetworkInstance[ + 'tenant_id'] = "54c0ae7d58484d8e90bd482015db6b61" + self.fakeOSNetworkInstance[ + 'id'] = "272c42ac-fb16-46df-83b0-64dc5aa6032f" + self.fakeOSNetworkInstance['name'] = "private" + self.fakeOSNetworkInstance['status'] = "ACTIVE" + self.fakeOSNetworkInstance['admin_state_up'] = True + self.fakeOSNetworkInstance['shared'] = False + + def update(self, **update): + + self.fakeOSNetworkInstance.update(**update) diff --git a/neutron-powervc/test/fake_powervc_network.py b/neutron-powervc/test/fake_powervc_network.py new file mode 100644 index 0000000..18199f6 --- /dev/null +++ b/neutron-powervc/test/fake_powervc_network.py @@ -0,0 +1,53 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" + One example of PowerVC network instance: + + "network":{ + "status":"ACTIVE", + "subnets":[ + ], + "name":"network-temp", + "provider:physical_network":"default", + "admin_state_up":true, + "tenant_id":"cd8833b1f49c4f1f9c47e8ba1050f916", + "provider:network_type":"vlan", + "shared":false, + "id":"a5f8cf45-1d4d-47b4-b114-b886b3c816da", + "provider:segmentation_id":1 + } +""" + +""" + The class FakePowerVCNetwork is used to represent the PowerVC Network. 
+""" + + +class FakePowerVCNetwork(): + + powerNetInstance = dict() + + def __init__(self): + + self.powerNetInstance['status'] = "ACTIVE" + self.powerNetInstance['subnets'] = None + self.powerNetInstance['name'] = "network-temp" + self.powerNetInstance['provider:physical_network'] = "default" + self.powerNetInstance['admin_state_up'] = True + self.powerNetInstance['tenant_id'] = "cd8833b1f49c4f1f9c47e8ba1050f916" + self.powerNetInstance['provider:network_type'] = "vlan" + self.powerNetInstance['shared'] = False + self.powerNetInstance['id'] = "a5f8cf45-1d4d-47b4-b114-b886b3c816da" + self.powerNetInstance['provider:segmentation_id'] = 1 + + def update(self, **update): + + self.powerNetInstance.update(**update) diff --git a/neutron-powervc/test/rpc_client.py b/neutron-powervc/test/rpc_client.py new file mode 100755 index 0000000..2f3e7db --- /dev/null +++ b/neutron-powervc/test/rpc_client.py @@ -0,0 +1,82 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import sys +import traceback +import os + +if ('eventlet' in sys.modules and + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + raise ImportError('eventlet imported before neutron agent ' + '(env var set to %s)' + % os.environ.get('EVENTLET_NO_GREENDNS')) + +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' +import eventlet +eventlet.patcher.monkey_patch(os=False, thread=False) + +from oslo.config import cfg + +CONF = cfg.CONF + +from neutron.common import config as logging_config +from neutron.openstack.common.rpc import proxy +from neutron.openstack.common import log as logging +from powervc.common.gettextutils import _ + +from neutron import context + +from powervc.common import config + +LOG = logging.getLogger(__name__) + + +class RpcClient(proxy.RpcProxy): + + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, context): + LOG.info(_('__init__')) + self.topic = 'powervcrpc' + self.context = context + self.host = cfg.CONF.host + super(RpcClient, self).__init__( + topic=self.topic, default_version=self.BASE_RPC_API_VERSION) + + def get_pvc_network_uuid(self, network_id): + LOG.info(_('get_pvc_network_uuid')) + result = self.call(self.context, + self.make_msg('get_pvc_network_uuid', + network_id=network_id), + topic=self.topic) + return result + + +def main(): + try: + config.parse_power_config(sys.argv, 'powervc-neutron') + logging_config.setup_logging(cfg.CONF) + + LOG.info(_('Create RPC interface')) + ctx = context.get_admin_context_without_session() + rpc = RpcClient(ctx) + + LOG.info(_('Calling RPC method')) + result = rpc.get_pvc_network_uuid('abc') + LOG.info(_('Result from RPC call: %s'), result) + + sys.exit(0) + except Exception: + traceback.print_exc() + raise + + +if __name__ == "__main__": + main() diff --git a/neutron-powervc/test/rpc_listener.py b/neutron-powervc/test/rpc_listener.py new file mode 100755 index 0000000..c44a902 --- /dev/null +++ b/neutron-powervc/test/rpc_listener.py @@ -0,0 +1,93 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
 2013 All Rights Reserved
+*************************************************************
+"""
+
+import sys
+import time
+import traceback
+import os
+
+if ('eventlet' in sys.modules and
+        os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
+    raise ImportError('eventlet imported before neutron agent '
+                      '(env var set to %s)'
+                      % os.environ.get('EVENTLET_NO_GREENDNS'))
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+import eventlet
+eventlet.patcher.monkey_patch(os=False, thread=False)
+
+from oslo.config import cfg
+
+CONF = cfg.CONF
+
+from neutron.common import config as logging_config
+from neutron.openstack.common import rpc
+from neutron.openstack.common.rpc import dispatcher
+from neutron.openstack.common import log as logging
+from powervc.common.gettextutils import _
+from neutron import context
+
+from powervc.common import config
+
+LOG = logging.getLogger(__name__)
+
+
+class RpcListener(object):
+
+    # Set RPC API version to 1.0 by default.
+    RPC_API_VERSION = '1.0'
+
+    def __init__(self):
+        LOG.info(_('__init__'))
+        self._setup_rpc()
+
+    def _setup_rpc(self):
+        LOG.info(_('_setup_rpc'))
+        self.topic = 'powervcrpc'
+
+        # RPC network init
+        self.context = context.get_admin_context_without_session()
+
+        # Handle updates from service
+        self.dispatcher = self._create_rpc_dispatcher()
+
+        # Set up RPC connection
+        self.conn = rpc.create_connection(new=True)
+        self.conn.create_consumer(self.topic, self.dispatcher, fanout=False)
+        self.conn.consume_in_thread()
+
+    def _create_rpc_dispatcher(self):
+        LOG.info(_('_create_rpc_dispatcher'))
+        return dispatcher.RpcDispatcher([self])
+
+    def get_pvc_network_uuid(self, context, network_id):
+        LOG.info(_("get_pvc_network_uuid(): network_id: %s"), network_id)
+        return '123'
+
+    def daemon_loop(self):
+        while True:
+            LOG.info(_("Sleeping..."))
+            delay = 10
+            time.sleep(delay)
+
+
+def main():
+    try:
+        config.parse_power_config(sys.argv, 'powervc-neutron')
+        logging_config.setup_logging(cfg.CONF)
+        agent = RpcListener()
+        agent.daemon_loop()
+        sys.exit(0)
+    except Exception:
+        traceback.print_exc()
+        raise
+
+
+if __name__ == "__main__":
+    main()
diff --git a/neutron-powervc/test/test_PVCRpcCallbacks.py b/neutron-powervc/test/test_PVCRpcCallbacks.py
new file mode 100644
index 0000000..22a77b1
--- /dev/null
+++ b/neutron-powervc/test/test_PVCRpcCallbacks.py
@@ -0,0 +1,73 @@
+import unittest
+import mox
+
+import neutron.db.api as db_api
+from powervc.neutron.api.powervc_rpc import PVCRpcCallbacks
+from powervc.neutron.db import powervc_db_v2
+
+
+class FakeCTX():
+
+    user_id = None
+    project_id = None
+
+    def __init__(self):
+
+        self.user_id = "testuser"
+        self.project_id = "testproject"
+
+    def update(self, **update):
+
+        if update:
+            self.user_id = update['user_id']
+            self.project_id = update['project_id']
+
+
+def dummy():
+    pass
+
+
+class TestSyncInstance(unittest.TestCase):
+
+    def setUp(self):
+        # Disable DB init.
+        db_api.get_session = dummy
+        db_api.configure_db = dummy
+        self._db = powervc_db_v2.PowerVCAgentDB()
+        self._callback = PVCRpcCallbacks(self)
+        # Replace with the dummy DB.
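+        # (PVCRpcCallbacks looks its DB up through the RPC API object it
+        # was constructed with; here that is the test case itself, which
+        # is why get_db_api() is defined on this class.)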
+ self._callback.db = self._db + self.moxer = mox.Mox() + + def get_db_api(self): + return self._db + + def tearDown(self): + pass + + def test_get_pvc_network_uuid(self): + rtn = self._get_pvc_network_uuid(None, None) + self.assertEqual(None, rtn, "Should be None.") + + rtn = self._get_pvc_network_uuid("", None) + self.assertEqual(None, rtn, "Should be None") + + rtn = self._get_pvc_network_uuid("123", {'pvc_id': 'pvc123'}) + self.assertEqual("pvc123", rtn) + + def _get_pvc_network_uuid(self, id_in, id_out): + + context = FakeCTX() + + self.moxer.StubOutWithMock(self._db, "get_network") + self._db.get_network(local_id=id_in).AndReturn(id_out) + + self.moxer.ReplayAll() + + rtn = self._callback.get_pvc_network_uuid(context, id_in) + + self.moxer.VerifyAll() + self.moxer.UnsetStubs() + + print str(rtn) + return rtn diff --git a/neutron-powervc/test/test_neutron_powervc_agent.py b/neutron-powervc/test/test_neutron_powervc_agent.py new file mode 100644 index 0000000..ce4c86e --- /dev/null +++ b/neutron-powervc/test/test_neutron_powervc_agent.py @@ -0,0 +1,132 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" +Test methods in PowerVCNeutronAgent +""" +import unittest +import mock +import sys + +# import these modules to patch +import powervc.common.client +import powervc.neutron.api +import powervc.neutron.client +import powervc.neutron.db + +# use them to pass pep8 +powervc.common.client +powervc.neutron.api +powervc.neutron.client +powervc.neutron.db + +# patch work +client_module = sys.modules['powervc.common.client'] +client_module.factory = mock.MagicMock() + +api_module = sys.modules['powervc.neutron.api'] +api_module.powervc_rpc = mock.MagicMock() + +neutron = sys.modules['powervc.neutron.client'] +neutron.local_os_bindings = mock.MagicMock() +neutron.powervc_bindings = mock.MagicMock() + +db = sys.modules['powervc.neutron.db'] +db.powervc_db_v2 = mock.MagicMock() + + +from powervc.neutron.agent.neutron_powervc_agent import PowerVCNeutronAgent +from powervc.neutron.common import constants + + +class port: + def __init__(self): + pass + + def get(self, key): + pass + + +class TestPowerVCNeutronAgent(unittest.TestCase): + def setUp(self): + super(TestPowerVCNeutronAgent, self).setUp() + PowerVCNeutronAgent._setup_rpc = mock.MagicMock + self.powervc_neutron_agent = PowerVCNeutronAgent() + + def tearDown(self): + pass + + def test_delete_local_port_1(self): + # contains device_owner + db_port = mock.MagicMock() + db_port.get = mock.MagicMock() + + local_port = mock.MagicMock() + local_port.get = mock.MagicMock( + return_value="network:router_interface") + + self.powervc_neutron_agent.pvc = mock.MagicMock() + self.powervc_neutron_agent.pvc.create_port = mock.MagicMock() + + self.powervc_neutron_agent._delete_local_port(local_port, db_port) + self.powervc_neutron_agent.pvc.create_port.assert_called_once_with( + local_port) + + def test_delete_local_port_2(self): + # 1) 3) 5) + db_port = mock.MagicMock() + db_port.get = mock.MagicMock(return_value=1) + + local_port = mock.MagicMock() + local_port.get = mock.MagicMock(return_value=1) + + self.powervc_neutron_agent.local = mock.MagicMock() + self.powervc_neutron_agent.local.delete_port = mock.MagicMock() + + self.powervc_neutron_agent._delete_local_port(local_port, db_port) + 
self.powervc_neutron_agent.local.delete_port.assert_called_once_with(1) + + def test_ports_valid_1(self): + # 2 ports, one creating, one active, return true + port = mock.MagicMock() + port.get = mock.MagicMock(side_effect=[1, 2]) + port_list = [port, port] + + local_port = mock.MagicMock() + local_port.get = mock.MagicMock( + side_effect=[constants.STATUS_CREATING, + constants.STATUS_ACTIVE]) + self.powervc_neutron_agent.db = mock.MagicMock() + self.powervc_neutron_agent.db.get_port = mock.MagicMock( + return_value=local_port) + + self.powervc_neutron_agent.local = mock.MagicMock() + self.powervc_neutron_agent.local.get_port = mock.MagicMock( + return_value=None) + self.assertTrue(self.powervc_neutron_agent._ports_valid(port_list)) + + def test_ports_valid_2(self): + # 2 ports, both creating, return false + port = mock.MagicMock() + port.get = mock.MagicMock(side_effect=[1, 2]) + port_list = [port, port] + + local_port = mock.MagicMock() + local_port.get = mock.MagicMock( + side_effect=[constants.STATUS_CREATING, + constants.STATUS_CREATING]) + self.powervc_neutron_agent.db = mock.MagicMock() + self.powervc_neutron_agent.db.get_port = mock.MagicMock( + return_value=local_port) + + self.powervc_neutron_agent.local = mock.MagicMock() + self.powervc_neutron_agent.local.get_port = mock.MagicMock( + return_value=None) + self.assertFalse(self.powervc_neutron_agent._ports_valid(port_list)) diff --git a/neutron-powervc/test/test_powervc_db_v2.py b/neutron-powervc/test/test_powervc_db_v2.py new file mode 100644 index 0000000..c79e1bc --- /dev/null +++ b/neutron-powervc/test/test_powervc_db_v2.py @@ -0,0 +1,250 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +import unittest +import mox +from sqlalchemy.orm.session import Session as session +from powervc.neutron.db.powervc_db_v2 import PowerVCAgentDB +from powervc.neutron.db import powervc_models_v2 as model +from sqlalchemy.engine.base import Transaction as transaction +from test.fake_powervc_network import FakePowerVCNetwork +from test.fake_os_network import FakeOSNetwork +from powervc.neutron.common import utils +from powervc.neutron.db.powervc_models_v2 import PowerVCMapping +from sqlalchemy.orm import Query as query + + +""" + The class TestPowerVCNeutronDB is used to implement + the UT test of the methods in the class PowerVCAgentDB +""" + + +class TestPowerVCNeutronDB(unittest.TestCase): + + def setUp(self): + """ + This method is used to initialize the UT environment + """ + + # Initialize the FakePowerVCNetwork instance + self.fakePowerVCNetwork = FakePowerVCNetwork() + + # Initialize the FakeOSNetwork instance + self.fakeOSNetwork = FakeOSNetwork() + + # Initialize the PowerVCMapping instance + async_key = utils.gen_network_sync_key( + self.fakePowerVCNetwork.powerNetInstance) + self.powerVCMapping = PowerVCMapping( + obj_type="Network", sync_key=async_key) + self.powerVCMapping.local_id = self.fakeOSNetwork.\ + fakeOSNetworkInstance['id'] + self.powerVCMapping.pvc_id = self.fakePowerVCNetwork.\ + powerNetInstance['id'] + self.powerVCMapping.status = "Active" + self.powerVCMapping.id = None + # Initialize the PowerVCAgentDB instance + + def __init__(self, session): + self.session = session + + PowerVCAgentDB.__init__ = __init__ + self.powervcagentdb = PowerVCAgentDB(session) + + # Initialize the MOX instance + self.aMox = mox.Mox() + + def tearDown(self): + pass + + def test_create_object(self): + """ + Test the method def _create_object(self, obj_type, sync_key, + local_id=None, pvc_id=None) + """ + + obj_type = "Network" + sync_key = utils.gen_network_sync_key( + self.fakePowerVCNetwork.powerNetInstance) + local_id = self.fakeOSNetwork.fakeOSNetworkInstance['id'] + pvc_id = self.fakePowerVCNetwork.powerNetInstance['id'] + + inputPowerVCMObj = model.PowerVCMapping(obj_type, sync_key) + + self.aMox.StubOutWithMock(session, 'begin') + session.begin(subtransactions=True).AndReturn(transaction(None, None)) + + self.aMox.StubOutWithMock(model, 'PowerVCMapping') + model.PowerVCMapping(obj_type, sync_key).AndReturn(inputPowerVCMObj) + + self.aMox.StubOutWithMock(session, 'add') + session.add(inputPowerVCMObj).AndReturn("") + + self.aMox.ReplayAll() + + self.powervcagentdb._create_object( + obj_type, sync_key, update_data=None, + local_id=local_id, pvc_id=pvc_id) + + self.aMox.VerifyAll() + + self.assertEqual( + self.powerVCMapping.local_id, inputPowerVCMObj.local_id) + self.assertEqual(self.powerVCMapping.pvc_id, inputPowerVCMObj.pvc_id) + self.assertEqual(self.powerVCMapping.status, inputPowerVCMObj.status) + self.aMox.UnsetStubs() + + def test_delete_existing_object(self): + """ + Test the method _delete_object(self, obj) when the object exists + Test scenario: + When the data is in the database, the delete operation should + complete successfully + """ + + self.aMox.StubOutWithMock(session, 'query') + session.query(model.PowerVCMapping).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'filter_by') + query.filter_by(id=self.powerVCMapping['id']).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'one') + query.one().AndReturn(self.powerVCMapping) + + self.aMox.StubOutWithMock(session, 
'begin') + session.begin(subtransactions=True).AndReturn(transaction(None, None)) + + self.aMox.StubOutWithMock(session, 'delete') + returnValue = session.delete(self.powerVCMapping).AndReturn(True) + + self.aMox.ReplayAll() + + self.powervcagentdb._delete_object(self.powerVCMapping) + + self.aMox.VerifyAll() + + self.assertEqual(returnValue, True) + + self.aMox.UnsetStubs() + + def test_get_objects_with_status(self): + """Test the method def _get_objects(self, obj_type, status) + Test scenario: + Get the object when the status is not None + """ + + self.aMox.StubOutWithMock(session, 'query') + session.query(model.PowerVCMapping).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'filter_by') + query.filter_by(obj_type=self.powerVCMapping.obj_type, + status=self.powerVCMapping.status).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'all') + query.all().AndReturn(self.powerVCMapping) + + self.aMox.ReplayAll() + returnValue = self.powervcagentdb._get_objects( + obj_type=self.powerVCMapping.obj_type, + status=self.powerVCMapping.status) + self.aMox.VerifyAll() + self.assertEqual(returnValue, self.powerVCMapping) + + self.aMox.UnsetStubs() + + def test_get_object(self): + """ + Test the method _get_object() using a sync key + Test scenario: + Get the object with sync_key + """ + + obj_type = self.powerVCMapping.obj_type + sync_key = self.powerVCMapping.sync_key + + self.aMox.StubOutWithMock(session, 'query') + session.query(model.PowerVCMapping).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'filter_by') + query.filter_by( + obj_type=obj_type, sync_key=sync_key).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'one') + query.one().AndReturn(self.powerVCMapping) + + self.aMox.ReplayAll() + returnValue = self.powervcagentdb._get_object( + obj_type=obj_type, sync_key=sync_key) + self.aMox.VerifyAll() + self.assertEqual(returnValue, self.powerVCMapping) + self.aMox.UnsetStubs() + + def test_set_object_pvc_id(self): + """ + Test the method _set_object_pvc_id(self, obj, pvc_id) + Test scenario: + Set the pvc_id of the specified object when local_id is none + """ + + obj_id = self.powerVCMapping.id + self.powerVCMapping.pvc_id = None + self.powerVCMapping.local_id = None + self.powerVCMapping.status = None + + self.aMox.StubOutWithMock(session, 'query') + session.query(model.PowerVCMapping).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'filter_by') + query.filter_by(id=obj_id).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'one') + query.one().AndReturn(self.powerVCMapping) + + self.aMox.StubOutWithMock(session, 'merge') + session.merge(self.powerVCMapping).AndReturn("") + + self.aMox.ReplayAll() + self.powervcagentdb._set_object_pvc_id(self.powerVCMapping, 'test') + self.aMox.VerifyAll() + self.assertEqual(self.powerVCMapping.status, 'Creating') + self.assertEqual(self.powerVCMapping.pvc_id, 'test') + self.aMox.UnsetStubs() + + def test_set_object_local_id(self): + """ + Test the method _set_object_local_id(self, obj, local_id) + Test scenario: + Set the local_id of the specified object when the pvc_id is none + """ + + obj_id = self.powerVCMapping.id + self.powerVCMapping.pvc_id = None + self.powerVCMapping.local_id = None + self.powerVCMapping.status = None + + self.aMox.StubOutWithMock(session, 'query') + session.query(model.PowerVCMapping).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'filter_by') + query.filter_by(id=obj_id).AndReturn(query) + + self.aMox.StubOutWithMock(query, 'one') + query.one().AndReturn(self.powerVCMapping) + + 
self.aMox.StubOutWithMock(session, 'merge') + session.merge(self.powerVCMapping).AndReturn("") + + self.aMox.ReplayAll() + self.powervcagentdb._set_object_local_id(self.powerVCMapping, 'test') + self.aMox.VerifyAll() + self.assertEqual(self.powerVCMapping.status, 'Creating') + self.assertEqual(self.powerVCMapping.local_id, 'test') + self.aMox.UnsetStubs() diff --git a/neutron-powervc/test/test_utils.py b/neutron-powervc/test/test_utils.py new file mode 100644 index 0000000..d04cdbf --- /dev/null +++ b/neutron-powervc/test/test_utils.py @@ -0,0 +1,73 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import unittest +import mox + +from powervc.neutron.common import utils + +""" + UT for utils functions +""" + + +class TestUtils(unittest.TestCase): + + def setUp(self): + # Initialize the MOX instance + self.moxer = mox.Mox() + + def tearDown(self): + pass + + def test_is_network_in_white_list(self): + self.assertTrue(self._test_case_is_network_in_white_list + (['*'], 'anything')) + self.assertTrue(self._test_case_is_network_in_white_list + (['*'], None)) + self.assertTrue(self._test_case_is_network_in_white_list + (['*'], '')) + self.assertFalse(self._test_case_is_network_in_white_list + (['?'], '')) + self.assertFalse(self._test_case_is_network_in_white_list + ([], '')) + self.assertFalse(self._test_case_is_network_in_white_list + ([], 'anything')) + self.assertTrue(self._test_case_is_network_in_white_list + (['VLAN1'], 'VLAN1')) + self.assertFalse(self._test_case_is_network_in_white_list + (['VLAN1'], 'VLAN')) + self.assertFalse(self._test_case_is_network_in_white_list + (['VLAN1'], '')) + self.assertFalse(self._test_case_is_network_in_white_list + (['VLAN1'], None)) + self.assertTrue(self._test_case_is_network_in_white_list + (['VLAN1', 'V2'], 'VLAN1')) + self.assertTrue(self._test_case_is_network_in_white_list + (['VLAN1', 'V2'], 'V2')) + self.assertFalse(self._test_case_is_network_in_white_list + (['VLAN1', 'V2'], 'V3')) + self.assertTrue(self._test_case_is_network_in_white_list + (['VLAN1', 'V?'], 'V3')) + self.assertTrue(self._test_case_is_network_in_white_list + (['VLAN1', 'V[34]'], 'V3')) + self.assertFalse(self._test_case_is_network_in_white_list + (['VLAN1', 'V[34]'], 'V5')) + + def _test_case_is_network_in_white_list(self, whitelist, net_name): + + self.moxer.StubOutWithMock(utils, "_get_map_white_list") + utils._get_map_white_list().AndReturn(whitelist) + self.moxer.ReplayAll() + net = {'name': net_name} + rtn = utils.is_network_in_white_list(net) + self.moxer.VerifyAll() + self.moxer.UnsetStubs() + return rtn diff --git a/nova-powervc/.project b/nova-powervc/.project new file mode 100644 index 0000000..7af27eb --- /dev/null +++ b/nova-powervc/.project @@ -0,0 +1,21 @@ + + + nova-powervc + + + nova + nova-client + oslo-config + common-powervc + + + + org.python.pydev.PyDevBuilder + + + + + + org.python.pydev.pythonNature + + diff --git a/nova-powervc/.pydevproject b/nova-powervc/.pydevproject new file mode 100644 index 0000000..0e50999 --- /dev/null +++ b/nova-powervc/.pydevproject @@ -0,0 +1,7 @@ + +Default +python 2.7 + +/nova-powervc + + \ No newline at end of file diff --git a/nova-powervc/bin/nova-powervc b/nova-powervc/bin/nova-powervc new file mode 100644 index 0000000..645577a --- /dev/null +++ b/nova-powervc/bin/nova-powervc @@ -0,0 +1,26 @@ 
+#!/usr/bin/python + +import sys +import os + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python. +POSSIBLE_TOPDIR = os.path.normpath(os.path.join( + os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) + +if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')): + sys.path.insert(0, POSSIBLE_TOPDIR) + +from nova.openstack.common import gettextutils + +# TODO RYKAL +# This should go in the base __init__ folder I think +gettextutils.install('nova') + +from powervc.nova.cmd.compute import main + + +if __name__ == "__main__": + sys.exit(main()) diff --git a/nova-powervc/init/openstack-nova-powervc b/nova-powervc/init/openstack-nova-powervc new file mode 100644 index 0000000..cf8eeb4 --- /dev/null +++ b/nova-powervc/init/openstack-nova-powervc @@ -0,0 +1,103 @@ +#!/bin/sh +# +# openstack-nova-powervc OpenStack PowerVC Nova Driver +# +# chkconfig: - 98 02 +# description: Provides PowerVC manage-to support. + +### BEGIN INIT INFO +# Provides: +# Required-Start: $remote_fs $network $syslog +# Required-Stop: $remote_fs $syslog +# Default-Stop: 0 1 6 +# Short-Description: OpenStack PowerVC Nova Driver +# Description: +### END INIT INFO + +. /etc/rc.d/init.d/functions + +suffix=powervc +prog=openstack-nova-powervc +exec="/opt/ibm/openstack/powervc-driver/bin/nova-$suffix" +config="/etc/$suffix/$suffix.conf" +novaconf="/etc/nova/nova.conf" +pidfile="/var/run/$suffix/nova-$suffix.pid" +logfile="/var/log/$suffix/nova-$suffix.log" + +[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog + +lockfile=/var/lock/subsys/$prog + +start() { + [ -x $exec ] || exit 5 + [ -f $config ] || exit 6 + echo -n $"Starting $prog: " + daemon --user powervc --pidfile $pidfile "$exec --config-file $config --config-file $novaconf --logfile $logfile &>/dev/null & echo \$! > $pidfile" + retval=$? + echo + [ $retval -eq 0 ] && touch $lockfile + return $retval +} + +stop() { + echo -n $"Stopping $prog: " + killproc -p $pidfile $prog + retval=$? + echo + [ $retval -eq 0 ] && rm -f $lockfile + return $retval +} + +restart() { + stop + start +} + +reload() { + restart +} + +force_reload() { + restart +} + +rh_status() { + status -p $pidfile $prog +} + +rh_status_q() { + rh_status >/dev/null 2>&1 +} + + +case "$1" in + start) + rh_status_q && exit 0 + $1 + ;; + stop) + rh_status_q || exit 0 + $1 + ;; + restart) + $1 + ;; + reload) + rh_status_q || exit 7 + $1 + ;; + force-reload) + force_reload + ;; + status) + rh_status + ;; + condrestart|try-restart) + rh_status_q || exit 0 + restart + ;; + *) + echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}" + exit 2 +esac +exit $? diff --git a/nova-powervc/pep8.txt b/nova-powervc/pep8.txt new file mode 100644 index 0000000..e69de29 diff --git a/nova-powervc/powervc/__init__.py b/nova-powervc/powervc/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova-powervc/powervc/nova/__init__.py b/nova-powervc/powervc/nova/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova-powervc/powervc/nova/cmd/__init__.py b/nova-powervc/powervc/nova/cmd/__init__.py new file mode 100644 index 0000000..08f38b4 --- /dev/null +++ b/nova-powervc/powervc/nova/cmd/__init__.py @@ -0,0 +1,29 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +# TODO(mikal): move eventlet imports to nova.__init__ once we move to PBR +import os +import sys + +# NOTE(mikal): All of this is because if dnspython is present in your +# environment then eventlet monkeypatches socket.getaddrinfo() with an +# implementation which doesn't work for IPv6. What we're checking here is +# that the magic environment variable was set when the import happened. +if ('eventlet' in sys.modules and + os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'): + raise ImportError('eventlet imported before nova/cmd/__init__ ' + '(env var set to %s)' + % os.environ.get('EVENTLET_NO_GREENDNS')) + +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' + +import eventlet + +eventlet.monkey_patch(os=False) diff --git a/nova-powervc/powervc/nova/cmd/compute.py b/nova-powervc/powervc/nova/cmd/compute.py new file mode 100644 index 0000000..169fdee --- /dev/null +++ b/nova-powervc/powervc/nova/cmd/compute.py @@ -0,0 +1,32 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + + +import sys +import traceback +import powervc.nova.common.config as config +from nova.openstack.common import log +from nova import service +from nova import utils + + +def main(): + CONF = config.CONF + try: + config.parse_config(sys.argv, 'nova') + log.setup('powervc') + utils.monkey_patch() + server = service.Service.create(manager=CONF.powervc.powervc_manager, + binary='nova-powervc') + service.serve(server) + service.wait() + except Exception: + traceback.print_exc() + raise diff --git a/nova-powervc/powervc/nova/common/__init__.py b/nova-powervc/powervc/nova/common/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/nova-powervc/powervc/nova/common/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/nova-powervc/powervc/nova/common/config.py b/nova-powervc/powervc/nova/common/config.py new file mode 100644 index 0000000..93651b3 --- /dev/null +++ b/nova-powervc/powervc/nova/common/config.py @@ -0,0 +1,56 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import powervc.common.config as common_config +from nova import rpc +from oslo.config import cfg +from nova import objects + +CONF = common_config.CONF + +computes_opts = [ + cfg.IntOpt('hypervisor_refresh_interval', + default=30, + help=('The number of seconds between hypervisor refreshes.')), + cfg.IntOpt('instance_sync_interval', + default=20, + help=('Instance periodic sync interval specified in ' + 'seconds.')), + cfg.IntOpt('full_instance_sync_frequency', + default=30, + help=('How many instance sync intervals between full instance ' + 'syncs. 
Only instances known to be out of sync are ' + 'synced on the interval except after this many ' + 'intervals when all instances are synced.')), + cfg.StrOpt('flavor_prefix', default='PVC-'), + cfg.ListOpt('flavor_white_list', default=[]), + cfg.ListOpt('flavor_black_list', default=[]), + cfg.IntOpt('flavor_sync_interval', default=300), + cfg.IntOpt('volume_max_try_times', default=12), + cfg.IntOpt('longrun_loop_interval', default=7), + cfg.IntOpt('longrun_initial_delay', default=10), + cfg.IntOpt('image_limit', default=500) +] + +CONF.register_opts(computes_opts, group='powervc') + +# import nova opts +CONF.import_opt('compute_manager', 'nova.service') +CONF.import_opt('compute_topic', 'nova.compute.rpcapi') +CONF.import_opt('default_availability_zone', 'nova.availability_zones') +CONF.import_opt('compute_driver', 'nova.virt.driver') + +objects.register_all() + + +def parse_config(*args, **kwargs): + rpc.set_defaults(control_exchange='nova') + common_config.parse_power_config(*args, **kwargs) + rpc.init(CONF) diff --git a/nova-powervc/powervc/nova/common/exception.py b/nova-powervc/powervc/nova/common/exception.py new file mode 100644 index 0000000..c7f8858 --- /dev/null +++ b/nova-powervc/powervc/nova/common/exception.py @@ -0,0 +1,19 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +from nova import exception +from powervc.common.gettextutils import _ + + +class BlockMigrationException(exception.NovaException): + """User attempted to perform live migration with block migration.""" + def __init__(self): + message = _("PowerVC does not support block migration.") + super(BlockMigrationException, self).__init__(message=message) diff --git a/nova-powervc/powervc/nova/driver/__init__.py b/nova-powervc/powervc/nova/driver/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova-powervc/powervc/nova/driver/compute/__init__.py b/nova-powervc/powervc/nova/driver/compute/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova-powervc/powervc/nova/driver/compute/computes.py b/nova-powervc/powervc/nova/driver/compute/computes.py new file mode 100644 index 0000000..05724be --- /dev/null +++ b/nova-powervc/powervc/nova/driver/compute/computes.py @@ -0,0 +1,248 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +''' +This module manages the virtual nova compute services managed by the PowerVC +service. 
+'''
+import eventlet
+import sys
+import traceback
+
+import powervc.common.config as config
+import powervc.common.utils as commonutils
+
+from nova.compute import api
+from nova import context
+
+from nova.openstack.common import log as logging
+from nova.openstack.common import loopingcall
+from oslo import messaging
+
+from nova import service
+from powervc import utils
+from powervc.common.gettextutils import _
+
+CONF = config.CONF
+
+
+LOG = logging.getLogger(__name__)
+
+
+class StatefulService(service.Service):
+    """
+    This class wraps the local compute services and provides added
+    state information.
+    """
+    def start(self):
+        super(StatefulService, self).start()
+        self.started = True
+
+    def stop(self):
+        super(StatefulService, self).stop()
+        self.started = False
+
+
+class ComputeServiceManager(object):
+    """
+    This class is responsible for creating and managing the compute
+    services that correspond to hosts managed by the PowerVC service.
+    """
+
+    def __init__(self, driver, scg_list, auto_refresh=True):
+        """
+        Initializes the compute service manager using the given PowerVC
+        driver.
+
+        :param driver: a PowerVC driver used to retrieve the hypervisors
+                       that will be used to create the corresponding
+                       compute services.
+        :param auto_refresh: indicates whether or not to automatically
+                             refresh the services based on new hypervisors.
+                             If auto_refresh is False, the refresh() method
+                             needs to be invoked manually.
+        """
+        self.running = False
+        self.driver = driver
+        self.services = {}
+        self.manager = CONF.compute_manager
+        self.auto_refresh = auto_refresh
+        self.ctx = context.get_admin_context()
+        self.api = api.AggregateAPI()
+        self.scg_list = scg_list
+
+    def start(self):
+        """
+        This method retrieves all services from PowerVC and for each
+        service it creates a local nova-compute service.
+        """
+
+        try:
+            remote_services = self._get_filtered_remote_services()
+
+            for remote_service in remote_services:
+                eventlet.greenthread.sleep(0)
+                self.new_compute_service(remote_service)
+
+            if self.auto_refresh:
+                refresher = loopingcall.FixedIntervalLoopingCall(self.refresh)
+                refresher.start(
+                    interval=CONF.powervc.hypervisor_refresh_interval)
+            LOG.info(_('The PowerVC compute service manager is running.'))
+
+            self.running = True
+        except Exception:
+            LOG.exception("Exception during startup. Stopping compute "
+                          "driver")
+            traceback.print_exc()
+            sys.exit(1)
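+    # refresh() below reconciles the local services with PowerVC on every
+    # hypervisor_refresh_interval tick: services whose remote host has
+    # disappeared are destroyed, newly discovered hosts get a new
+    # nova-compute service, and the up/down state of the rest is synced.
+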
+    def refresh(self):
+        """
+        This method refreshes the compute services based on the PowerVC
+        services.
+        """
+        # Exit if the compute services have not been started.
+        if not self.running:
+            return
+
+        LOG.debug("Refreshing compute services based on remote services ...")
+
+        try:
+            remote_services = self._get_filtered_remote_services()
+            remote_hostnames = [utils.normalize_host(remote_service.host)
+                                for remote_service in remote_services]
+            # First we kill the services that are no longer running
+            for local_service_name in self.services.keys():
+                # Calls to greenthread.sleep have been added to all of the
+                # loops in this class because they can be long running.
+                eventlet.greenthread.sleep(0)
+                if local_service_name in remote_hostnames:
+                    LOG.debug("Service %s is still running; its "
+                              "compute-service is left intact."
+                              % local_service_name)
+                    continue
+                LOG.debug("Service %s is no longer running. "
+                          "Ending compute-service..." % local_service_name)
+                self.destroy_service(local_service_name)
+
+            # Then we add services for new services found and update the
+            # state of existing services
+            for remote_service in remote_services:
+                eventlet.greenthread.sleep(0)
+                hostname = utils.normalize_host(remote_service.host)
+                if hostname in self.services:
+                    self._sync_service_state(remote_service)
+                    continue
+                LOG.debug("New service %s found. "
+                          "Will create a new compute-service..." % hostname)
+                self.new_compute_service(remote_service)
+        except Exception:
+            LOG.warning("Exception during periodic sync. Stopping compute "
+                        "services")
+            traceback.print_exc()
+            self._stop_local_services()
+
+    def new_compute_service(self, remote_service):
+        """
+        Creates and starts a new compute service for the given hypervisor.
+        """
+        host = utils.normalize_host(remote_service.host)
+        try:
+            local_service = StatefulService.\
+                create(binary='nova-compute',
+                       host=host,
+                       topic=CONF.compute_topic,
+                       manager=CONF.compute_manager,
+                       db_allowed=False)
+            local_service.start()
+            self.services[host] = local_service
+            LOG.info(_('Created nova-compute service for %s') % host)
+            self._sync_service_state(remote_service)
+        except messaging.MessagingTimeout as e:
+            LOG.debug(_('Failed to launch nova-compute service for %s.') %
+                      host)
+            LOG.debug(_('Most likely the other nova services are not '
+                        'running normally. Make sure the nova services '
+                        'like nova-network, nova-scheduler, '
+                        'nova-conductor all start up and can be reached, '
+                        'then restart the PowerVC service.'))
+            LOG.debug(_('Error: %s') % e)
+            sys.exit(1)
+        except (Exception, SystemExit) as e:
+            LOG.critical(_('Failed to launch nova-compute service for %s') %
+                         host)
+            LOG.exception(_('Error: %s') % e)
+            sys.exit(1)
+
+    def destroy_service(self, hostname):
+        """
+        Kills the service for the given hostname, and destroys any
+        corresponding aggregates and availability zones if necessary.
+        """
+        local_service = self.services[hostname]
+        local_service.kill()
+        self.services.pop(hostname)
+        LOG.info(_("Compute service %s was killed.") % hostname)
+
+    def _sync_service_state(self, remote_service):
+        """
+        Updates the state of the local service which corresponds
+        to the remote_service. This method assumes the local
+        service already exists.
+        """
+        # Services are keyed by normalized host name.
+        local_service = self.services.get(
+            utils.normalize_host(remote_service.host))
+        if local_service is None:
+            LOG.debug("Local service not found for %s" % remote_service.host)
+            return
+        if remote_service.state == "down" and local_service.started:
+            LOG.debug("Stopping remote service %s" % local_service.host)
+            local_service.stop()
+            return
+        if remote_service.state == "up" and not local_service.started:
+            LOG.debug("Starting remote service %s" % local_service.host)
+            local_service.start()
+
+    def _stop_local_services(self):
+        """
+        Puts all services into a down state. For use in exception handling.
+        """
+        if not self.services:
+            return
+        for local_service in self.services.itervalues():
+            try:
+                local_service.stop()
+            except Exception:
+                LOG.warning("Exception stopping local service")
+                traceback.print_exc()
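+    # For illustration, _get_filtered_remote_services() below narrows the
+    # nova-compute services reported by PowerVC down to hosts that belong
+    # to one of the configured storage connectivity groups. Assumed data
+    # shapes, based on the code that follows:
+    #
+    #     scg.host_list        -> [{'name': 'host1'}, {'name': 'host2'}]
+    #     remote_service.host  -> 'host1'
+    #
+    # A service is kept only if its normalized host name appears in some
+    # SCG's host_list; if no SCG lists any host, the unfiltered service
+    # list is returned.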
+    def _get_filtered_remote_services(self):
+        remote_services = self.driver._service._client.\
+            client.services.list(binary="nova-compute")
+        multi_scg_hosts_names = set()
+        for old_scg in self.scg_list:
+            # Try to use the latest SCG data
+            scg = (commonutils.get_utils().
+                   get_scg_by_scgName(old_scg.display_name))
+            scg_host_list = getattr(scg, 'host_list', [])
+            for host in scg_host_list:
+                if host and host.get('name'):
+                    multi_scg_hosts_names.add(host.get('name'))
+
+        if not multi_scg_hosts_names:
+            LOG.info("No hosts listed in scg: '%s'" % str(self.scg_list))
+            return remote_services
+        host_names = [utils.normalize_host(name)
+                      for name in multi_scg_hosts_names]
+        filtered_services = []
+
+        for remote_service in remote_services:
+            if utils.normalize_host(remote_service.host) in host_names:
+                filtered_services.append(remote_service)
+
+        return filtered_services
diff --git a/nova-powervc/powervc/nova/driver/compute/constants.py b/nova-powervc/powervc/nova/driver/compute/constants.py
new file mode 100644
index 0000000..10bdb8b
--- /dev/null
+++ b/nova-powervc/powervc/nova/driver/compute/constants.py
@@ -0,0 +1,71 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+All constants.
+"""
+# Instance metadata keys that will store pvc related info
+# in the local nova DB.
+PVC_ID = "pvc_id"  # pvc instance uuid
+
+PPC64 = "ppc64"  # Found on the wiki
+
+# hypervisor type
+PVM_HYPERVISOR_TYPE = "powervm"
+
+# Flavor constants
+SCG_KEY = "powervm:storage_connectivity_group"
+EXTRA_SPECS = "extra_specs"
+IS_PUBLIC = "os-flavor-access:is_public"
+
+POWERVC_SUPPORTED_INSTANCES = [('ppc64', 'powervm', 'hvm')]
+
+# Suffix to append to sync event notifications
+SYNC_EVENT_SUFFIX = 'sync'
+
+# PowerVC instance notification events that we listen for
+EVENT_INSTANCE_UPDATE = 'compute.instance.update'
+EVENT_INSTANCE_CREATE = 'compute.instance.create.end'
+EVENT_INSTANCE_DELETE = 'compute.instance.delete.end'
+EVENT_INSTANCE_POWER_ON = 'compute.instance.power_on.end'
+EVENT_INSTANCE_POWER_OFF = 'compute.instance.power_off.end'
+EVENT_INSTANCE_RESIZE = 'compute.instance.finish_resize.end'
+EVENT_INSTANCE_RESIZE_CONFIRM = 'compute.instance.resize.confirm.end'
+EVENT_INSTANCE_LIVE_MIGRATE = 'compute.instance.live_migration.post.dest.end'
+EVENT_INSTANCE_LIVE_MIGRATE_ROLLBACK = \
+    'compute.instance.live_migration._rollback.end'
+EVENT_INSTANCE_SNAPSHOT = 'compute.instance.snapshot.end'
+EVENT_INSTANCE_VOLUME_ATTACH = 'compute.instance.volume.attach'
+EVENT_INSTANCE_VOLUME_DETACH = 'compute.instance.volume.detach'
+EVENT_INSTANCE_IMPORT = 'compute.instance.import.end'
+
+# Volume id to be updated by periodic sync
+INVALID_VOLUME_ID = '00000000-0000-0000-0000-000000000000'
+
+LOCAL_PVC_PREFIX = 'powervm:'
+
+HYPERVISOR_PROP_NAME = 'OS-EXT-SRV-ATTR:hypervisor_hostname'
+HOST_PROP_NAME = 'OS-EXT-SRV-ATTR:host'
+
+
+def gen_pvc_key(key):
+    """Prefix a key with 'powervm:' unless it is already prefixed."""
+    if key is None:
+        return key
+    if key.startswith(LOCAL_PVC_PREFIX):
+        return key
+    return LOCAL_PVC_PREFIX + key
+
+
+def parse_pvc_key(pvc_key):
+    """Strip the 'powervm:' prefix from a key if it is present."""
+    if pvc_key is None:
+        return pvc_key
+    if not pvc_key.startswith(LOCAL_PVC_PREFIX):
+        return pvc_key
+    return pvc_key[len(LOCAL_PVC_PREFIX):]
diff --git a/nova-powervc/powervc/nova/driver/compute/manager.py b/nova-powervc/powervc/nova/driver/compute/manager.py
new file mode 100644
index 0000000..2722749
--- /dev/null
+++ b/nova-powervc/powervc/nova/driver/compute/manager.py
@@ -0,0 +1,1843 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" + +""" +Doing PowerVC initialize work, including image, instance sync. +""" + +import math +import time +import sys +from socket import inet_aton +from eventlet import greenthread + +import nova +import powervc.common.config as cfg +from nova import notifications +from nova import db +from nova import exception +from nova import manager +from nova import compute +from nova import conductor +from nova import network +from nova import block_device +from nova.db import api as db_api +from nova.image import glance +from nova.compute import flavors +from nova.compute import task_states +from nova.compute import vm_states +from nova.openstack.common import importutils +from nova.openstack.common import log as logging +from nova.openstack.common import timeutils +from nova.openstack.common import loopingcall +from nova.openstack.common.loopingcall import LoopingCallDone +from nova.openstack.common import jsonutils +from nova.objects import instance as instance_obj +from nova.objects import base as obj_base +from powervc.nova.driver.compute import computes +from powervc.nova.driver.compute import constants +from powervc.nova.driver.compute import task_states as pvc_task_states +from powervc.common import messaging +from powervc.nova.driver.virt.powervc.sync import flavorsync +from powervc import utils +from powervc.common import utils as utills +from powervc.common.gettextutils import _ +from powervc.common.client import delegate as ctx_delegate + + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +class PowerVCCloudManager(manager.Manager): + + def __init__(self, compute_driver=None, *args, **kwargs): + """ + Load configuration options and connect to PowerVC. + + :param compute_driver: the fully qualified name of the compute Driver + that will be used with this manager + """ + super(PowerVCCloudManager, self).__init__(*args, **kwargs) + + # This needs to be defined in new .conf file. + compute_driver = CONF.powervc.powervc_driver + + LOG.info(_("Loading compute driver '%s'") % compute_driver) + + try: + self.driver = importutils.import_object_ns( + 'powervc.nova.driver.virt', compute_driver, None) + except ImportError as e: + LOG.error(_("Unable to load the PowerVC driver: %s") % (e)) + sys.exit(1) + + # The variable used to cache the volume data + self.cache_volume = utills.VolumeCache(self.driver) + self.compute_api = compute.API() + self.network_api = network.API() + self.conductor_api = conductor.API() + + self._default_image = None + + # Have to import here instead of at the top, otherwise + # there is no way to write UT for the manager. + from powervc.common.client import factory as clients + keystone = clients.LOCAL.keystone + + orig_ctx = nova.context.get_admin_context() + orig_ctx.project_id = keystone.tenant_id + orig_ctx.user_id = keystone.user_id + + ctx = ctx_delegate.context_dynamic_auth_token(orig_ctx, keystone) + self.project_id = CONF.powervc.admin_tenant_name + + scg_list = utills.get_utils().validate_scgs() + if not scg_list: + LOG.error(_('Nova-powervc service terminated, Invalid Storage' + ' Connectivity Group specified.')) + sys.exit(1) + + self.scg_id_list = [scg.id for scg in scg_list] + + self._staging_cache = utills.StagingCache() + + # Initialize the compute manager + self.compute_manager = computes.ComputeServiceManager(self.driver, + scg_list) + self.compute_manager.start() + + # Check if necessary services are ready. 
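+        # (It probes glance with an image list call capped at
+        # CONF.powervc.image_limit; a failure is only logged, not fatal,
+        # since glance may still be starting up.)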
+ self._check_services(ctx) + + # Keep track of instances in need of a sync. Instances are 'marked' + # as needing to be synced for various reasons such as being in an + # unexpected state when we get a notification about the instance from + # PowerVC. The keys are the PowerVC instance IDs, not the local + # instance IDs. + self.sync_instances = {} + + # Keep track of whether or not we need to sync all instances on the + # next instance sync interval. + self.full_instance_sync_required = False + + # Synchronize instances from PowerVC + self._synchronize_instances(ctx) + + # Synchronize the public flavors from PowerVC + flavorsync.FlavorSync(self.driver, + self.scg_id_list).synchronize_flavors(ctx) + + # This can be at the end of this function + flavorsync.periodic_flavor_sync(ctx, self.driver, self.scg_id_list) + + # Listen for out-of-band PowerVC changes + self._create_powervc_listeners(ctx) + + # Listen for local changes, to update the correct + # host/node/hostname information when we defer scheduling + # to powerVC. Currently needed for live migration and resize. + self._create_local_listeners(ctx) + + # Set up periodic polling to sync instances + self._start_periodic_instance_sync(ctx) + + def _check_services(self, ctx): + """ + Check if other necessary services are ready. + """ + try: + params = {} + filters = {} + filters['limit'] = CONF.powervc.image_limit + params['filters'] = filters + glance.get_default_image_service().detail(ctx, **params) + except Exception, e: + # Just give an error, so the user can start + # the glance. + # Don't exit, because the glance might be starting. + LOG.error(_("Glance service is not ready. " + str(e))) + + def _synchronize_instances(self, ctx): + """ + Called to synchronize instances on boot. + Check instances fetched from PowerVC, + if it is not in OpenStack, then insert it, + if it is already in OpenStack, then update it. + + :param: ctx The security context + """ + # Some counters to record instances modified. + count_new_instances = 0 + count_updated_instances = 0 + count_deleted_instances = 0 + count_error = 0 + + try: + # Get both lists from local DB and PowerVC + pvc_instances = self.driver.list_instances() + local_instances = self._get_all_local_instances(ctx) + except Exception, e: + # No point to do any following step, if error happens above. + count_error += 1 + pvc_instances = [] + local_instances = [] + LOG.error(_("Failed to setup a synchronization. " + str(e))) + + # Sync. from PowerVC ---> local nova DB, + # to insert new instances and update existing instances + for instance in pvc_instances: + """ + A sample of returned instance from PowerVC: + https://w3-connections.ibm.com/wikis/home?lang=en-us#!/ + wiki/We32ccda54f51_4ede_bfd6_8f9cc4b70d23/page/REST%20Responses + """ + + greenthread.sleep(0) + + # Convert an object to dictionary, + # because some filed names has spaces. + pvc_instance = instance.__dict__ + matched_instances = self.\ + _get_local_instance_by_pvc_id(ctx, pvc_instance['id']) + if len(matched_instances) == 0: + # Not found, and insert into local DB + try: + if self._add_local_instance(ctx, pvc_instance): + count_new_instances += 1 + except Exception, e: + count_error += 1 + LOG.error(_("Insert a new PVC instance failed." 
+ + str(pvc_instance) + + ", " + str(e))) + else: + # Found + if len(matched_instances) > 1: + LOG.error(_("More than one instance in DB " + "match one PowerVC instance: " + + pvc_instance['id'])) + try: + if self._update_local_instance(ctx, + matched_instances[0], + pvc_instance): + count_updated_instances += 1 + except Exception, e: + count_error += 1 + LOG.error(_("Update a PVC instance failed. " + + str(pvc_instance) + + ", " + str(e))) + + # Sync. from local nova DB ---> PowerVC, + # to remove invalid instances that are not in pvc anymore. + for local_instance in local_instances: + + greenthread.sleep(0) + + if not self._is_valid_pvc_instance(ctx, + local_instance, + pvc_instances): + try: + # If it is not valid in pvc, also delete form the local. + if self._remove_local_instance(ctx, + local_instance, + force_delete=True): + count_deleted_instances += 1 + except Exception, e: + count_error += 1 + LOG.error(_("Delete a PVC instance failed. " + + str(local_instance) + + ", " + str(e))) + + LOG.info(_(""" + ******************************* + Initial instance sync. result: + [ %(insert)s inserted, + %(update)s updated, + %(delete)s deleted ] + Error: %(error)s + ******************************* + """ % + {'insert': count_new_instances, + 'update': count_updated_instances, + 'delete': count_deleted_instances, + 'error': count_error})) + + def _get_all_local_instances(self, context): + """ Get all instances for a PowerVC.""" + filters = {'deleted': False, 'architecture': constants.PPC64} + db_matches = db.instance_get_all_by_filters(context, filters) + local_pvc_instances = [] + for local_instance in db_matches: + if self._is_pvc_instance(context, local_instance): + local_pvc_instances.append(local_instance) + return local_pvc_instances + + def _get_local_instance_by_pvc_id(self, context, pvcid): + """ Get a local instance by a PowerVC uuid.""" + filters = {'deleted': False, 'metadata': {constants.PVC_ID: pvcid}} + db_matches = db.instance_get_all_by_filters(context, filters) + return db_matches + + def _sync_existing_instance(self, context, local_instance, pvc_instance): + """Update a local instance with a PowerVC instance.""" + + base_options, unused_image, unused_flavor = \ + self._translate_pvc_instance(context, pvc_instance, local_instance) + + #In order to support the rename function in the Hosting OS, we will + #avoid the name of the instance is updated. + #In this situation, the name of the same instance will be different in + #the hosting OS and PowerVC. + base_options['display_name'] = local_instance.get('display_name') + + self.compute_api.update(context, local_instance, **base_options) + self.sync_volume_attachment(context, + pvc_instance['id'], + local_instance) + # Try to link network with instance if we haven't. + self._fix_instance_nw_info(context, local_instance) + + def _translate_pvc_instance(self, ctx, pvc_instance, db_instance=None): + """Map fields in a PowerVC instance to a local instance.""" + + def epoch_to_date(seconds_since_epoch): + """ + Converts a string or floating containing the number of seconds + since the epoch, to a date object. If the given seconds are None + the method returns an object with now(). + + :returns: the date from the seconds or now + """ + if not seconds_since_epoch: + return timeutils.utcnow() + + try: + # try directly parse by iso format time as PowerVC return + # already formatted isotime. 
By issue 166750 + return timeutils.parse_isotime(seconds_since_epoch) + except: + # If failed to parse by iso format time, then parse by + # seconds_since_epoch + time_str = time.strftime(timeutils._ISO8601_TIME_FORMAT, + time.gmtime(seconds_since_epoch)) + return timeutils.parse_strtime( + time_str, fmt=timeutils._ISO8601_TIME_FORMAT) + + image = self._get_image_from_instance(ctx, pvc_instance, db_instance) + flavor = self._get_flavor_from_instance(ctx, pvc_instance, db_instance) + + # Use the instance properties from PowerVC to be accurate + if pvc_instance.get('vcpus') is not None: + vcpus = int(math.ceil(float(pvc_instance.get('vcpus')))) + else: + vcpus = flavor.get('vcpus') + + if pvc_instance.get('memory_mb') is not None: + memory_mb = pvc_instance.get('memory_mb') + else: + memory_mb = flavor.get('memory_mb') + + if pvc_instance.get('root_gb') is not None: + root_gb = pvc_instance.get('root_gb') + else: + root_gb = flavor.get('root_gb') + + if pvc_instance.get('ephemeral_gb') is not None: + ephemeral_gb = pvc_instance.get('ephemeral_gb') + else: + ephemeral_gb = flavor.get('ephemeral_gb') + + root_device_name = None # Don't know what is this for. + + address4 = pvc_instance['accessIPv4'] + # Has to be a valid IP, or null + try: + inet_aton(address4) + except Exception: + LOG.debug(_("null for addressIPv4")) + address4 = None + + launched_at = epoch_to_date(pvc_instance.get('launched_at')) + scheduled_at = epoch_to_date(pvc_instance.get('scheduled_at')) + + # We need to make sure hostname is a string + hostname = pvc_instance['OS-EXT-SRV-ATTR:hypervisor_hostname'] + if hostname is None: + hostname = '' + + if db_instance: + # Get metadata from db_instance + # If it is a dict, leave as it is + # If it is a list, convert the list to dict + metadata = db_instance.get('metadata', {}) + + # Sometimes the instance's metadata will be a list + # Convert the metadata list to metadata dict + if isinstance(metadata, list): + meta_dict = {} + for entry in metadata: + key = entry.get('key', None) + value = entry.get('value', None) + if key and value: + meta_dict[key] = value + metadata = meta_dict + else: + metadata = {} + + # Copy the powervc specified properties into metadata + metadata = utils.fill_metadata_dict_by_pvc_instance(metadata, + pvc_instance) + + ins = { + 'image_ref': image['id'], + 'launch_time': launched_at, + 'launched_at': launched_at, + 'scheduled_at': scheduled_at, + 'memory_mb': memory_mb, + 'vcpus': vcpus, + 'root_gb': root_gb, + 'ephemeral_gb': ephemeral_gb, + 'display_name': pvc_instance['name'], + 'display_description': pvc_instance['name'], + 'locked': False, + 'instance_type_id': flavor['id'], + 'progress': 0, + 'metadata': metadata, + 'architecture': constants.PPC64, + 'host': utils.normalize_host(pvc_instance['OS-EXT-SRV-ATTR:host']), + 'launched_on': pvc_instance['hostId'], + 'hostname': hostname, + 'node': pvc_instance['OS-EXT-SRV-ATTR:hypervisor_hostname'], + 'access_ip_v4': address4, + 'root_device_name': root_device_name, + 'vm_state': pvc_instance['OS-EXT-STS:vm_state'], + 'task_state': None, + 'power_state': pvc_instance['OS-EXT-STS:power_state']} + + # Get user/tenant from context when importing a new instance not in DB + if not db_instance: + # NOTE(boden): can raise if invalid staging user or project + uid, pid = self._staging_cache.get_staging_user_and_project(True) + ins['project_id'] = pid + ins['user_id'] = uid + # Only update the System_metadata when the new instance is inserted + ins['system_metadata'] = flavors.save_flavor_info(dict(), flavor) + 
        else:
+            # Get user and project from the DB entry
+            ins['user_id'] = db_instance.get('user_id')
+            ins['project_id'] = db_instance.get('project_id')
+            # Need to update the system metadata when the flavor of
+            # the instance changes
+            sys_meta = flavors.extract_flavor(db_instance)
+            instance_type_id = sys_meta['id']
+            if instance_type_id != flavor['id']:
+                ins['system_metadata'] = flavors.\
+                    save_flavor_info(sys_meta, flavor)
+
+        return (ins, image, flavor)
+
+    def _insert_pvc_instance(self, ctx, pvc_instance):
+        """Translate a PowerVC instance into an OpenStack instance and
+        insert it."""
+
+        if pvc_instance['OS-EXT-STS:vm_state'] == vm_states.ERROR:
+            pvc_host = pvc_instance.get(constants.HOST_PROP_NAME)
+            pvc_hypervisor = pvc_instance.get(constants.HYPERVISOR_PROP_NAME)
+            if pvc_host is None or pvc_hypervisor is None:
+                LOG.debug(_("The instance %s is in the error state and has "
+                            "no associated host or hypervisor; skipping "
+                            "sync." % pvc_instance['id']))
+                return
+
+        ins, image, flavor = self._translate_pvc_instance(ctx, pvc_instance)
+        security_group_map = self.\
+            _get_security_group_for_instance(ctx, pvc_instance)
+        new_instance = instance_obj.Instance()
+        new_instance.update(ins)
+        block_device_map = [block_device.create_image_bdm(image['id'])]
+        db_instance = self.compute_api.\
+            create_db_entry_for_new_instance(ctx,
+                                             flavor,
+                                             image,
+                                             new_instance,
+                                             security_group_map,
+                                             block_device_map,
+                                             1,
+                                             1)
+        # The API creates the instance in the BUILDING state, but this
+        # instance is actually already built most likely, so we update
+        # the state to whatever the state is in PowerVC.
+        db_instance = self.compute_api.update(
+            ctx, db_instance,
+            power_state=pvc_instance['OS-EXT-STS:power_state'],
+            vm_state=pvc_instance['OS-EXT-STS:vm_state'],
+            task_state=pvc_instance['OS-EXT-STS:task_state'])
+
+        self.sync_volume_attachment(ctx,
+                                    ins['metadata'][constants.PVC_ID],
+                                    db_instance)
+
+        # Fix the network info.
+        local_port_ids = self.driver._service.\
+            set_device_id_on_port_by_pvc_instance_uuid(ctx,
+                                                       db_instance['uuid'],
+                                                       pvc_instance['id'])
+        # If the neutron agent has synced ports, then go ahead and fix the
+        # network, otherwise wait for the next full update.
+        if local_port_ids and len(local_port_ids) > 0:
+            self._fix_instance_nw_info(ctx, db_instance)
+
+        # Send notification about instance creation due to sync operation
+        compute.utils.notify_about_instance_usage(
+            self.notifier, ctx, db_instance, 'create.sync', network_info={},
+            system_metadata={}, extra_usage_info={})
+
+    # Remove an instance that is not in pvc anymore from the local DB.
+    # TODO: This is not being used. Do we need to worry about deleting
+    # metadata separately in _unregister_instance?
+    """
+    def _destroy_local_instance(self, ctx, local_instance):
+        # Get all metadata
+        metadata = self.compute_api.get_instance_metadata(ctx, local_instance)
+        # Delete all metadata
+        for item in metadata.keys():
+            self.compute_api.\
+                delete_instance_metadata(ctx, local_instance, item)
+        # Delete the instance; actually it just marks records deleted
+        self.compute_api.delete(ctx, local_instance)
+    """
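+    # sync_volume_attachment() below reconciles the local block device
+    # mappings with the attachments PowerVC reports. A sketch of the
+    # record it writes for each non-boot attachment (connection_info is
+    # stubbed in as well):
+    #
+    #     {'instance_uuid': local_instance['uuid'],
+    #      'volume_id': vol_id or constants.INVALID_VOLUME_ID,
+    #      'device_name': attachment.device,
+    #      'source_type': 'volume',
+    #      'destination_type': 'volume'}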
+    def sync_volume_attachment(self, ctx, pvc_instance_id, local_instance):
+        """Sync volume attachment information in the BDM."""
+        # Since the PowerVC server response does not contain this info, we
+        # need to retrieve it by sending another REST API request to list
+        # volume attachments.
+        attachments = self.driver.list_os_attachments(pvc_instance_id)
+        attached_volume_ids = []
+        attached_devices = []
+        for attachment in attachments:
+            # Each instance has a default volume,
+            # which is not what we want to show
+            if attachment.device != '/dev/sda':
+                block_device_map = {}
+                vol_id = self.cache_volume.get_by_id(attachment.id)
+                if vol_id:
+                    block_device_map['volume_id'] = vol_id
+                    attached_volume_ids.append(vol_id)
+                else:
+                    LOG.info(_("No cinder volume for powervc volume: "
+                               "%s" % attachment.id))
+                    block_device_map['volume_id'] = constants.INVALID_VOLUME_ID
+                block_device_map['device_name'] = attachment.device
+                attached_devices.append(attachment.device)
+                block_device_map['instance_uuid'] = local_instance['uuid']
+                block_device_map['connection_info'] = jsonutils.dumps(
+                    {"": {},
+                     "connection_info": {"driver_volume_type": "",
+                                         "data": ""}
+                     })
+                block_device_map['source_type'] = 'volume'
+                block_device_map['destination_type'] = 'volume'
+                db_api.block_device_mapping_update_or_create(ctx,
+                                                             block_device_map)
+        # Remove the BDMs that are no longer in powervc
+        leftover_bdms = []
+        primitive_instance = obj_base.obj_to_primitive(local_instance)
+        local_attachments = self.conductor_api.\
+            block_device_mapping_get_all_by_instance(ctx, primitive_instance)
+        for local_attachment in local_attachments:
+            if not self._is_volume_type(local_attachment):
+                continue
+            local_volume_id = local_attachment['volume_id']
+            if local_volume_id in attached_volume_ids:
+                # this volume is still attached
+                continue
+            if local_volume_id == constants.INVALID_VOLUME_ID:
+                # for an invalid volume id, just check the device_name
+                local_device_name = local_attachment['device_name']
+                if local_device_name in attached_devices:
+                    # this volume is still attached even though its
+                    # volume id is not valid yet
+                    LOG.info(_("Retaining the volume with device name: %s, "
+                               "although its volume id is not valid "
+                               "yet" % local_device_name))
+                    continue
+            leftover_bdms.append(local_attachment)
+
+        if leftover_bdms:
+            LOG.info(_("Removing Block Device Mapping for: "
+                       "%s") % leftover_bdms)
+            self.conductor_api.block_device_mapping_destroy(ctx,
+                                                            leftover_bdms)
+            LOG.info(_("Removed Block Device Mapping"))
+
+    def _is_volume_type(self, block_device_mapping):
+        """
+        Test whether a block_device_mapping entry refers to a volume.
+
+        :param block_device_mapping: the block_device_mapping entry to
+                                     be tested
+        """
+        if block_device_mapping.get('volume_id') is None:
+            return False
+        return True
+
+    def _unregister_instance(self, ctx, local_instance, force_delete=False):
+        """
+        Unregister the instance from the local database. This does not use
+        the compute API, which would send an RPC to have the instance
+        deleted. The instance has already been removed from PowerVC, so we
+        just send our own notifications locally and remove it from the
+        database.
+
+        :param ctx: The security context
+        :param local_instance: The instance dict
+        :param force_delete: If True, the instance will be deleted even if
+                             the task state is set to 'deleting'.
+        """
+
+        # If the instance does not exist then ignore
+        if not local_instance:
+            LOG.debug(_('Instance does not exist locally'))
+            return
+
+        instance_ref = local_instance
+
+        # If the task state is not set to deleting, set it.
+        # If it is already set, go ahead and delete, which means that last
+        # time something went wrong during the actual delete.
+ if local_instance.get('task_state') != task_states.DELETING: + # Update the state and send notification for the updated state + old_ref, instance_ref = db_api.instance_update_and_get_original( + ctx, local_instance.get('uuid'), + {'task_state': task_states.DELETING, 'progress': 0}) + notifications.send_update(ctx, old_ref, instance_ref, + service='powervc') + + # Delete the instance from the local database + try: + db_api.instance_destroy(ctx, local_instance.get('uuid')) + except Exception: + LOG.warning(_("Removing PowerVC instance %s in nova failed."), + local_instance.get('name')) + + #delete network resource + self.network_api.deallocate_for_instance(ctx, local_instance) + + # Send notification about instance deletion due to sync operation + compute.utils.notify_about_instance_usage( + self.notifier, ctx, instance_ref, 'delete.sync', network_info={}, + system_metadata={}, extra_usage_info={}) + + def _is_pvc_instance(self, ctx, local_instance): + """ + Check to see if a local instance is synchronized from PowerVC. + If not, return False + """ + + # Get the uuid of pvc from the local instance. + metadata = self.compute_api.get_instance_metadata(ctx, local_instance) + return (constants.PVC_ID in metadata) + + def _is_valid_pvc_instance(self, ctx, local_instance, pvc_instances): + """ + Check to see if a local instance is in the PowerVC list. + If not, return False + """ + + # Get the uuid of pvc from the local instance. + metadata = self.compute_api.get_instance_metadata(ctx, local_instance) + if not constants.PVC_ID in metadata: + return False + + local_uuid = metadata[constants.PVC_ID] + found = False + for instance in pvc_instances: + uuid = instance.id + if local_uuid == uuid: + found = True + break + return found + + def _get_image_from_instance(self, ctx, pvc_instance, db_instance=None): + """ + Get the corresponding image with a PowerVC instance. + :param ctx: The security context + :param pvc_instance: The VM instance from the PowerVC + :param db_instance: The VM instance in the local database, + if it does not exist, set as None. + :returns: the image of the instance + """ + + rtn = None + + # Try to get the PowerVC image id from the PowerVC instance + pvc_instance_image_uuid = '' + pvc_image = pvc_instance.get('image', '') + if pvc_image != '': + pvc_instance_image_uuid = pvc_image.get('id', '') + + # Handle with the situation that the instance has already been + # in the local database + if db_instance is not None: + if 'image_ref' in db_instance: + # If the image is deleted in the Hosting OS, + # the image will not be deleted physically and just be marked + # as 'deleted'. + # So we can get the corresponding image although it is deleted. + try: + local_image = glance.\ + get_default_image_service().\ + show(ctx, db_instance.get('image_ref')) + except Exception as exc: + # This exception is used to handle with the situation + # that the glance service is crashed. + # Return the image as image_reference. + LOG.warning("Fail to get the local image: %s" + % db_instance.get('image_ref')) + LOG.warning("Getting the exception: %s." 
% str(exc)) + local_image = {} + local_image['id'] = db_instance.get('image_ref') + return local_image + + rtn = self._get_image_from_local_db(local_image, + pvc_instance_image_uuid) + if rtn is not None: + return rtn + + glance_images = self._list_local_images(ctx) + + for image in glance_images: + rtn = self._get_image_from_local_db(image, pvc_instance_image_uuid) + if rtn is not None: + break + + if rtn is None: + rtn = self.get_default_image(ctx) + + return rtn + + def _get_image_from_local_db(self, image, pvc_instance_image_uuid): + """ + This method is used to get the local image with the specified + PowerVC image UUID + :param image: the image from the local database + :pvc_instance_image_uuid: the UUID of the PowerVC image + :returns: if there is a proper image in the local database, + return this image, else return None + """ + rtn = None + local_image_pvc_uuid = None + if 'properties' in image and 'powervc_uuid' in image['properties']: + local_image_pvc_uuid = image['properties']['powervc_uuid'] + if pvc_instance_image_uuid != '' and \ + pvc_instance_image_uuid == local_image_pvc_uuid: + LOG.info("Get the image %s from the local database." + % image['id']) + rtn = image + return rtn + + def _list_local_images(self, ctx): + """ + This method is used to list all the local images. + :param ctx: The security context + :returns : if the local glance service is active, + return the image list, else return {} + """ + params = {} + filters = {} + filters['limit'] = CONF.powervc.image_limit + params['filters'] = filters + + try: + glance_images = glance.\ + get_default_image_service().detail(ctx, **params) + except Exception as exc: + LOG.warning("Get the exception %s during listing the images" + % str(exc)) + return {} + return glance_images + + def _get_flavor_from_instance(self, ctx, pvc_instance, db_instance=None): + """ + Get the local flavor through the PowerVC instance + """ + rtn = None + + # Get the flavorid from the PowerVC instance + pvc_flavor = pvc_instance['flavor'] + pvc_flavor_id = pvc_flavor['id'] + + if db_instance is not None: + # Handle the stituation that the instance is deployed + # through the hosting OS + + # Get the flavor id of the db instance + instance_type_id = db_instance['instance_type_id'] + # Get the db instance flavor + db_instance_flavor = self._get_local_flavor(ctx, instance_type_id) + # Get the PowerVC VM instance flavor + pvc_flavor = self.driver.get_pvc_flavor_by_flavor_id(pvc_flavor_id) + + # Check whether the instance has been resized + pvc_flavor_dict = pvc_flavor.__dict__ + + if (db_instance_flavor['memory_mb'] == + pvc_flavor_dict['ram'] and + db_instance_flavor['vcpus'] == + pvc_flavor_dict['vcpus'] and + db_instance_flavor['root_gb'] == + pvc_flavor_dict['disk'] and + db_instance_flavor['ephemeral_gb'] == + pvc_flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0)): + return db_instance_flavor + + if rtn is None: + LOG.debug(_("Get flavor from pvc")) + local_flavorid = CONF.powervc.flavor_prefix + pvc_flavor_id + rtn = self._get_pvc_flavor(ctx, pvc_flavor_id, local_flavorid) + + if rtn is None: + #Get the default flavor + rtn = flavors.get_default_flavor() + return rtn + + def _get_local_flavor(self, ctx, flavor_id): + + """ + Try to get the local flavor. 
+
+        This method is used to handle the situation where
+        the VM instance is deployed with a local flavor
+        through the hosting machine.
+        """
+        rtn = None
+        try:
+            flv = flavors.get_flavor(flavor_id, ctx)
+            rtn = flv
+        except Exception:
+            LOG.info(_("Returning None from _get_local_flavor; "
+                       "exception caught"))
+
+        return rtn
+
+    def _get_pvc_flavor(self, ctx, pvc_flavor_id, local_flavorid):
+
+        """
+        Try to get the synced PowerVC flavor.
+        This method is used to handle the situation where
+        the VM instance is deployed with a PowerVC flavor
+        through the hosting machine, or the VM instance is synced
+        from PowerVC.
+        """
+        rtn = None
+
+        try:
+            rtn = flavors.get_flavor_by_flavor_id(local_flavorid, ctx)
+        except Exception:
+            # PowerVC instance is created with a private flavor which is
+            # not synced
+            LOG.info(_("PowerVC instance is created with a private flavor "
+                       "which is not synced"))
+        if rtn is None:
+            try:
+                pvc_flavor = self.driver.\
+                    get_pvc_flavor_by_flavor_id(pvc_flavor_id)
+
+                if pvc_flavor is not None:
+                    pvc_flavor_dict = pvc_flavor.__dict__
+                    memory = pvc_flavor_dict['ram']
+                    vcpus = pvc_flavor_dict['vcpus']
+                    root_gb = pvc_flavor_dict['disk']
+                    ephemeral_gb = pvc_flavor_dict.get(
+                        'OS-FLV-EXT-DATA:ephemeral', 0)
+
+                    p_filter = {'min_memory_mb': memory,
+                                'min_root_gb': root_gb,
+                                'is_public': True,
+                                'disabled': False}
+                    rtns = flavors.get_all_flavors(ctx,
+                                                   filters=p_filter)
+
+                    # The PowerVC driver will not sync a dynamic
+                    # flavor. When the PowerVC instance is created with
+                    # a dynamic flavor and we cannot find a similar
+                    # flavor, we return the first flavor whose
+                    # memory, cpu and disk are bigger than those of the
+                    # dynamic flavor.
+
+                    for key in rtns.keys():
+                        if memory <= rtns[key].get('memory_mb')\
+                                and vcpus <= rtns[key].get('vcpus')\
+                                and root_gb <= rtns[key].get('root_gb')\
+                                and ephemeral_gb <= rtns[key].\
+                                get('ephemeral_gb'):
+                            if rtns[key]['name'].find('PVC') != -1:
+                                rtn = rtns[key]
+                                LOG.info(_("Returning the first public "
+                                           "PowerVC flavor that fits the "
+                                           "instance resources instead: "
+                                           "%s") % rtn)
+                                break
+                    if rtn is None:
+                        for key in rtns.keys():
+                            if memory <= rtns[key].get('memory_mb')\
+                                    and vcpus <= rtns[key].get('vcpus')\
+                                    and root_gb <= rtns[key].\
+                                    get('root_gb')\
+                                    and ephemeral_gb <= rtns[key].\
+                                    get('ephemeral_gb'):
+                                rtn = rtns[key]
+                                LOG.info(_("Returning the first public "
+                                           "flavor that fits the "
+                                           "instance resources instead: "
+                                           "%s") % rtn)
+                                break
+            except Exception:
+                if rtn is None:
+                    # Get the default flavor when we cannot get a
+                    # corresponding flavor for the specified
+                    # PowerVC instance
+                    LOG.info("Using the default flavor")
+                    rtn = flavors.get_default_flavor()
+
+        return rtn
+
+    # FIXME: get a security group; shall we map the security group?
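+    # If mapping were implemented, one hypothetical sketch (names assumed,
+    # not part of this change) could translate PowerVC group names into
+    # local ones instead of always answering 'default':
+    #
+    #     pvc_groups = pvc_instance.get('security_groups', [])
+    #     return [g['name'] for g in pvc_groups] or ['default']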
+ def _get_security_group_for_instance(self, ctx, pvc_instance): + return ['default'] + + def _create_local_listeners(self, ctx): + + def reconnect_handler(): + LOG.debug(_('Re-established connection to local Qpid broker')) + + # Create Qpid connection and listener + conn = messaging.LocalConnection(reconnect_handler=reconnect_handler, + context=ctx, + log=logging) + listener = conn.create_listener('nova', 'notifications.info') + + # Instance state changes + listener.register_handler([ + constants.EVENT_INSTANCE_RESIZE, + constants.EVENT_INSTANCE_RESIZE_CONFIRM, + constants.EVENT_INSTANCE_LIVE_MIGRATE], + self._handle_local_deferred_host_updates) + + # Instance creation + listener.register_handler(constants.EVENT_INSTANCE_CREATE, + self._handle_local_instance_create) + + conn.start() + + def _create_powervc_listeners(self, ctx): + """ + Listen for out-of-band changes made in PowerVC. + + This method creates the connection to the PowerVC Qpid broker and + sets up handlers so that any changes made directly in PowerVC are + reflected in the local OS. + + :param: ctx The security context + """ + # Function to call if we lose the Qpid connection and then get it back + def reconnect_handler(): + LOG.debug(_('Re-established connection to Qpid broker, sync all ' + 'instances on next sync interval')) + self.full_instance_sync_required = True + + # Create Qpid connection and listener + conn = messaging.PowerVCConnection(reconnect_handler=reconnect_handler, + context=ctx, + log=logging) + listener = conn.create_listener('nova', 'notifications.info') + + # Instance creation + listener.register_handler(constants.EVENT_INSTANCE_CREATE, + self._handle_powervc_instance_create) + + # onboarding end + listener.register_handler(constants.EVENT_INSTANCE_IMPORT, + self._handle_powervc_instance_create) + + # Instance deletion + listener.register_handler(constants.EVENT_INSTANCE_DELETE, + self._handle_powervc_instance_delete) + + # Instance state changes + listener.register_handler([ + constants.EVENT_INSTANCE_UPDATE, + constants.EVENT_INSTANCE_POWER_ON, + constants.EVENT_INSTANCE_POWER_OFF, + constants.EVENT_INSTANCE_RESIZE, + constants.EVENT_INSTANCE_RESIZE_CONFIRM, + constants.EVENT_INSTANCE_LIVE_MIGRATE, + constants.EVENT_INSTANCE_LIVE_MIGRATE_ROLLBACK, + constants.EVENT_INSTANCE_SNAPSHOT], + self._handle_powervc_instance_state) + + # Instance volume attach/detach event handling + listener.register_handler([ + constants.EVENT_INSTANCE_VOLUME_ATTACH, + constants.EVENT_INSTANCE_VOLUME_DETACH], + self._handle_volume_attach_or_detach) + + conn.start() + + def _handle_local_instance_create(self, context, message): + """ + Handle local deployment completed messages sent from the + hosting OS. This is need so we can tell the hosting OS + to sync the latest state from PowerVC. Once a deployment + completes in PowerVC the instances go into activating task + state. We want to make sure we reflect this as soon as it + happens and based on timing its best to check when we report + back from spawn thus sending the completed event. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + LOG.debug(_("Handling local notification: %s" % + message.get('event_type'))) + payload = message.get('payload') + hosting_id = payload.get('instance_id') + + # Attempt to get the local instance. 
+ instance = None + try: + instance = db.instance_get_by_uuid(context, hosting_id) + except exception.InstanceNotFound: + LOG.debug(_("Local Instance %s Not Found" % hosting_id)) + return + + # Get the PVC instance + pvcid = self.driver._get_pvcid_from_metadata(instance) + powervc_instance = self.driver.get_instance(pvcid) + + if powervc_instance: + self._update_state(context, instance, powervc_instance, pvcid, + constants.EVENT_INSTANCE_UPDATE) + else: + LOG.debug(_('PowerVC instance could not be found')) + + def _handle_local_deferred_host_updates(self, context, message): + """ + Handle live migration completed messages sent from PowerVC. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + hosting_id = self._pre_process_message(message) + + # Attempt to get the local instance. + instance = None + try: + instance = db.instance_get_by_uuid(context, hosting_id) + except exception.InstanceNotFound: + LOG.debug(_("Local Instance %s Not Found" % hosting_id)) + return + + # See if the instance is deferring host scheduling. + # If it is exit immediately. + if not self.driver._check_defer_placement(instance): + LOG.debug(_("Local Instance %s did not defer scheduling" + % hosting_id)) + return + + # Get the PVC instance + pvcid = self.driver._get_pvcid_from_metadata(instance) + + if pvcid is not None: + if instance: + # Convert to primative format from db object + instance = jsonutils.to_primitive(instance) + try: + self.driver.update_instance_host(context, instance) + except Exception: + LOG.debug(_('Problem updating local instance host ' + 'information, instance: %s' % instance['id'])) + else: + LOG.debug(_('Tried to update instance host value but the' + ' instance could not be found in PowerVC')) + + def _handle_powervc_instance_create(self, context, message): + """ + Handle instance create messages sent from PowerVC. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + powervc_instance_id = self._pre_process_message(message) + + # Check for matching local instance + matched_instances = self._get_local_instance_by_pvc_id( + context, powervc_instance_id) + + # If the instance already exists locally then ignore + if len(matched_instances) > 0: + LOG.debug(_('Instance already exists locally')) + return + + # Get the newly added PowerVC instance and add it to the local OS + instance = self.driver.get_instance(powervc_instance_id) + # Filter out the instance in scg that is not specified in conf + instance_scg_id = instance.storage_connectivity_group_id + our_scg_id_list = [scg.id for scg + in utills.get_utils().get_our_scg_list()] + if instance_scg_id and instance_scg_id not in our_scg_id_list: + instance = None + + if instance: + instance = instance.__dict__ + try: + self._add_local_instance(context, instance) + except Exception as e: + LOG.warning(_("Failed to insert instance due to: %s " + % str(e))) + else: + LOG.debug(_('Tried to add newly created instance but it could not ' + 'be found in PowerVC')) + + def _handle_powervc_instance_delete(self, context, message): + """ + Handle instance delete messages sent from PowerVC. 
+ + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + powervc_instance_id = self._pre_process_message(message) + + # Check for matching local instance + matched_instances = self._get_local_instance_by_pvc_id( + context, powervc_instance_id) + + # If the instance does not exist then ignore + if len(matched_instances) == 0: + LOG.debug(_('Instance does not exist locally')) + return + + # Remove the instance from the local OS + self._remove_local_instance(context, matched_instances[0]) + + def _handle_powervc_instance_state(self, context, message): + """ + Handle instance state changes sent from PowerVC. This includes + instance update and all other state changes caused by events like + power on, power off, resize, live migration, and snapshot. + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + powervc_instance_id = self._pre_process_message(message) + event_type = message.get('event_type') + + local_instance = self.\ + _get_matched_instance_by_pvc_id(context, powervc_instance_id) + + if not local_instance: + return + + powervc_instance = self.driver.get_instance(powervc_instance_id) + + self._update_state(context, local_instance, powervc_instance, + powervc_instance_id, event_type) + + def _handle_volume_attach_or_detach(self, context, message): + """ + Handle out of band volume attach or detach event + + :param: context The security context + :param: message The AMQP message sent from OpenStack (dictionary) + """ + powervc_instance_id = self._pre_process_message(message) + + local_instance = self.\ + _get_matched_instance_by_pvc_id(context, powervc_instance_id) + if not local_instance: + return + + payload = message.get('payload') + powervc_volume_id = payload.get('volume_id') + if powervc_volume_id is None: + LOG.warning(_('no valid volume for powervc instance %s' % + powervc_instance_id)) + return + vol_id = self.cache_volume.get_by_id(powervc_volume_id) + if vol_id is None: + #get the local volume info and cache it + LOG.debug(_("Get the local volume info for powervc volume with id:" + " %s") % powervc_volume_id) + local_volume_id = self.driver.\ + get_local_volume_id_from_pvc_id(powervc_volume_id) + LOG.debug(_("Finished to get the local volume info for powervc " + "volume with id: %s") % powervc_volume_id) + if local_volume_id is None: + #continue to process, just log warning + LOG.warning(_('volume does not exist locally for remote ' + 'volume: %s') % powervc_volume_id) + else: + self.cache_volume.set_by_id(powervc_volume_id, local_volume_id) + + self.sync_volume_attachment(context, powervc_instance_id, + local_instance) + + def _pre_process_message(self, message): + """ + Logging the event type and return the instance id of the nova server + instance in the event + + :param: message The AMQP message sent from OpenStack (dictionary) + :returns instance id triggering the event + """ + LOG.debug(_("Handling notification: %s" % message.get('event_type'))) + payload = message.get('payload') + instance_id = payload.get('instance_id') + return instance_id + + def _get_matched_instance_by_pvc_id(self, context, pvc_id): + """ + Get the desired local instance from the powervc instance id, if no + matched local instance, then return None, if more than one matched + local instances, then log a warning message, only return the first one + + :param: message The AMQP message sent from OpenStack (dictionary) + :returns the matched local instance for remote instance in 
powervc + """ + # Get the matching local instance + matched_instances = self._get_local_instance_by_pvc_id( + context, pvc_id) + + # If the instance does not exist locally then ignore + if len(matched_instances) == 0: + LOG.info(_("Instance with powervc id %s does not exist " + "locally") % pvc_id) + return None + + # Warn if more than one local instance matches the PowerVC instance + if len(matched_instances) > 1: + LOG.warning(_('More than one instance in DB ' + 'match one PowerVC instance: %s' % + (pvc_id))) + # TODO: We should do something about this but scheduling a sync + # won't help since that does nothing to remove duplicate local + # instances. + + # Get the PowerVC instance so we can compare it to the local instance + return matched_instances[0] + + def _update_state(self, context, local_instance, powervc_instance, + powervc_instance_id, event_type): + ''' + Utility method for updatng an instance for local and + powervc based messages. + + :param: context The security context + :param: local_instance The database local instance + :param: powervc_instance The powerVC instance + :param: powervc_instance_id The powerVC instance id + :param: event_type The original notification event type + ''' + # Warn if PowerVC instance is not found + if powervc_instance is None: + LOG.warning(_('PowerVC instance could not be found: %s' % + (powervc_instance_id))) + self._schedule_instance_sync(powervc_instance_id) + return + + powervc_instance = powervc_instance.__dict__ + + # Get the local and PowerVC VM and task states so that we can compare + # them. + states = { + 'vm_local': local_instance.get('vm_state'), + 'vm_powervc': powervc_instance.get('OS-EXT-STS:vm_state'), + 'task_local': local_instance.get('task_state'), + 'task_powervc': powervc_instance.get('OS-EXT-STS:task_state') + } + + # Check if the current VM and task states permit a state change + # for the given event type. If not then we stop here. + if not self._can_apply_state_update(event_type, **states): + + # We can't apply the state update because the current states do not + # allow it. If the local instance is not performing a task and the + # states are not the same then we need to sync. + if states['task_local'] is None and not ( + self._instance_states_equal(**states)): + LOG.warning(_("No local task but the states don't match. " + "Scheduling a sync.")) + self._schedule_instance_sync(powervc_instance_id) + return + + # Get updated instance attributes + updated_instance, unused_image, unused_flavor = \ + self._translate_pvc_instance(context, powervc_instance, + local_instance) + + #In order to support the rename function in the Hosting OS, we will + #avoid the name of the instance is updated. + #In this situation, the name of the same instance will be different in + #the hosting OS and PowerVC. + updated_instance['display_name'] = local_instance.get('display_name') + + # Apply the VM and task state to the updated instance properties based + # on the event type. + updated_instance = self._apply_state_to_instance_update( + event_type, updated_instance, **states) + + # Call the compute API to update the local instance + instance_ref = self.compute_api.update(context, local_instance, + **updated_instance) + + # Send sync notification + self._send_instance_sync_notification(context, event_type, + instance_ref) + + def _send_instance_sync_notification(self, context, event_type, instance): + """ + Send a sync notification message based on the given event type. 
+ + :param: context The security context + :param: event_type The original notification event type + :param: instance The updated local instance + """ + # Instance update events do not result in a sync notification + if event_type == constants.EVENT_INSTANCE_UPDATE: + return + + tokens = event_type.split('.')[2:] + tokens[-1] = constants.SYNC_EVENT_SUFFIX + event = '.'.join(tokens) + LOG.debug(_('Sending instance sync notification: %s' % (event))) + compute.utils.notify_about_instance_usage(self.notifier, context, + instance, event, + network_info={}, + system_metadata={}, + extra_usage_info={}) + + def _can_apply_state_update(self, event_type, **states): + """ + Determine if the instance state update can be applied based on the + given local and PowerVC VM and task states. + + For instance update events, the instance should always be updated even + if the states haven't changed. For other events it will depend on the + current VM and task state. + + :param: event_type The notification event type + :param: states VM and task states for the local and PowerVC instance + """ + # If the local instance task state is anything other than None then + # we shouldn't interrupt, besides activating. When a deployment + # happens on PowerVC, the status is active but the task state is + # activating. We need to sync the activating task state so that + # is why its special cased. + if (states['task_local'] is not None and + states['task_local'] != 'activating'): + return False + + # For instance update events the instance properties should always + # be updated. + if event_type == constants.EVENT_INSTANCE_UPDATE: + return True + + # For power_on event the local instance must be STOPPED and the + # PowerVC instance must be ACTIVE. + if event_type == constants.EVENT_INSTANCE_POWER_ON: + return states['vm_local'] == vm_states.STOPPED and \ + states['vm_powervc'] == vm_states.ACTIVE + + # For power_off event the local instance must be ACTIVE and the + # PowerVC instance must be STOPPED. + if event_type == constants.EVENT_INSTANCE_POWER_OFF: + return states['vm_local'] == vm_states.ACTIVE and \ + states['vm_powervc'] == vm_states.STOPPED + + # For finish_resize event the local instance must be ACTIVE or STOPPED + # and the PowerVC instance must be RESIZED. + if event_type == constants.EVENT_INSTANCE_RESIZE: + return ((states['vm_local'] == vm_states.ACTIVE or + states['vm_local'] == vm_states.STOPPED) and + states['vm_powervc'] == vm_states.RESIZED) + + # For resize confirm event the local instance must be RESIZED and the + # PowerVC instance must be ACTIVE or STOPPED. + if event_type == constants.EVENT_INSTANCE_RESIZE_CONFIRM: + return (states['vm_local'] == vm_states.RESIZED and + (states['vm_powervc'] == vm_states.ACTIVE or + states['vm_powervc'] == vm_states.STOPPED)) + + # For snapshot event the local instance must be ACTIVE or STOPPED and + # the PowerVC instance must be the same. + if event_type == constants.EVENT_INSTANCE_SNAPSHOT: + return ((states['vm_local'] == vm_states.ACTIVE and + states['vm_powervc'] == vm_states.ACTIVE) or + (states['vm_local'] == vm_states.STOPPED and + states['vm_powervc'] == vm_states.STOPPED)) + + # For the other instance events the local instance VM state and the + # PowerVC VM state must both be ACTIVE. + return states['vm_local'] == vm_states.ACTIVE and ( + states['vm_powervc'] == vm_states.ACTIVE) + + def _instance_states_equal(self, **states): + """ + Determine if the local and PowerVC instance states are the same. 
+ + :param: states VM and task states for the local and PowerVC instance + """ + return states['vm_local'] == states['vm_powervc'] and ( + states['task_local'] == states['task_powervc']) + + def _apply_state_to_instance_update(self, event_type, updated_instance, + **states): + """ + Apply the vm_state and task_state properties to the updated instance + properties. The new vm_state and task_state will depend on the type + of event that triggered the state change. The initial updated instance + properties include the VM state already updated to match the PowerVC + VM state and the task state set to None. + + :param: event_type The notification event type + :param: updated_instance The updated instance properties from PowerVC + :param: states VM and task states for the local and PowerVC instance + """ + # For instance updates we have to do some checks to determine if we + # should update the VM and task states. + if event_type == constants.EVENT_INSTANCE_UPDATE: + return self._apply_state_update(updated_instance, **states) + + # For other event types we don't update the task state + del updated_instance['task_state'] + + # We only update the VM state for the following event types + vm_state_events = [constants.EVENT_INSTANCE_POWER_ON, + constants.EVENT_INSTANCE_POWER_OFF] + if not event_type in vm_state_events: + del updated_instance['vm_state'] + + return updated_instance + + def _apply_state_update(self, updated_instance, **states): + """ + Apply the new vm_state and task_state properties for the instance + update event. + + :param: updated_instance The updated instance properties from PowerVC + :param: states VM and task states for the local and PowerVC instance + """ + # If the PowerVC VM state moves into or out of ERROR state then the + # local instance VM state must be updated to match. + if (states['vm_local'] == vm_states.ERROR and ( + states['vm_powervc'] != vm_states.ERROR) or + (states['vm_local'] != vm_states.ERROR and + states['vm_powervc'] == vm_states.ERROR)): + # The updated instance attributes already has the VM state set to + # match the PowerVC VM state. + LOG.debug(_('VM state change: %s --> %s' % + (str(states['vm_local']), str(states['vm_powervc'])))) + else: + # Otherwise remove the VM state from the update + del updated_instance['vm_state'] + + # If the PowerVC VM state is ACTIVE and the task state moves into or + # out of ACTIVATING then the local task state must be updated to + # match. Enforce strict control of the expected task states. + if states['vm_powervc'] == vm_states.ACTIVE and ( + # Sync task state to activating. + (states['task_local'] is None and + states['task_powervc'] == pvc_task_states.ACTIVATING) or + # Sync task state from activating to None + (states['task_local'] == pvc_task_states.ACTIVATING and + states['task_powervc'] is None)): + updated_instance.update({'task_state': states['task_powervc']}) + LOG.debug(_('Task state change: %s --> %s' % + (str(states['task_local']), + str(states['task_powervc'])))) + else: + # Otherwise remove the task state from the update + del updated_instance['task_state'] + + return updated_instance + + def _schedule_instance_sync(self, powervc_instance_id): + """ + Schedule a sync for the given PowerVC instance ID. A sync will occur + at the next instance sync interval for marked instances. + + :param: powervc_instance_id The ID of the PowerVC instance that needs + to be synced. 
+ """ + self.sync_instances[powervc_instance_id] = True + + def _remove_local_instance(self, context, local_instance, + force_delete=False): + """Remove the local instance if it's not performing a task and + its vm_state is not BUILDING|DELETED|SOFT_DELETED|DELETING(force). + """ + local_task_state = local_instance.get('task_state') + local_vm_state = local_instance.get('vm_state') + LOG.debug(_('Remove local instance %(ins)s, vm_state: %(vm)s, ' + 'task_state: %(task)s' + % {'ins': local_instance.get('uuid'), + 'vm': str(local_vm_state), + 'task': str(local_task_state)})) + + if ( + local_vm_state == vm_states.DELETED or + local_vm_state == vm_states.SOFT_DELETED or + (local_task_state == task_states.DELETING and not force_delete) + ): + LOG.debug(_('Skip remove local_instance,' + 'Because the VM already deleted or being deleted')) + return False + + if ( + (local_task_state is None or + local_task_state == pvc_task_states.ACTIVATING or + (local_task_state == task_states.DELETING and + force_delete)) and + local_vm_state != vm_states.BUILDING + ): + self._unregister_instance(context, local_instance) + return True + + LOG.debug(_('Skip remove local_instance %(ins)s from local DB, because' + 'task_state is %(task_state)s, vm_state is %(vm_state)s' + % {'ins': local_instance, + 'task_state': local_task_state, + 'vm_state': local_vm_state})) + return False + + def _add_local_instance(self, context, pvc_instance): + """Add a new local instance if the PowerVC instance is not + performing a task or performing a 'ACTIVATING' task, + and its vm_state is not BUILDING, RESIZED, + DELETED, SOFT_DELETED. + """ + pvc_task_state = pvc_instance['OS-EXT-STS:task_state'] + pvc_vm_state = pvc_instance['OS-EXT-STS:vm_state'] + if ( + (pvc_task_state is None or + pvc_task_state == pvc_task_states.ACTIVATING) and + pvc_vm_state != vm_states.BUILDING and + pvc_vm_state != vm_states.RESIZED and + pvc_vm_state != vm_states.DELETED and + pvc_vm_state != vm_states.SOFT_DELETED + ): + self._insert_pvc_instance(context, pvc_instance) + return True + + LOG.debug(_('Skip add pvc_instance %(ins)s to local DB, because' + 'task_state is %(task_state)s, vm_state is %(vm_state)s' + % {'ins': pvc_instance, + 'task_state': pvc_task_state, + 'vm_state': pvc_vm_state})) + return False + + def _update_local_instance(self, context, local_instance, pvc_instance): + """Update the local instance if both the local instance and the + PowerVC instance are not performing a task. + """ + # Syncing RESIZED state can create problems for the local instance. + # The hosting OpenStack maintains a list of resizes initiated from + # it so users can confirm them later. Overwriting the local RESIZED + # state will cause resizes to be left in the list and never confirmed. + # Syncing the PowerVC RESIZED state will not work either. The local + # instance does not have the correct internal state to allow resize + # confirmation. 
+        local_task_state = local_instance.get('task_state')
+        pvc_task_state = pvc_instance['OS-EXT-STS:task_state']
+        local_vm_state = local_instance.get('vm_state')
+        pvc_vm_state = pvc_instance['OS-EXT-STS:vm_state']
+        if (
+            (local_task_state is None or
+             local_task_state == pvc_task_states.ACTIVATING) and
+            pvc_task_state is None and
+            pvc_vm_state != vm_states.RESIZED and
+            local_vm_state != vm_states.RESIZED
+        ):
+            self._sync_existing_instance(context,
+                                         local_instance,
+                                         pvc_instance)
+            return True
+
+        LOG.debug(_('Skip updating %(pvc_ins)s to %(local_ins)s because '
+                    'pvc_task_state is %(pvc_task_state)s, '
+                    'pvc_vm_state is %(pvc_vm_state)s, '
+                    'local_task_state is %(local_task_state)s and '
+                    'local_vm_state is %(local_vm_state)s'
+                    % {'pvc_ins': pvc_instance,
+                       'local_ins': local_instance,
+                       'pvc_task_state': pvc_task_state,
+                       'pvc_vm_state': pvc_vm_state,
+                       'local_task_state': local_task_state,
+                       'local_vm_state': local_vm_state}))
+        return False
+
+    def _periodic_instance_sync(self, context, instance_ids=None):
+        """
+        Called to synchronize instances after initial boot. This does almost
+        the same thing as the synchronize that happens on boot except this
+        function will check that the instance states meet certain requirements
+        before adding, removing, or updating them locally.
+
+        :param: context The security context
+        :param: instance_ids List of PowerVC instance IDs to sync
+        """
+        # Some counters to record instances modified
+        count_new_instances = 0
+        count_updated_instances = 0
+        count_deleted_instances = 0
+        count_errors = 0
+
+        # If a list of instance IDs is passed in then this is a targeted sync
+        # operation and not a full sync.
+        is_full_sync = not instance_ids
+
+        # If this is a full sync then reset the marked instances map, otherwise
+        # just remove instances we are about to update. Do this up front so
+        # that we minimize the likelihood of losing any instances that might
+        # get marked during the sync operation.
+        if is_full_sync:
+            self.sync_instances = {}
+        else:
+            for instance_id in instance_ids:
+                del self.sync_instances[instance_id]
+
+        # Get both lists from the local DB and PowerVC
+        pvc_instances = []
+        local_instances = []
+        if is_full_sync:
+            pvc_instances = self.driver.list_instances()
+            local_instances = self._get_all_local_instances(context)
+        else:
+            for idx in instance_ids:
+                try:
+                    instance = self.driver.get_instance(idx)
+                    pvc_instances.append(instance)
+                except Exception as e:
+                    LOG.warning(_('Error occurred while getting PowerVC '
+                                  'instance [id:%s], %s' % (idx, e)))
+
+        # Sync from PowerVC to the local nova DB, to insert new instances and
+        # update existing instances.
+        for index, instance in enumerate(pvc_instances):
+            try:
+
+                greenthread.sleep(0)
+
+                """
+                A sample of a returned instance from PowerVC:
+                https://w3-connections.ibm.com/wikis/home?lang=en-us#!/wiki/
+                We32ccda54f51_4ede_bfd6_8f9cc4b70d23/page/REST%20Responses
+                """
+                # If we are syncing a set of given PowerVC instance IDs then we
+                # first check if the PowerVC instance exists. If it doesn't,
+                # we attempt to delete the corresponding local
+                # instance and move on.
+                if not is_full_sync and instance is None:
+                    matched_instances = self.\
+                        _get_local_instance_by_pvc_id(context,
+                                                      instance_ids[index])
+                    for local_instance in matched_instances:
+                        if self._remove_local_instance(context,
+                                                       local_instance):
+                            count_deleted_instances += 1
+                    continue
+
+                # Convert the PowerVC instance object to a dictionary
+                pvc_instance = instance.__dict__
+                matched_instances = self.\
+                    _get_local_instance_by_pvc_id(context, pvc_instance['id'])
+
+                # If not found locally then try to add the new instance
+                if len(matched_instances) == 0:
+                    if self._add_local_instance(context,
+                                                pvc_instance):
+                        count_new_instances += 1
+                    continue
+
+                if len(matched_instances) > 1:
+                    LOG.warning(_('More than one local instance matches one '
+                                  'PowerVC instance: %s'
+                                  % (pvc_instance['id'])))
+                local_instance = matched_instances[0]
+
+                # The local instance exists so try to update it
+                if self._update_local_instance(context,
+                                               local_instance,
+                                               pvc_instance):
+                    count_updated_instances += 1
+            except Exception as e:
+                count_errors += 1
+                LOG.error(_("_periodic_instance_sync pvc to local: ") + str(e))
+
+        # Sync from the local nova DB to PowerVC, to remove invalid instances
+        # that are not in PowerVC anymore. This only happens during a full
+        # sync of all instances.
+        for local_instance in local_instances:
+            try:
+
+                greenthread.sleep(0)
+
+                if not self._is_valid_pvc_instance(context, local_instance,
+                                                   pvc_instances):
+                    if self._remove_local_instance(context,
+                                                   local_instance,
+                                                   True):
+                        count_deleted_instances += 1
+            except Exception as e:
+                count_errors += 1
+                LOG.error(_("_periodic_instance_sync local to pvc: ") + str(e))
+
+        LOG.info(_("""
+                   *******************************
+                   Instance sync is complete.
+                   Full sync: %(full_sync)s
+                   [ %(insert)s inserted,
+                     %(update)s updated,
+                     %(delete)s deleted]
+                   Errors: %(error)s
+                   *******************************
+                   """ %
+                   {'full_sync': is_full_sync,
+                    'insert': count_new_instances,
+                    'update': count_updated_instances,
+                    'delete': count_deleted_instances,
+                    'error': count_errors}))
+
+    def _start_periodic_instance_sync(self, context):
+        """
+        Initialize the periodic syncing of instances from PowerVC into the
+        local OS. The powervc.instance_sync_interval config property
+        determines how often the sync will occur, and the
+        powervc.full_instance_sync_frequency config property determines the
+        number of marked instance sync operations between full instance syncs.
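+
+        For example (illustrative values): with instance_sync_interval=20
+        and full_instance_sync_frequency=30, marked instances are synced
+        every 20 seconds and a full sync runs roughly every 600 seconds.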
+
+        :param: context The security context
+        """
+        # Enforce some minimum values for the sync interval properties
+        # TODO: Minimum values should at least be documented
+        conf_sync = CONF.powervc.instance_sync_interval
+        conf_full_sync = CONF.powervc.full_instance_sync_frequency
+        sync_interval = conf_sync if conf_sync > 10 else 10
+        full_sync_frequency = conf_full_sync if conf_full_sync > 2 else 2
+        self._instance_sync_counter = 0
+
+        # Decorator that swallows most exceptions, except those specified
+        def exception_swallowed(func):
+            def __swallowed():
+                LOG.debug(_('Begin: decorator of exception_swallowed for %s' %
+                            str(func)))
+                try:
+                    func()
+                except LoopingCallDone as lcd:
+                    LOG.error(_('Exception: LoopingCallDone: ' + str(lcd)))
+                    raise lcd
+                except Exception as e:
+                    LOG.error(_('Exception: Exception: ' + str(e)))
+                LOG.debug(_('End: decorator of exception_swallowed for %s' %
+                            str(func)))
+            return __swallowed
+
+        @exception_swallowed
+        def sync():
+            """Called on the instance sync intervals"""
+            self._instance_sync_counter += 1
+
+            # Check if it's time to do a full sync
+            if self.full_instance_sync_required or (
+                    self._instance_sync_counter == full_sync_frequency):
+                self.full_instance_sync_required = False
+                self._instance_sync_counter = 0
+                LOG.debug(_('Syncing all instances on interval'))
+                self._periodic_instance_sync(context)
+                return
+
+            # If there are no marked instances to sync, stop here
+            instance_ids = self.sync_instances.keys()
+            if len(instance_ids) == 0:
+                LOG.debug(_('No marked instances to sync.'))
+                return
+
+            LOG.debug(_('Syncing marked instances'))
+            self._periodic_instance_sync(context, instance_ids=instance_ids)
+
+        sync_call = loopingcall.FixedIntervalLoopingCall(sync)
+        sync_call.start(interval=sync_interval, initial_delay=sync_interval)
+
+    def get_default_image(self, context):
+        """
+        The PowerVC Default Image is used when we can't figure out the actual
+        image that originated an instance in PowerVC.
+
+        We need to have an actual image reference in nova because nova must
+        have an image reference in order to show the instance details;
+        otherwise 'nova show' will fail. So we create a generic image
+        reference in Glance that is used by all PowerVC instances whose
+        originating image we don't know.
+        """
+
+        if self._default_image:
+            return self._default_image
+
+        glance_images = self._list_local_images(context)
+        for glance_image in glance_images:
+            if glance_image['name'] == CONF.powervc.powervc_default_image_name:
+                self._default_image = glance_image
+                return self._default_image
+
+        # The default image has not been created, so let's do so
+        LOG.info(_('Creating PowerVC Default Image in Glance repository'))
+        image_data = {
+            'is_public': True,
+            'name': CONF.powervc.powervc_default_image_name,
+            'disk_format': 'raw',
+            'container_format': 'ovf',
+            'protected': 'True',
+            'properties': {
+                'architecture':
+                constants.PPC64,
+                'hypervisor_type':
+                constants.PVM_HYPERVISOR_TYPE
+            }
+        }
+        self._default_image = \
+            glance.get_default_image_service().create(context, image_data)
+        return self._default_image
+
+    def _fix_instance_nw_info(self, context, instance):
+        """
+        Fix the instance network info if necessary.
+        """
+        if instance.get('info_cache'):
+            network_info = instance.get('info_cache').get('network_info')
+            # network_info may be a stringified empty array.
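+            # Illustrative shape (hypothetical values): an instance whose
+            # ports were never cached carries
+            #     info_cache = {'network_info': u'[]'}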
+            if not network_info or network_info == u'[]':
+                # Empty network_info; the network info could be missing
+                search_opts = {'device_id': instance['uuid'],
+                               'tenant_id': instance['project_id']}
+                data = self.network_api.list_ports(context, **search_opts)
+                ports = data.get('ports', [])
+                # If ports is not empty, they should be put into network_info.
+                if ports:
+                    nets = self.network_api.get_all(context)
+                    # Calling this triggers an info_cache update, which
+                    # links the instance with the ports.
+                    port_ids = []
+                    for port in ports:
+                        port_ids.append(port.get('id'))
+                    nw_info = self.network_api.get_instance_nw_info(context,
+                                                                    instance,
+                                                                    nets,
+                                                                    port_ids)
+                    LOG.info("_fix_instance_nw_info: " + str(nw_info))
diff --git a/nova-powervc/powervc/nova/driver/compute/task_states.py b/nova-powervc/powervc/nova/driver/compute/task_states.py
new file mode 100644
index 0000000..cba6291
--- /dev/null
+++ b/nova-powervc/powervc/nova/driver/compute/task_states.py
@@ -0,0 +1,16 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+Task states unique to PowerVC.
+"""
+
+# Task state used by PowerVC to indicate an instance is being activated
+ACTIVATING = 'activating'
diff --git a/nova-powervc/powervc/nova/driver/virt/__init__.py b/nova-powervc/powervc/nova/driver/virt/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/__init__.py b/nova-powervc/powervc/nova/driver/virt/powervc/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/nova-powervc/powervc/nova/driver/virt/powervc/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/driver.py b/nova-powervc/powervc/nova/driver/virt/powervc/driver.py
new file mode 100644
index 0000000..ab319e2
--- /dev/null
+++ b/nova-powervc/powervc/nova/driver/virt/powervc/driver.py
@@ -0,0 +1,1408 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+import nova
+from novaclient.exceptions import NotFound
+from novaclient.exceptions import BadRequest
+from nova import exception
+from nova.compute import task_states
+from nova.image import glance
+from nova.virt import driver
+from nova.openstack.common import jsonutils
+from nova.openstack.common import log as logging
+from nova.openstack.common import excutils
+from powervc.nova.driver.virt.powervc import service
+from powervc.nova.driver.compute import constants
+from powervc.nova.common import exception as pvc_exception
+from powervc.common.client import factory
+from powervc.common.gettextutils import _
+from powervc.common import constants as common_constants
+from oslo.config import cfg
+from powervc import utils as novautils
+from nova import db
+import socket
+
+LOG = logging.getLogger(__name__)
+CONF = cfg.CONF
+
+"""
+A driver that connects to a PowerVC system.
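+
+A hedged configuration sketch (illustrative only, not part of this patch):
+the hosting OS would point its compute service at this driver in nova.conf,
+e.g.:
+
+    [DEFAULT]
+    compute_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver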
+ +""" + + +class PowerVCDriver(driver.ComputeDriver): + """ + A nova-compute driver for PowerVC. + + This driver provides virtual machine management using IBM PowerVC + hypervisor management software that is itself an openstack implementation. + This driver requires that users provide the hostname, username and + password for the target IBM PowerVC system. + """ + nc = None + + def __init__(self, virtapi): + self.virtapi = virtapi + self._compute_event_callback = None + if(PowerVCDriver.nc is None): + PowerVCDriver.nc = factory.POWERVC.new_client( + str(common_constants.SERVICE_TYPES.compute)) + + self._service = service.PowerVCService(PowerVCDriver.nc) + self._stats = None + + def init_host(self, host): + """Initialize anything that is necessary for the driver to function, + including catching up with currently running VM's on the given host. + """ + # Override the configuration host value for the virtual nova compute + # instance so live migration will have the correct host value and not + # the value defined in nova.conf. For details see + # nova.compute.manager.check_can_live_migrate_destination + CONF.host = host + self.host = host + # Initialize instance members for the powerVC hostname + # and id. + hypervisorlist = self._service.list_hypervisors() + for hypervisor in hypervisorlist: + if hypervisor._info["service"]["host"] == host: + # Cache the hostname and hypervisor id + self.hostname = hypervisor._info["hypervisor_hostname"] + self.hypervisor_id = hypervisor._info["id"] + break + + def _get_instance_by_uuid(self, ctxt, uuid): + filters = {'uuid': uuid} + db_matches = db.instance_get_all_by_filters(ctxt, filters) + return db_matches + + def _get_pvcid_from_metadata(self, instance): + """ + Because the data structure of the instance passed by + the nova manager is different from normal structure, + use this method to get the PowerVC id from the instance + metadata + """ + if not isinstance(instance, dict): + instance = instance.__dict__ + metadata = instance.get('metadata') + # In some cases, it's _metadata + if metadata is None: + metadata = instance.get('_metadata') + + LOG.debug(_("Got metadata: %s") % metadata) + pvc_id = novautils.get_pvc_id_from_metadata(metadata) + LOG.debug(_("Got pvc_id from _get_pvcid_from_metadata: %s") % pvc_id) + return pvc_id + + def _int_or_none(self, value): + try: + return int(value) + except Exception: + return None + + def get_info(self, instance): + """Get the current status of an instance, by name (not ID!) 
+
+        Returns a dict containing:
+
+        :state: the running state, one of the power_state codes
+        :max_mem: (int) the maximum memory in KBytes allowed
+        :mem: (int) the memory in KBytes used by the domain
+        :num_cpu: (int) the number of virtual CPUs for the domain
+        :cpu_time: (int) the CPU time used in nanoseconds
+        """
+        LOG.debug(_("get_info() Enter: %s" % str(instance)))
+        lpar_instance = None
+        try:
+            pvc_id = self._get_pvcid_from_metadata(instance)
+            if pvc_id is None:
+                LOG.debug(_("Find pvc_id from DB"))
+                ctx = nova.context.get_admin_context()
+                db_instances = self._get_instance_by_uuid(ctx,
+                                                          instance['uuid'])
+                pvc_id = self._get_pvcid_from_metadata(db_instances[0])
+            LOG.debug(_("pvc_id: %s" % str(pvc_id)))
+            lpar_instance = self.get_instance(pvc_id)
+            LOG.debug(_("Found instance: %s" % str(lpar_instance)))
+        except Exception:
+            raise exception.NotFound
+
+        if lpar_instance is None:
+            raise exception.NotFound
+
+        LOG.debug(_("get_info() Exit"))
+        max_mem = self._int_or_none(lpar_instance._info.get('max_memory_mb'))
+        mem = self._int_or_none(lpar_instance._info.get('memory_mb'))
+        num_cpu = self._int_or_none(lpar_instance._info.get('cpus'))
+        return {'state': lpar_instance._info['OS-EXT-STS:power_state'],
+                'max_mem': max_mem,
+                'mem': mem,
+                'num_cpu': num_cpu,
+                'cpu_time': 0}
+
+    def get_num_instances(self):
+        """Return the total number of virtual machines.
+
+        Return the number of virtual machines that the hypervisor knows
+        about.
+
+        .. note::
+
+            This implementation works for all drivers, but it is
+            not particularly efficient. Maintainers of the virt drivers are
+            encouraged to override this method with something more
+            efficient.
+        """
+        return len(self.list_instances())
+
+    def list_instances(self):
+        """
+        Return the names of all the instances known to the virtualization
+        layer, as a list.
+        """
+        return self._service.list_instances()
+
+    def list_instance_uuids(self):
+        """
+        Return the UUIDs of all the instances known to the virtualization
+        layer, as a list.
+        """
+        servers = self.list_instances()
+        uuids = []
+        for server in servers:
+            uuids.append(server.id)
+        return uuids
+
+    def get_instance(self, instance_id):
+        """
+        Get the instance with the given id or None if not found.
+        """
+        instance = None
+        try:
+            instance = self._service.get_instance(instance_id)
+        except NotFound:
+            pass
+        return instance
+
+    def list_flavors(self):
+        """
+        Return the names of all the flavors known to the virtualization
+        layer, as a list.
+        """
+        return self._service.list_flavors()
+
+    def get_flavor_extraspecs(self, flavor):
+        """
+        Return the extraspecs defined for a flavor as a dict.
+        """
+        return self._service.get_flavor_extraspecs(flavor)
+
+    def spawn(self, context, instance, image_meta, injected_files,
+              admin_password, network_info=None, block_device_info=None):
+        """
+        Create a new instance/VM/domain on the virtualization platform.
+
+        Once this successfully completes, the instance should be
+        running (power_state.RUNNING).
+
+        If this fails, any partial instance should be completely
+        cleaned up, and the virtualization platform should be in the state
+        that it was before this call began.
+
+        :param context: security context
+        :param instance: Instance object as returned by DB layer.
+                         This function should use the data there to guide
+                         the creation of the new instance.
+        :param image_meta: image object returned by nova.image.glance that
+                           defines the image from which to boot this instance
+        :param injected_files: User files to inject into instance.
+        :param admin_password: Administrator password to set in instance.
+        :param network_info:
+            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param block_device_info: Information about block devices to be
+                                  attached to the instance.
+        """
+        LOG.info(_("Deploying instance %(uuid)s") % instance)
+
+        # get the PowerVC image id
+        pvcimage = self._get_pvc_image_uuid(image_meta)
+
+        # get the PowerVC flavor
+        pvcflavor = self._get_pvc_flavor(context, instance)
+
+        # check if the host selection will be deferred to PowerVC
+        isDefer = self._check_defer_placement(instance)
+
+        # If the hosting OS decides to select a host,
+        # get the PowerVC hypervisor host name;
+        # otherwise the host name will be ignored
+        pvcHypervisor = None
+        pvcAvailabilityZone = None
+        if not isDefer:
+            # When targeting a compute node, use the cached
+            # PowerVC hypervisor id that this nova compute service
+            # represents; it will be the same.
+            pvcHypervisor = self.hypervisor_id
+            pvcAvailabilityZone = self._get_pvc_avalability_zone(instance)
+
+        # get the PowerVC network info
+        pvc_nics = self._get_pvc_network_info(context, network_info)
+
+        LOG.debug("Instance to spawn: %s" % instance)
+        createdServer = None
+        try:
+            createdServer = \
+                self._service.spawn(context=context,
+                                    instance=instance,
+                                    injected_files=injected_files,
+                                    name=instance['hostname'],
+                                    imageUUID=pvcimage,
+                                    flavorDict=pvcflavor,
+                                    nics=pvc_nics,
+                                    hypervisorID=pvcHypervisor,
+                                    availability_zone=pvcAvailabilityZone,
+                                    isDefer=isDefer)
+        except BadRequest as e1:
+            with excutils.save_and_reraise_exception():
+                self._clean_vm_and_save_fault_message(e1, e1.message,
+                                                      context, instance)
+        except exception.InstanceInvalidState as e2:
+            with excutils.save_and_reraise_exception():
+                self._clean_vm_and_save_fault_message(e2, e2.message,
+                                                      context, instance)
+        except Exception as e:
+            with excutils.save_and_reraise_exception():
+                self._clean_vm_and_save_fault_message(e, e.message,
+                                                      context, instance)
+
+        LOG.debug("Successfully created the instance to spawn: %s"
+                  % createdServer)
+
+        return createdServer
+
+    def _clean_vm_and_save_fault_message(self, exp, message, context,
+                                         instance):
+        """
+        This method does the following things when an exception is thrown
+        in spawn:
+        1. log the powervc side error message to the hosting os fault property
+        2. remove pvc_id from the local vm
+        3. destroy the vm in PowerVC with the pvc_id set in the instance
+        """
+        LOG.warning("Instance creation failed: %s" % message)
+        # At this time, the powervc uuid is not saved in the instance
+        # metadata, so get it from the db, then update the instance and call
+        # destroy(). Remove pvc_id before destroy to avoid instance
+        # synchronization.
+        meta = db.instance_metadata_get(context, instance['uuid'])
+        pvc_id = meta.get(constants.PVC_ID, '')
+
+        # To log the powervc side error message to the hosting os fault
+        # property, raise an InstanceDeployFailure with the powervc error
+        # message set, and the framework will help to set the error message
+        # to the hosting os fault property.
+        # But for now the openstack framework still has an issue preventing
+        # this:
+        # https://bugs.launchpad.net/nova/+bug/1161661
+        # To work around this, just do the following steps and the powervc
+        # error can be shown on the hosting os vm instance:
+        # 1. Set scheduler_max_attempts=1 in /etc/nova/nova.conf
+        # 2. Restart the openstack-nova-scheduler service
+
+        # remove pvc_id
+        if constants.PVC_ID in meta.keys():
+            del meta[constants.PVC_ID]
+        update_properties = {'metadata': meta}
+        db.instance_update(context, instance['uuid'], update_properties)
+
+        # destroy the vm on the pvc side
+        instance['metadata'] = {constants.PVC_ID: pvc_id}
+        try:
+            self.destroy(None, instance, None)
+        except Exception as e:
+            # Ignore the exception in destroy()
+            LOG.warning("Destroying the instance threw an exception: %s"
+                        % e.message)
+
+    def destroy(self, context, instance, network_info, block_device_info=None,
+                destroy_disks=True):
+        """Destroy (shutdown and delete) the specified instance.
+
+        If the instance is not found (for example if networking failed), this
+        function should still succeed. It's probably a good idea to log a
+        warning in that case.
+
+        :param instance: Instance object as returned by DB layer.
+        :param network_info:
+            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param block_device_info: Information about block devices that should
+                                  be detached from the instance.
+        :param destroy_disks: Indicates if disks should be destroyed
+
+        """
+        return self._service.destroy(instance)
+
+    def reboot(self, context, instance, network_info, reboot_type,
+               block_device_info=None, bad_volumes_callback=None):
+        """Reboot the specified instance.
+
+        After this is called successfully, the instance's state
+        goes back to power_state.RUNNING. The virtualization
+        platform should ensure that the reboot action has completed
+        successfully even in cases in which the underlying domain/vm
+        is paused or halted/stopped.
+
+        :param instance: nova.objects.instance.Instance
+        :param network_info:
+            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param reboot_type: Either a HARD or SOFT reboot
+        :param block_device_info: Info pertaining to attached volumes
+        :param bad_volumes_callback: Function to handle any bad volumes
+            encountered
+        """
+        return self._service.reboot(instance, reboot_type)
+
+    def get_console_pool_info(self, console_type):
+        raise NotImplementedError()
+
+    def get_console_output(self, instance):
+        raise NotImplementedError()
+
+    def get_vnc_console(self, instance):
+        raise NotImplementedError()
+
+    def get_spice_console(self, instance):
+        raise NotImplementedError()
+
+    def get_diagnostics(self, instance):
+        """Return data about VM diagnostics."""
+        raise NotImplementedError()
+
+    def get_all_bw_counters(self, instances):
+        """Return bandwidth usage counters for each interface on each
+           running VM.
+        """
+        raise NotImplementedError()
+
+    def get_all_volume_usage(self, context, compute_host_bdms):
+        """Return usage info for volumes attached to vms on
+           a given host.
+        """
+        raise NotImplementedError()
+
+    def get_host_ip_addr(self):
+        """
+        Retrieves the IP address of the dom0
+        """
+        default_value = '127.0.0.1'
+        host_ip = CONF.my_ip
+        if not host_ip:
+            host_ip = self._get_local_ips()[0]
+        if host_ip:
+            return host_ip
+        else:
+            return default_value
+
+    def _get_local_ips(self):
+        """
+        Retrieves all the IP addresses of the host machine
+        """
+        addr_info = socket.getaddrinfo(socket.gethostname(), None, 0, 0, 0)
+        # Returns IPv4 and IPv6 addresses, ordered by protocol family
+        addr_info.sort()
+        host_ips = []
+        for one_addr_info in addr_info:
+            # The data structure of addr_info returned by the method
+            # getaddrinfo is (family, socktype, proto, canonname, sockaddr).
+            # For example:
+            #     (2, 1, 6, '', ('82.94.164.162', 80))
+            #     (10, 1, 6, '', ('2001:888:2000:d::a2', 80, 0, 0))
+            host_ips.append(one_addr_info[4][0])
+        return host_ips
+
+    def attach_volume(self, context, connection_info, instance, mountpoint,
+                      disk_bus=None, device_type=None, encryption=None):
+        """Attach the disk to the instance at mountpoint using info."""
+        return self._service.attach_volume(connection_info, instance,
+                                           mountpoint)
+
+    def detach_volume(self, connection_info, instance, mountpoint,
+                      encryption=None):
+        """Detach the disk attached to the instance."""
+        return self._service.detach_volume(connection_info, instance,
+                                           mountpoint)
+
+    def list_os_attachments(self, server_id):
+        """List the volumes attached to the specified instance."""
+        return self._service.list_os_attachments(server_id)
+
+    def attach_interface(self, instance, image_meta, network_info):
+        """Attach an interface to the instance."""
+        raise NotImplementedError()
+
+    def detach_interface(self, instance, network_info):
+        """Detach an interface from the instance."""
+        raise NotImplementedError()
+
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type, network_info,
+                                   block_device_info=None):
+        """
+        This method is called at the beginning of a resize instance request.
+        The sequence for resize operations is the following:
+        1) User requests an instance resize to a new flavor
+        2) Compute manager calls the driver.migrate_disk_and_power_off()
+        3) Compute manager calls the driver.finish_migration()
+        4) User can either confirm or revert the resize
+        5) If confirmed, driver.confirm_migration() is called
+        6) If reverted, driver.finish_revert_migration() is called
+        Transfers the disk of a running instance in multiple phases, turning
+        off the instance before the end.
+        """
+        LOG.debug(_("The method migrate_disk_and_power_off is invoked."))
+        """
+        In order to support live resize in PowerVC, the power-off
+        operation is removed.
+        """
+
+    def snapshot(self, context, instance, image_id, update_task_state):
+        """Capture an image of an instance.
+
+        :param context: the context for the capture
+        :param instance: the instance to be captured
+        :param image_id: the id of the local image created for the snapshot
+        :param update_task_state: function for updating task state callback
+
+        This function will cause the instance on the powervc server to be
+        captured, resulting in a new instance there. Synchronization
+        between the local, hosting OS image and the powervc image will happen
+        through the glance driver.
+        """
+        image = glance.get_default_image_service().show(context, image_id)
+        update_task_state(task_state=task_states.IMAGE_PENDING_UPLOAD)
+        self._service.snapshot(context, instance,
+                               image_id, image)
+        update_task_state(task_state=task_states.IMAGE_UPLOADING,
+                          expected_state=task_states.IMAGE_PENDING_UPLOAD)
+
+    def finish_migration(self, context, migration, instance, disk_info,
+                         network_info, image_meta, resize_instance,
+                         block_device_info=None, power_on=True):
+        """Completes a resize.
+
+        :param context: the context for the migration/resize
+        :param migration: the migrate/resize information
+        :param instance: the instance being migrated/resized
+        :param disk_info: the newly transferred disk information
+        :param network_info:
+            :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
+        :param image_meta: image object returned by nova.image.glance that
+                           defines the image from which this instance
+                           was created
+        :param resize_instance: True if the instance is being resized,
+                                False otherwise
+        :param block_device_info: instance volume block device info
+        :param power_on: True if the instance should be powered on, False
+                         otherwise
+        """
+        returnvalue = False
+
+        if resize_instance:
+            LOG.debug(_("Begin to resize the instance."))
+            returnvalue = self._service.resize_instance(context, migration,
+                                                        instance,
+                                                        image_meta)
+            # Support auto-confirm
+            self.confirm_migration(None, instance, None)
+            # Handle updating the correct host
+            self._service.update_correct_host(context, instance)
+
+        else:
+            # TODO: if we want to implement cold migration, we can add
+            # the corresponding code in this branch.
+            LOG.debug(_("The cold migration has not been implemented."))
+            raise NotImplementedError()
+        """
+        The PowerVC driver can support live resize now, so there is no need
+        to start the instance directly. For that reason, the 'power-on'
+        operation is removed.
+        """
+
+        return returnvalue
+
+    def confirm_migration(self, migration, instance, network_info):
+        """Confirms a resize, destroying the source VM."""
+        LOG.debug(_("Confirm a resize operation."))
+        return self._service.confirm_migration(instance)
+
+    def finish_revert_migration(self, instance, network_info,
+                                block_device_info=None, power_on=True):
+        """
+        Finish reverting a resize.
+ + :param instance: the instance being migrated/resized + :param network_info: + :py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info` + :param block_device_info: instance volume block device info + :param power_on: True if the instance should be powered on, False + otherwise + """ + raise NotImplementedError() + + def pause(self, instance): + """Pause the specified instance.""" + raise NotImplementedError() + + def unpause(self, instance): + """Unpause paused VM instance.""" + raise NotImplementedError() + + def suspend(self, instance): + """suspend the specified instance.""" + raise NotImplementedError() + + def resume(self, instance, network_info, block_device_info=None): + """resume the specified instance.""" + raise NotImplementedError() + + def resume_state_on_host_boot(self, context, instance, network_info, + block_device_info=None): + """resume guest state when a host is booted.""" + raise NotImplementedError() + + def rescue(self, context, instance, network_info, image_meta, + rescue_password): + """Rescue the specified instance.""" + raise NotImplementedError() + + def unrescue(self, instance, network_info): + """Unrescue the specified instance.""" + raise NotImplementedError() + + def power_off(self, instance): + """Power off the specified instance.""" + return self._service.power_off(instance) + + def power_on(self, context, instance, network_info, + block_device_info=None): + """Power on the specified instance.""" + return self._service.power_on(instance) + + def soft_delete(self, instance): + """Soft delete the specified instance.""" + raise NotImplementedError() + + def restore(self, instance): + """Restore the specified instance.""" + raise NotImplementedError() + + def get_available_resource(self, nodename): + """Retrieve resource information. + + This method is called when nova-compute launches, and + as part of a periodic task + + :param nodename: + node which the caller want to get resources from + a driver that manages only one node can safely ignore this + :returns: Dictionary describing resources + """ + hypervisor = self.get_hypervisor_by_hostname(self.hostname) + if hypervisor is None: + return None + info = hypervisor._info + + local_gb = info["local_gb"] + if int(local_gb) == 0: + local_gb = info["local_gb_used"] + + vcpus = int(float(info["vcpus"]) - float(info["proc_units_reserved"])) + memory_mb = int(info["memory_mb"]) - int(info["memory_mb_reserved"]) + + dic = {'vcpus': vcpus, + 'vcpus_used': info["vcpus_used"], + 'memory_mb': memory_mb, + 'memory_mb_used': info["memory_mb_used"], + 'local_gb': local_gb, + 'local_gb_used': info["local_gb_used"], + 'disk_available_least': info["disk_available_least"], + 'hypervisor_hostname': info["hypervisor_hostname"], + 'hypervisor_type': info["hypervisor_type"], + 'hypervisor_version': info["hypervisor_version"], + 'cpu_info': info["cpu_info"], + 'supported_instances': jsonutils.dumps( + constants.POWERVC_SUPPORTED_INSTANCES) + } + return dic + + def _get_cpu_info(self): + """Get cpuinfo information. 
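+
+        Returns a JSON string of the form (values fixed by this driver):
+
+            {"arch": "ppc64", "model": "powervc", "vendor": "IBM",
+             "topology": {"sockets": "1", "cores": "1", "threads": "1"},
+             "features": []}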
+ + """ + + cpu_info = dict() + + cpu_info['arch'] = 'ppc64' + cpu_info['model'] = 'powervc' + cpu_info['vendor'] = 'IBM' + + topology = dict() + topology['sockets'] = '1' + topology['cores'] = '1' + topology['threads'] = '1' + cpu_info['topology'] = topology + + features = list() + cpu_info['features'] = features + + return jsonutils.dumps(cpu_info) + + def pre_live_migration(self, ctxt, instance_ref, + block_device_info, network_info, disk, + migrate_data=None): + """Prepare an instance for live migration + + :param ctxt: security context + :param instance_ref: instance object that will be migrated + :param block_device_info: instance block device information + :param network_info: instance network information + :param disk: Instance disk information, if doing block migration + :param migrate_data: implementation specific data dict. + """ + return {} + + def pre_block_migration(self, ctxt, instance_ref, disk_info): + """Prepare a block device for migration + + :param ctxt: security context + :param instance_ref: instance object that will have its disk migrated + :param disk_info: information about disk to be migrated (as returned + from get_instance_disk_info()) + """ + raise pvc_exception.BlockMigrationException() + + def live_migration(self, ctxt, instance_ref, dest, + post_method, recover_method, block_migration=False, + migrate_data=None): + """Live migration of an instance to another host. + + :params ctxt: security context + :params instance_ref: + nova.db.sqlalchemy.models.Instance object + instance object that is migrated. + :params dest: destination host + :params post_method: + post operation method. + expected nova.compute.manager.post_live_migration. + :params recover_method: + recovery method when any exception occurs. + expected nova.compute.manager.recover_live_migration. + :params block_migration: if true, migrate VM disk. + :params migrate_data: implementation specific params. + + """ + isDefer = self._check_defer_placement(instance_ref) + if isDefer: + dest = None + try: + self._service.live_migrate(instance_ref, dest, migrate_data) + except Exception as e: + with excutils.save_and_reraise_exception(): + LOG.error(_("Live Migration failure: %s"), e, + instance=instance_ref) + recover_method(ctxt, instance_ref, dest, block_migration, + migrate_data) + + post_method(ctxt, instance_ref, dest, block_migration, migrate_data) + + def post_live_migration_at_destination(self, ctxt, instance_ref, + network_info, + block_migration=False, + block_device_info=None): + """Post operation of live migration at destination host. + + :param ctxt: security context + :param instance_ref: instance object that is migrated + :param network_info: instance network information + :param block_migration: if true, post operation of block_migration. + """ + pass + + def check_instance_shared_storage_local(self, ctxt, instance): + """Check if instance files located on shared storage. + + This runs check on the destination host, and then calls + back to the source host to check the results. + + :param ctxt: security context + :param instance: nova.db.sqlalchemy.models.Instance + """ + raise NotImplementedError() + + def check_instance_shared_storage_remote(self, ctxt, data): + """Check if instance files located on shared storage. 
+
+        :param context: security context
+        :param data: result of check_instance_shared_storage_local
+        """
+        raise NotImplementedError()
+
+    def check_instance_shared_storage_cleanup(self, ctxt, data):
+        """Do cleanup on host after check_instance_shared_storage calls
+
+        :param ctxt: security context
+        :param data: result of check_instance_shared_storage_local
+        """
+        pass
+
+    def check_can_live_migrate_destination(self, ctxt, instance_ref,
+                                           src_compute_info, dst_compute_info,
+                                           block_migration=False,
+                                           disk_over_commit=False):
+        """Check if it is possible to execute live migration.
+
+        This runs checks on the destination host, and then calls
+        back to the source host to check the results.
+
+        :param ctxt: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance
+        :param src_compute_info: Info about the sending machine
+        :param dst_compute_info: Info about the receiving machine
+        :param block_migration: if true, prepare for block migration
+        :param disk_over_commit: if true, allow disk over commit
+        :returns: a dict containing migration info (hypervisor-dependent)
+        """
+        # Get the latest instance information from powervc and
+        # validate that it's safe to request a live migration.
+        meta = instance_ref.get('metadata')
+        lpar_instance = self.get_instance(meta['pvc_id'])
+
+        if lpar_instance is None:
+            reason = (_("Unable to migrate uuid:%s to host %s: "
+                        "Unable to retrieve the PowerVC instance.")
+                      % (instance_ref['uuid'],
+                         dst_compute_info['hypervisor_hostname']))
+            raise exception.MigrationPreCheckError(reason=reason)
+
+        server_dict = lpar_instance.__dict__
+        valid = (self._service._is_live_migration_valid(
+            server_dict['status'], server_dict['health_status']))
+        if not valid:
+            reason = (_("Unable to migrate uuid:%s to host %s: "
+                        "PowerVC validation failed. Please verify the "
+                        "instance is active and its health status is OK. "
+                        "If the RMC connection to the HMC is not active, "
+                        "live migration cannot be attempted.")
+                      % (instance_ref['uuid'],
+                         dst_compute_info['hypervisor_hostname']))
+            raise exception.MigrationPreCheckError(reason=reason)
+
+        # The PowerVC driver does not support block migration or disk over
+        # commit. Let our callers know with a failure.
+ if block_migration: + reason = (_("Unable to migrate uuid:%s to host %s: " + "Block Migration not supported") + % (instance_ref['uuid'], + dst_compute_info['hypervisor_hostname'])) + raise exception.MigrationPreCheckError(reason=reason) + + if disk_over_commit: + reason = (_("Unable to migrate uuid:%s to host %s: " + "Disk Over Commit not supported") + % (instance_ref['uuid'], + dst_compute_info['hypervisor_hostname'])) + raise exception.MigrationPreCheckError(reason=reason) + + # check if the host selection will be defer to PowerVC + isDefer = self._check_defer_placement(instance_ref) + if not isDefer: + valid_hosts = self._service.get_valid_destinations(instance_ref) + for key in valid_hosts: + if key == CONF.host: + return dst_compute_info + msg = (_('Destination host %s for live migration is invalid' + ' following powervc validation check for the instance %s') + % (dst_compute_info, instance_ref)) + raise exception.Invalid(msg) + else: + return dst_compute_info + + def check_can_live_migrate_destination_cleanup(self, ctxt, + dest_check_data): + """Do required cleanup on dest host after check_can_live_migrate calls + + :param ctxt: security context + :param dest_check_data: result of check_can_live_migrate_destination + """ + pass + + def check_can_live_migrate_source(self, ctxt, instance_ref, + dest_check_data): + """Check if it is possible to execute live migration. + + This checks if the live migration can succeed, based on the + results from check_can_live_migrate_destination. + + :param context: security context + :param instance_ref: nova.db.sqlalchemy.models.Instance + :param dest_check_data: result of check_can_live_migrate_destination + :returns: a dict containing migration info (hypervisor-dependent) + """ + return dest_check_data + + def refresh_security_group_rules(self, security_group_id): + """This method is called after a change to security groups. + + All security groups and their associated rules live in the datastore, + and calling this method should apply the updated rules to instances + running the specified security group. + + An error should be raised if the operation cannot complete. + + """ + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """This method is called when a security group is added to an instance. + + This message is sent to the virtualization drivers on hosts that are + running an instance that belongs to a security group that has a rule + that references the security group identified by `security_group_id`. + It is the responsibility of this method to make sure any rules + that authorize traffic flow with members of the security group are + updated and any new members can communicate, and any removed members + cannot. + + Scenario: + * we are running on host 'H0' and we have an instance 'i-0'. + * instance 'i-0' is a member of security group 'speaks-b' + * group 'speaks-b' has an ingress rule that authorizes group 'b' + * another host 'H1' runs an instance 'i-1' + * instance 'i-1' is a member of security group 'b' + + When 'i-1' launches or terminates we will receive the message + to update members of group 'b', at which time we will make + any changes needed to the rules for instance 'i-0' to allow + or deny traffic coming from 'i-1', depending on if it is being + added or removed from the group. + + In this scenario, 'i-1' could just as easily have been running on our + host 'H0' and this method would still have been called. 
The point was + that this method isn't called on the host where instances of that + group are running (as is the case with + :py:meth:`refresh_security_group_rules`) but is called where references + are made to authorizing those instances. + + An error should be raised if the operation cannot complete. + + """ + raise NotImplementedError() + + def refresh_provider_fw_rules(self): + """This triggers a firewall update based on database changes. + + When this is called, rules have either been added or removed from the + datastore. You can retrieve rules with + :py:meth:`nova.db.provider_fw_rule_get_all`. + + Provider rules take precedence over security group rules. If an IP + would be allowed by a security group ingress rule, but blocked by + a provider rule, then packets from the IP are dropped. This includes + intra-project traffic in the case of the allow_project_net_traffic + flag for the libvirt-derived classes. + + """ + raise NotImplementedError() + + def reset_network(self, instance): + """reset networking for specified instance.""" + pass + + def ensure_filtering_rules_for_instance(self, instance_ref, network_info): + """Setting up filtering rules and waiting for its completion. + + To migrate an instance, filtering rules to hypervisors + and firewalls are inevitable on destination host. + ( Waiting only for filtering rules to hypervisor, + since filtering rules to firewall rules can be set faster). + + Concretely, the below method must be called. + - setup_basic_filtering (for nova-basic, etc.) + - prepare_instance_filter(for nova-instance-instance-xxx, etc.) + + to_xml may have to be called since it defines PROJNET, PROJMASK. + but libvirt migrates those value through migrateToURI(), + so , no need to be called. + + Don't use thread for this method since migration should + not be started when setting-up filtering rules operations + are not completed. + + :params instance_ref: nova.db.sqlalchemy.models.Instance object + + """ + pass + + def filter_defer_apply_on(self): + """Defer application of IPTables rules.""" + pass + + def filter_defer_apply_off(self): + """Turn off deferral of IPTables rules and apply the rules now.""" + pass + + def unfilter_instance(self, instance, network_info): + """Stop filtering instance.""" + pass + + def set_admin_password(self, context, instance_id, new_pass=None): + """ + Set the root password on the specified instance. + + The first parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. The second + parameter is the value of the new password. + """ + raise NotImplementedError() + + def inject_file(self, instance, b64_path, b64_contents): + """ + Writes a file on the specified instance. + + The first parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. The second + parameter is the base64-encoded path to which the file is to be + written on the instance; the third is the contents of the file, also + base64-encoded. + """ + raise NotImplementedError() + + def change_instance_metadata(self, context, instance, diff): + """ + Applies a diff to the instance metadata. + + This is an optional driver method which is used to publish + changes to the instance's metadata to the hypervisor. If the + hypervisor has no means of publishing the instance metadata to + the instance, then this method should not be implemented. 
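+
+        The diff format, as nova's metadata update path produces it, maps
+        each changed key to ['+', new_value] for an add/update or to ['-']
+        for a delete, e.g. {'powervm:defer_placement': ['+', 'true']}.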
+ """ + pass + + def inject_network_info(self, instance, nw_info): + """inject network info for specified instance.""" + pass + + def poll_rebooting_instances(self, timeout, instances): + """Poll for rebooting instances + + :param timeout: the currently configured timeout for considering + rebooting instances to be stuck + :param instances: instances that have been in rebooting state + longer than the configured timeout + """ + raise NotImplementedError() + + def host_power_action(self, host, action): + """Reboots, shuts down or powers up the host.""" + raise NotImplementedError() + + def host_maintenance_mode(self, host, mode): + """Start/Stop host maintenance window. On start, it triggers + guest VMs evacuation. + """ + raise NotImplementedError() + + def set_host_enabled(self, host, enabled): + """Sets the specified host's ability to accept new instances.""" + raise NotImplementedError() + + def get_host_uptime(self, host): + """Returns the result of calling "uptime" on the target host.""" + raise NotImplementedError() + + def plug_vifs(self, instance, network_info): + """Plug VIFs into networks.""" + # TODO: this is hardcoded + pass + + def unplug_vifs(self, instance, network_info): + """Unplug VIFs from networks.""" + # TODO: this is hardcoded + pass + + def get_host_stats(self, refresh=False): + """Return the current state of the host. + + If 'refresh' is True, run update the stats first. + """ + if refresh or self._stats is None: + self._update_status() + return self._stats + + def node_is_available(self, nodename): + """Return that a given node is known and available.""" + return nodename in self.get_available_nodes(refresh=True) + + def block_stats(self, instance_name, disk_id): + """ + Return performance counters associated with the given disk_id on the + given instance_name. These are returned as [rd_req, rd_bytes, wr_req, + wr_bytes, errs], where rd indicates read, wr indicates write, req is + the total number of I/O requests made, bytes is the total number of + bytes transferred, and errs is the number of requests held up due to a + full pipeline. + + All counters are long integers. + + This method is optional. On some platforms (e.g. XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID. + """ + raise NotImplementedError() + + def interface_stats(self, instance_name, iface_id): + """ + Return performance counters associated with the given iface_id on the + given instance_id. These are returned as [rx_bytes, rx_packets, + rx_errs, rx_drop, tx_bytes, tx_packets, tx_errs, tx_drop], where rx + indicates receive, tx indicates transmit, bytes and packets indicate + the total number of bytes or packets transferred, and errs and dropped + is the total number of packets failed / dropped. + + All counters are long integers. + + This method is optional. On some platforms (e.g. XenAPI) performance + statistics can be retrieved directly in aggregate form, without Nova + having to do the aggregation. On those platforms, this method is + unused. + + Note that this function takes an instance ID. + """ + raise NotImplementedError() + + def legacy_nwinfo(self): + """True if the driver requires the legacy network_info format.""" + return False + + def macs_for_instance(self, instance): + """What MAC addresses must this instance have? + + Some hypervisors (such as bare metal) cannot do freeform virtualisation + of MAC addresses. 
This method allows drivers to return a set of MAC + addresses that the instance is to have. allocate_for_instance will take + this into consideration when provisioning networking for the instance. + + Mapping of MAC addresses to actual networks (or permitting them to be + freeform) is up to the network implementation layer. For instance, + with openflow switches, fixed MAC addresses can still be virtualised + onto any L2 domain, with arbitrary VLANs etc, but regular switches + require pre-configured MAC->network mappings that will match the + actual configuration. + + Most hypervisors can use the default implementation which returns None. + Hypervisors with MAC limits should return a set of MAC addresses, which + will be supplied to the allocate_for_instance call by the compute + manager, and it is up to that call to ensure that all assigned network + details are compatible with the set of MAC addresses. + + This is called during spawn_instance by the compute manager. + + :return: None, or a set of MAC ids (e.g. set(['12:34:56:78:90:ab'])). + None means 'no constraints', a set means 'these and only these + MAC addresses'. + """ + return None + + def manage_image_cache(self, context, all_instances): + """ + Manage the driver's local image cache. + + Some drivers chose to cache images for instances on disk. This method + is an opportunity to do management of that cache which isn't directly + related to other calls into the driver. The prime example is to clean + the cache and remove images which are no longer of interest. + """ + pass + + def add_to_aggregate(self, context, aggregate, host, **kwargs): + """Add a compute host to an aggregate.""" + pass + + def remove_from_aggregate(self, context, aggregate, host, **kwargs): + """Remove a compute host from an aggregate.""" + pass + + def undo_aggregate_operation(self, context, op, aggregate, + host, set_error=True): + """Undo for Resource Pools.""" + raise NotImplementedError() + + def get_volume_connector(self, instance): + """Get connector information for the instance for attaching to volumes. + + Connector information is a dictionary representing the ip of the + machine that will be making the connection, the name of the iscsi + initiator and the hostname of the machine as follows:: + + { + 'ip': ip, + 'initiator': initiator, + 'host': hostname + } + + The PowerVC will only support FC volume. The connector information + as follow + { + 'host': hostname + 'wwpns': WWPNs + } + The PowerVC driver may not check the connection of the volume. It can + use the result of the attach REST API from the PowerVC to determine + whether the attach operation is successful. + """ + + return { + 'ip': '127.0.0.1', + 'host': 'hostname' + } + + def get_per_instance_usage(self): + """Get information about instance resource usage. + + :returns: dict of nova uuid => dict of usage info + """ + # TODO: This is hardcoded + return {} + + def instance_on_disk(self, instance): + """Checks access of instance files on the host. + + :param instance: instance to lookup + + Returns True if files of an instance with the supplied ID accessible on + the host, False otherwise. + + .. note:: + Used in rebuild for HA implementation and required for validation + of access to instance shared disk files + """ + return False + + def register_event_listener(self, callback): + """Register a callback to receive events. + + Register a callback to receive asynchronous event + notifications from hypervisors. 
The callback will + be invoked with a single parameter, which will be + an instance of the nova.virt.event.Event class. + """ + + self._compute_event_callback = callback + + def list_images(self): + """Return the names of all the images known to the virtualization + layer, as a list. + """ + return self._service.list_images() + + def get_hypervisor_by_hostname(self, hostname): + """Return the information of the specified hypervisors + by the given hostname + """ + if self.hostname: + return self._service.get_hypervisor(self.hypervisor_id) + + # (Re)Initialize the cache + hypervisorlist = self._service.list_hypervisors() + for hypervisor in hypervisorlist: + if hypervisor._info["service"]["host"] == self.host: + # Cache the hostname and hypervisor id + self.hostname = hypervisor._info["hypervisor_hostname"] + self.hypervisor_id = hypervisor._info["id"] + + return self._service.get_hypervisor(self.hypervisor_id) + + def _update_status(self): + """Retrieve status info from PowerVC.""" + LOG.debug(_("Updating host stats")) + hypervisor = self.get_hypervisor_by_hostname(self.hostname) + info = hypervisor._info + + local_gb = info["local_gb"] + if 0 == int(local_gb): + local_gb = info["local_gb_used"] + + vcpus = int(float(info["vcpus"]) - float(info["proc_units_reserved"])) + memory_mb = int(info["memory_mb"]) - int(info["memory_mb_reserved"]) + + data = {'vcpus': vcpus, + 'vcpus_used': info["vcpus_used"], + 'host_memory_total': memory_mb, + 'host_memory_free': info["free_ram_mb"], + 'disk_total': local_gb, + 'disk_used': info["local_gb_used"], + 'disk_available': info["free_disk_gb"], + 'disk_available_least': info["disk_available_least"], + 'hypervisor_hostname': info["hypervisor_hostname"], + 'hypervisor_type': info["hypervisor_type"], + 'hypervisor_version': info["hypervisor_version"], + 'supported_instances': constants.POWERVC_SUPPORTED_INSTANCES, + 'cpu_info': info["cpu_info"]} + self._stats = data + + def _get_pvc_image_uuid(self, image_meta): + """ + Get powerVC image UUID from local image instance property that is + synchronized from PowerVC image + """ + pvcImageUUID = None + + if image_meta['deleted']: + raise exception.ImageNotActive(image_id=image_meta['id']) + + # PowerVC image UUID will be saved in image_meta as + # property when image synchronization finished. 
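+        # Illustrative shape (hypothetical UUID value):
+        #     image_meta['properties'] == {'powervc_uuid': '<pvc-image-uuid>'}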
+ if image_meta['properties']: + pvcImageUUID = image_meta['properties']['powervc_uuid'] + LOG.debug("ImageUUID on powervc: %s" % pvcImageUUID) + + # raise exception if pvcImageUUID not found + if not pvcImageUUID: + raise exception.ImageNotFound(image_id=image_meta['name']) + return pvcImageUUID + + def _get_pvc_flavor(self, context, instance): + """ + Fill flavor detail from instance info into dic, this dic will be + passed into _boot() method to generate the flavor info to + PowerVC later + """ + # get flavor from DB + flavor_id = instance['instance_type_id'] + flavor = db.flavor_get(context, flavor_id) + return flavor + + def _get_pvc_network_info(self, context, network_info): + """ + Create the network info list which is used to fill in + the body of the REST request from local network + synchronized from PowerVC network + """ + + networks = [] + + for network_info_iter in network_info: + + network = dict() + + # Get the PowerVC network id + one_network_info = network_info_iter.get('network') + if one_network_info is None: + continue + local_id = one_network_info.get('id') + if local_id is None: + continue + # the 'net-id' will be changed to the 'uuid' in the boot method + pvc_id = self._service.get_pvc_network_uuid(context, local_id) + if pvc_id is None: + # 167976 abort the boot, if not found pvc network + raise exception.NetworkNotFoundForUUID(uuid=str(local_id)) + + network['net-id'] = pvc_id + + # The v4-fixed-ip will be changed to the fixed-ip in the boot + # method + subnets = one_network_info.get('subnets') + if subnets is None: + networks.append(network) + continue + for subnet_iter in subnets: + ips = subnet_iter.get('ips') + if ips is None: + continue + for ip_iter in ips: + ipaddress = ip_iter.get('address') + if ipaddress is None: + continue + network['v4-fixed-ip'] = ipaddress + + networks.append(network) + + return networks + + def _get_pvc_avalability_zone(self, instance): + """ + Return the availability zone constructed for the specified host + """ + # TODO: Need to revisit this method after confirmation with powervc + return ':' + instance['host'] + + def _check_defer_placement(self, instance): + """ + Get instance meta data from instance + such as "powervm:defer_placement" : "true" + """ + def str2bool(v): + return v.lower() in ('true', u'true') + + # The instance metatdata can be of multiple forms. + # Handle cases : dict, list of class InstanceMetadata + def get_defer_key_value(meta): + if isinstance(meta, dict): + for key in meta: + defer_val = meta[key] + if key == u'powervm:defer_placement': + return str2bool(defer_val) + else: + for entry in meta: + defer_key = entry.get('key', None) + defer_val = entry.get('value', None) + if defer_key == u'powervm:defer_placement': + return str2bool(defer_val) + return False + + isDefer = False + meta = instance.get('metadata', None) + if meta: + isDefer = get_defer_key_value(meta) + + return isDefer + + def get_pvc_flavor_by_flavor_id(self, flavor): + """ + Get detailed info of the flavor from the PowerVC + """ + return self._service.get_flavor_by_flavor_id(flavor) + + def update_instance_host(self, context, instance): + """ + Update the host value of the instance from powerVC. + """ + self._service.update_correct_host(context, instance) + + def cache_volume_data(self): + """ + Cache the volume data during syncing the PowerVC instances. 
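+
+        Returns a dict keyed by PowerVC volume id, for example:
+
+            {'<pvc-volume-uuid>': '<local-volume-uuid>'}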
+ """ + return self._service.cache_volume_data() + + def get_local_volume_id_from_pvc_id(self, powervc_volume_id): + list_all_volumes = self._service._cinderclient.volumes.list_all_volumes + volume_search_opts = {"metadata": {"pvc:id": powervc_volume_id}} + localvolumes = list_all_volumes(volume_search_opts) + if len(localvolumes) == 0: + return None + if len(localvolumes) > 1: + LOG.warning(_('More than one volume in local cinder ' + 'match one PowerVC volume: %s' % + (powervc_volume_id))) + + localvolume = localvolumes[0] + return localvolume.id diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/pvc_vm_states.py b/nova-powervc/powervc/nova/driver/virt/powervc/pvc_vm_states.py new file mode 100644 index 0000000..1bfd5ce --- /dev/null +++ b/nova-powervc/powervc/nova/driver/virt/powervc/pvc_vm_states.py @@ -0,0 +1,17 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +ACTIVE = u'ACTIVE' # PowerVC VM is running +BUILD = u'BUILD' # PowerVC VM only exists in DB +ERROR = u'ERROR' +SHUTOFF = u'SHUTOFF' +RESIZE = u'RESIZE' +VERIFY_RESIZE = u'VERIFY_RESIZE' +MIGRATING = u' MIGRATING' diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/rpcapi.py b/nova-powervc/powervc/nova/driver/virt/powervc/rpcapi.py new file mode 100644 index 0000000..14edaeb --- /dev/null +++ b/nova-powervc/powervc/nova/driver/virt/powervc/rpcapi.py @@ -0,0 +1,71 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +from nova import rpc +from oslo.messaging import Target +from nova.openstack.common import log as logging +from powervc.common.constants import PVC_TOPIC + +LOG = logging.getLogger(__name__) + +MAX_CACHE_ENTRY = 100 + + +class NetworkAPI(object): + """ + Client side of the PowerVC Neutron agent RPC API. + """ + BASE_RPC_API_VERSION = '1.0' + + def __init__(self, topic=None): + self.topic = topic if topic else PVC_TOPIC + # Caching the map between local network uuid and pvc network uuid + # catch[local_uuid] = pvc_uuid + # Insert entry: when get_pvc_network_uuid is called the first time + # Delete entry:not supported + # Capacity limit: no limit set + self.rpcclient = rpc.get_client(Target(topic=self.topic)) + self._cache = dict() + + def get_pvc_network_uuid(self, ctxt, network_uuid): + LOG.debug("network_uuid_cache has %s entries" % len(self._cache)) + # in case of upper limit, emit a warning + if (len(self._cache) > MAX_CACHE_ENTRY): + # In production env, debug is disabled by default + # there should not be many networks in real env. 
+ # log this for reference, this is not supposed to occur + LOG.warning("network_uuid_cache reach limit:%s" % len(self._cache)) + # check if the entry has been cached + if network_uuid in self._cache: + pvc_uuid = self._cache[network_uuid] + LOG.debug("network_uuid_cache found pvc_uuid %s for %s" % + (pvc_uuid, network_uuid)) + return pvc_uuid + kwargs = {} + kwargs['network_id'] = network_uuid + pvc_id = self.rpcclient.call(ctxt, 'get_pvc_network_uuid', **kwargs) + # in case None, we do not cache it + if pvc_id: + # add this entry to cache + LOG.debug("network_uuid_cache adding pvc_uuid %s for %s to cache" % + (pvc_id, network_uuid)) + self._cache[network_uuid] = pvc_id + return pvc_id + + def set_device_id_on_port_by_pvc_instance_uuid(self, + ctxt, + local_ins_id, + pvc_ins_id): + kwargs = {} + kwargs['device_id'] = local_ins_id + kwargs['pvc_ins_uuid'] = pvc_ins_id + method_name = "set_device_id_on_port_by_pvc_instance_uuid" + result = self.rpcclient.call(ctxt, method_name, **kwargs) + return result diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/service.py b/nova-powervc/powervc/nova/driver/virt/powervc/service.py new file mode 100644 index 0000000..3343c6e --- /dev/null +++ b/nova-powervc/powervc/nova/driver/virt/powervc/service.py @@ -0,0 +1,1314 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import httplib +from novaclient import exceptions +from nova import exception +from nova.image import glance +from nova.openstack.common import loopingcall +from nova.openstack.common import log as logging +from nova.compute import vm_states +from powervc.nova.driver.compute import constants +from powervc.nova.driver.virt.powervc.rpcapi import NetworkAPI +from powervc.nova.driver.virt.powervc import pvc_vm_states +from nova import db +from oslo.config import cfg +from powervc.common import constants as common_constants +from powervc.common import utils +from powervc import utils as powervc_utils +from powervc.common.gettextutils import _ +from nova.exception import Invalid +from nova.openstack.common import excutils +from powervc.nova.driver.compute import task_states +from nova.compute import flavors + +LOG = logging.getLogger(__name__) +CONF = cfg.CONF + + +class InvalidSCG(Invalid): + msg_fmt = _("Storage Connectivity Group is not supported: %(attr)s") + + +class PowerVCService(object): + """A service that exposes PowerVC functionality. + The services provided here are called by the driver. + The services leverage the nova client to interface to the PowerVC. + This design keeps the driver and client interface clean and simple + and provides a workspace for any data manipulation and utility work + that may need to be done. 
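+
+    A minimal usage sketch (illustrative; assumes ``pvc_client`` is an
+    already-constructed PowerVC client from the common client factory):
+
+        service = PowerVCService(pvc_client)
+        servers = service.list_instances()
+        images = service.list_images()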
+ """ + + def __init__(self, pvc_client): + """Initializer.""" + self._manager = pvc_client.manager + self._servers = pvc_client.servers + self._hypervisors = pvc_client.hypervisors + self._images = pvc_client.images + self._flavors = pvc_client.flavors + self._client = pvc_client + self._api = NetworkAPI() + self._volumes = pvc_client.volumes + + # Import factory here to avoid connection to env for unittest + from powervc.common.client import factory + self._cinderclient = factory.\ + LOCAL.new_client(str(common_constants.SERVICE_TYPES.volume)) + self.max_tries = CONF.powervc.volume_max_try_times + + self.longrun_loop_interval = CONF.powervc.longrun_loop_interval + self.longrun_initial_delay = CONF.powervc.longrun_initial_delay + # Add version checking as required + + def list_instances(self): + """Return the names of all the instances known to the virtualization + layer, as a list. + """ + return self._manager.list() + + def get_instance(self, instance_id): + """Get the instance with the given id or None if not found. + """ + return self._manager.get(instance_id) + + def list_images(self): + """Return the information of all the images known to the virtualization + layer, as a list. + """ + return self._images.list() + + def list_hypervisors(self): + """Return the information of all the hypervisors + known to the virtualization layer, as a list. + """ + return self._hypervisors.list() + + def get_hypervisor(self, hypervisor_id): + """Return the information of a specific hypervisor + known to the virtualization layer. + """ + return self._hypervisors.get(hypervisor_id) + + def _wait_for_state_change(self, server, original_state, expected_state, + middle_state): + """ + Utility method to wait for a server to change to the + expected state. + The process of some operation contains three states. + + param: original_state: the original state + of the instance + param: expected_state: the expected state + of the instance after the operation has been + executed + param: middle_state: the middle state of the instance + during the operation. If the operation has no middle state, + it can be set as original state. + """ + temp_server = self._manager.get(server) + if temp_server.status == expected_state: + LOG.debug("Service: VM %(vm_id)s successfully changed to %(state)s" + % {'vm_id': server.id, 'state': expected_state}) + raise loopingcall.LoopingCallDone(True) + if (temp_server.status != original_state and + temp_server.status != expected_state and + temp_server.status != middle_state): + LOG.debug(_("Expected state check failed, powerVC " + "instance status = %s" % temp_server.status)) + raise exception.InstanceInvalidState( + attr=server.status, + instance_uuid=server.id, + state='state', + method='_wait_for_state_change') + + def _wait_for_spawn_state_change(self, server): + """ + Utility method to wait for a spawned server to change to the + expected state. + """ + temp_server = self._manager.get(server) + temp_server_dict = temp_server.__dict__ + task_state = temp_server_dict.get('OS-EXT-STS:task_state') + if temp_server.status == pvc_vm_states.ACTIVE: + # Fix the issue when the instance in the status 'activating', + # starting or stopping the instance will lead the problem. 
+ if task_state is not None: + LOG.debug("VM %(vm_id)s is in the status %(state)s" + % {'vm_id': server.id, + 'state': task_states.ACTIVATING}) + else: + msg = "Service: VM %(vm_id)s successfully changed to %(state)s" + LOG.debug(msg % {'vm_id': server.id, + 'state': pvc_vm_states.ACTIVE}) + raise loopingcall.LoopingCallDone(True) + if temp_server.status == pvc_vm_states.ERROR: + fault_message = self._get_fault_message_from_pvc_vs(temp_server) + if fault_message is None: + fault_message = 'Unknown error occurred.' + raise exception.InstanceDeployFailure( + reason=fault_message) + if (temp_server.status != pvc_vm_states.BUILD + and temp_server.status != pvc_vm_states.ACTIVE): + LOG.debug(_("Expected state check failed, powerVC " + "instance status = %s" % temp_server.status)) + raise exception.InstanceInvalidState( + attr=temp_server.status, + instance_uuid=server.id, + state='state', + method='_wait_for_spawn_state_change') + + def _wait_for_reboot_state_change(self, server): + """ + Utility method to wait for a rebooted server to change to the + expected state. + """ + temp_server = self._manager.get(server) + task_state = getattr(temp_server, 'OS-EXT-STS:task_state') + if not task_state: + server_state = getattr(temp_server, 'OS-EXT-STS:vm_state') + # Treat reboot failed if vm_state is not active after reboot + if server_state != vm_states.ACTIVE: + reason = "Reboot failed, current VM %(vm_id)s state: " \ + "%(state)s." % {'vm_id': server.id, + 'state': server_state} + LOG.warning(reason) + raise exception.InstanceRebootFailure(reason=reason) + else: + vm_status_dict = {'vm_id': server.id, + 'state': pvc_vm_states.ACTIVE} + LOG.debug("Service: VM %(vm_id)s successfully rebooted. " \ + "Current status: %(state)s" % vm_status_dict) + raise loopingcall.LoopingCallDone(True) + + def _wait_for_resize_state_change(self, context, migration, + server, instance): + """ + Utility method to wait for a server which is resized + to change to the expected state. + + The process of the RESIZE operation contains three states. + SHUTOFF->RESIZE->VERIFY_RESIZE + + Because PowerVC supports the auto confirmation, the + status of server will change to 'SHUTOFF'. + + Note:now this method only supports the 'SHUTOFF' resize mode. + The 'Active' resize mode will be supported in the future + release. 
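+
+        With PowerVC auto-confirmation the VERIFY_RESIZE state may be
+        skipped entirely, so this check also accepts SHUTOFF or ACTIVE
+        with a cleared task state once the flavor update is verified.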
+
+        """
+
+        temp_server = self._manager.get(server)
+        new_instance_type = migration['new_instance_type_id']
+
+        # The status 'VERIFY_RESIZE' is the final status of the
+        # 'RESIZE' operation
+        if temp_server.status == pvc_vm_states.VERIFY_RESIZE:
+            LOG.debug(
+                "Service: VM %(vm_id)s successfully changed to %(state)s"
+                % {'vm_id': server.id, 'state':
+                   pvc_vm_states.VERIFY_RESIZE})
+            raise loopingcall.LoopingCallDone(True)
+
+        # In the auto-confirmation situation, the status 'SHUTOFF'
+        # can be accepted.
+        # Check whether the resize operation task completes:
+        # a) the task state of the specified instance is None
+        # b) the flavor has been updated to the new flavor
+
+        temp_server_dict = temp_server.__dict__
+        temp_server_task = temp_server_dict['OS-EXT-STS:task_state']
+
+        if ((temp_server.status == pvc_vm_states.SHUTOFF
+             or temp_server.status == pvc_vm_states.ACTIVE)
+                and temp_server_task is None):
+            if self._validate_flavor_update(context,
+                                            new_instance_type,
+                                            temp_server):
+                LOG.debug(_("Service: VM %s is auto-confirmed") % server.id)
+                raise loopingcall.LoopingCallDone(True)
+            else:
+                self._roll_back_after_resize_fail(migration, context,
+                                                  instance)
+                LOG.info(_("Can not resize the service: VM %s because "
+                           "PowerVC does not have enough resources.")
+                         % server.id)
+                raise exception.ResizeError("Error during confirming "
+                                            "the resize operation.")
+
+        if (temp_server.status != pvc_vm_states.SHUTOFF
+                and temp_server.status != pvc_vm_states.RESIZE
+                and temp_server.status != pvc_vm_states.ACTIVE
+                and temp_server.status != pvc_vm_states.VERIFY_RESIZE):
+            LOG.debug(_("Service: VM %s is in an unexpected status.")
+                      % server.id)
+            error_message = self._get_resize_fault_message(temp_server_dict)
+            if error_message is not None:
+                LOG.warning("Got an error while resizing the instance "
+                            "in PowerVC.")
+                self._roll_back_after_resize_fail(migration, context,
+                                                  instance)
+                raise exception.\
+                    ResizeError("Got error: %s while "
+                                "resizing the instance in PowerVC."
+                                % error_message)
+            self._roll_back_after_resize_fail(migration, context, instance)
+            raise exception.\
+                InstanceInvalidState(attr=temp_server.status,
+                                     instance_uuid=server.id,
+                                     state='state',
+                                     method='_wait_for_resize_state_change')
+
+    def _get_resize_fault_message(self, server):
+        """
+        Utility to get the error message of the resize operation.
+        :param server: the PowerVC server details as a dict.
+        """
+        detail_server = server
+        fault_message = None
+        if detail_server is not None:
+            fault = detail_server.get('fault')
+            if fault is not None:
+                fault_message = fault.get('message')
+        else:
+            # 'server' is a plain dict (and None in this branch), so there
+            # is no usable id attribute to log.
+            LOG.warning("Fail to get fault details: no server data given.")
+
+        return fault_message
+
+    def _roll_back_after_resize_fail(self, migration, context, instance):
+        """
+        Utility to roll back the instance after the resize operation fails.
+ :param migration + :param context + :param instance + """ + old_instance_type_id = migration['old_instance_type_id'] + new_instance_type_id = migration['new_instance_type_id'] + if old_instance_type_id != new_instance_type_id: + try: + pvc_flavor = flavors.get_flavor(old_instance_type_id) + except Exception: + LOG.info(_("Getting exception during getting the flavor.")) + LOG.info(_("Rolling back of the flavor fails.")) + return + sys_meta = dict() + sys_meta = flavors.save_flavor_info(sys_meta, pvc_flavor) + instance.instance_type_id = pvc_flavor['id'] + instance.memory_mb = pvc_flavor['memory_mb'] + instance.vcpus = pvc_flavor['vcpus'] + instance.root_gb = pvc_flavor['root_gb'] + instance.ephemeral_gb = pvc_flavor['ephemeral_gb'] + instance.system_metadata = sys_meta + instance.save() + + def _wait_for_confirm_state_change(self, server): + """ + This method is used to wait and check the state of + the confirmation change. + + :param context: the context of the hosting OS. + :param new_instance_type: the new instance type. + :param server: the VM server instance + """ + + temp_server = self._manager.get(server) + + """ + The PowerVC driver supports the auto-confirm. + In the 'SHUTOFF' mode resize, when confirm the resize, + the server is not started. So the accepted status of server + is 'SHUTOFF'. + + """ + + temp_server_dict = temp_server.__dict__ + temp_server_task = temp_server_dict['OS-EXT-STS:task_state'] + + if (temp_server.status == pvc_vm_states.SHUTOFF and + temp_server_task is None): + LOG.debug(_("The resize operation of the service:\ + VM %s is confirmed") % server.id) + raise loopingcall.LoopingCallDone(True) + + if (temp_server.status == pvc_vm_states.ACTIVE and + temp_server_task is None): + LOG.debug(_("The resize operation of the service:\ + VM %s is confirmed") % server.id) + raise loopingcall.LoopingCallDone(True) + + if (temp_server.status != pvc_vm_states.SHUTOFF and + temp_server.status != pvc_vm_states.ACTIVE and + temp_server.status != pvc_vm_states.VERIFY_RESIZE): + raise exception.\ + InstanceInvalidState(attr=temp_server.status, + instance_uuid=server.id, + state='state', + method='_wait_for_confirm_state_change') + + def _validate_flavor_update(self, context, new_instance_type, server): + """ + This method is used to validate whether the flavor is updated + after the resize. + + :param context: the context of the hosting OS. + :param new_instance_type: the new instance type. + :param server: the VM server instance + :return Ture if the flavor is updated, otherwise False. + """ + + is_flavor_updated = False + + flavor = db.flavor_get(context, new_instance_type) + memory_mb = flavor['memory_mb'] + vcpus = flavor['vcpus'] + root_gb = flavor['root_gb'] + ephemeral_gb = flavor['ephemeral_gb'] + + pvc_flavor = server.flavor + pvc_flavor_id = pvc_flavor['id'] + + try: + pvc_flavor = self.get_flavor_by_flavor_id(pvc_flavor_id) + except Exception: + LOG.info(_("Ignore the exception during getting the flavor")) + return is_flavor_updated + + pvc_flavor_dict = pvc_flavor.__dict__ + + if (memory_mb == pvc_flavor_dict['ram'] + and vcpus == pvc_flavor_dict['vcpus'] + and root_gb == pvc_flavor_dict['disk'] + and ephemeral_gb == pvc_flavor_dict. + get('OS-FLV-EXT-DATA:ephemeral', 0)): + LOG.info(_("The flavor of the server %s has been updated\ + successfully.") % server.id) + is_flavor_updated = True + + return is_flavor_updated + + def _validate_response(self, response): + """ + Validates an HTTP response to a REST API request made by this service. 
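+
+        ``response`` is assumed to be the (response, body) tuple returned
+        by novaclient calls; ``response[0]`` carries the ``status_code``
+        checked below.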
+
+        The method simply returns if the HTTP status code indicates
+        success (i.e. any status below 400).
+        For any other status this method raises an exception.
+        Note: Appropriate exceptions to be added...
+        Nova client throws an exception for 404
+
+        :param response: the HTTP response to validate
+        """
+        if response is None:
+            return
+        httpResponse = response[0]
+        # Any non-successful response >399 is an error
+        if httpResponse.status_code >= httplib.BAD_REQUEST:
+            LOG.critical(_("Service: got this response: %s")
+                         % httpResponse)
+            LOG.debug("Service: got this response: %s"
+                      % httpResponse)
+            raise exceptions.BadRequest(httpResponse)
+
+    def list_os_attachments(self, server_id):
+        """List volumes of the specified instance"""
+        return self._volumes.get_server_volumes(server_id)
+
+    def detach_volume(self, connection_info, instance, mountpoint):
+        """Detach the specified volume from the specified instance"""
+        server_id = instance['metadata']['pvc_id']
+        if 'serial' in connection_info:
+            local_volume_id = connection_info['serial']
+            volume_id = self._get_pvc_volume_id(local_volume_id)
+        else:
+            # Without a volume id the detach request cannot be built;
+            # return here rather than referencing an undefined volume_id.
+            LOG.warning(_("VolumeId missing in detaching volume"))
+            return
+        self._volumes.delete_server_volume(server_id, volume_id)
+
+        def _wait_for_detach(server_id, volume_id):
+            """
+            This method is used to call at an interval until the volume is
+            detached from the server.
+            """
+            try:
+                volume = self._volumes.get_server_volume(server_id,
+                                                         volume_id)
+            except exceptions.NotFound:
+                LOG.info(
+                    _("Detach the volume on instance %s successfully.")
+                    % server_id)
+                raise loopingcall.LoopingCallDone(True)
+
+            if volume:
+                if self.try_time > self.max_tries:
+                    LOG.info(_("Volume %s failed to detach.") % volume_id)
+                    # There is no VolumeDetachFailed like exception defined
+                    raise loopingcall.LoopingCallDone(True)
+                else:
+                    LOG.debug(_("Looping call to check detach of volume %s.")
+                              % volume_id)
+                    self.try_time += 1
+            else:
+                raise loopingcall.LoopingCallDone(True)
+
+        self.try_time = 0
+        timer = loopingcall.FixedIntervalLoopingCall(_wait_for_detach,
+                                                     server_id,
+                                                     volume_id)
+        return timer.start(self.longrun_loop_interval,
+                           self.longrun_initial_delay).wait()
+
+    def power_off(self, instance):
+        """Power off the specified instance."""
+        server_instance = self._get_server(instance)
+        server = self._get_pvcserver(server_instance)
+        # Exit immediately if the server is already stopped.
+        # This is only the case when the OS and PVC states are
+        # not in sync.
+        # Note: Should verify these states....
+        if (server.status == pvc_vm_states.SHUTOFF):
+            LOG.debug("Service: Instance state out of sync, current state: %s"
+                      % server.status)
+            return
+        # When the task state of the instance in PowerVC is 'ACTIVATING',
+        # trying to stop the instance will fail.
+        server_dict = server.__dict__
+        task_state = server_dict.get('OS-EXT-STS:task_state')
+        if (task_state == task_states.ACTIVATING):
+            LOG.debug("The task status of the instance: %s"
+                      % task_state)
+            reason = _("The instance in the task status: %s can not"
+                       " be stopped."
+ % task_state) + raise exception.InstanceUnacceptable(instance_id=server.id, + reason=reason) + + response = self._manager.stop(server) + self._validate_response(response) + + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_state_change, server, + server.status, pvc_vm_states.SHUTOFF, pvc_vm_states.SHUTOFF) + + return timer.start(self.longrun_loop_interval, + self.longrun_initial_delay).wait() + + def power_on(self, instance): + """Power on the specified instance.""" + server_instance = self._get_server(instance) + server = self._get_pvcserver(server_instance) + + # Exit Immediately if the server is already started + # This is only the case when the OS and PVC states are + # not in sync. + if server.status == pvc_vm_states.ACTIVE: + LOG.debug("Service: Instance state out of sync, current state: %s" + % server.status) + return + + # When the task status of the instance in the PowerVC is 'ACTIVATING', + # Try to start this instance will fail. + server_dict = server.__dict__ + task_state = server_dict.get('OS-EXT-STS:task_state') + if (task_state == task_states.ACTIVATING): + LOG.debug("The task status of the instance: %s." + % task_state) + reason = _("The instance in the task status: %s can not be started" + % task_state) + raise exception.InstanceUnacceptable(instance_id=server.id, + reason=reason) + + response = self._manager.start(server) + self._validate_response(response) + + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_state_change, server, + server.status, pvc_vm_states.ACTIVE, pvc_vm_states.ACTIVE) + + return timer.start(self.longrun_loop_interval, + self.longrun_initial_delay).wait() + + def _get_pvcserver(self, server_instance): + """ + This method handles the call to PowerVC to + get the server + """ + return self._manager.get(server_instance) + + def _get_server(self, instance): + """ + This method handles converting a a hosting instance + into an powerVC instance for nova client use. + """ + + server = self._servers.Server(self._manager, instance) + + # Check whether we can get the metadata from instance + key = 'metadata' + pvc_id = 0 + if not key in instance: + LOG.info(_('Could not find the metadata from the instance.')) + server.id = pvc_id + return server + metadatas = instance[key] + + # Check whether we can get the pvc_id from the metadata + key = 'pvc_id' + + # Handle the situation when doing resize operation, + # the metadata in the instance is list type. + if (metadatas is not None and isinstance(metadatas, list)): + for metadata in metadatas: + if metadata['key'] == key: + pvc_id = metadata['value'] + server.id = pvc_id + return server + # If no pvc_id in list, return it by _get_pvcid_from_metadata() + server.id = self._get_pvcid_from_metadata(instance) + return server + + if metadatas == [] or not key in metadatas.keys(): + LOG.info(_('Could not find the pvc_id from the metadata.')) + server.id = pvc_id + return server + + # Get the pvc_id of the instance + pvc_id = metadatas[key] + server.id = pvc_id + return server + + def _get_pvcid_from_metadata(self, instance): + """ + Because the data structure of the instance passed by + the nova manager is different from normal structure, + use this method to get the PowerVC id from the instance + metadata + """ + pvc_id = '' + metadatas = instance['metadata'] + for key in metadatas: + if key == "pvc_id": + pvc_id = metadatas[key] + break + return pvc_id + + def list_flavors(self): + """ + Return the names of all the flavors known to the virtualization + layer, as a list. 
+ """ + return self._flavors.list() + + def get_flavor_by_flavor_id(self, flavor): + """ + Return the specified flavor with the flavor id + """ + return self._flavors.get(flavor) + + def get_flavor_extraspecs(self, flavor): + """ + Return the extraspecs defined for a flavor as a dict. + """ + return flavor.get_keys() + + def _update_local_instance_by_pvc_created_instance(self, + context, + orig_instance, + created_server): + """ + update the original instance with the created instance + """ + created_instance = created_server.__dict__ + # get original metadata from DB and insert the pvc_id + meta = db.instance_metadata_get(context, orig_instance['uuid']) + meta.update(pvc_id=created_instance['id']) + # update powervc specified metadata to hosting os vm instance + powervc_meta = created_instance.get('metadata') + if powervc_meta: + meta.update(powervc_meta) + update_properties = { + 'node': created_instance.get( + 'OS-EXT-SRV-ATTR:hypervisor_hostname', None), + 'host': created_instance.get('OS-EXT-SRV-ATTR:host', None), + 'metadata': meta, + 'architecture': constants.PPC64, + 'power_state': created_instance['OS-EXT-STS:power_state']} + orig_instance['node'] = update_properties['node'] + orig_instance['host'] = update_properties['host'] + db.instance_update(context, orig_instance['uuid'], + update_properties) + + def spawn(self, context, instance, injected_files, name, imageUUID, + flavorDict, nics, hypervisorID, availability_zone, isDefer): + """Call pvcnovaclient to boot a VM on powerVC + :param context: admin context + :param instance: passed-in instance + :param injected_files: User files to inject into instance. + :param name: server name + :param imageUUID: Image UUID on powerVC + :param flavorDict: a dictionary which contains flavor info + :param networkUUID: Network config UUID on powerVC + :param hypervisorID: Hypervisor ID (a number) on powerVC + :param availability_zone: the availability_zone of host + :param isDefer: defer_placement flag + """ + createdServer = None + + self.validate_update_scg(flavorDict) + + # extract activation data from instance + meta = instance._metadata + key_name = instance.key_name + #key_data = instance.key_data + config_drive = instance._config_drive + userdata = instance.user_data # already base64 encoded by local OS + + if not isDefer: + createdServer = \ + self._manager.create(name=name, + image=imageUUID, + flavor=flavorDict, + meta=meta, + files=injected_files, + userdata=userdata, + key_name=key_name, + # OpenStack API doesn't support key_data, + # key_data = key_data, + config_drive=config_drive, + nics=nics, + hypervisor=hypervisorID, + availability_zone=availability_zone) + else: + createdServer = self._manager.create(name=name, + image=imageUUID, + flavor=flavorDict, + meta=meta, + files=injected_files, + userdata=userdata, + key_name=key_name, + # OpenStack API doesn't + # support key_data, + # key_data = key_data, + config_drive=config_drive, + nics=nics) + + LOG.debug(_('Created Server: %s' % createdServer)) + LOG.debug(_( + 'Server status is %s after creating' % createdServer.status)) + # update local DB instance with powervc created one + self._update_local_instance_by_pvc_created_instance( + context, instance, createdServer) + + # If the vm is building, wait until vm status is ACTIVE or ERROR + if createdServer.status == pvc_vm_states.BUILD: + LOG.debug(_('wait until created vm status is ACTIVE or ERROR')) + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_spawn_state_change, createdServer) + + try: + 
timer.start(self.longrun_loop_interval * 2, + self.longrun_initial_delay * 2).wait() + LOG.debug(_('Create VM succeeded')) + except exception.InstanceInvalidState as e: + with excutils.save_and_reraise_exception(): + # set powervc fault message to exception and throw e + e.message = self._get_fault_message(createdServer) + + # verify the server's status after wait + createdServer = self._manager.get(createdServer) + LOG.debug(_( + 'Server status is %s after waiting' % createdServer.status)) + + elif createdServer.status != pvc_vm_states.ACTIVE: + exp = exception.InstanceInvalidState() + exp.message = self._get_fault_message(createdServer) + raise exp + + # Again, update local DB instance with powervc created one + # in case some fields changed after boot. + # Copy the powervc specified properties into metadata + createdServer.metadata = \ + powervc_utils.fill_metadata_dict_by_pvc_instance( + createdServer.metadata, + createdServer.__dict__) + self._update_local_instance_by_pvc_created_instance( + context, instance, createdServer) + return createdServer + + def _get_fault_message(self, createdServer): + """try to get error message from powerVC when boot vm failed + """ + errorServer = self._manager.get(createdServer) + return self._get_fault_message_from_pvc_vs(errorServer) + + def _get_fault_message_from_pvc_vs(self, errorServer): + """try to get error message from powerVC when boot vm failed + """ + fault_message = None + fault_msg = getattr(errorServer, 'fault', None) + if fault_msg: + # set powervc fault message to exception and throw e + fault_message = fault_msg.get('message', None) + LOG.warning(_('Failed to create VM, reason: %s' % fault_message)) + return fault_message + + def validate_update_scg(self, flavorDict): + """ + Validate the flavor dict for scg + -if extra specs key is available and if scg is specified in extra + specs dict, then verify if scg is same as supported by our driver. + If it is not the supported scg, fail the operation + -if extra specs key is not available, or scg is not specified in extra + specs dict, then add the scg information to the extra specs dict and + update the flavor dict. + """ + scg_name_list = CONF.powervc.storage_connectivity_group + scg_id_list = [utils.get_utils().get_scg_id_by_scgName(scg_name) + for scg_name in scg_name_list] + scg_key = constants.SCG_KEY + extra_specs_key = constants.EXTRA_SPECS + + if extra_specs_key in flavorDict: + extra_specs = flavorDict[extra_specs_key] + if scg_key in extra_specs: + if extra_specs[scg_key] in scg_id_list: + return + else: + LOG.info(_("Function failed due to unsupported" + " storage connectivity group.")) + raise InvalidSCG(attr=extra_specs[scg_key]) + + if extra_specs_key not in flavorDict: + LOG.info(_("Flavor updated with default storage connectivity" + " group in extra specs.")) + flavorDict[extra_specs_key] = {scg_key: scg_id_list[0]} + else: + LOG.info(_("Extra specs updated with default storage connectivity" + " group info.")) + flavorDict[extra_specs_key][scg_key] = scg_id_list[0] + return + + def destroy(self, instance): + """ + Destroy the VM instance in the PowerVC host. + """ + server_instance = self._get_server(instance) + + # If we can not find the VM instance in the PowerVC host, + # the destroy operation should be successful. + try: + server = self._manager.get(server_instance) + except exceptions.NotFound: + LOG.debug("Service: Can not find VM %s in the PowerVC." 
+ % server_instance.id) + return True + + delete_response = self._manager.delete(server) + + self._validate_response(delete_response) + + def _wait_for_destroy(): + """ + The method is used to call at an interval until the VM + is gone. + """ + try: + get_server_response = self._manager.get(server) + except exceptions.NotFound: + LOG.info( + _("VM instance %s was successfully deleted.") + % server.id) + raise loopingcall.LoopingCallDone(True) + + server_response = get_server_response.__dict__ + + # There is a window where the instance will go out of deleting + # task state and report a status of DELETED and a task state of + # None. Recognize this as a sucessful delete completion as well. + if (server_response['OS-EXT-STS:task_state'] is None and + server_response['status'] == 'DELETED'): + LOG.info( + _("VM instance %s was successfully deleted.") + % server.id) + raise loopingcall.LoopingCallDone(True) + + if (server_response['OS-EXT-STS:task_state'] != 'deleting' and + server_response['status'] != 'DELETED'): + LOG.info(_("VM %s failed to delete, instance details: %s ") % + (server.id, server_response)) + raise exception.InstanceTerminationFailure(server) + + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_destroy) + return timer.start(self.longrun_loop_interval * 2, + self.longrun_initial_delay * 2).wait() + + def set_device_id_on_port_by_pvc_instance_uuid(self, + ctx, + local_ins_id, + pvc_ins_id): + """ + Query a sync. local port by a pvc instance id, + then set its device_id to a local instance id. + """ + local_ports = self._api.\ + set_device_id_on_port_by_pvc_instance_uuid(ctx, + local_ins_id, + pvc_ins_id) + return local_ports + + def get_pvc_network_uuid(self, ctx, local_id): + """ + Given a local netowrk id, return a powerVC network id. + """ + pvc_id = self._api.get_pvc_network_uuid(ctx, local_id) + return pvc_id + + def _get_instance_resize_properties(self, context, new_instance_type, + server): + """ + Get the dynamic instance customization properties. + The dynamic properties are those that can be modified on an + existing instance. + + :param instance: the instance which needs to be resized + :returns: dictionary of dynamic properties + :new_instance_type: the flavor type + """ + flavor = db.flavor_get(context, new_instance_type) + flavor_extras = self.\ + _get_flavor_extra_specs(context, flavor) + flavor_extras_target = dict() + if server.status == pvc_vm_states.ACTIVE: + flavor_extras_source = flavor_extras + for key in flavor_extras_source.keys(): + if (key.find('min') == -1 and + key.find('max') == -1): + flavor_extras_target[key] = flavor_extras_source[key] + else: + flavor_extras_target = flavor_extras + + flavor_props = {'vcpus': flavor['vcpus'], + 'ram': flavor['memory_mb'], + 'disk': flavor['root_gb'], + 'extra_specs': flavor_extras_target + } + + self.validate_update_scg(flavor_props) + props = {'flavor': flavor_props} + return props + + def _get_flavor_extra_specs(self, context, flavor): + """ + The method _get_flavor_extra_specs is used to get the PowerVC flavor + extra_specs data + """ + flavor_id = flavor['flavorid'] + value = db.flavor_extra_specs_get(context, flavor_id) + return value + + def _resize(self, server, props): + """ + Resize a server's resources. + :para server; the :class:`Server` to share onto. 
+ :para body: the body of rest request + + """ + response = self._manager._resize_pvc(server, props) + + return response + + def resize_instance(self, context, migration, instance, + image_meta): + """ + Resize the specified VM instance on the PowerVC host. + """ + # The resize operation REST API of PowerVC is different + # from the standard OpenStack. + + server_instance = self._servers.Server(self._manager, instance) + server_instance.id = self._get_pvcid_from_metadata(instance) + server = self._manager.get(server_instance) + + LOG.debug("Starting to resize the instance %s", + server.id) + new_instance_type = migration['new_instance_type_id'] + props = self._get_instance_resize_properties(context, + new_instance_type, + server) + response = self._resize(server, props) + self._validate_response(response) + + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_resize_state_change, context, + migration, server, instance) + + return timer.start(self.longrun_loop_interval * 3, + self.longrun_initial_delay * 2).wait() + + def confirm_migration(self, instance): + """ + Confirm a resize operation. + """ + server_instance = self._get_server(instance) + server = self._manager.get(server_instance) + + server_dict = server.__dict__ + server_task = server_dict['OS-EXT-STS:task_state'] + + if server.status == pvc_vm_states.ERROR: + raise exception.ResizeError("Error during confirming " + "the resize operation.") + + # Handle with the auto-confirmation situation + if (server.status == pvc_vm_states.ACTIVE and + server_task is None): + LOG.info(_("The VM instance %s is auto-confirmed successfully.") + % server.id) + return True + + if (server.status == pvc_vm_states.SHUTOFF and + server_task is None): + LOG.info(_("The VM instance %s is auto-confirmed successfully.") + % server.id) + return True + + try: + response = self._manager.confirm_resize(server) + self._validate_response(response) + except Exception as exc: + LOG.info(_("Getting the exception during confirming the resize of " + "the instance %s.") % server.id) + LOG.info(_("The exception: %s") % exc) + server = self._manager.get(server_instance) + if server.status == pvc_vm_states.ERROR: + raise exception.ResizeError("Error during confirming " + "the resize operation.") + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_confirm_state_change, server) + return timer.start(self.longrun_loop_interval * 2, + self.longrun_initial_delay).wait() + + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_confirm_state_change, server) + + return timer.start(self.longrun_loop_interval * 2, + self.longrun_initial_delay).wait() + + def attach_volume(self, connection_info, instance, mountpoint): + """ + Attach the specified volume to the specified instance + """ + server_instance = self._get_server(instance) + server = self._manager.get(server_instance) + + server_id = server.id + local_volume_id = connection_info['serial'] + volume_id = self._get_pvc_volume_id(local_volume_id) + + if volume_id == '': + LOG.debug("Could not get the PowerVC volume id " + "with local volume id.") + raise exception.VolumeUnattached + + self._volumes.create_server_volume(server_id, + volume_id, + mountpoint) + + self.try_time = 0 + timer = loopingcall.\ + FixedIntervalLoopingCall(self._check_attachment_of_instance, + server_id, + volume_id) + + return timer.start(self.longrun_loop_interval, + self.longrun_initial_delay).wait() + + def list_attachments_of_instance(self, server_id): + """ + Lists the volume attachments for the specified server. 
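+
+        Each returned entry is a volume-attachment object that exposes
+        (illustratively) a ``volumeId`` attribute, which
+        _check_attachment_of_instance compares against below.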
+ """ + + response = self._volumes.get_server_volumes(server_id) + + return response + + def _check_attachment_of_instance(self, server_id, volume_id): + """ + Check whether the specified volume has been attached to + the specified instance. + """ + self.try_time = self.try_time + 1 + try: + attachments = self.list_attachments_of_instance(server_id) + except exceptions: + LOG.warning(_("Fail to get the attachments of the server:\ + VM %s.") % server_id) + raise exceptions.BadRequest + + for attachment in attachments: + get_volume_id = getattr(attachment, 'volumeId', '') + + if get_volume_id == volume_id: + LOG.debug(_("The attach_volume operation of the server:\ + VM %s is completed") % server_id) + raise loopingcall.LoopingCallDone(True) + + if self.try_time > self.max_tries: + raise exception.VolumeUnattached + + def _get_pvc_volume_id(self, local_id): + """ + The method get_pvc_volume_id is used to get the PowerVC volume id + with the local volume id + """ + pvc_volume_id = '' + + local_volume = self._cinderclient.volumes.get(local_id) + + if local_volume is None: + return pvc_volume_id + + metadata = getattr(local_volume, 'metadata', '') + if metadata == '': + return pvc_volume_id + + if "pvc:id" in metadata.keys(): + pvc_volume_id = metadata["pvc:id"] + + return pvc_volume_id + + def snapshot(self, context, instance, + local_image_id, image): + """ + Captures a workload into an image. + :param context: the context for the capture + :param instance: the instance to be capture + :param local_image_id: the id of the local image created for the + snapshot + :param image: image object to update + """ + server_instance = self._get_server(instance) + server = self._get_pvcserver(server_instance) + image_name = image["name"] + glance_image_service = glance.get_default_image_service() + + #nova is going to pick up the uuid from the image the instance was + #deployed from. We need to remove it to prevent treating this image + #as if it is the base deploy image + image_props = image["properties"] + if common_constants.POWERVC_UUID_KEY in image_props: + props = {'properties': {common_constants.POWERVC_UUID_KEY: None}} + glance_image_service.update(context, local_image_id, props, + purge_props=False) + + glance_powervc_uuid_value = \ + server.create_image(image_name, {common_constants.LOCAL_UUID_KEY: + image["id"]}) + + image_data = { + 'properties': { + common_constants.POWERVC_UUID_KEY: glance_powervc_uuid_value + } + } + + glance_image_service.update(context, local_image_id, image_data, + purge_props=False) + + def _wait_for_snapshot(): + """ + The method is used to call at an interval until the + capture is complete + """ + get_server_response = self._manager.get(server) + server_response = get_server_response.__dict__ + task_state = server_response['OS-EXT-STS:task_state'] + + if task_state is None or task_state == 'None': + LOG.info(_("Capture of VM instance %s is complete.") % + server.id) + raise loopingcall.LoopingCallDone(True) + LOG.debug(_("Capture of VM instance %s in state %s.") % + (server.id, task_state)) + + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_snapshot) + return timer.start(self.longrun_loop_interval, + self.longrun_initial_delay).wait() + + def live_migrate(self, instance, dest, migrate_data): + """ + Live migrate a PowerVC instance. 
+ :param instance: Local OS instance + :param dest: Destination host + :param migrate_data: implementation specific data dict + """ + server_instance = self._get_server(instance) + server = self._manager.get(server_instance) + + resp = self._manager.live_migrate(server, dest, False, False) + self._validate_response(resp) + + server_dict = server.__dict__ + orig_host = server_dict['OS-EXT-SRV-ATTR:host'] + + def _wait_for_live_migration(): + """ + The method is used to call at an interval until the + instance transitions from Migrating state . + """ + pvc_server = self._manager.get(server) + pvc_server_dict = pvc_server.__dict__ + current_host = pvc_server_dict['OS-EXT-SRV-ATTR:host'] + LOG.debug(_('Original Host %s, Current Host %s') % + (orig_host, current_host)) + if (pvc_server.status != pvc_vm_states.MIGRATING and + current_host != orig_host): + LOG.info(_("Instance %s completed migration.") % pvc_server.id) + raise loopingcall.LoopingCallDone(True) + + timer = loopingcall.FixedIntervalLoopingCall(_wait_for_live_migration) + return timer.start(self.longrun_loop_interval * 3, + self.longrun_initial_delay * 2).wait() + + def reboot(self, instance, reboot_type): + """Reboot the specified instance. + + After this is called successfully, the instance's state + goes back to power_state.RUNNING. The virtualization + platform should ensure that the reboot action has completed + successfully even in cases in which the underlying domain/vm + is paused or halted/stopped. + + :param instance: nova.objects.instance.Instance + :param reboot_type: Either a HARD or SOFT reboot + """ + server_instance = self._get_server(instance) + server = self._manager.get(server_instance) + + if reboot_type == "SOFT": + server.reboot(self._servers.REBOOT_SOFT) + else: + server.reboot(self._servers.REBOOT_HARD) + + # loop when vm state status is not none + LOG.debug(_('wait until rebooted server task state is none')) + timer = loopingcall.FixedIntervalLoopingCall( + self._wait_for_reboot_state_change, server) + try: + timer.start(self.longrun_loop_interval * 2, + self.longrun_initial_delay).wait() + LOG.debug(_('Reboot VM succeeded')) + except exception.InstanceRebootFailure: + with excutils.save_and_reraise_exception(): + LOG.warning("Reboot VM failed.") + + def update_correct_host(self, context, instance): + """ + Update the property host of the instance. + When the VM instance is resized, the Nova will select the + host to migrate it. In order to handle with this situation, + we need to update the property host of the instance after the + resize operation. Additionally, when live migration is deferred + to powervc, we will not know the host as well. This method + needs to update the host, node and hostname values as they + are all related to an instance belonging to a new compute + node. + """ + server_instance = self._get_server(instance) + server = self._manager.get(server_instance) + server_dict = server.__dict__ + host = \ + powervc_utils.normalize_host(server_dict['OS-EXT-SRV-ATTR:host']) + hostname = server_dict['OS-EXT-SRV-ATTR:hypervisor_hostname'] + try: + db.instance_update(context, instance['uuid'], + {'host': host, + 'node': hostname, + 'hostname': hostname}) + except Exception as exc: + LOG.info(_("Fail to set the host of VM instance %s.") + % server.id) + raise exc + + def get_valid_destinations(self, instance_ref): + """ + Utility method to get valid hosts for an instance to + move to. 
+ """ + server_instance = self._get_server(instance_ref) + server = self._manager.get(server_instance) + return self._manager.list_instance_storage_viable_hosts(server) + + def _is_live_migration_valid(self, status, health_status): + """ + Utility method to determine if we can safely request a live migration + on the powervc system. Ideally, powerVC should be giving its clients + a safer API, but for this release we need to do our best to make sure + its safe to call. This method does 2 things, checks that the instance + status is active and secondly that there is a valid RMC connection to + HMC with the instance. If there is not a valid connection the instance + will go to error state in powervc, although the instance can safely be + recovered, we should not send the request altogether. + :param status: (str) instance status from powervc + :param health_status: (tuple, examples below) of health information. + + Examples of OK instance: + { u'health_value': u'OK', + u'id': u'ba93a763-061e-49a1-807d-aa053bccdc81' + } + + { u'health_value': u'UNKNOWN', + u'unknown_reason': u'Unable to get related hypervisor data' + } + + Example of WARNING instance: + { u'health_value': u'WARNING', + u'id': u'a370885f-4bff-4d8e-869f-2aa64545a7aa', + u'value_reason': [ + {u'resource_local': u'server', + u'resource_id': u'a370885f-4bff-4d8e-869f-2aa64545a7aa', + u'display_name': u'aix', + u'resource_property_key': u'rmc_state', + u'resource_property_value': u'inactive'}, + {u'resource_local': u'server', + u'resource_id': u'a370885f-4bff-4d8e-869f-2aa64545a7aa', + u'display_name': u'aix', + u'resource_property_key': u'vm_state', + u'resource_property_value': u'stopped'} + ] + } + """ + if (status != 'ACTIVE'): + return False + if (health_status is not None and + (health_status['health_value'] == 'OK' or + health_status['health_value'] == 'UNKNOWN')): + return True + else: + if (health_status is not None and + 'value_reason' in health_status): + for reason in health_status['value_reason']: + if (reason is not None and + 'resource_property_key' in reason and + 'resource_property_value' in reason and + reason['resource_property_key'] == 'rmc_state' and + reason['resource_property_value'] == 'inactive'): + return False + return True + + def cache_volume_data(self): + """ + Cache the volume data during the sync instances. + """ + cache_volume = {} + local_volumes = self._cinderclient.volumes.list_all_volumes() + + for local_volume in local_volumes: + metadata = getattr(local_volume, 'metadata', '') + if metadata == '': + continue + if 'pvc:id' in metadata.keys(): + pvc_volume_id = metadata['pvc:id'] + local_volume_id = getattr(local_volume, 'id', '') + if pvc_volume_id is not None and local_volume_id != '': + cache_volume[pvc_volume_id] = local_volume_id + return cache_volume diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/sync/__init__.py b/nova-powervc/powervc/nova/driver/virt/powervc/sync/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/nova-powervc/powervc/nova/driver/virt/powervc/sync/flavorsync.py b/nova-powervc/powervc/nova/driver/virt/powervc/sync/flavorsync.py new file mode 100644 index 0000000..3e90814 --- /dev/null +++ b/nova-powervc/powervc/nova/driver/virt/powervc/sync/flavorsync.py @@ -0,0 +1,200 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" + +import re +from eventlet import greenthread + +import powervc.common.config as cfg +from nova.compute import flavors +from nova import exception +from nova import db +from nova.openstack.common import log as logging +from nova.openstack.common import loopingcall +from powervc.nova.driver.compute import constants + +LOG = logging.getLogger(__name__) + +CONF = cfg.CONF + + +def periodic_flavor_sync(ctx, driver, scg_id_list): + """ + Periodically update the flavors from PowerVC. + A default time of 300 seconds is specified for the refresh interval. + if the refresh interval is set to 0, then flavors are not refreshed. + """ + sync_interval = CONF.powervc.flavor_sync_interval + + if sync_interval is None or sync_interval == 0: + return + + def flavors_sync(driver, scg_id_list): + FlavorSync(driver, scg_id_list).synchronize_flavors(ctx) + LOG.debug('Flavors synchronization completed') + + sync_flavors = loopingcall.FixedIntervalLoopingCall(flavors_sync, + driver, + scg_id_list) + sync_flavors.start(interval=sync_interval, initial_delay=sync_interval) + + +class FlavorSync(): + """A class that synchorizes the flavors. + The functionality provided here is called by the manager. + The driver provided interfaces to the PowerVC. + """ + + def __init__(self, driver, scg_id_list): + self.driver = driver + self.prefix = CONF.powervc.flavor_prefix + self.scg_id_list = scg_id_list + + def synchronize_flavors(self, ctx): + """ + Get a list of all public flavors from PowerVC. + If it is in configuration white list, + and not in black list, insert it. + if it is already in local tables, ignore it. + """ + # Get all public flavors. By default, detail and public is set. + pvcFlavors = self.driver.list_flavors() + # Sync flavors in list + for flavor in pvcFlavors: + greenthread.sleep(0) + # This check is added to eliminate sync of private flavors + # Can be removed once PowerVC fixes to return only public flavors + # by default. + if not(flavor.__dict__.get(constants.IS_PUBLIC)): + continue + + if (self._check_for_sync(flavor.name)): + response = self._check_for_extraspecs(flavor) + if response is not None: + self._sync_flavor(ctx, flavor, response[1]) + + def _sanitize(self, opts_list): + """ + Remove any whitespace only list values + """ + for opt in opts_list: + if len(opt.strip()) == 0: + opts_list.remove(opt) + return opts_list + + def get_flavors_white_list(self): + """ + Get the flavors to sync from the powervc conf file + """ + return self._sanitize(CONF.powervc.flavor_white_list) + + def get_flavors_black_list(self): + """ + Get the black listed flavors from the powervc conf file + """ + return self._sanitize(CONF.powervc.flavor_black_list) + + def _check_for_sync(self, fl_name): + """ Check the white/black lists to determine if sync candidate """ + fl_sync = True + # Get the list of flavors names to sync. 
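+        # Example (illustrative): with a white list of ['m1.*'] and a
+        # black list of ['m1.large'], flavor 'm1.small' is synced while
+        # 'm1.large' is filtered out.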
+ fl_wlist = self.get_flavors_white_list() + fl_blist = self.get_flavors_black_list() + + if (len(fl_wlist) != 0): + fl_sync = self._regex_comp(fl_name, fl_wlist) + if (fl_sync and (len(fl_blist) != 0)): + fl_sync = not(self._regex_comp(fl_name, fl_blist)) + return fl_sync + + def _regex_comp(self, name, flist): + """ + Make a regex comparison for name in the list + Return a boolean True if found in the list + """ + if name in flist: + return True + for item in flist: + p = re.compile(item) + match = p.match(name) + if (match is not None): + return True + return False + + def _sync_flavor(self, ctx, flavor, extra_specs): + """ + Insert the flavor with extra specs if not in local database + """ + flavor_name = self.prefix + flavor.name + try: + db.flavor_get_by_name(ctx, flavor_name) + except exception.FlavorNotFoundByName: + self._insert_pvc_flavor_extraspecs(ctx, flavor, extra_specs) + + def _check_for_extraspecs(self, flavor): + """ + Check for valid extraspecs defined and to be synced. + The method returns the following values: + (True, None) - flavor to be synced, and no extra specs defined. + (True, extraspecs) - flavor to be synced with the extra specs defined. + None - scg connectivity group defined in extraspecs is not supported, + and flavor not to be synced. + + Checking for scg to be removed when powervc driver supports multiple + scgs + """ + flavor_extraspecs = self.driver.get_flavor_extraspecs(flavor) + if flavor_extraspecs: + scg_key = constants.SCG_KEY + if scg_key in flavor_extraspecs: + if not self.scg_id_list: + return None + if not flavor_extraspecs[scg_key] in self.scg_id_list: + return None + return (True, flavor_extraspecs) + + def _insert_pvc_flavor_extraspecs(self, context, flavor, extra_specs): + """ Insert the flavor and extra specs if any """ + flavor_created = self._create_flavor(context, flavor) + if extra_specs: + self._update_flavor_extraspecs(context, + flavor_created.get('flavorid'), + extra_specs) + + def _update_flavor_extraspecs(self, context, flavorid, flavor_extraspecs): + """ Insert the flavor extra specs """ + db.flavor_extra_specs_update_or_create(context, + flavorid, + flavor_extraspecs) + + def _create_flavor(self, context, flavor): + """ Create and insert the flavor """ + flavor_dict = flavor.__dict__ + name = self.prefix + flavor.name + flavorid = self.prefix + flavor.id + memory = flavor.ram + vcpus = flavor.vcpus + root_gb = flavor.disk + ephemeral_gb = flavor_dict.get('OS-FLV-EXT-DATA:ephemeral', 0) + u_swap = flavor_dict.get('swap', 0) + rxtx_factor = flavor_dict.get('rxtx_factor', 1.0) + is_public = flavor_dict.get('os-flavor-access:is_public', True) + if u_swap == "": + swap = 0 + else: + swap = int(u_swap) + + try: + return flavors.create(name, memory, vcpus, root_gb, + ephemeral_gb=ephemeral_gb, + flavorid=flavorid, swap=swap, + rxtx_factor=rxtx_factor, + is_public=is_public) + except exception.InstanceExists as err: + raise err diff --git a/nova-powervc/powervc/nova/extension/__init__.py b/nova-powervc/powervc/nova/extension/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/nova-powervc/powervc/nova/extension/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/nova-powervc/powervc/nova/extension/extended_powervm.py b/nova-powervc/powervc/nova/extension/extended_powervm.py
new file mode 100644
index 0000000..64d8f8b
--- /dev/null
+++ b/nova-powervc/powervc/nova/extension/extended_powervm.py
@@ -0,0 +1,146 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""The Extended Server Attributes API extension."""
+
+from nova.api.openstack import extensions
+from nova.api.openstack import wsgi
+from nova.api.openstack import xmlutil
+
+authorize = extensions.soft_extension_authorizer('compute',
+                                                 'extended_powervm')
+
+
+class ExtendedPowerVMAttributesController(wsgi.Controller):
+
+    # Prefix of the PowerVC-specific instance metadata keys
+    LOCAL_PVC_PREFIX = 'powervm:'
+
+    def gen_pvc_key(self, key):
+        if key is None:
+            return key
+        if key.startswith(self.LOCAL_PVC_PREFIX):
+            return key
+        return self.LOCAL_PVC_PREFIX + key
+
+    def _extend_server(self, context, server, instance):
+        metadata = instance['metadata']
+        pvc_attrs = ['cpus', 'min_cpus', 'max_cpus', 'cpu_utilization',
+                     'min_vcpus', 'max_vcpus',
+                     'min_memory_mb', 'max_memory_mb',
+                     'root_gb']
+
+        key = "%s:id" % (Extended_powervm.alias)
+        if 'pvc_id' in metadata:
+            server[key] = metadata['pvc_id']
+
+        key = "%s:health_status" % (Extended_powervm.alias)
+        health_status = {}
+        att = self.gen_pvc_key('health_status.health_value')
+        if att in metadata:
+            health_status['health_value'] = metadata[att]
+            del metadata[att]
+        # TODO: other health_status properties can be added to this
+        # dictionary here.
+        server[key] = health_status
+
+        for item in pvc_attrs:
+            key = "%s:%s" % (Extended_powervm.alias, item)
+            att = self.gen_pvc_key(item)
+            if att in metadata:
+                value = metadata[att]
+                server[key] = value
+                del metadata[att]
+
+    @wsgi.extends
+    def show(self, req, resp_obj, id):
+        context = req.environ['nova.context']
+        if authorize(context):
+            # Attach our slave template to the response object
+            resp_obj.attach(xml=ExtendedPowervmTemplate())
+            server = resp_obj.obj['server']
+            db_instance = req.get_db_instance(server['id'])
+            # server['id'] is guaranteed to be in the cache due to
+            # the core API adding it in its 'show' method.
+            self._extend_server(context, server, db_instance)
+
+    @wsgi.extends
+    def detail(self, req, resp_obj):
+        context = req.environ['nova.context']
+        if authorize(context):
+            # Attach our slave template to the response object
+            resp_obj.attach(xml=ExtendedPowervmsTemplate())
+
+            servers = list(resp_obj.obj['servers'])
+            for server in servers:
+                db_instance = req.get_db_instance(server['id'])
+                # server['id'] is guaranteed to be in the cache due to
+                # the core API adding it in its 'detail' method.
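+                # After extension, each server dict carries keys such
+                # as 'IBM-PVM:id', 'IBM-PVM:health_status' and
+                # 'IBM-PVM:cpus' (illustrative; the key names derive
+                # from Extended_powervm.alias defined below).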
+ self._extend_server(context, server, db_instance) + + +class Extended_powervm(extensions.ExtensionDescriptor): + """Extended Server Attributes support.""" + name = "ExtendedPowervm" + alias = "IBM-PVM" + namespace = ("http://docs.openstack.org/compute/ext/" + "extended_powervm/api/v1.1") + updated = "2011-11-03T00:00:00+00:00" + + def get_controller_extensions(self): + controller = ExtendedPowerVMAttributesController() + extension = extensions.ControllerExtension(self, 'servers', controller) + return [extension] + + +def make_server(elem): + elem.set('{%s}id' % Extended_powervm.namespace, + '%s:id' % Extended_powervm.alias) + + elem.set('{%s}cpus' % Extended_powervm.namespace, + '%s:cpus' % Extended_powervm.alias) + elem.set('{%s}max_cpus' % Extended_powervm.namespace, + '%s:max_cpus' % Extended_powervm.alias) + elem.set('{%s}min_cpus' % Extended_powervm.namespace, + '%s:min_cpus' % Extended_powervm.alias) + elem.set('{%s}cpu_utilization' % Extended_powervm.namespace, + '%s:cpu_utilization' % Extended_powervm.alias) + + elem.set('{%s}min_vcpus' % Extended_powervm.namespace, + '%s:min_vcpus' % Extended_powervm.alias) + elem.set('{%s}max_vcpus' % Extended_powervm.namespace, + '%s:max_vcpus' % Extended_powervm.alias) + + elem.set('{%s}min_memory_mb' % Extended_powervm.namespace, + '%s:min_memory_mb' % Extended_powervm.alias) + elem.set('{%s}max_memory_mb' % Extended_powervm.namespace, + '%s:max_memory_mb' % Extended_powervm.alias) + + elem.set('{%s}root_gb' % Extended_powervm.namespace, + '%s:root_gb' % Extended_powervm.alias) + elem.set('{%s}health_status' % Extended_powervm.namespace, + '%s:health_status' % Extended_powervm.alias) + + +class ExtendedPowervmTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('server', selector='server') + make_server(root) + alias = Extended_powervm.alias + namespace = Extended_powervm.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) + + +class ExtendedPowervmsTemplate(xmlutil.TemplateBuilder): + def construct(self): + root = xmlutil.TemplateElement('servers') + elem = xmlutil.SubTemplateElement(root, 'server', selector='servers') + make_server(elem) + alias = Extended_powervm.alias + namespace = Extended_powervm.namespace + return xmlutil.SlaveTemplate(root, 1, nsmap={alias: namespace}) diff --git a/nova-powervc/powervc/utils.py b/nova-powervc/powervc/utils.py new file mode 100644 index 0000000..716d701 --- /dev/null +++ b/nova-powervc/powervc/utils.py @@ -0,0 +1,122 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +"""PowerVC Driver related Utilities""" + +from powervc.nova.driver.compute import constants +from powervc.common.gettextutils import _ + +import logging + +LOG = logging.getLogger(__name__) + + +def normalize_host(hostname): + """The RPC 'topic.host' format only supports a single '.'""" + if not hostname: + return hostname + return hostname.replace('.', '_') + + +def get_pvc_id_from_metadata(metadata): + """ + This method helps to get pvc_id from a list or dict type + metadata. 
This util method handles the following forms
+    of metadata:
+    List type sample 1:
+    metadata = [
+        {'key': 'powervm:defer_placement', 'value': 'true'},
+        {'key': 'pvc_id', 'value': '40e2d7c9-b510-4e10-8986-057800117714'}
+    ]
+    List type sample 2:
+    metadata = [{
+        "powervm:health_status.health_value": "OK",
+        "pvc_id": "40e2d7c9-b510-4e10-8986-057800117714"
+    }]
+    Dict type sample:
+    metadata = {
+        "powervm:health_status.health_value": "OK",
+        "pvc_id": "40e2d7c9-b510-4e10-8986-057800117714",
+        "powervm:defer_placement": "False",
+        "powervm:max_cpus": "1"
+    }
+    If none of the above types match or no pvc_id is found, return None
+    """
+    if not metadata:
+        return None
+
+    pvc_id = None
+
+    if (isinstance(metadata, list)):
+        # Try to get pvc_id from list type 1
+        for meta_list in metadata:
+            if meta_list.get('key') == constants.PVC_ID:
+                pvc_id = meta_list.get('value')
+                LOG.info(_('Found the pvc_id in the list type 1 metadata: %s')
+                         % pvc_id)
+                return pvc_id
+        # If pvc_id not found in list type 1, try list type 2
+        for meta_dict in metadata:
+            if constants.PVC_ID in meta_dict.keys():
+                pvc_id = meta_dict.get(constants.PVC_ID)
+                LOG.info(_('Found the pvc_id in the list type 2 metadata: %s')
+                         % pvc_id)
+                return pvc_id
+
+        # pvc_id was not found in the list, return None
+        LOG.info(_('pvc_id not found in the list type metadata.'))
+        return None
+
+    if (isinstance(metadata, dict)):
+        # Try to get pvc_id from dict type
+        if constants.PVC_ID in metadata.keys():
+            pvc_id = metadata.get(constants.PVC_ID)
+            LOG.info(_('Found the pvc_id in the dict type metadata: %s')
+                     % pvc_id)
+            return pvc_id
+        else:
+            LOG.info(_('pvc_id not found in the dict type metadata.'))
+    return None
+
+
+def fill_metadata_dict_by_pvc_instance(metadata, pvc_instance):
+    """
+    This common method copies the PowerVC-unique properties of an
+    instance into the metadata dict
+    """
+    if pvc_instance is None or not isinstance(pvc_instance, dict):
+        return {}
+    if metadata is None:
+        metadata = {}
+    LOG.debug(_('metadata before filling: %s') % metadata)
+
+    health_value = None
+    health_status = pvc_instance.get('health_status')
+    if health_status is not None:
+        health_value = health_status.get('health_value')
+
+    metadata[constants.PVC_ID] = pvc_instance['id']
+
+    # A value of None in the dict type instance metadata is reserved
+    # by the Nova framework, so an instance metadata value cannot be
+    # set when it is None.
+    if health_value is not None:
+        metadata[constants.gen_pvc_key('health_status.health_value')] \
+            = health_value
+
+    pvc_attrs = ['cpus', 'min_cpus', 'max_cpus', 'cpu_utilization',
+                 'min_vcpus', 'max_vcpus',
+                 'min_memory_mb', 'max_memory_mb', 'root_gb']
+    for attr in pvc_attrs:
+        val = pvc_instance.get(attr)
+        if val is not None:
+            metadata[constants.gen_pvc_key(attr)] = val
+
+    LOG.debug(_('metadata after filling: %s') % metadata)
+    return metadata
diff --git a/nova-powervc/run_tests.sh b/nova-powervc/run_tests.sh
new file mode 100755
index 0000000..f3ad8f0
--- /dev/null
+++ b/nova-powervc/run_tests.sh
@@ -0,0 +1,192 @@
+#!/bin/bash
+
+# Copyright 2012 OpenStack Foundation
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the +# License for the specific language governing permissions and limitations +# under the License. + +set -eu + +function usage { + echo "Usage: $0 [OPTION]..." + echo "Run PowerVC Nova's test suite(s)" + echo "" + echo " -V, --virtual-env Always use virtualenv. Install automatically if not present" + echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment" + echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)." + echo " -n, --no-recreate-db Don't recreate the test database." + echo " -x, --stop Stop running tests after the first error or failure." + echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added." + echo " -u, --update Update the virtual environment with any newer package versions" + echo " -p, --pep8 Just run flake8" + echo " -8, --8 Just run flake8, don't show PEP8 text for each error" + echo " -P, --no-pep8 Don't run flake8" + echo " -c, --coverage Generate coverage report" + echo " -h, --help Print this usage message" + echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list" + echo " --standard-threads Don't do the eventlet threading monkeypatch." + echo "" + echo "Note: with no options specified, the script will try to run the tests in a virtual environment," + echo " If no virtualenv is found, the script will ask if you would like to create one. If you " + echo " prefer to run tests NOT in a virtual environment, simply pass the -N option." + exit +} + +function process_option { + case "$1" in + -h|--help) usage;; + -V|--virtual-env) always_venv=1; never_venv=0;; + -N|--no-virtual-env) always_venv=0; never_venv=1;; + -r|--recreate-db) recreate_db=1;; + -n|--no-recreate-db) recreate_db=0;; + -f|--force) force=1;; + -u|--update) update=1;; + -p|--pep8) just_flake8=1;; + -8|--8) short_flake8=1;; + -P|--no-pep8) no_flake8=1;; + -c|--coverage) coverage=1;; + --standard-threads) + export STANDARD_THREADS=1 + ;; + -*) noseopts="$noseopts $1";; + *) noseargs="$noseargs $1" + esac +} + +venv=.venv +with_venv=tools/with_venv.sh +always_venv=0 +never_venv=0 +force=0 +noseargs= +noseopts= +wrapper="" +just_flake8=0 +short_flake8=0 +no_flake8=0 +coverage=0 +recreate_db=1 +update=0 + +for arg in "$@"; do + process_option $arg +done + +# If enabled, tell nose to collect coverage data +if [ $coverage -eq 1 ]; then + noseopts="$noseopts --with-coverage --cover-package=nova-powervc" +fi + +function run_tests { + # Just run the test suites in current environment + ${wrapper} $NOSETESTS + # If we get some short import error right away, print the error log directly + RESULT=$? + if [ "$RESULT" -ne "0" ]; + then + ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'` + if [ "$ERRSIZE" -lt "40" ]; + then + cat run_tests.log + fi + fi + return $RESULT +} + +function run_flake8 { + FLAGS=--show-pep8 + if [ $# -gt 0 ] && [ 'short' == ''$1 ] + then + FLAGS='' + fi + + + echo "Running flake8 ..." + # Just run flake8 in current environment + #echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + ${wrapper} flake8 $FLAGS powervc | tee pep8.txt + RESULT=${PIPESTATUS[0]} + return $RESULT +} + +NOSETESTS="nosetests $noseopts $noseargs" + +if [ $never_venv -eq 0 ] +then + # Remove the virtual environment if --force used + if [ $force -eq 1 ]; then + echo "Cleaning virtualenv..." + rm -rf ${venv} + fi + if [ $update -eq 1 ]; then + echo "Updating virtualenv..." 
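+    # install_venv.py is reused here; rerunning it against an existing
+    # virtualenv refreshes the installed package versions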
+ python tools/install_venv.py + fi + if [ -e ${venv} ]; then + wrapper="${with_venv}" + else + if [ $always_venv -eq 1 ]; then + # Automatically install the virtualenv + python tools/install_venv.py + wrapper="${with_venv}" + else + echo -e "No virtual environment found...create one? (Y/n) \c" + read use_ve + if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then + # Install the virtualenv and run the test suite in it + python tools/install_venv.py + wrapper=${with_venv} + fi + fi + fi +fi + +# Delete old coverage data from previous runs +if [ $coverage -eq 1 ]; then + ${wrapper} coverage erase +fi + + +if [ $just_flake8 -eq 1 ]; then + run_flake8 + RESULT=$? + echo "RESULT $RESULT" + exit $RESULT +fi + +if [ $short_flake8 -eq 1 ]; then + run_flake8 short + RESULT=$? + exit $RESULT +fi + +run_tests +RESULT=$? + +# NOTE(sirp): we only want to run flake8 when we're running the full-test +# suite, not when we're running tests individually. To handle this, we need to +# distinguish between options (noseopts), which begin with a '-', and arguments +# (noseargs). +if [ -z "$noseargs" ]; then + if [ $no_flake8 -eq 0 ]; then + run_flake8 + TMP_RESULT=$? + RESULT=$(($TMP_RESULT + $RESULT)) + fi +fi + +if [ $coverage -eq 1 ]; then + echo "Generating coverage report in covhtml/" + ${wrapper} coverage html -d covhtml -i +fi + +exit $RESULT \ No newline at end of file diff --git a/nova-powervc/test/__init__.py b/nova-powervc/test/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/nova-powervc/test/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/nova-powervc/test/compute/__init__.py b/nova-powervc/test/compute/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/nova-powervc/test/compute/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/nova-powervc/test/compute/test_sync_instance.py b/nova-powervc/test/compute/test_sync_instance.py new file mode 100644 index 0000000..ceacb0c --- /dev/null +++ b/nova-powervc/test/compute/test_sync_instance.py @@ -0,0 +1,122 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2013 All Rights Reserved +************************************************************* +""" +import unittest +import mox + +from nova.openstack.common import gettextutils +gettextutils.install('nova') + +from nova.compute import flavors + +from powervc.nova.driver.compute.manager import PowerVCCloudManager +from test.fake_os_flavor import FakeOSFlavor +from test.fake_os_image import FakeOSImage +from test.fake_os_instance import FakeOSInstance +from test.fake_pvc_flavor import FakePVCFlavor +from test.fake_pvc_image import FakePVCImage +from test.fake_pvc_instance import FakePVCInstance +from test.fake_ctx import FakeCTX +from powervc.common.utils import Utils +from powervc.common.utils import StagingCache + + +class TestSyncInstance(unittest.TestCase): + + def setUp(self): + """ + The method "setUp" is used to initialize the fake environment + """ + + # Create an instance of Mox + self.moxer = mox.Mox() + + # Create a fake OpenStack flavor object + self.osflavor = FakeOSFlavor() + # Create a fake OpenStack image object + self.osimage = FakeOSImage() + # Create a fake OpenStack instance object + self.osinstance = FakeOSInstance() + + # Create a fake PowerVC flavor object + self.pvcflavor = FakePVCFlavor() + # Create a fake PowerVC image object + self.pvcimage = FakePVCImage() + # Create a fake PowerVC instance object + self.pvcinstance = FakePVCInstance() + + self.ctx = FakeCTX() + + def init(self, compute_driver=None, *args, **kwargs): + self.project_id = "ibm-default" + self.scg_id = "storage connection group" + self._staging_cache = StagingCache() + + def init_utils(self): + pass + + def fake_get_id(self): + return "" + + def fake_get_user_id(self): + return "" + + PowerVCCloudManager.__init__ = init + Utils.__init__ = init_utils + Utils.get_local_staging_project_id = fake_get_id + Utils.get_local_staging_user_id = fake_get_user_id + + self.PowerVCCloudManager = PowerVCCloudManager() + + def tearDown(self): + pass + + def test_translate_pvc_instance(self): + + pvc_instance = self.pvcinstance.pvc_instance + ctx = self.ctx + + self.moxer.StubOutWithMock(self.PowerVCCloudManager._staging_cache, + "get_staging_user_and_project") + self.PowerVCCloudManager._staging_cache.\ + get_staging_user_and_project(True)\ + .AndReturn(('', '')) + self.moxer.StubOutWithMock(self.PowerVCCloudManager, + "_get_image_from_instance") + self.PowerVCCloudManager._get_image_from_instance(ctx, + pvc_instance, + None)\ + .AndReturn(self.osimage.os_image) + + self.moxer.StubOutWithMock(self.PowerVCCloudManager, + "_get_flavor_from_instance") + self.PowerVCCloudManager._get_flavor_from_instance(ctx, + pvc_instance, + None)\ + .AndReturn(self.osflavor.os_flavor) + + self.moxer.StubOutWithMock(flavors, "save_flavor_info") + flavors.save_flavor_info(dict(), self.osflavor.os_flavor)\ + .AndReturn("system_metadata") + + self.moxer.ReplayAll() + + ins, image, flavor = self.PowerVCCloudManager.\ + _translate_pvc_instance(ctx, pvc_instance) + + self.moxer.UnsetStubs() + self.moxer.VerifyAll() + + print "====ins=======================================================" + print ins + print "===self.osinstance.os_instance================================" + print self.osinstance.os_instance + print "==============================================================" + + self.assertEqual(ins, self.osinstance.os_instance) diff --git a/nova-powervc/test/fake_ctx.py b/nova-powervc/test/fake_ctx.py new file mode 100644 index 0000000..e8b4cb4 --- /dev/null +++ b/nova-powervc/test/fake_ctx.py @@ -0,0 +1,30 @@ +COPYRIGHT = """ 
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+    The class FakeCTX is used to produce the fake data of CTX
+"""
+
+
+class FakeCTX():
+
+    user_id = None
+    project_id = None
+
+    def __init__(self):
+
+        self.user_id = "testuser"
+        self.project_id = "testproject"
+
+    def update(self, **update):
+
+        # Only update the attributes when keyword arguments are given
+        if update:
+            self.user_id = update['user_id']
+            self.project_id = update['project_id']
diff --git a/nova-powervc/test/fake_os_flavor.py b/nova-powervc/test/fake_os_flavor.py
new file mode 100644
index 0000000..e84106e
--- /dev/null
+++ b/nova-powervc/test/fake_os_flavor.py
@@ -0,0 +1,51 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+    The class FakeOSFlavor is used to produce
+    the fake data of the OpenStack Flavor
+"""
+
+
+class FakeOSFlavor():
+
+    os_flavor = dict()
+
+    items = [
+        'id',
+        'name',
+        'memory_mb',
+        'vcpus',
+        'root_gb',
+        'ephemeral_gb',
+        'flavorid',
+        'swap',
+        'rxtx_factor',
+        'vcpu_weight',
+    ]
+
+    def __init__(self):
+
+        self.os_flavor['id'] = 2
+        self.os_flavor['name'] = "m1.small"
+        self.os_flavor['memory_mb'] = 2048
+        self.os_flavor['vcpus'] = 1
+        # FIXME: the proper values for "root_gb", "ephemeral_gb" and
+        # "flavorid" are unknown, so placeholder values are used.
+        self.os_flavor['root_gb'] = 0
+        self.os_flavor['ephemeral_gb'] = 0
+        self.os_flavor['flavorid'] = "fakeflavorid"
+        self.os_flavor['swap'] = ""
+        self.os_flavor['rxtx_factor'] = 1.0
+        self.os_flavor['vcpu_weight'] = None
+
+    def update(self, **update):
+
+        self.os_flavor.update(**update)
diff --git a/nova-powervc/test/fake_os_image.py b/nova-powervc/test/fake_os_image.py
new file mode 100644
index 0000000..c7a852a
--- /dev/null
+++ b/nova-powervc/test/fake_os_image.py
@@ -0,0 +1,48 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" + +""" + The class FakeOSImage is used to produce + the fake data of the OpenStack image +""" + + +class FakeOSImage(): + + os_image = dict() + + items = [ + 'id', + 'name', + 'created_at', + 'updated_at', + 'deleted_at', + 'status', + 'is_public', + 'container_format', + 'disk_format', + 'size' + ] + + def __init__(self): + self.os_image['id'] = "18b28659-966d-4913-bdda-2ca3cc68fb59" + self.os_image['name'] = "RHEL63" + self.os_image['created_at'] = "2013-05-17T17:47:25Z" + self.os_image['updated_at'] = "2013-05-17T17:47:45Z" + self.os_image['status'] = "ACTIVE" + self.os_image['deleted_at'] = None + self.os_image['is_public'] = True + self.os_image['container_format'] = None + self.os_image['disk_format'] = None + self.os_image['size'] = 4233 + + def update(self, **update): + + self.os_image.update(**update) diff --git a/nova-powervc/test/fake_os_instance.py b/nova-powervc/test/fake_os_instance.py new file mode 100644 index 0000000..6fb3b59 --- /dev/null +++ b/nova-powervc/test/fake_os_instance.py @@ -0,0 +1,104 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +""" + The class FakeOSInstance is used to produce the fake + data of the OpenStack instance +""" + +import datetime + + +class FakeOSInstance(): + + os_instance = dict() + + items = [ + 'image_ref', + 'launch_time', + 'launched_at', + 'scheduled_at', + 'memory_mb', + 'vcpus', + 'root_gb', + 'ephemeral_gb', + 'display_name', + 'display_description', + 'locked', + 'instance_type_id', + 'progress', + 'metadata', + 'architecture', + 'host', + 'launched_on', + 'hostname', + 'access_ip_v4', + 'root_device_name', + 'system_metadata', + 'vm_state', + 'task_state', + 'power_state' + ] + + def __init__(self): + + self.os_instance['image_ref'] = "18b28659-966d-4913-bdda-2ca3cc68fb59" + self.os_instance['launch_time'] = \ + datetime.datetime(2013, 8, 12, 5, 59, 25) + self.os_instance['launched_at'] = \ + datetime.datetime(2013, 8, 12, 5, 59, 25) + self.os_instance['scheduled_at'] = \ + datetime.datetime(2013, 8, 12, 6, 57, 23) + self.os_instance['memory_mb'] = 2048 + self.os_instance['vcpus'] = 1 + self.os_instance['root_gb'] = 0 + self.os_instance['ephemeral_gb'] = 0 + self.os_instance['display_name'] = "IVT-Test17" + self.os_instance['display_description'] = "IVT-Test17" + self.os_instance['locked'] = False + self.os_instance['instance_type_id'] = 2 + self.os_instance['progress'] = 0 + self.os_instance['metadata'] = { + 'powervm:min_vcpus': '0.10', + 'pvc_id': + '786d7a82-c6fe-4ee3-bb0b-9faf81f835f9', + 'powervm:cpu_utilization': 0.01, + 'powervm:min_memory_mb': 512, + 'powervm:max_cpus': '', + 'powervm:max_vcpus': '16.00', + 'powervm:min_cpus': '', + 'powervm:max_memory_mb': 8192, + 'powervm:cpus': '' + } + self.os_instance['architecture'] = "ppc64" + self.os_instance['host'] = \ + "blade7_9-5-46-230" + self.os_instance['launched_on'] = \ + "72bb2b5af241413172ad4cf38354e727ce317843ee2432c36439643c" + self.os_instance['hostname'] = "IVT-Test17" + self.os_instance['access_ip_v4'] = None + self.os_instance['root_device_name'] = None + self.os_instance['system_metadata'] = "system_metadata" + self.os_instance['vm_state'] = "active" + self.os_instance['task_state'] = None + self.os_instance['power_state'] = 1 + 
self.os_instance['project_id'] = ""
+        self.os_instance['node'] = "IVT-Test17"
+        self.os_instance['user_id'] = ''
+
+    def update(self, **update):
+
+        self.os_instance.update(**update)
+
+    def get(self, name, default_value=None):
+        return self.os_instance.get(name, default_value)
+
+    def __getitem__(self, name):
+        return self.os_instance.get(name)
diff --git a/nova-powervc/test/fake_pvc_flavor.py b/nova-powervc/test/fake_pvc_flavor.py
new file mode 100644
index 0000000..23b8e1a
--- /dev/null
+++ b/nova-powervc/test/fake_pvc_flavor.py
@@ -0,0 +1,44 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+    The class FakePVCFlavor is used to produce
+    the fake data of the PowerVC Flavor
+"""
+
+
+class FakePVCFlavor():
+    fake_pvc_flavor = dict()
+
+    items = ["name",
+             "ram",
+             "OS-FLV-DISABLED:disabled",
+             "vcpus",
+             "swap",
+             "os-flavor-access:is_public",
+             "rxtx_factor",
+             "OS-FLV-EXT-DATA:ephemeral",
+             "disk",
+             "id"]
+
+    def __init__(self):
+        self.fake_pvc_flavor["name"] = "m1.small"
+        self.fake_pvc_flavor["ram"] = 2048
+        self.fake_pvc_flavor["OS-FLV-DISABLED:disabled"] = False
+        self.fake_pvc_flavor["vcpus"] = 1
+        self.fake_pvc_flavor["swap"] = ""
+        self.fake_pvc_flavor["os-flavor-access:is_public"] = True
+        self.fake_pvc_flavor["rxtx_factor"] = 1.0
+        self.fake_pvc_flavor["OS-FLV-EXT-DATA:ephemeral"] = 0
+        self.fake_pvc_flavor["disk"] = 20
+        self.fake_pvc_flavor["id"] = 2
+
+    def update(self, **update):
+        self.fake_pvc_flavor.update(**update)
diff --git a/nova-powervc/test/fake_pvc_image.py b/nova-powervc/test/fake_pvc_image.py
new file mode 100644
index 0000000..0440cf7
--- /dev/null
+++ b/nova-powervc/test/fake_pvc_image.py
@@ -0,0 +1,56 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+"""
+    The class FakePVCImage is used to produce
+    the fake data of the PowerVC image
+"""
+
+
+class FakePVCImage():
+
+    fake_pvc_image = dict()
+
+    items = [
+        "status",
+        "updated",
+        "id",
+        "OS-EXT-IMG-SIZE:size",
+        "name",
+        "created",
+        "minDisk",
+        "progress",
+        "minRam",
+        "os_distro",
+        "hypervisor_type",
+        "architecture",
+        "volume_id"
+    ]
+
+    def __init__(self):
+        self.fake_pvc_image["status"] = "ACTIVE"
+        self.fake_pvc_image["updated"] = "2013-05-17T17:47:45Z"
+        self.fake_pvc_image["id"] = "18b28659-966d-4913-bdda-2ca3cc68fb59"
+        self.fake_pvc_image["OS-EXT-IMG-SIZE:size"] = 4233
+        self.fake_pvc_image["name"] = "RHEL63"
+        self.fake_pvc_image["created"] = "2013-05-17T17:47:25Z"
+        self.fake_pvc_image["minDisk"] = 0
+        self.fake_pvc_image["progress"] = 100
+        self.fake_pvc_image["minRam"] = 0
+        self.fake_pvc_image["os_distro"] = "rhel"
+        self.fake_pvc_image["hypervisor_type"] = "powervm"
+        self.fake_pvc_image["architecture"] = "ppc64"
+        self.fake_pvc_image["volume_id"] = "6005076802808446B0000000000003C8"
+
+    def update(self, **updates):
+        """
+        The method "update" is used to update the fake PowerVC image data
+        """
+        self.fake_pvc_image.update(**updates)
diff --git a/nova-powervc/test/fake_pvc_instance.py b/nova-powervc/test/fake_pvc_instance.py
new file mode 100644
index 0000000..2c89e44
--- /dev/null
+++ b/nova-powervc/test/fake_pvc_instance.py
@@ -0,0 +1,119 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013, 2014 All Rights Reserved
+
+*************************************************************
+"""
+
+"""
+    The class FakePVCInstance is used to produce
+    the fake data of the PowerVC instance
+"""
+
+
+class FakePVCInstance():
+
+    pvc_instance = dict()
+
+    items = [
+        "status",
+        "updated",
+        "hostId",
+        "cpu_utilization",
+        "key_name",
+        "max_vcpus",
+        "OS-EXT-STS:task_state",
+        "OS-EXT-SRV-ATTR:host",
+        "OS-EXT-STS:vm_state",
+        "OS-EXT-SRV-ATTR:instance_name",
+        "vcpu_mode",
+        "id",
+        "OS-EXT-SRV-ATTR:hypervisor_hostname",
+        "min_memory_mb",
+        "max_memory_mb",
+        "user_id",
+        "name",
+        "created",
+        "tenant_id",
+        "min_vcpus",
+        "OS-DCF:diskConfig",
+        "vcpus",
+        "memory_mb",
+        "accessIPv4",
+        "accessIPv6",
+        "progress",
+        "OS-EXT-STS:power_state",
+        "OS-EXT-AZ:availability_zone",
+        "memory_mode",
+        "launched_at",
+        "scheduled_at",
+        "cpus",
+        "min_cpus",
+        "max_cpus",
+        "health_status"
+    ]
+
+    _info = None
+
+    def __init__(self):
+        self.pvc_instance["status"] = "ACTIVE"
+        self.status = self.pvc_instance["status"]
+        self.pvc_instance["updated"] = "2013-07-08T20:52:26Z"
+        self.pvc_instance["hostId"] = \
+            "72bb2b5af241413172ad4cf38354e727ce317843ee2432c36439643c"
+        self.pvc_instance["cpu_utilization"] = 0.01
+        self.pvc_instance["key_name"] = None
+        self.pvc_instance["max_vcpus"] = "16.00"
+        self.pvc_instance["OS-EXT-STS:task_state"] = None
+        self.pvc_instance["OS-EXT-SRV-ATTR:host"] = "blade7_9-5-46-230"
+        self.__dict__["OS-EXT-SRV-ATTR:host"] = \
+            self.pvc_instance["OS-EXT-SRV-ATTR:host"]
+        self.pvc_instance["OS-EXT-STS:vm_state"] = "active"
+        self.pvc_instance["OS-EXT-SRV-ATTR:instance_name"] = \
+            "nova-ngp02-05-powervc--00000012"
+        self.pvc_instance["vcpu_mode"] = "shared"
+        self.pvc_instance["id"] = "786d7a82-c6fe-4ee3-bb0b-9faf81f835f9"
+        self.id = self.pvc_instance["id"]
+        self.pvc_instance["OS-EXT-SRV-ATTR:hypervisor_hostname"] = \
+            "ngp02-07.rch.kstart.ibm.com"
+        self.pvc_instance["min_memory_mb"] = 512
+        self.pvc_instance["max_memory_mb"] = 8192
+        self.pvc_instance["user_id"] = "499443d298384e4f8cba0705789a523c"
+        self.pvc_instance["name"] = "IVT-Test17"
+        self.pvc_instance["created"] = "2013-05-17T21:28:35Z"
+        self.pvc_instance["tenant_id"] = "67ebad6b205a4b4a9582684c709f816c"
+        self.pvc_instance["min_vcpus"] = "0.10"
+        self.pvc_instance["OS-DCF:diskConfig"] = "MANUAL"
+        self.pvc_instance["vcpus"] = "0.10"
+        self.pvc_instance["memory_mb"] = 2048
+        self.pvc_instance["accessIPv4"] = ""
+        self.pvc_instance["accessIPv6"] = ""
+        self.pvc_instance["progress"] = 0
+        self.pvc_instance["OS-EXT-STS:power_state"] = 1
+        self.pvc_instance["OS-EXT-AZ:availability_zone"] = "nova"
+        self.pvc_instance["memory_mode"] = "dedicated"
+        self.pvc_instance["launched_at"] = 1376287165.55
+        self.pvc_instance["scheduled_at"] = 1376290643.2
+        self.pvc_instance["cpus"] = ""
+        self.pvc_instance["min_cpus"] = ""
+        self.pvc_instance["max_cpus"] = ""
+        self.pvc_instance["OS-EXT-SRV-ATTR:hypervisor_hostname"] = "IVT-Test17"
+        self.health_status = {u'health_value': u'WARNING'}
+
+        # This is just a slice of the complete _info structure of a
+        # PowerVC instance; refer to sample_pvc_instance.json for the
+        # whole image of a PowerVC instance and the _info in it.
+        self._info = {
+            'max_memory_mb': 8192,
+            'memory_mb': 2048,
+            'cpus': 2,
+            'OS-EXT-STS:power_state': 1
+        }
+
+    def update(self, **update):
+        self.pvc_instance.update(**update)
diff --git a/nova-powervc/test/nova/__init__.py b/nova-powervc/test/nova/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/nova-powervc/test/nova/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/nova-powervc/test/nova/driver/__init__.py b/nova-powervc/test/nova/driver/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/nova-powervc/test/nova/driver/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/nova-powervc/test/nova/driver/virt/__init__.py b/nova-powervc/test/nova/driver/virt/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/nova-powervc/test/nova/driver/virt/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
diff --git a/nova-powervc/test/nova/driver/virt/powervc/__init__.py b/nova-powervc/test/nova/driver/virt/powervc/__init__.py
new file mode 100644
index 0000000..4bea874
--- /dev/null
+++ b/nova-powervc/test/nova/driver/virt/powervc/__init__.py
@@ -0,0 +1,9 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp.
2013 All Rights Reserved +************************************************************* +""" diff --git a/nova-powervc/test/nova/driver/virt/powervc/sync/__init__.py b/nova-powervc/test/nova/driver/virt/powervc/sync/__init__.py new file mode 100644 index 0000000..4bea874 --- /dev/null +++ b/nova-powervc/test/nova/driver/virt/powervc/sync/__init__.py @@ -0,0 +1,9 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" diff --git a/nova-powervc/test/nova/driver/virt/powervc/sync/testflavorsync.py b/nova-powervc/test/nova/driver/virt/powervc/sync/testflavorsync.py new file mode 100644 index 0000000..3158435 --- /dev/null +++ b/nova-powervc/test/nova/driver/virt/powervc/sync/testflavorsync.py @@ -0,0 +1,83 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 2013 All Rights Reserved +************************************************************* +""" + +import unittest +import mox + +from powervc.nova.driver.virt.powervc.sync import flavorsync +import powervc.common.config as cfg + +CONF = cfg.CONF + + +class TestFlavorSync(unittest.TestCase): + + def setUp(self): + """ + The method "setUp" is used to initialize the fake environment + """ + + # Create an instance of Mox + self.moxer = mox.Mox() + + def init(self, driver=None): + self.driver = None + self.prefix = 'PVC-' + + flavorsync.FlavorSync.__init__ = init + + self.flavor_sync = flavorsync.FlavorSync(driver=None) + + def tearDown(self): + pass + + def runTest(self): + + flavor_black_list = [] + flavor_white_list = [] + + self.moxer.StubOutWithMock(self.flavor_sync, "get_flavors_black_list") + self.flavor_sync.get_flavors_black_list().AndReturn(flavor_black_list) + + self.moxer.StubOutWithMock(self.flavor_sync, "get_flavors_white_list") + self.flavor_sync.get_flavors_white_list().AndReturn(flavor_white_list) + + self.moxer.ReplayAll() + + response = self.flavor_sync._check_for_sync("m1.tiny") + + self.assertTrue(response, msg=None) + + flavor_black_list = ["m1.tiny", "m1.small"] + flavor_white_list = ["m1.tiny", "m1.medium"] + + flavor_name_list = ["m1.tiny", "m1.small", "m1.medium", "m1.large"] + response_list = [False, False, True, False] + + for (flavor_name, response) in zip(flavor_name_list, response_list): + + self.moxer.UnsetStubs() + + self.moxer.StubOutWithMock(self.flavor_sync, + "get_flavors_black_list") + self.flavor_sync.get_flavors_black_list().\ + AndReturn(flavor_black_list) + + self.moxer.StubOutWithMock(self.flavor_sync, + "get_flavors_white_list") + self.flavor_sync.get_flavors_white_list().\ + AndReturn(flavor_white_list) + self.moxer.ReplayAll() + + response_ret = self.flavor_sync._check_for_sync(flavor_name) + + self.assertEquals(response_ret, response) + + self.moxer.UnsetStubs() diff --git a/nova-powervc/test/nova/driver/virt/powervc/test_driver.py b/nova-powervc/test/nova/driver/virt/powervc/test_driver.py new file mode 100644 index 0000000..d0e7931 --- /dev/null +++ b/nova-powervc/test/nova/driver/virt/powervc/test_driver.py @@ -0,0 +1,709 @@ +COPYRIGHT = """ +************************************************************* +Licensed Materials - Property of IBM + +OCO Source Materials + +(C) Copyright IBM Corp. 
2014 All Rights Reserved + +************************************************************* +""" +import sys +import mock +import testtools +import threading +import os +from nova import exception +os.environ['EVENTLET_NO_GREENDNS'] = 'yes' +from nova import test +from nova.openstack.common import jsonutils +from nova.compute import task_states +from nova.image import glance +from nova import db +from novaclient import exceptions +import unittest +from powervc import utils as powervc_utils +sys.modules['powervc.common.client'] = mock.MagicMock() +from mock import MagicMock +from powervc.nova.driver.virt.powervc.service import PowerVCService +from powervc.nova.driver.virt.powervc.driver import PowerVCDriver +from powervc.nova.driver.virt.powervc.driver import CONF as driver_conf +from powervc.nova.driver.virt.powervc import pvc_vm_states +from test.fake_pvc_instance import FakePVCInstance +from test.fake_os_instance import FakeOSInstance +from powervc.nova.driver.compute import constants +from nova.exception import MigrationPreCheckError as mpcError +from nova.exception import Invalid + +pvcIns = FakePVCInstance() +hostname = '789523X_10421DB' + + +def _change_state(ins, status): + ins.status = status + + +class FakeClient(object): + def __init__(self): + pass + + +class FakeHostStat(object): + stat = dict() + + def __init__(self): + self.stat['vcpus'] = 8.5 + self.stat['vcpus_used'] = 1 + self.stat['local_gb'] = 1024 + self.stat['local_gb_used'] = 1024 - 500 + self.stat['proc_units_reserved'] = 0 + self.stat['memory_mb'] = 1000 + self.stat['memory_mb_reserved'] = 0 + self.stat['memory_mb_used'] = 500 + self.stat['disk_available_least'] = 50 + self.stat['hypervisor_type'] = 'powervm' + self.stat['hypervisor_version'] = 7 + self.stat['cpu_info'] = 'powervm' + self.stat['hypervisor_hostname'] = hostname + self.stat['supported_instances'] = \ + jsonutils.dumps(constants.POWERVC_SUPPORTED_INSTANCES) + + +class Server(): + def __init__(self, status): + self.status = status + self.id = '98765' + self.metadata = dict() + + +class Volume(): + def __init__(self): + self.metadata = dict() + self.metadata['pvc:id'] = 'pvc_volume_id' + + +class Attachment(): + def __init__(self): + self.volumeId = 'pvc_volume_id' + + +class PowerVCDriverTestCase(test.NoDBTestCase): + + def save_before_patch(self): + self.pvcsvc_init_copy = PowerVCService.__init__ + self.pvcsvc_get_server_copy = PowerVCService._get_server + self.pvcsvc_get_pvcserver_copy = PowerVCService._get_pvcserver + self.pvcdrv_init_copy = PowerVCDriver.__init__ + + def patch(self): + PowerVCService.__init__ = MagicMock(return_value=None) + PowerVCService._get_server = MagicMock(return_value=pvcIns) + PowerVCService._get_pvcserver = MagicMock(return_value=pvcIns) + PowerVCDriver.__init__ = MagicMock(return_value=None) + + def unpatch(self): + PowerVCService.__init__ = self.pvcsvc_init_copy + PowerVCService._get_server = self.pvcsvc_get_server_copy + PowerVCService._get_pvcserver = self.pvcsvc_get_pvcserver_copy + PowerVCDriver.__init__ = self.pvcdrv_init_copy + + def setUp(self): + super(PowerVCDriverTestCase, self).setUp() + self.save_before_patch() + self.patch() + self._driver = PowerVCDriver() + self._driver._service = PowerVCService(FakeClient()) + self._driver._service.longrun_loop_interval = 1 + self._driver._service.longrun_initial_delay = 1 + self._driver._service._manager = MagicMock() + self._driver._service._manager.get = \ + MagicMock(return_value=pvcIns) + self._driver._service._validate_response = MagicMock() + 
glance.get_default_image_service = MagicMock()
+        self.pvc_id = 123456789
+        self._driver.hostname = hostname
+        pvcIns.__dict__["OS-EXT-SRV-ATTR:host"] = "source_host_name"
+
+    def test_power_on_active_instance(self):
+        # Test an already ACTIVE instance.
+        pvcIns.status = pvc_vm_states.ACTIVE
+        self._driver.power_on(None, pvcIns, None)
+        self.assertEqual(pvc_vm_states.ACTIVE, pvcIns.status,
+                         "Tested power on an ACTIVE instance.")
+
+    def test_power_on_shutoff_instance(self):
+        # Test an OFF instance.
+        pvcIns.status = pvc_vm_states.SHUTOFF
+        # Use a timer to change the status later.
+        timer = threading.Timer(1, _change_state,
+                                [pvcIns, pvc_vm_states.ACTIVE])
+        timer.start()
+        self._driver.power_on(None, pvcIns, None)
+        timer.cancel()
+        self.assertEqual(pvc_vm_states.ACTIVE, pvcIns.status,
+                         "Tested power on a SHUTOFF instance.")
+
+    def test_power_off_shutoff_instance(self):
+        # Test an OFF instance.
+        pvcIns.status = pvc_vm_states.SHUTOFF
+        self._driver.power_off(pvcIns)
+        self.assertEqual(pvc_vm_states.SHUTOFF, pvcIns.status,
+                         "Tested power off a SHUTOFF instance.")
+
+    def test_power_off_active_instance(self):
+        # Test power off an active instance.
+        pvcIns.status = pvc_vm_states.ACTIVE
+        timer = threading.Timer(1, _change_state,
+                                [pvcIns, pvc_vm_states.SHUTOFF])
+        timer.start()
+        self._driver.power_off(pvcIns)
+        timer.cancel()
+        self.assertEqual(pvc_vm_states.SHUTOFF, pvcIns.status,
+                         "Tested power off an ACTIVE instance.")
+
+    def test_get_available_resource(self):
+        fake_hypervisor = MagicMock()
+        fake_hypervisor_info = FakeHostStat().stat
+        fake_hypervisor._info = fake_hypervisor_info
+        self._driver.get_hypervisor_by_hostname = \
+            MagicMock(return_value=fake_hypervisor)
+        stats = self._driver.get_available_resource(None)
+        int_fake_vcpu = int(fake_hypervisor_info['vcpus'])
+        self.assertEqual(stats['vcpus'], int_fake_vcpu)
+        self.assertEqual(stats['local_gb'], fake_hypervisor_info['local_gb'])
+        fake_local_gb_used = fake_hypervisor_info['local_gb_used']
+        self.assertEqual(stats['local_gb_used'], fake_local_gb_used)
+        self.assertEqual(stats['memory_mb'], fake_hypervisor_info['memory_mb'])
+        fake_memory_mb_used = fake_hypervisor_info['memory_mb_used']
+        self.assertEqual(stats['memory_mb_used'], fake_memory_mb_used)
+        fake_hypervisor_type = fake_hypervisor_info['hypervisor_type']
+        self.assertEqual(stats['hypervisor_type'], fake_hypervisor_type)
+        fake_hypervisor_version = fake_hypervisor_info['hypervisor_version']
+        self.assertEqual(stats['hypervisor_version'], fake_hypervisor_version)
+        self.assertEqual(stats['hypervisor_hostname'], self._driver.hostname)
+        self.assertEqual(stats['supported_instances'], jsonutils.dumps(
+            constants.POWERVC_SUPPORTED_INSTANCES))
+
+    def test_get_available_resource_memory_disk(self):
+        fake_hypervisor = MagicMock()
+        fake_hypervisor_info = FakeHostStat().stat
+        fake_hypervisor._info = fake_hypervisor_info
+        fake_hypervisor_info['proc_units_reserved'] = 3
+        fake_hypervisor_info['memory_mb_reserved'] = 50
+        self._driver.get_hypervisor_by_hostname = \
+            MagicMock(return_value=fake_hypervisor)
+        stats = self._driver.get_available_resource(None)
+        fake_hypervisor._info = fake_hypervisor_info
+        vcpu_expected = int(fake_hypervisor_info['vcpus'] -
+                            fake_hypervisor_info['proc_units_reserved'])
+        self.assertEqual(stats['vcpus'], vcpu_expected)
+        memory_expected = int(fake_hypervisor_info['memory_mb'] -
+                              fake_hypervisor_info['memory_mb_reserved'])
+        self.assertEqual(stats['memory_mb'], memory_expected)
+
+    def test_check_can_live_migrate_source(self):
+        pass
+
+    def test_check_can_live_migrate_destination_cleanup(self):
+        pass
+
+    def test_check_can_live_migrate_destination_no_instance(self):
+        os_instance = FakeOSInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=None)
+        cclmd = self._driver.check_can_live_migrate_destination
+        dest_compute_info = FakeHostStat().stat
+        self.assertRaises(mpcError, cclmd, None,
+                          os_instance, None, dest_compute_info)
+
+    def test_check_can_live_migrate_destination_invalid_state(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        cclmd = self._driver.check_can_live_migrate_destination
+        dest_compute_info = FakeHostStat().stat
+        service._is_live_migration_valid = MagicMock(return_value=False)
+        self.assertRaises(mpcError, cclmd, None,
+                          os_instance, None, dest_compute_info)
+
+    def test_check_can_live_migrate_destination_block_migration(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        cclmd = self._driver.check_can_live_migrate_destination
+        dest_compute_info = FakeHostStat().stat
+        service._is_live_migration_valid = MagicMock(return_value=True)
+        self.assertRaises(mpcError, cclmd, None,
+                          os_instance, None,
+                          dest_compute_info, block_migration=True)
+
+    def test_check_can_live_migrate_destination_disk_over_commit(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        cclmd = self._driver.check_can_live_migrate_destination
+        dest_compute_info = FakeHostStat().stat
+        service._is_live_migration_valid = MagicMock(return_value=True)
+        self.assertRaises(mpcError, cclmd, None,
+                          os_instance, None,
+                          dest_compute_info, disk_over_commit=True)
+
+    def test__check_defer_placement(self):
+        os_instance = FakeOSInstance()
+        driver = self._driver
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = 'true'
+        self.assertTrue(driver._check_defer_placement(os_instance))
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = \
+            'false'
+        self.assertFalse(driver._check_defer_placement(os_instance))
+        # if the property is not present
+        del os_instance.os_instance['metadata']['powervm:defer_placement']
+        self.assertFalse(driver._check_defer_placement(os_instance))
+
+    def test_check_can_live_migrate_destination_defer_placement(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        cclmd = self._driver.check_can_live_migrate_destination
+        dest_compute_info = FakeHostStat().stat
+        service._is_live_migration_valid = MagicMock(return_value=True)
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = 'true'
+        self.assertEquals(dest_compute_info, cclmd(None, os_instance, None,
+                                                   dest_compute_info))
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = \
+            'false'
+        service.get_valid_destinations = MagicMock(return_value=[])
+        self.assertRaises(Invalid, cclmd, None,
+                          os_instance, None, dest_compute_info)
+        service.get_valid_destinations = \
+            MagicMock(return_value=[driver_conf.get('host')])
+        self.assertEquals(dest_compute_info, cclmd(None, os_instance, None,
+                                                   dest_compute_info))
+
+    def test_live_migrate(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        dest_compute_info = FakeHostStat().stat
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = \
+            'false'
+        recover_method = MagicMock()
+        post_method = MagicMock()
+
+        def change_host(server):
+            server.__dict__["OS-EXT-SRV-ATTR:host"] = "dest_host_name"
+        timer = threading.Timer(1, change_host,
+                                [pvcIns])
+        timer.start()
+        self._driver.live_migration(None, os_instance, dest_compute_info,
+                                    post_method, recover_method)
+        timer.cancel()
+        post_method.assert_called_once_with(None, os_instance,
+                                            dest_compute_info, False, None)
+
+    def test_live_migrate_with_defer(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        dest_compute_info = FakeHostStat().stat
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = 'true'
+        recover_method = MagicMock()
+        post_method = MagicMock()
+
+        def change_host(server):
+            server.__dict__["OS-EXT-SRV-ATTR:host"] = "dest_host_name"
+        timer = threading.Timer(1, change_host,
+                                [pvcIns])
+        timer.start()
+        self._driver.live_migration(None, os_instance, dest_compute_info,
+                                    post_method, recover_method)
+        timer.cancel()
+        post_method.assert_called_once_with(None, os_instance,
+                                            None, False, None)
+
+    def test_live_migrate_with_recover(self):
+        os_instance = FakeOSInstance()
+        pvc_instance = FakePVCInstance()
+        service = self._driver._service
+        service.get_instance = MagicMock(return_value=pvc_instance)
+        dest_compute_info = FakeHostStat().stat
+        os_instance.os_instance['metadata']['powervm:defer_placement'] = \
+            'false'
+        recover_method = MagicMock()
+        post_method = MagicMock()
+        service.live_migrate = MagicMock(side_effect=Exception("Error"))
+        self.assertRaises(Exception, self._driver.live_migration,
+                          None, os_instance, dest_compute_info,
+                          post_method, recover_method)
+        recover_method.assert_called_once_with(None, os_instance,
+                                               dest_compute_info, False, None)
+
+    def test_confirm_migration(self):
+        pvc_driver = self._driver
+        pvc_driver._service = MagicMock()
+        pvc_driver._service.confirm_migration = MagicMock()
+        migration = 0
+        instance = 0
+        network_info = 0
+        pvc_driver.confirm_migration(migration, instance, network_info)
+        pvc_driver._service.confirm_migration.assert_called_once_with(instance)
+
+    def test_detach_volume(self):
+        pvc_driver = self._driver
+        pvc_driver._service._volumes = MagicMock()
+        pvc_driver._service._volumes.delete_server_volume = MagicMock()
+        pvc_driver._service._get_pvc_volume_id = MagicMock(return_value=1)
+        pvc_driver._service.longrun_loop_interval = 0
+        pvc_driver._service.longrun_initial_delay = 0
+        pvc_driver._service.max_tries = 2
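+        # The stubbed _get_pvc_volume_id maps the local volume id carried
+        # in connection_info['serial'] to PowerVC volume id 1, which lets
+        # the test assert delete_server_volume(1, 1) below (the ids are
+        # fake test values).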
+ connection_info = {"serial": 1} + metadata = {"pvc_id": 1} + instance = {"metadata": metadata} + pvc_driver.detach_volume(connection_info, instance, None) + PowerVCService._get_pvc_volume_id + pvc_driver._service._get_pvc_volume_id.assert_called_once_with(1) + pvc_driver._service._volumes.delete_server_volume.\ + assert_called_once_with(1, 1) + + def test_finish_migration(self): + pvc_driver = self._driver + pvc_driver.confirm_migration = MagicMock() + pvc_driver.power_on = MagicMock() + pvc_driver._service.resize_instance = MagicMock() + pvc_driver._service.update_correct_host = MagicMock() + context = 0 + migration = 0 + instance = 0 + disk_info = 0 + network_info = 0 + image_meta = 0 + resize_instance = True + block_device_info = 0 + power_on = True + pvc_driver.finish_migration(context, migration, instance, + disk_info, network_info, image_meta, + resize_instance, block_device_info, + power_on) + pvc_driver._service.resize_instance.assert_called_once_with(context, + migration, + instance, + image_meta) + pvc_driver.confirm_migration.assert_called_once_with(None, + instance, None) + pvc_driver._service.update_correct_host(context, instance) + pvc_driver.power_on.assert_called_once_with(context, instance, + network_info, + block_device_info) + + def test_snapshot(self): + pvc_driver = self._driver + pvc_driver._service.snapshot = MagicMock() + context = MagicMock() + instance = 1 + image_id = 1 + update_task_state = MagicMock() + pvc_driver.snapshot(context, instance, image_id, + update_task_state) + update_task_state.assert_any_call( + task_state=task_states.IMAGE_PENDING_UPLOAD) + update_task_state.assert_any_call( + task_state=task_states.IMAGE_UPLOADING, + expected_state=task_states.IMAGE_PENDING_UPLOAD) + + def tearDown(self): + super(PowerVCDriverTestCase, self).tearDown() + self.unpatch() + + +class TestDriver(unittest.TestCase): + def setUp(self): + def init(self, pvc_client=None): + pass + PowerVCDriver.__init__ = init + PowerVCService.__init__ = init + self.powervc_driver = PowerVCDriver() + self.powervc_driver.hypervisor_id = "fake_hypervisor_id_123456" + self.powervc_driver._service = PowerVCService(None) + self.powervc_driver._service._manager = mock.MagicMock() + self.powervc_driver._service._volumes = mock.MagicMock() + self.powervc_driver._service._cinderclient = mock.MagicMock() + self.powervc_driver._service.longrun_loop_interval = 2 + self.powervc_driver._service.longrun_initial_delay = 3 + self.powervc_driver._service.max_tries = 3 + + def test_spawn_success(self): + context = None + instance = self.fake_instance() + image_meta = self.fake_image_meta() + injected_files = None + admin_password = None + PowerVCDriver._check_defer_placement = \ + mock.MagicMock(return_value=False) + #mock database operation + db.flavor_get = mock.MagicMock() + PowerVCDriver._get_pvc_network_info = mock.MagicMock() + self.powervc_driver._service.validate_update_scg = mock.MagicMock() + createdServer = Server(pvc_vm_states.BUILD) + self.powervc_driver._service._manager.create = \ + mock.MagicMock(return_value=createdServer) + createFinished = Server(pvc_vm_states.ACTIVE) + self.powervc_driver._service._manager.get = \ + mock.MagicMock(return_value=createFinished) + self.powervc_driver._service.\ + _update_local_instance_by_pvc_created_instance = \ + mock.MagicMock() + self.powervc_driver._clean_vm_and_save_fault_message = \ + mock.MagicMock() + metadata = dict() + powervc_utils.fill_metadata_dict_by_pvc_instance = \ + mock.MagicMock(return_value=metadata) + self.powervc_driver._service.\ 
+ _update_local_instance_by_pvc_created_instance = \ + mock.MagicMock() + resultServer = self.powervc_driver.spawn(context, + instance, + image_meta, + injected_files, + admin_password) + self.assertEquals(createFinished, + resultServer, + 'success') + + def test_spawn_instance_invalid_state_exception(self): + context = None + instance = self.fake_instance() + image_meta = self.fake_image_meta() + injected_files = None + admin_password = None + PowerVCDriver._check_defer_placement = \ + mock.MagicMock(return_value=False) + #mock database operation + db.flavor_get = mock.MagicMock() + PowerVCDriver._get_pvc_network_info = mock.MagicMock() + self.powervc_driver._service.validate_update_scg = \ + mock.MagicMock() + createdServer = Server('ERROR') + self.powervc_driver._service._manager.create = \ + mock.MagicMock(return_value=createdServer) + createFinished = Server(pvc_vm_states.ACTIVE) + self.powervc_driver._service._manager.get = \ + mock.MagicMock(return_value=createFinished) + self.powervc_driver._service.\ + _update_local_instance_by_pvc_created_instance = \ + mock.MagicMock() + self.powervc_driver._clean_vm_and_save_fault_message = \ + mock.MagicMock() + metadata = dict() + powervc_utils.fill_metadata_dict_by_pvc_instance = \ + mock.MagicMock(return_value=metadata) + self.powervc_driver._service.\ + _update_local_instance_by_pvc_created_instance = \ + mock.MagicMock() + self.assertRaises(exception.InstanceInvalidState, + self.powervc_driver.spawn, + context, + instance, + image_meta, + injected_files, + admin_password) + + def test_destroy_success(self): + instance = self.fake_instance_for_destroy() + context = None + network_info = None + self.powervc_driver._service._servers = mock.MagicMock() + server = Server(pvc_vm_states.ACTIVE) + self.powervc_driver._service.Server = \ + mock.MagicMock(return_value=server) + manager_get_server_from_instance = \ + Server(pvc_vm_states.ACTIVE) + manager_get_server_from_destroy_instance = \ + Server('DELETED') + setattr(manager_get_server_from_destroy_instance, + 'OS-EXT-STS:task_state', None) + self.powervc_driver._service._manager.get = \ + mock.MagicMock( + side_effect=[manager_get_server_from_instance, + manager_get_server_from_destroy_instance]) + self.powervc_driver._service._manager.delete = \ + mock.MagicMock() + self.powervc_driver._service._validate_response = \ + mock.MagicMock() + result = self.powervc_driver.destroy(context, + instance, network_info) + self.assertEqual(result, True, "delete success") + + def test_destroy_not_found_exception(self): + instance = self.fake_instance_for_destroy() + context = None + network_info = None + self.powervc_driver._service._servers = mock.MagicMock() + server = Server(pvc_vm_states.ACTIVE) + self.powervc_driver._service.Server = \ + mock.MagicMock(return_value=server) + self.powervc_driver._service._manager.get = \ + mock.MagicMock(side_effect=exceptions.NotFound('404')) + expr = self.powervc_driver.destroy(context, + instance, + network_info) + self.assertTrue(expr, "faild") + + def test_destroy_instanceTerminationFailure_exception(self): + instance = self.fake_instance_for_destroy() + context = None + network_info = None + self.powervc_driver._service._servers = mock.MagicMock() + server = Server(pvc_vm_states.ACTIVE) + self.powervc_driver._service.Server = \ + mock.MagicMock(return_value=server) + manager_get_server_from_instance = \ + Server(pvc_vm_states.ACTIVE) + manager_get_server_from_destroy_instance = Server('ACTIVE') + setattr(manager_get_server_from_destroy_instance, + 
+                'OS-EXT-STS:task_state', 'active')
+        self.powervc_driver._service._manager.get = \
+            mock.MagicMock(
+                side_effect=[manager_get_server_from_instance,
+                             manager_get_server_from_destroy_instance])
+        self.powervc_driver._service._manager.delete = mock.MagicMock()
+        self.powervc_driver._service._validate_response = mock.MagicMock()
+        self.assertRaises(exception.InstanceTerminationFailure,
+                          self.powervc_driver.destroy,
+                          context,
+                          instance,
+                          network_info)
+
+    def fake_image_meta(self):
+        image_meta = dict()
+        image_meta['deleted'] = False
+        image_meta['id'] = 'image_meta_id'
+        properties = dict()
+        properties['powervc_uuid'] = 'fake_pvc_uuid'
+        image_meta['properties'] = properties
+        return image_meta
+
+    def fake_instance(self):
+        instance = dict()
+        instance['instance_type_id'] = 'fake_instance_type_id'
+        instance['host'] = 'fake_host'
+        instance['uuid'] = 'fake_uuid'
+        instance['hostname'] = 'fake_host_name'
+        meta = dict()
+        meta[u'powervm:defer_placement'] = 'true'
+        meta['pvc_id'] = 'fake_pvc_id'
+        instance['metadata'] = meta
+        return instance
+
+    def fake_instance_for_destroy(self):
+        instance = dict()
+        instance['instance_type_id'] = 'fake_instance_type_id'
+        instance['host'] = 'fake_host'
+        instance['uuid'] = 'fake_uuid'
+        instance['hostname'] = 'fake_host_name'
+        meta = dict()
+        meta['key'] = 'pvc_id'
+        meta['value'] = 'pvc_key_value'
+        instance['metadata'] = [meta]
+        return instance
+
+
+class TestGetInstance(testtools.TestCase):
+    """This is the test fixture for PowerVCDriver.get_instance."""
+
+    def setUp(self):
+        """Prepare for this test fixture."""
+        super(TestGetInstance, self).setUp()
+        self.pvc_id = 123456789
+        # save before monkey patch
+        self.pvcdrv_init_copy = PowerVCDriver.__init__
+
+    def test_get_instance_found(self):
+        """When get_instance finds an instance."""
+        pvc_svc = mock.MagicMock()
+        pvc_svc.get_instance = mock.MagicMock(return_value="an instance")
+
+        def pvc_drv_init_instance_found(self):
+            """A fake init to replace PowerVCDriver.__init__."""
+            self._service = pvc_svc
+
+        # monkey patch
+        PowerVCDriver.__init__ = pvc_drv_init_instance_found
+        pvc_drv = PowerVCDriver()
+
+        self.assertIsNotNone(pvc_drv.get_instance(self.pvc_id))
+
+    def test_get_instance_not_found(self):
+        """When get_instance finds nothing."""
+        pvc_svc = mock.MagicMock()
+        pvc_svc.get_instance = \
+            mock.MagicMock(side_effect=exceptions.NotFound(0))
+
+        def pvc_drv_init_instance_not_found(self):
+            """A fake init to replace PowerVCDriver.__init__."""
+            self._service = pvc_svc
+
+        # monkey patch
+        PowerVCDriver.__init__ = pvc_drv_init_instance_not_found
+        pvc_drv = PowerVCDriver()
+
+        self.assertIsNone(pvc_drv.get_instance(self.pvc_id))
+
+    def tearDown(self):
+        """Clean up after this test fixture."""
+        super(TestGetInstance, self).tearDown()
+        # restore from monkey patch
+        PowerVCDriver.__init__ = self.pvcdrv_init_copy
+
+
+class TestGetInfo(testtools.TestCase):
+    """This is the test fixture for PowerVCDriver.get_info."""
+
+    def setUp(self):
+        """Prepare for this test fixture."""
+        super(TestGetInfo, self).setUp()
+        # fake data
+        self.os_instance = FakeOSInstance().os_instance
+        self.pvc_instance = FakePVCInstance()
+
+        # save before monkey patch
+        pvcdrv_init_copy = PowerVCDriver.__init__
+        # monkey patch
+        PowerVCDriver.__init__ = mock.MagicMock(return_value=None)
+        self.pvc_drv = PowerVCDriver()
+        # restore from monkey patch; no need to wait until tearDown
+        PowerVCDriver.__init__ = pvcdrv_init_copy
+
+    def test_get_info_success(self):
+        """When everything is fine in the main path."""
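+        # get_info should translate the PVC LPAR attributes into nova's
+        # info dict; the expected values below presumably mirror the
+        # FakePVCInstance fixture (power state 1, 8192 MB max / 2048 MB
+        # allocated memory, 2 CPUs, no accumulated CPU time).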
+        self.pvc_drv.get_instance = \
+            mock.MagicMock(return_value=self.pvc_instance)
+        self.assertEqual(self.pvc_drv.get_info(self.os_instance),
+                         {'state': 1,
+                          'max_mem': 8192,
+                          'mem': 2048,
+                          'num_cpu': 2,
+                          'cpu_time': 0})
+
+    def test_get_info_instance_not_found_0(self):
+        """When an exception occurs while fetching the PVC LPAR instance."""
+        self.pvc_drv.get_instance = \
+            mock.MagicMock(side_effect=exception.NotFound())
+        self.assertRaises(exception.NotFound,
+                          self.pvc_drv.get_info,
+                          self.os_instance)
+
+    def test_get_info_instance_not_found_1(self):
+        """When no PVC LPAR instance is found."""
+        self.pvc_drv.get_instance = mock.MagicMock(return_value=None)
+        self.assertRaises(exception.NotFound,
+                          self.pvc_drv.get_info,
+                          self.os_instance)
diff --git a/nova-powervc/test/nova/driver/virt/powervc/test_startvm.py b/nova-powervc/test/nova/driver/virt/powervc/test_startvm.py
new file mode 100644
index 0000000..976019d
--- /dev/null
+++ b/nova-powervc/test/nova/driver/virt/powervc/test_startvm.py
@@ -0,0 +1,61 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import unittest
+import mox
+from powervc.nova.driver.virt.powervc.service import PowerVCService
+from test.fake_pvc_instance import FakePVCInstance
+from test.fake_os_instance import FakeOSInstance
+from novaclient.v1_1 import servers
+
+
+class TestStartVM(unittest.TestCase):
+
+    def setUp(self):
+        """
+        Initialize the fake environment for the power-on test.
+        """
+        self.moxer = mox.Mox()
+        fake_instance = FakePVCInstance().pvc_instance
+
+        self.os_instance = FakeOSInstance().os_instance
+
+        def init(self, pvc_client=None):
+            self._client = None
+            self._api = None
+
+        PowerVCService.__init__ = init
+
+        self.service = PowerVCService()
+
+        self.manager = servers.ServerManager(self)
+
+        self.server = \
+            servers.Server(self.manager, fake_instance)
+
+    def tearDown(self):
+        pass
+
+    def runTest(self):
+
+        self.moxer.StubOutWithMock(self.service, "_get_server")
+        self.service._get_server(self.os_instance).AndReturn(self.server)
+
+        self.moxer.StubOutWithMock(self.service, "_get_pvcserver")
+        self.service._get_pvcserver(self.server).AndReturn(self.server)
+
+        self.moxer.ReplayAll()
+
+        self.service.power_on(self.os_instance)
+
+        print "Test should log VM is out of sync because status is 'ACTIVE'"
+
+        self.moxer.UnsetStubs()
+        self.moxer.VerifyAll()
diff --git a/nova-powervc/test/nova/driver/virt/powervc/test_stopvm.py b/nova-powervc/test/nova/driver/virt/powervc/test_stopvm.py
new file mode 100644
index 0000000..fabd828
--- /dev/null
+++ b/nova-powervc/test/nova/driver/virt/powervc/test_stopvm.py
@@ -0,0 +1,63 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import unittest
+import mox
+from powervc.nova.driver.virt.powervc.service import PowerVCService
+from test.fake_pvc_instance import FakePVCInstance
+from test.fake_os_instance import FakeOSInstance
+from novaclient.v1_1 import servers
+
+
+class TestStopVM(unittest.TestCase):
+
+    def setUp(self):
+        """
+        Initialize the fake environment for the power-off test.
+        """
+        self.moxer = mox.Mox()
+        fake_instance = FakePVCInstance().pvc_instance
+
+        fake_instance["status"] = "SHUTOFF"
+
+        self.os_instance = FakeOSInstance().os_instance
+        self.os_instance["status"] = "SHUTOFF"
+
+        def init(self, pvc_client=None):
+            self._client = None
+            self._api = None
+
+        PowerVCService.__init__ = init
+
+        self.service = PowerVCService()
+
+        self.manager = servers.ServerManager(self)
+
+        self.server = servers.Server(self.manager, fake_instance)
+
+    def tearDown(self):
+        pass
+
+    def runTest(self):
+
+        self.moxer.StubOutWithMock(self.service, "_get_server")
+        self.service._get_server(self.os_instance).AndReturn(self.server)
+
+        self.moxer.StubOutWithMock(self.service, "_get_pvcserver")
+        self.service._get_pvcserver(self.server).AndReturn(self.server)
+
+        self.moxer.ReplayAll()
+
+        self.service.power_off(self.os_instance)
+
+        print "Test should log VM is out of sync because status is 'SHUTOFF'"
+
+        self.moxer.UnsetStubs()
+        self.moxer.VerifyAll()
diff --git a/nova-powervc/test/sample_pvc_instance.json b/nova-powervc/test/sample_pvc_instance.json
new file mode 100644
index 0000000..6a6e12d
--- /dev/null
+++ b/nova-powervc/test/sample_pvc_instance.json
@@ -0,0 +1 @@
+{'OS-EXT-STS:task_state': None, 'addresses': {u'vlan_init': [{u'version': 4, u'addr': u'192.168.1.10', u'OS-EXT-IPS:type': u'fixed'}]}, 'links': [{u'href': u'http://localhost:8774/v2/def572107cf44a18b906ee1275f957af/servers/9db9bcc5-8dd8-4a1e-b8b8-255b000ac072', u'rel': u'self'}, {u'href': u'http://localhost:8774/def572107cf44a18b906ee1275f957af/servers/9db9bcc5-8dd8-4a1e-b8b8-255b000ac072', u'rel': u'bookmark'}], 'image': {u'id': u'd3fa393c-1ff9-445f-9f74-6dded467d5d6', u'links': [{u'href': u'http://localhost:8774/def572107cf44a18b906ee1275f957af/images/d3fa393c-1ff9-445f-9f74-6dded467d5d6', u'rel': u'bookmark'}]}, 'ephemeral_gb': 0, 'cpus': u'2', 'max_vcpus': u'1', 'manager': <novaclient.v1_1.servers.ServerManager object at 0x...>, 'OS-EXT-STS:vm_state': u'active', 'OS-EXT-SRV-ATTR:instance_name': u'deploy_sles-9db9bcc5-00000008', 'OS-EXT-SRV-ATTR:host': u'8205E6C_06774ET', 'flavor': {u'id': u'b35d475a-4ff2-47c8-91e9-4170a45f335e', u'links': [{u'href': u'http://localhost:8774/def572107cf44a18b906ee1275f957af/flavors/b35d475a-4ff2-47c8-91e9-4170a45f335e', u'rel': u'bookmark'}]}, 'id': u'9db9bcc5-8dd8-4a1e-b8b8-255b000ac072', 'security_groups': [{u'name': u'default'}], 'storage_connectivity_group_id': u'1249990d-8de4-402a-9228-a3e13a1e232c', 'dedicated_sharing_mode': u'', 'OS-DCF:diskConfig': u'MANUAL', 'vcpus': u'1', 'accessIPv4': u'', 'vcpu_mode': u'shared', 'desired_compatibility_mode': u'default', 'avail_priority': 127, 'shared_weight': 128, 'progress': 100, 'OS-EXT-STS:power_state': 1, 'OS-EXT-AZ:availability_zone': u'nova', 'launched_at': u'2014-01-16T06:34:44.164830', 'metadata': {}, 'status': u'ACTIVE', 'updated': u'2014-01-16T06:56:04Z', 'hostId': u'08e2fe36315c6ee246dc9b6bd34c18618c561da5ea5bb554f3566653', 'cpu_utilization': 0.00033686178565679417, 'memory_mode': u'dedicated', 'key_name': None, 'accessIPv6': u'', 'compliance_status': [{u'status': u'compliant', u'category': u'resource.allocation'}], 'current_compatibility_mode': u'POWER7', 'user_id': u'e1f5cf4760624e45b117fbc7adf5070a', 'min_memory_mb': 4096, 'root_gb': 5, 'OS-EXT-SRV-ATTR:hypervisor_hostname': u'8205E6C_06774ET', 'name': u'deploy_sles', 'uncapped': u'true', 'created': u'2014-01-16T06:32:50Z', 'tenant_id': u'def572107cf44a18b906ee1275f957af', 'min_vcpus': u'1', 'memory_mb': 4096, 'health_status': {u'health_value': u'CRITICAL', u'updated_at': u'2014-01-21T02:59:41.177198', u'value_reason': [{u'resource_related': u'os-hypervisor', u'display_name': u'8205E6C_06774ET', u'resource_property_key': u'hypervisor_state', u'resource_property_value': u'error', u'resource_id': 2}]}, 'max_memory_mb': 4096, 'max_cpus': u'2', 'min_cpus': u'2', '_info': {u'OS-EXT-STS:task_state': None, u'addresses': {u'vlan_init': [{u'version': 4, u'addr': u'192.168.1.10', u'OS-EXT-IPS:type': u'fixed'}]}, u'links': [{u'href': u'http://localhost:8774/v2/def572107cf44a18b906ee1275f957af/servers/9db9bcc5-8dd8-4a1e-b8b8-255b000ac072', u'rel': u'self'}, {u'href': u'http://localhost:8774/def572107cf44a18b906ee1275f957af/servers/9db9bcc5-8dd8-4a1e-b8b8-255b000ac072', u'rel': u'bookmark'}], u'image': {u'id': u'd3fa393c-1ff9-445f-9f74-6dded467d5d6', u'links': [{u'href': u'http://localhost:8774/def572107cf44a18b906ee1275f957af/images/d3fa393c-1ff9-445f-9f74-6dded467d5d6', u'rel': u'bookmark'}]}, u'ephemeral_gb': 0, u'cpus': u'2', u'max_vcpus': u'1', u'OS-EXT-STS:vm_state': u'active', u'OS-EXT-SRV-ATTR:instance_name': u'deploy_sles-9db9bcc5-00000008', u'cpu_utilization': 0.00033686178565679417, u'flavor': {u'id': u'b35d475a-4ff2-47c8-91e9-4170a45f335e', u'links': [{u'href': u'http://localhost:8774/def572107cf44a18b906ee1275f957af/flavors/b35d475a-4ff2-47c8-91e9-4170a45f335e', u'rel': u'bookmark'}]}, u'id': u'9db9bcc5-8dd8-4a1e-b8b8-255b000ac072', u'security_groups': [{u'name': u'default'}], u'storage_connectivity_group_id': u'1249990d-8de4-402a-9228-a3e13a1e232c', u'dedicated_sharing_mode': u'', u'OS-DCF:diskConfig': u'MANUAL', u'health_status': {u'health_value': u'CRITICAL', u'updated_at': u'2014-01-21T02:59:41.177198', u'value_reason': [{u'resource_related': u'os-hypervisor', u'display_name': u'8205E6C_06774ET', u'resource_property_key': u'hypervisor_state', u'resource_property_value': u'error', u'resource_id': 2}]}, u'accessIPv4': u'', u'vcpu_mode': u'shared', u'desired_compatibility_mode': u'default', u'avail_priority': 127, u'shared_weight': 128, u'progress': 100, u'OS-EXT-STS:power_state': 1, u'OS-EXT-AZ:availability_zone': u'nova', u'launched_at': u'2014-01-16T06:34:44.164830', u'metadata': {}, u'status': u'ACTIVE', u'updated': u'2014-01-16T06:56:04Z', u'hostId': u'08e2fe36315c6ee246dc9b6bd34c18618c561da5ea5bb554f3566653', u'OS-EXT-SRV-ATTR:host': u'8205E6C_06774ET', u'memory_mode': u'dedicated', u'key_name': None, u'accessIPv6': u'', u'compliance_status': [{u'status': u'compliant', u'category': u'resource.allocation'}], u'current_compatibility_mode': u'POWER7', u'user_id': u'e1f5cf4760624e45b117fbc7adf5070a', u'min_memory_mb': 4096, u'root_gb': 5, u'OS-EXT-SRV-ATTR:hypervisor_hostname': u'8205E6C_06774ET', u'name': u'deploy_sles', u'uncapped': u'true', u'created': u'2014-01-16T06:32:50Z', u'tenant_id': u'def572107cf44a18b906ee1275f957af', u'min_vcpus': u'1', u'memory_mb': 4096, u'vcpus': u'1', u'max_memory_mb': 4096, u'max_cpus': u'2', u'min_cpus': u'2'}, '_loaded': True}
diff --git a/nova-powervc/test/test_utils.py b/nova-powervc/test/test_utils.py
new file mode 100644
index 0000000..e23aed8
--- /dev/null
+++ b/nova-powervc/test/test_utils.py
@@ -0,0 +1,57 @@
+COPYRIGHT = """
+*************************************************************
+Licensed Materials - Property of IBM
+
+OCO Source Materials
+
+(C) Copyright IBM Corp. 2013 All Rights Reserved
+*************************************************************
+"""
+
+import testtools
+import powervc.utils as utils_to_test
+
+
+class UtilsTest(testtools.TestCase):
+    """
+    Test cases for the metadata helpers in
+    powervc/utils.py.
+    """
+
+    def setUp(self):
+        super(UtilsTest, self).setUp()
+
+    def test_get_pvc_id_from_list_type_1(self):
+        pvc_id_expected = '40e2d7c9-b510-4e10-8986-057800117714'
+        metadata = [
+            {'key': 'powervm:defer_placement', 'value': 'true'},
+            {'key': 'pvc_id', 'value': pvc_id_expected}
+        ]
+
+        pvc_id = utils_to_test.get_pvc_id_from_metadata(metadata)
+        self.assertEqual(pvc_id_expected, pvc_id,
+                         'pvc_id should match for list type 1')
+
+    def test_get_pvc_id_from_list_type_2(self):
+        pvc_id_expected = '40e2d7c9-b510-4e10-8986-057800117714'
+        metadata = [{
+            "powervm:health_status.health_value": "OK",
+            "pvc_id": pvc_id_expected
+        }]
+
+        pvc_id = utils_to_test.get_pvc_id_from_metadata(metadata)
+        self.assertEqual(pvc_id_expected, pvc_id,
+                         'pvc_id should match for list type 2')
+
+    def test_get_pvc_id_from_dict_type(self):
+        pvc_id_expected = '40e2d7c9-b510-4e10-8986-057800117714'
+        metadata = {
+            "powervm:health_status.health_value": "OK",
+            "pvc_id": pvc_id_expected,
+            "powervm:defer_placement": "False",
+            "powervm:max_cpus": "1"
+        }
+
+        pvc_id = utils_to_test.get_pvc_id_from_metadata(metadata)
+        self.assertEqual(pvc_id_expected, pvc_id,
+                         'pvc_id should match for dict metadata')