Initial code base commit.

Change-Id: Id1e336028fa662ddee865841ac7b6c31a316f854
Closes-Bug: #1317383
Le Tian Ren 2014-05-08 17:01:18 +08:00
parent ab7092bf77
commit 5e1c266859
163 changed files with 26406 additions and 0 deletions

cinder-powervc/.project Normal file
View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>cinder-powervc</name>
<comment></comment>
<projects>
<project>cinder</project>
<project>cinder-client</project>
<project>common-powervc</project>
<project>oslo</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/cinder-powervc</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>

View File

@ -0,0 +1,63 @@
#!/usr/bin/env python
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""Starter script for the PowerVC cinder-volume Service."""
import os
import sys
import eventlet
import traceback
# If ../powervc/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python.
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(
os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
from cinder.openstack.common import gettextutils
# TODO RYKAL
# This should go in the base __init__ folder I think
gettextutils.install('cinder')
from cinder import utils
from cinder.openstack.common import log as logging
from cinder.openstack.common import service
from cinder.common import config as cinder_config
from powervc.common import config
# NOTE: parse config before import manager
config.parse_power_config(sys.argv, 'cinder')
from powervc.volume.manager import manager
eventlet.patcher.monkey_patch(os=False, socket=True, time=True)
logging.setup('powervc')
LOG = logging.getLogger(__name__)
if __name__ == '__main__':
try:
logging.setup('powervc')
utils.monkey_patch()
LOG.info(_('Launching PowerVC Driver StorageManager service...'))
launcher = service.ServiceLauncher()
launcher.launch_service(manager.PowerVCCinderManager())
launcher.wait()
LOG.info(_('PowerVC Driver StorageManager service ended'))
except Exception:
traceback.print_exc()
raise

View File

@ -0,0 +1,103 @@
#!/bin/sh
#
# openstack-cinder-powervc OpenStack PowerVC Cinder Manager
#
# chkconfig: - 98 02
# description: Provides PowerVC manage-to support.
### BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Stop: 0 1 6
# Short-Description: OpenStack PowerVC Cinder Manager
# Description:
### END INIT INFO
. /etc/rc.d/init.d/functions
suffix=powervc
prog=openstack-cinder-powervc
exec="/opt/ibm/openstack/powervc-driver/bin/cinder-$suffix"
config="/etc/powervc/powervc.conf"
cinderconf="/etc/cinder/cinder.conf"
pidfile="/var/run/$suffix/cinder-$suffix.pid"
logfile="/var/log/$suffix/cinder-$suffix.log"
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
start() {
[ -x $exec ] || exit 5
[ -f $config ] || exit 6
echo -n $"Starting $prog: "
daemon --user powervc --pidfile $pidfile "$exec --config-file $config --config-file $cinderconf --logfile $logfile &>/dev/null & echo \$! > $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,310 @@
from __future__ import absolute_import
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import logging
import sys
from cinder import exception
from cinder.openstack.common import log as cinderLogging
from cinder.volume.driver import VolumeDriver
from cinderclient.exceptions import NotFound
from oslo.config import cfg
from powervc.common import config
from powervc.common import constants as common_constants
from powervc.common.gettextutils import _
from powervc.volume.manager import constants
from powervc.volume.driver import service
volume_driver_opts = [
# Ignore delete errors so an exception is not thrown during a
# delete. When set to true, this allows the volume to be deleted
# on the hosting OS even if an exception occurs. When set to false,
# exceptions during delete prevent the volume from being deleted
# on the hosting OS.
cfg.BoolOpt('volume_driver_ignore_delete_error', default=False)
]
CONF = config.CONF
CONF.register_opts(volume_driver_opts, group='powervc')
LOG = cinderLogging.getLogger(__name__)
def _load_power_config(argv):
"""
Loads the powervc config.
"""
# Cinder is typically started with the --config-file option.
# This prevents the default config files from loading since
# the oslo config code will only load the
# config files specified on the command line.
# If cinder is started with the
# --config-file option then append our powervc.conf file to
# the command line so it gets loaded as well.
for arg in argv:
if arg == '--config-file' or arg.startswith('--config-file='):
argv[len(argv):] = ["--config-file"] + \
[cfg.find_config_files(project='powervc',
prog='powervc')[0]]
break
config.parse_power_config(argv, 'cinder')
_load_power_config(sys.argv)
# The powervc config must be loaded before importing the client
# factory when this driver is loaded via import utils
from powervc.common.client import factory
class PowerVCDriver(VolumeDriver):
"""
Implements the cinder volume driver for PowerVC
"""
def __init__(self, *args, **kwargs):
CONF.log_opt_values(LOG, logging.INFO)
self._service = service.PowerVCService()
if not service.PowerVCService._client:
service.PowerVCService._client = factory.POWERVC.new_client(str(
common_constants.SERVICE_TYPES.volume))
def check_for_setup_error(self):
"""
Checks for setup errors. Nothing to do for powervc.
"""
pass
def initialize_connection(self, volume, connector):
"""
Allow connection to connector and return connection info.
In the PowerVC cinder driver, it does not need to be implemented.
"""
LOG.debug("Enter - initialize_connection")
return {'driver_volume_type': '', 'data': {}}
LOG.debug("Exit - initialize_connection")
def validate_connector(self, connector):
"""
Fail if connector doesn't contain all the data needed by driver.
In the PowerVC cinder driver, it does not need to be implemented.
"""
return True
def terminate_connection(self, volume_ref, connector, force):
"""Do nothing since connection is not used"""
pass
def create_export(self, context, volume):
"""
Exports the volume. Nothing to do for powervc
"""
pass
def accept_transfer(self, context, volume_ref, new_user, new_project):
"""
Accept a volume that has been offered for transfer.
Nothing to do for powervc
"""
pass
def create_cloned_volume(self, volume_ref, srcvol_ref):
"""
Clone a volume from an existing volume.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def copy_image_to_volume(self, context, volume_ref, image_service,
image_id):
"""
Copy a glance image to a volume.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def copy_volume_to_image(self, context, volume, image_service, image_meta):
"""
Upload an existing volume into powervc as a glance image.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def create_snapshot(self, snapshot_ref):
"""
Create a snapshot.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def delete_snapshot(self, snapshot_ref):
"""
Delete a snapshot.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def create_volume_from_snapshot(self, volume, snapshot_ref):
"""
Create a volume from the snapshot.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def extend_volume(self, volume, new_size):
"""
Extend a volume size.
Currently not supported by powervc.
Add stub to pass tempest.
"""
pass
def create_volume(self, volume):
"""
Creates a volume with the specified volume attributes
:returns: a dictionary of updates to the volume db, for example
adding metadata
"""
LOG.info(_("Creating volume with volume: %s."), volume)
size = getattr(volume, 'size', None)
display_name = getattr(volume, 'display_name', None)
display_description = getattr(volume, 'display_description', None)
volume_type_obj = getattr(volume, 'volume_type', None)
metadatas = getattr(volume, 'volume_metadata', None)
meta = {}
if metadatas:
# Use map() to get a list of 'key', 'value' tuple
# dict() can convert a list of tuple to dict obj
meta = dict(map(lambda m: (getattr(m, 'key'),
getattr(m, 'value')), metadatas))
if (size is None):
raise exception.InvalidVolume(reason='size is None')
LOG.info(_("Creating volume %s of size %sG."),
self._get_vol_name(volume),
size)
volume_data_updates = self._service.create_volume(
local_volume_id=volume.id,
size=size,
display_name=display_name,
display_description=display_description,
metadata=meta,
volume_type=getattr(volume_type_obj, 'id',
None))
return volume_data_updates
def delete_volume(self, volume):
"""
Deletes the specified volume from powervc
"""
try:
LOG.info(_("Deleting volume %s."), self._get_vol_name(volume))
pvc_volume_id = None
for metaDataItem in volume.volume_metadata:
if metaDataItem.key == constants.LOCAL_PVC_PREFIX + 'id':
pvc_volume_id = metaDataItem.value
break
if pvc_volume_id is not None:
self._service.delete_volume(pvc_volume_id)
else:
LOG.warning(_("Volume metadata does not "
"contain a powervc volume identifier."))
except NotFound:
LOG.debug(_("Volume id %s was already deleted on powervc"),
pvc_volume_id)
LOG.info(_("Volume %s deleted."), self._get_vol_name(volume))
except Exception as e:
if CONF.powervc.volume_driver_ignore_delete_error:
LOG.error(_("Volume %s deleted, however the following "
"error occurred "
"which prevented the backing volume in PowerVC "
"from being deleted: %s"),
self._get_vol_name(volume),
str(e))
else:
raise
def ensure_export(self, context, volume):
"""
Makes sure the volume is exported. Nothing to do for powervc
"""
pass
def remove_export(self, context, volume):
"""
Removes the export. Nothing to do for powervc
"""
pass
def get_volume_stats(self, refresh=False):
"""
Gets the volume statistics for this driver. Cinder periodically calls
this to get the latest volume stats. The stats are stored in the
instance attribute called _stats
"""
if refresh:
self._update_volume_status()
return self._stats
def _update_volume_status(self):
"""
Retrieve volumes stats info from powervc.
For now just make something up
"""
LOG.debug(_("Getting volume stats from powervc"))
# get accessible storage providers list
sp_list = self._list_storage_providers()
free_capacity_gb = 0
total_capacity_gb = 0
for sp in sp_list:
free_capacity_gb += getattr(sp, 'free_capacity_gb', 0)
total_capacity_gb += getattr(sp, 'total_capacity_gb', 0)
data = {}
data["volume_backend_name"] = constants.POWERVC_VOLUME_BACKEND
data["vendor_name"] = 'IBM'
data["driver_version"] = 1.0
data["storage_protocol"] = 'Openstack'
data['total_capacity_gb'] = total_capacity_gb
data['free_capacity_gb'] = free_capacity_gb
data['reserved_percentage'] = 0
data['QoS_support'] = False
self._stats = data
LOG.debug(self._stats)
def _list_storage_providers(self):
return self._service.list_storage_providers()
def _get_vol_name(self, volume):
"""
Returns the name of the volume or its id
"""
name = getattr(volume, 'display_name', None)
if name:
return name
else:
return volume.id

View File

@ -0,0 +1,280 @@
from __future__ import absolute_import
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import httplib
from cinderclient import exceptions
from cinder.openstack.common import log as logging
from powervc.common import constants as common_constants
from powervc.common.gettextutils import _
from powervc.volume.manager import constants
from cinder import exception
from cinder import db
from cinder import context
from cinder.openstack.common import loopingcall
LOG = logging.getLogger(__name__)
class PowerVCService(object):
"""A service that exposes PowerVC functionality.
The services provided here are called by the driver.
The services leverage the cinder client to interface with PowerVC.
This design keeps the driver and client interface clean and simple
and provides a workspace for any data manipulation and utility work
that may need to be done.
"""
_client = None
def __init__(self, pvc_client=None):
"""Initializer."""
from powervc.common.client import factory
if(PowerVCService._client is None):
PowerVCService._client = \
factory.POWERVC.new_client(
str(common_constants.SERVICE_TYPES.volume))
# Add version checking as required
def create_volume(self, local_volume_id, size, snapshot_id=None,
source_volid=None,
display_name=None, display_description=None,
volume_type=None, user_id=None,
project_id=None, availability_zone=None,
metadata=None, imageRef=None):
"""
Creates a volume on powervc
"""
# Use the standard cinderclient to create volume
# TODO Do not pass metadata to PowerVC currently as we don't
# know if this has a conflict with PowerVC design.
pvc_volume = PowerVCService._client.volumes.create(size,
snapshot_id,
source_volid,
display_name,
display_description,
volume_type,
user_id,
project_id,
availability_zone,
{},
imageRef)
# update powervc uuid to db immediately to avoid duplicated
# synchronization
additional_volume_data = {}
additional_volume_data['metadata'] = metadata
additional_volume_data['metadata'][constants.LOCAL_PVC_PREFIX + 'id'] \
= pvc_volume.id
db.volume_update(context.get_admin_context(),
local_volume_id,
additional_volume_data)
LOG.info(_("Volume %s start to create with PVC UUID: %s"),
local_volume_id, pvc_volume.id)
temp_status = getattr(pvc_volume, 'status', None)
if temp_status == constants.STATUS_CREATING:
LOG.debug(_(
'Waiting until the created volume status is available or error'))
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_state_change, pvc_volume.id,
getattr(pvc_volume, 'status', None),
constants.STATUS_AVAILABLE,
constants.STATUS_CREATING)
try:
timer.start(interval=10).wait()
# set status to available
additional_volume_data['status'] = \
constants.STATUS_AVAILABLE
except:
latest_pvc_volume = PowerVCService._client.volumes.get(
pvc_volume.id)
additional_volume_data['status'] = getattr(latest_pvc_volume,
'status', '')
else:
LOG.debug(_('Not in creating status; using the PowerVC status as-is'))
additional_volume_data['status'] = temp_status
# return updated volume status information
return additional_volume_data
def _wait_for_state_change(self, volume_id, original_state, expected_state,
middle_state):
"""
Utility method to wait for a volume to change to the
expected state.
An operation may pass through three states: the original
state, a middle (transitional) state and the expected state.
If the operation has no middle state, the original state
can be passed as the middle state.
"""
volume = None
try:
volume = PowerVCService._client.volumes.get(volume_id)
except exceptions.NotFound:
raise exception.VolumeNotFound('volume not found: %s' %
volume_id)
if volume.status == expected_state:
LOG.debug(
"Operation %(vm_id)s successfully, " +
"status changed to %(state)s"
% {'vm_id': volume.id, 'state': expected_state})
raise loopingcall.LoopingCallDone()
if (volume.status != original_state and
volume.status != expected_state and
volume.status != middle_state):
raise exception.InvalidVolume()
def delete_volume(self, pvc_volume_id):
"""
Deletes the specified powervc volume id from powervc
"""
LOG.debug(_("Deleting pvc volume: %s"), pvc_volume_id)
if not pvc_volume_id:
raise AttributeError(_("Powervc volume identifier must be "
"specified"))
existed_pvc_volume = None
try:
existed_pvc_volume = PowerVCService._client.volumes.get(
pvc_volume_id)
except exceptions.NotFound:
LOG.critical(_("pvc: %s no longer existed in powervc, ignore"),
pvc_volume_id)
raise
temp_status = getattr(existed_pvc_volume, 'status', None)
if temp_status == constants.STATUS_DELETING:
# Volume in deleting status, do not perform delete operation
# again
LOG.warning(
_("pvc: %s is deleting in powervc, wait for status"),
pvc_volume_id)
else:
# Volume is available for deletion; perform the delete operation
PowerVCService._client.volumes.delete(pvc_volume_id)
LOG.debug(_(
'Waiting until the volume is deleted or its status is error'))
timer = loopingcall.FixedIntervalLoopingCall(
self._wait_for_state_change, existed_pvc_volume.id,
getattr(existed_pvc_volume, 'status', None),
'',
constants.STATUS_DELETING)
try:
timer.start(interval=10).wait()
except exception.VolumeNotFound:
# deleted complete
LOG.info(_("pvc: %s deleted successfully"),
pvc_volume_id)
except exception.InvalidVolume:
LOG.critical(_("pvc: %s deleted failed, "),
pvc_volume_id)
# when delete failed raise exception
raise exception.CinderException(
_('Volume deletion failed for id: %s') %
pvc_volume_id)
def _validate_response(self, response):
"""
Validates an HTTP response to a REST API request made by this service.
The method simply returns if the HTTP status code indicates success
(i.e. between 200 and 300).
For any other status, this method raises an exception.
Note: Appropriate exceptions to be added...
Nova client throws an exception for 404
:param response: the HTTP response to validate
"""
if response is None:
return
httpResponse = response[0]
# Any non-successful response >399 is an error
if httpResponse.status_code >= httplib.BAD_REQUEST:
LOG.critical(_("Service: got this response: %s")
% httpResponse)
LOG.debug("Service: got this response: %s"
% httpResponse)
raise exceptions.BadRequest(httpResponse)
def list_volume_types(self):
return PowerVCService._client.volume_types.list()
def get_volume_type(self, vol_type_id):
return PowerVCService._client.volume_types.get(vol_type_id)
def get_volume_type_by_name(self, volume_type_name):
pvc_volume_type = None
if volume_type_name is None or PowerVCService._client is None:
return pvc_volume_type
pvc_volume_type_list = self.list_volume_types()
if pvc_volume_type_list is None:
return volume_type_name
for volume_type in pvc_volume_type_list:
if volume_type_name == volume_type._info["name"]:
pvc_volume_type = volume_type
break
return pvc_volume_type
def get_volumes(self):
pvc_volumes = None
if PowerVCService._client is None:
return pvc_volumes
pvc_volumes = PowerVCService._client.volumes.list()
return pvc_volumes
def get_volume_by_name(self, display_name):
pvc_volume = None
if display_name is None or PowerVCService._client is None:
return pvc_volume
pvc_volume_list = self.get_volumes()
if pvc_volume_list is None:
return pvc_volume
for volume in pvc_volume_list:
if display_name == volume._info["display_name"]:
pvc_volume = volume
break
return pvc_volume
def get_volume_by_id(self, volume_id):
pvc_volume = None
if volume_id is None or PowerVCService._client is None:
return pvc_volume
try:
pvc_volume = PowerVCService._client.volumes.get(volume_id)
except exceptions.NotFound:
LOG.debug("get_volume_by_id volume %s not found"
% volume_id)
pvc_volume = None
return pvc_volume
def list_storage_providers(self):
return PowerVCService._client.storage_providers.list()
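
The create and delete paths above both poll PowerVC through cinder's bundled loopingcall helper: _wait_for_state_change is invoked on a fixed interval until it raises LoopingCallDone (success) or an exception such as VolumeNotFound or InvalidVolume. Below is a minimal, self-contained sketch of that polling pattern; time.sleep stands in for FixedIntervalLoopingCall, and the wait_for_state_change helper, the max_tries limit and the fake status sequence are illustrative assumptions, not part of this module.

import time


def wait_for_state_change(get_status, expected_state, middle_state,
                          original_state, interval=10, max_tries=30):
    """Poll get_status() until the expected state is reached.

    Mirrors PowerVCService._wait_for_state_change: any status other
    than the original, middle or expected state is treated as an error.
    """
    for _ in range(max_tries):
        status = get_status()
        if status == expected_state:
            return status
        if status not in (original_state, middle_state, expected_state):
            raise RuntimeError('volume entered unexpected state: %s' % status)
        time.sleep(interval)
    raise RuntimeError('timed out waiting for state: %s' % expected_state)


# Illustrative usage with a fake status sequence (creating -> available).
_states = iter(['creating', 'creating', 'available'])
print(wait_for_state_change(lambda: next(_states),
                            expected_state='available',
                            middle_state='creating',
                            original_state='creating',
                            interval=0))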

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,61 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
All constants.
"""
# Instance metadata keys that will store pvc related info
# in the local nova DB.
PVC_TENANT = "pvc_tenant" # project in pvc
PVC_SCG = "pvc_scg" # pvc storage connection group
PVC_ID = "pvc_id" # pvc instance uuid
PPC64 = "ppc64" # Found on the wiki
# The default image for pvc instance if no match found.
DEFAULT_IMG = "SCE Default Image"
DEFAULT_SCG = "storage connection group"
# Suffix to append to sync event notifications
SYNC_EVENT_SUFFIX = 'sync'
LOCAL_PVC_VOLUME_TYPE_PREFIX = 'pvc:'
LOCAL_PVC_PREFIX = 'pvc:'
# The composite PowerVC storage backend
POWERVC_VOLUME_BACKEND = 'powervc'
# PowerVC volume & volume type notification events that we listen for
EVENT_VOLUME_TYPE_CREATE = 'volume_type.create'
EVENT_VOLUME_TYPE_DELETE = 'volume_type.delete'
EVENT_VOLUME_TYPE_EXTRA_SPECS_CREATE = 'volume_type_extra_specs.create'
EVENT_VOLUME_TYPE_EXTRA_SPECS_UPDATE = 'volume_type_extra_specs.update'
EVENT_VOLUME_TYPE_EXTRA_SPECS_DELETE = 'volume_type_extra_specs.delete'
EVENT_VOLUME_CREATE_START = 'volume.create.start'
EVENT_VOLUME_CREATE_END = 'volume.create.end'
EVENT_VOLUME_DELETE_START = 'volume.delete.start'
EVENT_VOLUME_DELETE_END = 'volume.delete.end'
EVENT_VOLUME_UPDATE = 'volume.update'
EVENT_VOLUME_ATTACH_START = 'volume.attach.start'
EVENT_VOLUME_ATTACH_END = 'volume.attach.end'
EVENT_VOLUME_DETACH_START = 'volume.detach.start'
EVENT_VOLUME_DETACH_END = 'volume.detach.end'
EVENT_VOLUME_IMPORT_START = 'volume.import.start'
EVENT_VOLUME_IMPORT_END = 'volume.import.end'
# PowerVC volume operation status
STATUS_AVAILABLE = 'available'
STATUS_ERROR = 'error'
STATUS_CREATING = 'creating'
STATUS_DELETING = 'deleting'
# Multi-backend configuration option for PowerVCDriver
BACKEND_POWERVCDRIVER = "powervcdriver"

File diff suppressed because it is too large

cinder-powervc/run_tests.sh Executable file
View File

@ -0,0 +1,192 @@
#!/bin/bash
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run PowerVC Cinder's test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run flake8"
echo " -8, --8 Just run flake8, don't show PEP8 text for each error"
echo " -P, --no-pep8 Don't run flake8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo " --standard-threads Don't do the eventlet threading monkeypatch."
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_option {
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_flake8=1;;
-8|--8) short_flake8=1;;
-P|--no-pep8) no_flake8=1;;
-c|--coverage) coverage=1;;
--standard-threads)
export STANDARD_THREADS=1
;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
venv=.venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
noseargs=
noseopts=
wrapper=""
just_flake8=0
short_flake8=0
no_flake8=0
coverage=0
recreate_db=1
update=0
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=cinder-powervc"
fi
function run_tests {
# Just run the test suites in current environment
${wrapper} $NOSETESTS
# If we get some short import error right away, print the error log directly
RESULT=$?
if [ "$RESULT" -ne "0" ];
then
ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
if [ "$ERRSIZE" -lt "40" ];
then
cat run_tests.log
fi
fi
return $RESULT
}
function run_flake8 {
FLAGS=--show-pep8
if [ $# -gt 0 ] && [ 'short' == ''$1 ]
then
FLAGS=''
fi
echo "Running flake8 ..."
# Just run flake8 in current environment
#echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt
${wrapper} flake8 $FLAGS powervc | tee pep8.txt
RESULT=${PIPESTATUS[0]}
return $RESULT
}
NOSETESTS="nosetests $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_flake8 -eq 1 ]; then
run_flake8
RESULT=$?
echo "RESULT $RESULT"
exit $RESULT
fi
if [ $short_flake8 -eq 1 ]; then
run_flake8 short
RESULT=$?
exit $RESULT
fi
run_tests
RESULT=$?
# NOTE(sirp): we only want to run flake8 when we're running the full-test
# suite, not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and arguments
# (noseargs).
if [ -z "$noseargs" ]; then
if [ $no_flake8 -eq 0 ]; then
run_flake8
TMP_RESULT=$?
RESULT=$(($TMP_RESULT + $RESULT))
fi
fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
${wrapper} coverage html -d covhtml -i
fi
exit $RESULT

View File

View File

@ -0,0 +1,30 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
Simple cinder client tests
TODO: Convert to pyunit and use config file
"""
import powervc.common.constants as constants
from powervc.common import config
config.parse_power_config((), 'powervc')
import powervc.common.client.factory as clients
cinder_client = clients.POWERVC.new_client(str(constants.SERVICE_TYPES.volume))
print '=' * 10, 'Listing volumes', '=' * 10
vol_list = cinder_client.volumes.list()
for vol in vol_list:
print str(vol.display_name), str(vol.display_description), \
vol.id

View File

@ -0,0 +1,51 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
The class FakeVolumeType is used to produce the fake
data of the OpenStack cinder volume type
"""
import datetime
class FakeVolumeType():
volume_type = dict()
items = {
'created_at',
'updated_at',
'deleted_at',
'deleted',
'id',
'name',
'extra_specs'
}
def __init__(self):
self.volume_type['id'] = "18b28659-966d-4913-bdda-2ca3cc68fb59"
self.volume_type['created_at'] = \
datetime.datetime(2013, 8, 12, 5, 59, 25)
self.volume_type['updated_at'] = \
datetime.datetime(2013, 8, 12, 5, 59, 25)
self.volume_type['deleted_at'] = None
self.volume_type['deleted'] = False
self.volume_type['name'] = "mengxd-01"
self.volume_type['extra_specs'] = {
'drivers:rsize': '2',
'drivers:storage_pool': 'r3-c3-ch1-jhusta',
'capabilities:volume_backend_name': 'shared_v7000_1'
}
def update(self, **update):
self.volume_type.update(**update)

View File

View File

@ -0,0 +1,225 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
# mock module
import mock
import sys
import stubout
import unittest
sys.modules['powervc.common.client'] = mock.MagicMock()
# import _
from cinder.openstack.common import gettextutils
gettextutils.install('cinder')
from powervc.common import config
from cinder import exception
from cinder import db
from powervc.volume.driver.service import PowerVCService
import six
class StorageProvider():
def __init__(self, i):
self.free_capacity_gb = (i + 1) * 5
self.total_capacity_gb = (i + 1) * 10
class VolumeMetadataWithPVCID():
def __init__(self, pvc_id="1234"):
self.key = "pvc:id"
self.value = pvc_id
class Volume():
def __init__(self, info):
self._info = info
self._add_details(info)
def setattr(self, key, val):
self.__setattr__(key, val)
def _add_details(self, info):
for (k, v) in six.iteritems(info):
try:
setattr(self, k, v)
except AttributeError:
# In this case we already defined the attribute on the class
pass
class PowerVCDriverTestCase(unittest.TestCase):
stubs = stubout.StubOutForTesting()
def setUp(self):
super(PowerVCDriverTestCase, self).setUp()
self.stubs.Set(PowerVCService, '_client', mock.MagicMock())
# We need to mock config file loading before importing the PowerVCDriver class
config.parse_power_config = mock.MagicMock()
config.CONF.log_opt_values = mock.MagicMock()
from powervc.volume.driver.powervc import PowerVCDriver
self.powervc_cinder_driver = PowerVCDriver()
def test_create_volume_no_size_raise_exception(self):
self.assertRaises(exception.InvalidVolume,
self.powervc_cinder_driver.create_volume,
None)
def test_create_volume_succeed(self):
# local volume passed to driver
vol = {'id': 1234,
'size': 1}
volume = Volume(vol)
# fake volume after call creating volume from pvc
ret_vol_after_created = {'id': 4321,
'status': 'creating'}
ret_volume_after_created = Volume(ret_vol_after_created)
# fake volume after call get volume from pvc
ret_vol_get = {'id': 4321,
'status': 'available'}
ret_volume_get = Volume(ret_vol_get)
# mock create volume restAPI
PowerVCService._client.volumes.create = \
mock.MagicMock(return_value=ret_volume_after_created)
# mock get volume restAPI
PowerVCService._client.volumes.get = \
mock.MagicMock(return_value=ret_volume_get)
# mock db access operation
db.volume_update = mock.MagicMock(return_value=None)
dic = self.powervc_cinder_driver.create_volume(volume)
self.assertEqual({'status': 'available',
'metadata': {'pvc:id': 4321}},
dic, "return vol doesn't match")
def test_create_volume_failed(self):
# local volume passed to driver
vol = {'id': 1234,
'size': 1}
volume = Volume(vol)
# fake volume after call creating volume from pvc
ret_vol_after_created = {'id': 4321,
'status': 'creating'}
ret_volume_after_created = Volume(ret_vol_after_created)
# fake volume after call get volume from pvc
ret_vol_get = {'id': 4321,
'status': 'error'}
ret_volume_get = Volume(ret_vol_get)
# mock create volume restAPI
PowerVCService._client.volumes.create = \
mock.MagicMock(return_value=ret_volume_after_created)
# mock get volume restAPI
PowerVCService._client.volumes.get = \
mock.MagicMock(return_value=ret_volume_get)
# mock db access operation
db.volume_update = mock.MagicMock(return_value=None)
dic = self.powervc_cinder_driver.create_volume(volume)
self.assertEqual({'status': 'error',
'metadata': {'pvc:id': 4321}},
dic, "return vol doesn't match")
def test_create_volume_not_found(self):
# local volume passed to driver
vol = {'id': 1234,
'size': 1}
volume = Volume(vol)
# fake volume after call creating volume from pvc
ret_vol_after_created = {'id': 4321,
'status': 'creating'}
ret_volume_after_created = Volume(ret_vol_after_created)
# fake volume after call get volume from pvc
ret_vol_get = {'id': 4321,
'status': 'error'}
ret_volume_get = Volume(ret_vol_get)
# mock create volume restAPI
PowerVCService._client.volumes.create = \
mock.MagicMock(return_value=ret_volume_after_created)
# mock get volume restAPI
# first time raises an exception,
# second time returns an error volume
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=[exception.NotFound,
ret_volume_get])
# mock db access operation
db.volume_update = mock.MagicMock(return_value=None)
dic = self.powervc_cinder_driver.create_volume(volume)
self.assertEqual({'status': 'error',
'metadata': {'pvc:id': 4321}},
dic, "return vol doesn't match")
def test_delete_volume_success(self):
#fake volume which will be passed to driver service
vol_info = {'id': 1234,
'size': 1}
volume = Volume(vol_info)
setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")])
#fake existed volume
existed_vol_info = {"status": 'available', 'id': 1234}
existed_volume_get = Volume(existed_vol_info)
#fake volume after delete
after_delete_vol_info = {"status": '', 'id': 1234}
after_delete_volume_get = Volume(after_delete_vol_info)
#mock rest API
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=[existed_volume_get,
after_delete_volume_get])
self.powervc_cinder_driver.delete_volume(volume)
def test_delete_volume_no_powervc_attribute_error(self):
#fake volume which will be passed to driver service
vol_info = {'id': 1234, 'size': 1}
volume = Volume(vol_info)
self.assertRaises(AttributeError,
self.powervc_cinder_driver.delete_volume,
volume)
def test_delete_volume_not_found_exception(self):
vol_info = {'id': 1234, 'size': 1}
volume = Volume(vol_info)
setattr(volume, 'volume_metadata', [VolumeMetadataWithPVCID("1234")])
PowerVCService._client.volumes.get = \
mock.MagicMock(side_effect=exception.NotFound())
self.assertRaises(exception.NotFound,
self.powervc_cinder_driver.delete_volume,
volume)
def test_get_volume_stats(self):
# fake a storage provider list
ret_sp = [StorageProvider(i) for i in range(10)]
# mock rest api
PowerVCService._client.storage_providers.list = \
mock.MagicMock(return_value=ret_sp)
# fake an expected return dictionary
expected_ret_dic = {}
expected_ret_dic["volume_backend_name"] = 'powervc'
expected_ret_dic["vendor_name"] = 'IBM'
expected_ret_dic["driver_version"] = 1.0
expected_ret_dic["storage_protocol"] = 'Openstack'
expected_ret_dic['total_capacity_gb'] = 550
expected_ret_dic['free_capacity_gb'] = 275
expected_ret_dic['reserved_percentage'] = 0
expected_ret_dic['QoS_support'] = False
ret_dic = self.powervc_cinder_driver.get_volume_stats(True)
self.assertEqual(expected_ret_dic,
ret_dic,
"return stats should be matched")

View File

@ -0,0 +1,121 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
from cinder.openstack.common import gettextutils
gettextutils.install('cinder')
import unittest
import mox
from powervc.volume.manager.manager import PowerVCCinderManager
from powervc.volume.driver.service import PowerVCService
fake_volume_type = {'id': '',
'name': 'fake_volume_type'
}
fake_volume = {'display_name': 'fake_volume',
'display_description': 'This is a fake volume',
'volume_type_id': '',
'status': '',
'host': 'powervc',
'size': 1,
'availability_zone': 'nova',
'bootable': 0,
'snapshot_id': '',
'source_volid': '',
'metadata': {},
'project_id': 'admin',
'user_id': 'admin',
'attached_host': 'fake_attached_host',
'mountpoint': '',
'instance_uuid': '',
'attach_status': ''}
fake_message = {'payload': {'volume_id': '', 'display_name': ''}}
fake_context = {}
class FakeDBVolume():
def __init__(self):
pass
class FakeVolume():
def __init__(self):
pass
__dict__ = fake_volume
class FakePowerVCService(PowerVCService):
def __init__(self):
pass
fake_db_volume = FakeDBVolume()
class Test(unittest.TestCase):
def setUp(self):
self.moxer = mox.Mox()
def __init__(self):
pass
PowerVCCinderManager.__init__ = __init__
self.manager = PowerVCCinderManager()
def tearDown(self):
pass
def test_handle_powervc_volume_create_not_create(self):
self.manager._service = self.moxer.CreateMock(PowerVCService)
self.moxer.StubOutWithMock(self.manager,
'_get_local_volume_by_pvc_id')
self.moxer.StubOutWithMock(self.manager._service,
'get_volume_by_id')
self.moxer.StubOutWithMock(self.manager, '_insert_pvc_volume')
pvc_id = ''
self.manager._get_local_volume_by_pvc_id(fake_context, pvc_id)\
.AndReturn(fake_db_volume)
self.moxer.ReplayAll()
self.manager._handle_powervc_volume_create(fake_context, fake_message)
self.moxer.UnsetStubs()
self.moxer.VerifyAll()
def test_handle_powervc_volume_create_create(self):
self.manager._service = self.moxer.CreateMock(PowerVCService)
self.moxer.StubOutWithMock(self.manager,
'_get_local_volume_by_pvc_id')
self.moxer.StubOutWithMock(self.manager._service,
'get_volume_by_id')
self.moxer.StubOutWithMock(self.manager, '_insert_pvc_volume')
pvc_id = ''
volume_id = ''
fake_volume_instance = FakeVolume()
self.manager._get_local_volume_by_pvc_id(fake_context, pvc_id)\
.AndReturn(None)
self.manager._service.get_volume_by_id(volume_id)\
.AndReturn(fake_volume_instance)
self.moxer.ReplayAll()
self.manager._handle_powervc_volume_create(fake_context, fake_message)
self.moxer.UnsetStubs()
self.moxer.VerifyAll()
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()

common-powervc/.project Normal file
View File

@ -0,0 +1,24 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>common-powervc</name>
<comment></comment>
<projects>
<project>cinder-client</project>
<project>glance-client</project>
<project>keystone-client</project>
<project>neutron-client</project>
<project>nova</project>
<project>nova-client</project>
<project>oslo</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/common-powervc</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>

View File

@ -0,0 +1,232 @@
# This file contains configuration properties that affect how the powervc driver functions and how it
# communicates with the PowerVC server. Most properties have default values based on a default
# PowerVC configuration. However, some connection properties, such as PowerVC host name
# do not have default values and must be configured prior to running the powervc driver. These
# properties are marked with the text INPUT REQUIRED. Also you may have to change other
properties depending on your environment and how your PowerVC server is configured. For
# more information, see the Smart Cloud Entry Administration Guide.
[DEFAULT]
# The following group of properties needs to be configured
# in order for the PowerVC Driver to be able to authenticate with keystone
# and obtain information from it, that might be required to run background
# tasks (such as discovering a new image), or simply to connect to a
# secured Glance.
# When running secured Glance, make sure the 'auth_strategy' property in
# nova.conf is set to 'keystone'.
# Log info messages
verbose = true
[openstack]
# Authentication url to authenticate with keystone (string value)
auth_url = http://localhost:5000/v2.0
# v2.0 or v3
keystone_version = v2.0
# Tenant name for connecting to keystone in admin context (string value)
admin_tenant_name = demo
# Username for connecting to keystone in admin context (string value)
admin_user = demo
# Password for connecting to keystone in admin context (string value)
admin_password = openstack
# For local SSL connections, specify the path and filename of the cacert file
#connection_cacert =
http_insecure = True
# Region name for the local Openstack. Must be set to correct name when local
# Openstack is configured for working in multiple regions environment.
#region_name =
#
# Qpid connection information
#
# Qpid broker hostname (string value)
qpid_hostname =
# Qpid broker port (integer value)
qpid_port = 5672
# Username for qpid connection (string value)
qpid_username =
# Password for qpid connection (string value)
qpid_password =
# Transport to use, either 'tcp'(default) or 'ssl'
qpid_protocol = tcp
[powervc]
# Full class name for the manager for PowerVC Manager Service (string value)
powervc_manager = powervc.nova.driver.compute.manager.PowerVCCloudManager
# Full class name for the driver for PowerVC Driver Service (string value)
powervc_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver
#
# Connection information for PowerVC.
#
# Authentication url of the PowerVC to connect to
# INPUT REQUIRED
# Provide 'host' portion by updating it to the hostname of the PowerVC system
auth_url = https://host/powervc/openstack/identity/v3
# v2.0 or v3
keystone_version = v3
# Username for PowerVC connection (string value)
admin_user = root
# Password for PowerVC connection (string value)
admin_password = passw0rd
# Tenant name for PowerVC connection (string value)
admin_tenant_name = ibm-default
# For PowerVC SSL connections, specify the path and filename of the cacert file
# INPUT REQUIRED
# Provide the cacert file by copying it from its install location on the
# PowerVC host (e.g. /etc/pki/tls/certs/powervc.crt) to the local hosting
# Openstack system.
#connection_cacert =
# Value of insecure option for PowerVC connections (Default=True)
# INPUT REQUIRED
# Change to False when using a secure connection and providing a cacert file.
http_insecure = True
# Value of authorization token expiration stale duration (Default=3600)
# INPUT REQUIRED
# Due to a PowerVC requirement, all REST API clients need to refresh the
# authorization token at least 1 hour before expiration
expiration_stale_duration = 3600
# The names of the storage connectivity groups supported by our driver
# INPUT REQUIRED
# Provide the PowerVC storage connectivity group (SCG) names by getting the name
# from the PowerVC system, or using the PowerVC default SCG of "Any host, all VIOS".
# If there are more than one SCG you want to specify, just add more SCG values with
# more storage_connectivity_group
# Note: The value of this property must exactly match the value as specified on the
# PowerVC server, including case, punctuation, and spaces.
storage_connectivity_group = Any host, all VIOS
#storage_connectivity_group =
#
# Qpid connection information for PowerVC
#
# Qpid broker hostname (string value)
# INPUT REQUIRED
# Change 'host' to the hostname of the PowerVC system
qpid_hostname = host
# Qpid broker port (integer value)
# uncomment following line for non-ssl
# qpid_port = 5672
qpid_port = 5671
# Username for qpid connection (string value)
qpid_username = powervc_qpid
# Password for qpid connection (string value)
# INPUT REQUIRED
# Provide the qpid connection password from the PowerVC system
# by using the cat command on the pw.file in the directory where
# PowerVC is installed (e.g. cat /opt/ibm/powervc/data/pw.file)
qpid_password =
# Transport to use, either 'tcp'(default) or 'ssl'
# uncomment following line for non-ssl
# qpid_protocol = tcp
qpid_protocol = ssl
#
# Sync variables
#
# The name of the staging project (string value)
# If not set defaults to 'Public'. If set the named project should exist and
# be accessible by the staging_user.
staging_project_name = Public
# The name of the staging user (string value)
# If not set defaults to 'admin'. If set the user should exist and
# have access to the project identified by staging_project_name.
staging_user = admin
# The prefix that will be added to the flavor name from PowerVC
# and stored (string value). This should be unique for every
# connection to help distinguish the flavors
flavor_prefix = PVC-
# This is a list of PowerVC flavor names that should be synced.
# If no flavor name is specified, then all flavors are synced.
flavor_white_list =
# This is a list of PowerVC flavor names that should not be synced.
flavor_black_list =
# The periodic flavor sync interval in seconds.
flavor_sync_interval = 300
# Instance periodic sync interval specified in seconds
instance_sync_interval = 20
# How many instance sync intervals between full instance syncs. Only instances
# known to be out of sync are synced on the interval except after this many
# intervals when all instances are synced.
full_instance_sync_frequency = 30
# Image periodic sync interval specified in seconds. This is the time from the end
# of one successful image periodic sync operation to the start of the next.
image_periodic_sync_interval_in_seconds = 300
# The time in seconds between image sync retry attempts if an error was
# encountered during an image sync operation
image_sync_retry_interval_time_in_seconds = 60
# The maximum number of images to return. The default is 500 images. If your PowerVC
# has more than 500 images, this limit should be increased to include all images.
image_limit = 500
# Volume periodic sync interval specified in seconds
volume_sync_interval = 20
# How many volume sync intervals between full volume syncs.
# Only volumes known to be out of sync are synced on the interval
# except after this many intervals when all volumes are synced.
full_volume_sync_frequency = 30
# Volume type periodic sync interval specified in seconds
volume_type_sync_interval = 20
# How many volume type sync intervals between full volume type syncs.
# Only volumes known to be out of sync are synced on the interval
# except after this many intervals when all volumes are synced.
full_volume_type_sync_frequency = 30
# Ignore delete errors so an exception is not thrown during a
# delete. When set to true, this allows the volume to be deleted
# on the hosting OS even if an exception occurs. When set to false,
# exceptions during delete prevent the volume from being deleted
# on the hosting OS.
volume_driver_ignore_delete_error = False
# The times to check whether attaching/detaching the volume succeed
volume_max_try_times = 12
# Minimum delay interval and initial delay seconds for long run tasks.
longrun_loop_interval = 7
longrun_initial_delay = 10

View File

@ -0,0 +1,10 @@
compress
/var/log/powervc/*.log {
weekly
rotate 4
missingok
compress
minsize 100k
size 50M
}

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,42 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
from powervc.common import config
from powervc.common import netutils
CONF = config.CONF
# http client opts from config file normalized
# to keystone client form
OS_OPTS = None
PVC_OPTS = None
def _build_base_http_opts(config_section, opt_map):
configuration = CONF[config_section]
opt_map['tenant_name'] = configuration['admin_tenant_name']
opt_map['username'] = configuration['admin_user']
opt_map['password'] = configuration['admin_password']
opt_map['cacert'] = configuration['connection_cacert']
opt_map['insecure'] = configuration['http_insecure']
if opt_map['insecure'] is False:
opt_map['auth_url'] = netutils.hostname_url(configuration['auth_url'])
else:
opt_map['auth_url'] = configuration['auth_url']
return opt_map
# init client opts for powervc and openstack only once
if OS_OPTS is None:
OS_OPTS = _build_base_http_opts('openstack', {})
# Support multiple regions on the local OpenStack
OS_OPTS['region_name'] = CONF['openstack']['region_name']
if PVC_OPTS is None:
PVC_OPTS = _build_base_http_opts('powervc', {})
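
As a worked example (not part of the module), the dictionary below shows roughly what OS_OPTS would contain when the sample [openstack] section from the powervc.conf shipped in this commit is loaded; the None defaults for cacert and region_name are assumptions for options left commented out in that file.

# Sketch of the keystone-client-style options produced by
# _build_base_http_opts for the sample [openstack] section.
OS_OPTS_EXAMPLE = {
    'tenant_name': 'demo',
    'username': 'demo',
    'password': 'openstack',
    'cacert': None,        # assumed default when connection_cacert is unset
    'insecure': True,
    # auth_url is passed through unchanged because http_insecure is True;
    # with http_insecure = False it would first be normalized by
    # netutils.hostname_url().
    'auth_url': 'http://localhost:5000/v2.0',
    'region_name': None,   # assumed default when region_name is unset
}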

View File

@ -0,0 +1,83 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
def new_composite_deletgate(delegates):
"""create and return a new class which delegates
calls to the delegates. the facade object returned
from this method allows you to extend functionality
of existing objects using containment rather than
inherritance.
for example suppose you have obj1 which has method
x() and you have obj2 which has method y(). you can
create a single view of those objects like this:
composite = new_composite_delegate([obj1, obj2])
composite.x() # calls x() on obj1
composite.y() # calls y() on obj2
:param delegates: a list of objects which make up the
delegates. when a method call or attr access is made
on the returned wrapper, the list of delegates will
be tried in order until an object is found with the
attr.
"""
class CompositeDelegator(object):
def __init__(self, *args):
super(CompositeDelegator, self).__init__()
def __getattribute__(self, name):
for instance in delegates:
if hasattr(instance, name):
attr = instance.__getattribute__(name)
if hasattr(attr, '__call__'):
def _f(*args, **kwargs):
return attr(*args, **kwargs)
return _f
else:
return attr
return None
return CompositeDelegator()
def context_dynamic_auth_token(ctx, keystone):
"""
Create a delegate specifically for the security context.
The security context needs a freshly renewed auth_token for
each request, but the auth_token property on the context is
static. This delegates the auth_token property to the keystone
client's dynamic auth_token property.
Every context created for long-lived usage should be wrapped
with this delegate to ensure it always uses the newest
auth_token for every REST request.
"""
class ContextDAT(ctx.__class__):
def __init__(self):
super(ctx.__class__, self).__init__()
def __getattribute__(self, name):
if name != 'auth_token':
if hasattr(ctx, name):
attr = ctx.__getattribute__(name)
if hasattr(attr, '__call__'):
def _f(*args, **kwargs):
return attr(*args, **kwargs)
return _f
else:
return attr
else:
return keystone.auth_token
return ContextDAT()
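
A short usage sketch of the composite-delegate idea implemented by new_composite_deletgate above. The Volumes and Images toy classes are hypothetical, and the composite() helper is a simplified stand-in (using __getattr__ instead of __getattribute__) rather than the module's actual function.

class Volumes(object):
    def list_volumes(self):
        return ['vol-1', 'vol-2']


class Images(object):
    def list_images(self):
        return ['img-1']


def composite(delegates):
    # Same idea as new_composite_deletgate: try each delegate in order
    # until one is found that has the requested attribute.
    class _Composite(object):
        def __getattr__(self, name):
            for obj in delegates:
                if hasattr(obj, name):
                    return getattr(obj, name)
            raise AttributeError(name)
    return _Composite()


facade = composite([Volumes(), Images()])
print(facade.list_volumes())  # resolved on the Volumes delegate
print(facade.list_images())   # resolved on the Images delegate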

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,16 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
class ClientExtension(object):
"""base class for all extensions.
"""
def __init__(self, client):
self.client = client

View File

@ -0,0 +1,200 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import six
import urllib
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from cinderclient import base as client_base
from cinderclient.v1 import volumes
from cinderclient.v1 import volume_types
from powervc.common.client.extensions import base
from powervc.common import utils
class Client(base.ClientExtension):
def __init__(self, client):
super(Client, self).__init__(client)
# Initialize Storage Provider Manager
self.storage_providers = StorageProviderManager(client)
# Initialize PVC specified Volume Manager
self.volumes = PVCVolumeManager(client)
# Initialize PVC specified StorageTemplate Manager
self.volume_types = PVCStorageTemplateManager(client)
# any extensions to std cinder client go below
class StorageProvider(client_base.Resource):
"""
Entity class for StorageProvider
"""
def __repr__(self):
return ("<StorageProvider: %s, storage_hostname: %s>" %
(self.id, self.storage_hostname))
class StorageProviderManager(client_base.Manager):
"""
Manager class for StorageProvider
Currently get and list functions for StorageProvider
are implemented.
"""
resource_class = StorageProvider
def get(self, spUUID):
"""
Get a StorageProvider.
:param spUUID: UUID of the `StorageProvider` to get.
:rtype: :class:`StorageProvider`
"""
return self._get("/storage-providers/%s" % spUUID,
"storage_provider")
def list(self, detailed=True, search_opts=None,
scgUUID=None,
scgName=None):
"""
Get a list of StorageProviders filtered by a specified
SCG UUID or SCG name. If both SCG UUID and SCG name are specified,
the UUID takes priority.
:rtype: list of :class:`StorageProvider`
"""
# Get accessible volumes by SCG
if scgUUID or scgName:
return (utils.get_utils().
get_scg_accessible_storage_providers(
scgUUID=scgUUID, scgName=scgName,
detailed=detailed, search_opts=search_opts)
)
else:
return (utils.get_utils().
get_multi_scg_accessible_storage_providers(
None, None, detailed=detailed, search_opts=search_opts)
)
def list_all_providers(self, detailed=True, search_opts=None):
"""
Get a list of StorageProvider.
Optional detailed returns details StorageProvider info.
:rtype: list of :class:`StorageProvider`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/storage-providers%s%s" %
(detail, query_string),
"storage_providers")
class PVCVolumeManager(volumes.VolumeManager):
"""
The PowerVC-specific VolumeManager that gets and lists volumes
filtered by Storage Connectivity Group.
"""
def list(self, detailed=True, search_opts=None,
scgUUID=None,
scgName=None):
"""
Get a list of volumes filtered by a specified SCG UUID or SCG
name. If both the SCG UUID and SCG name are specified, the UUID
takes priority.
:rtype: list of :class:`Volume`
"""
# Get accessible volumes by SCG
if scgUUID or scgName:
return (utils.get_utils().
get_scg_accessible_volumes(scgUUID=scgUUID,
scgName=scgName,
detailed=detailed,
search_opts=search_opts))
else:
return (utils.get_utils().
get_multi_scg_accessible_volumes(None,
None,
detailed=detailed,
search_opts=search_opts)
)
def list_all_volumes(self, detailed=True, search_opts=None):
"""
Get a list of all volumes.
:rtype: list of :class:`Volume`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/volumes%s%s" % (detail, query_string),
"volumes")
class PVCStorageTemplateManager(volume_types.VolumeTypeManager):
"""
The PowerVC-specific StorageTemplateManager that lists Storage Templates
(VolumeType in OpenStack) filtered by Storage Connectivity Group.
"""
def list(self, scgUUID=None, scgName=None):
"""
Get a list of Storage Templates filtered by a specified SCG UUID
or SCG name. If both the SCG UUID and SCG name are specified,
the UUID takes priority.
:rtype: list of :class:`VolumeType`
"""
# Get accessible volumes by SCG
if scgUUID or scgName:
return (utils.get_utils().
get_scg_accessible_storage_templates(scgUUID=scgUUID,
scgName=scgName))
else:
return (utils.get_utils().
get_multi_scg_accessible_storage_templates(None,
None))
def list_all_storage_templates(self):
"""
Get a list of all Storage Templates
:rtype: list of :class:`VolumeType`.
"""
return self._list("/types", "volume_types")

View File

@ -0,0 +1,57 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import powervc.common.client.extensions.base as base
class Client(base.ClientExtension):
def __init__(self, client):
super(Client, self).__init__(client)
###################Images functions##########################
def listImages(self):
return [image for image in self.client.images.list()]
def getImage(self, image_id):
return self.client.images.get(image_id)
def getImageFile(self, image_id):
return self.client.images.data(image_id)
def deleteImage(self, image_id):
return self.client.images.delete(image_id)
def updateImage(self, image_id, **kwargs):
return self.client.images.update(image_id, **kwargs)
##################Image member functions#######################
def listImageMembers(self, image_id):
return [imageMember for imageMember in
self.client.image_members.list(image_id)]
def deleteImageMember(self, image_id, member_id):
self.client.image_members.delete(image_id, member_id)
def updateImageMember(self, image_id, member_id, member_status):
return self.client.image_members.update(image_id, member_id,
member_status)
def createImageMember(self, image_id, member_id):
return self.client.image_members.create(image_id, member_id)
##################Image tag functions (v2 only)################
def updateImageTag(self, image_id, tag_value):
if self.client_version == 2:
return self.client.image_tags.update(image_id, tag_value)
def deleteImageTag(self, image_id, tag_value):
if self.client_version == 2:
return self.client.image_tags.delete(image_id, tag_value)
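A brief hedged sketch (not part of the original module): the wrapper above is a
thin pass-through to python-glanceclient and is typically reached through the
common client factory; the factory import path is an assumption.

from powervc.common import constants
from powervc.common.client import factory

# Assumption: powervc.conf has been parsed and a glance endpoint is available.
pvc_glance = factory.POWERVC.get_client(str(constants.SERVICE_TYPES.image))
if pvc_glance:
    images = pvc_glance.listImages()
    if images:
        detail = pvc_glance.getImage(images[0].id)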

View File

@ -0,0 +1,489 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import six
import urllib
import base64
from novaclient import base as client_base
from novaclient.v1_1 import servers
from novaclient.v1_1 import hypervisors
from novaclient.v1_1 import images
from novaclient.v1_1 import flavors
from novaclient.v1_1 import volumes
from novaclient.v1_1.volume_types import VolumeType
from novaclient.openstack.common import strutils
from powervc.common.client.extensions import base
from powervc.common import utils
import logging
LOG = logging.getLogger(__name__)
class Client(base.ClientExtension):
def __init__(self, client):
super(Client, self).__init__(client)
self.manager = PVCServerManager(client)
self.servers = servers
self.hypervisors = hypervisors.HypervisorManager(client)
self.images = images.ImageManager(client)
self.flavors = flavors.FlavorManager(client)
self.storage_connectivity_groups = \
StorageConnectivityGroupManager(client)
self.volumes = volumes.VolumeManager(client)
self.scg_images = SCGImageManager(client)
# any extensions to std nova client go below
class PVCServerManager(servers.ServerManager):
"""
This ServerManager class is specific to booting a VM on PowerVC.
Because the PowerVC boot API does not follow the standard OpenStack
boot API, the default boot method is rewritten to satisfy the PowerVC
boot REST API.
"""
def list(self, detailed=True, search_opts=None,
scgUUID=None,
scgName=None):
"""
Get a list of Servers filtered by a specified SCG UUID or SCG
name. If both the SCG UUID and SCG name are specified, the UUID
takes priority.
:rtype: list of :class:`Server`
"""
if scgUUID or scgName:
return utils.get_utils().get_scg_accessible_servers(scgUUID,
scgName,
detailed,
search_opts
)
else:
# This will get all scgs accessible servers
return utils.get_utils().\
get_multi_scg_accessible_servers(None,
None,
detailed,
search_opts
)
def list_all_servers(self, detailed=True, search_opts=None):
"""
Get a list of all servers without filters.
Optional detailed returns detailed server info.
Optional reservation_id only returns instances with that
reservation_id.
:rtype: list of :class:`Server`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
return self._list("/servers%s%s" % (detail, query_string), "servers")
# This function was originally copied from (/usr/lib/python2.6/site-packages/
# novaclient/v1_1/servers.py), but changes are needed when the activation
# data contains userdata and files: in a boot action the local OS novaclient's
# _boot reads them from the CLI or GUI first, so when our driver is triggered
# this version of _boot should simply forward the data or file content to
# PowerVC without reading it again, otherwise errors happen.
# RTC/172018, add support to boot server with activation data.
def _boot(self, resource_url, response_key, name, image, flavor,
meta=None, files=None, userdata=None, reservation_id=None,
return_raw=False, min_count=None, max_count=None,
security_groups=None, key_name=None, availability_zone=None,
block_device_mapping=None, nics=None, scheduler_hints=None,
config_drive=None, admin_pass=None, **kwargs):
"""Create (boot) a new server.
:param name: Server Name.
:param image: The PowerVC `Image` UUID string to boot with.
:param flavor: A dict describing the `Flavor` to boot onto.
:param meta: A dict of arbitrary key/value metadata to store for this
server. A maximum of five entries is allowed, and both
keys and values must be 255 characters or less.
:param files: A dict of files to overwrite on the server upon boot.
Keys are file names (i.e. ``/etc/passwd``) and values
are the file contents (either as a string or as a
file-like object). A maximum of five entries is allowed,
and each file must be 10k or less.
:param userdata: user data to pass to the config drive; this can be a
file-like object or a string. PowerVC does not use the
metadata server for security reasons.
:param reservation_id: a UUID for the set of servers being requested.
:param return_raw: If True, don't try to coerce the result into
a Resource object.
:param security_groups: list of security group names
:param key_name: (optional extension) name of keypair to inject into
the instance
:param availability_zone: Name of the availability zone for instance
placement.
:param block_device_mapping: A dict of block device mappings for this
server.
:param nics: (optional extension) an ordered list of nics to be
added to this server, with information about
connected networks, fixed ips, etc.
:param scheduler_hints: (optional extension) arbitrary key-value pairs
specified by the client to help boot an instance.
:param config_drive: (optional extension) value for config drive
either boolean, or volume-id
:param admin_pass: admin password for the server.
"""
body = {"server": {
"name": name,
"imageRef": image,
"flavor": {},
}}
# Add the flavor information to PowerVC for booting VM
body["server"]["flavor"]['ram'] = flavor['memory_mb']
body["server"]["flavor"]['vcpus'] = flavor['vcpus']
body["server"]["flavor"]['disk'] = flavor['root_gb']
body["server"]["flavor"]['OS-FLV-EXT-DATA:ephemeral'] = \
flavor.get('OS-FLV-EXT-DATA:ephemeral', 0)
body["server"]["flavor"]['extra_specs'] = flavor['extra_specs']
# If hypervisor ID specified:
if kwargs.get("hypervisor", None):
body["server"]['hypervisor_hostname'] = kwargs["hypervisor"]
if userdata:
# RTC/172018 -- start
# comment out the following, already done by local OS nova client
# if hasattr(userdata, 'read'):
# userdata = userdata.read()
# userdata = strutils.safe_encode(userdata)
# body["server"]["user_data"] = base64.b64encode(userdata)
body["server"]["user_data"] = userdata
# RTC/172018 -- end
if meta:
body["server"]["metadata"] = meta
if reservation_id:
body["server"]["reservation_id"] = reservation_id
if key_name:
body["server"]["key_name"] = key_name
if scheduler_hints:
body['os:scheduler_hints'] = scheduler_hints
if config_drive:
body["server"]["config_drive"] = config_drive
if admin_pass:
body["server"]["adminPass"] = admin_pass
if not min_count:
min_count = 1
if not max_count:
max_count = min_count
body["server"]["min_count"] = min_count
body["server"]["max_count"] = max_count
if security_groups:
body["server"]["security_groups"] = ([{'name': sg}
for sg in security_groups])
# Files are a slight bit tricky. They're passed in a "personality"
# list to the POST. Each item is a dict giving a file name and the
# base64-encoded contents of the file. We want to allow passing
# either an open file *or* some contents as files here.
if files:
personality = body['server']['personality'] = []
# RTC/172018 -- start
# comment out the following, already done by local OS nova client
#for filepath, file_or_string in files.items():
# if hasattr(file_or_string, 'read'):
# data = file_or_string.read()
# else:
# data = file_or_string
for file in files:
personality.append({
'path': file[0],
'contents': file[1].encode('base64'),
})
# RTC/172018 -- end
if availability_zone:
body["server"]["availability_zone"] = availability_zone
# Block device mappings are passed as a list of dictionaries
if block_device_mapping:
bdm = body['server']['block_device_mapping'] = []
for device_name, mapping in block_device_mapping.items():
#
# The mapping is in the format:
# <id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>]
#
bdm_dict = {'device_name': device_name}
mapping_parts = mapping.split(':')
id_ = mapping_parts[0]
if len(mapping_parts) == 1:
bdm_dict['volume_id'] = id_
if len(mapping_parts) > 1:
type_ = mapping_parts[1]
if type_.startswith('snap'):
bdm_dict['snapshot_id'] = id_
else:
bdm_dict['volume_id'] = id_
if len(mapping_parts) > 2:
bdm_dict['volume_size'] = mapping_parts[2]
if len(mapping_parts) > 3:
bdm_dict['delete_on_termination'] = mapping_parts[3]
bdm.append(bdm_dict)
if nics is not None:
# NOTE(tr3buchet): nics can be an empty list
all_net_data = []
for nic_info in nics:
net_data = {}
# if value is empty string, do not send value in body
if nic_info.get('net-id'):
net_data['uuid'] = nic_info['net-id']
if nic_info.get('v4-fixed-ip'):
net_data['fixed_ip'] = nic_info['v4-fixed-ip']
if nic_info.get('port-id'):
net_data['port'] = nic_info['port-id']
all_net_data.append(net_data)
body['server']['networks'] = all_net_data
return self._create(resource_url, body, response_key,
return_raw=return_raw, **kwargs)
def _resize_pvc(self, server, info, **kwargs):
"""
This method is used to overwrite the resize in the
class ServerManager
"""
return self._action('resize', server, info=info, **kwargs)
def list_instance_storage_viable_hosts(self, server):
"""
Get a list of hosts compatible with this server.
Used for getting candidate host hypervisors from powervc for
live migration. We need to do things a bit differently
since there is apparently no common schema for the content
returned. See below.
{
"8233E8B_100008P":{
"host":"8233E8B_100008P"
},
"8233E8B_100043P":{
"host":"8233E8B_100043P"
}
}
:param server: ID of the :class:`Server` to get.
:rtype: dict
"""
url = "/storage-viable-hosts?instance_uuid=%s"\
% (client_base.getid(server))
_resp, body = self.api.client.get(url)
return body
class StorageConnectivityGroup(client_base.Resource):
"""
Entity class for StorageConnectivityGroup
"""
def __repr__(self):
return ("<StorageConnectivityGroup: %s, displayname: %s>" %
(self.id, self.display_name))
def list_all_volumes(self):
"""
Get a list of accessible volume for this SCG.
:rtype: list of :class:`Volume`
"""
return self.manager.list_all_volumes(self.id)
def list_all_volume_types(self):
"""
Get a list of accessible volume types for this SCG.
:rtype: list of :class:`VolumeType`
"""
return self.manager.list_all_volume_types(self.id)
class StorageConnectivityGroupManager(client_base.Manager):
"""
Manager class for StorageConnectivityGroup
Currently get and list functions for StorageConnectivityGroup
are implemented.
"""
resource_class = StorageConnectivityGroup
def get(self, scgUUID):
"""
Get a StorageConnectivityGroup.
:param scgUUID: UUID of the `StorageConnectivityGroup` to get.
:rtype: :class:`StorageConnectivityGroup`
"""
try:
return self._get("/storage-connectivity-groups/%s" % scgUUID,
"storage_connectivity_group")
except Exception as e:
# PowerVC Express installations in IVM mode
# would receive BadRequest
LOG.error('A problem was encountered while getting the '
' Storage Connectivity Group %s: %s '
% (scgUUID, str(e)))
return None
def list_for_image(self, imageUUID):
"""
Get a list of StorageConnectivityGroups for the specified image. If
an error occurs getting the SCGs for an image, an exception is logged
and raised.
:param: imageUUID The image UUID:
:rtype: list of :class:`StorageConnectivityGroup`
"""
try:
return self._list("/images/%s/storage-connectivity-groups" %
imageUUID, "storage_connectivity_groups")
except Exception as e:
LOG.error('A problem was encountered while getting a list of '
'Storage Connectivity Groups for image %s: %s '
% (imageUUID, str(e)))
raise e
def list_all_volumes(self, scgUUID):
"""
Get a list of accessible volume for this SCG.
:rtype: list of :class:`Volume`
"""
try:
return self._list("/storage-connectivity-groups/%s/volumes"
% scgUUID, "volumes", volumes.Volume)
except Exception as e:
LOG.error('A problem was encountered while getting a list of '
'accessible volumes for scg %s: %s '
% (scgUUID, str(e)))
raise e
def list_all_volume_types(self, scgUUID):
"""
Get a list of accessible volume types for this SCG.
:rtype: list of :class:`VolumeType`
"""
try:
return self._list("/storage-connectivity-groups/%s/volume-types"
% scgUUID, "volume-types", VolumeType)
except Exception as e:
LOG.error('A problem was encountered while getting a list of '
'accessible volume types for scg %s: %s '
% (scgUUID, str(e)))
raise e
def list(self, detailed=True, search_opts=None):
"""
Get a list of StorageConnectivityGroups.
Optional detailed returns detailed StorageConnectivityGroup info.
:rtype: list of :class:`StorageConnectivityGroup`
"""
if search_opts is None:
search_opts = {}
qparams = {}
for opt, val in six.iteritems(search_opts):
if val:
qparams[opt] = val
query_string = "?%s" % urllib.urlencode(qparams) if qparams else ""
detail = ""
if detailed:
detail = "/detail"
try:
return self._list("/storage-connectivity-groups%s%s" %
(detail, query_string),
"storage_connectivity_groups")
except Exception as e:
# PowerVC Express installations in IVM mode
# would receive BadRequest
LOG.error('A problem was encountered while getting a list'
' of Storage Connectivity Groups: %s '
% str(e))
return []
class SCGImage(client_base.Resource):
"""
Entity class for SCGImage
"""
def __repr__(self):
return ("<SCGImage: %s, name: %s>" %
(self.id, self.name))
class SCGImageManager(client_base.Manager):
"""
Manager class for SCGImage
Currently the list function for SCGImages in a StorageConnectivityGroup,
and the image identifiers of SCGImages in a StorageConnectivityGroup is
implemented.
"""
resource_class = SCGImage
def list(self, scgUUID):
"""
Get a list of SCGImages for the specified StorageConnectivityGroup. If
an error occurs getting the SCGImages, an exception is logged and
raised.
:param: scgUUID The StorageConnectivityGroup UUID:
:rtype: list of :class:`SCGImage`
"""
try:
return self._list("/storage-connectivity-groups/%s/images" %
scgUUID, "images")
except Exception as e:
LOG.error('A problem was encountered while getting a list of '
'images for Storage Connectivity Group \'%s\': %s '
% (scgUUID, str(e)))
raise e
def list_ids(self, scgUUID):
"""
Get a list of SCGImage identifiers for the specified
StorageConnectivityGroup. If an error occurs getting the SCGImage ids,
an exception is logged and raised.
:param: scgUUID The StorageConnectivityGroup UUID:
:rtype: list of :class:`SCGImage` identifiers
"""
ids = []
SCGImages = self.list(scgUUID)
if SCGImages:
for image in SCGImages:
ids.append(image.id)
return ids
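A small worked example (not part of the original module) tracing how the _boot
method above expands one block_device_mapping entry of the documented
<id>:[<type>]:[<size(GB)>]:[<delete_on_terminate>] form; the values are
illustrative.

block_device_mapping = {'/dev/sdb': 'vol-0001:volume:50:1'}
# _boot appends this dict to body['server']['block_device_mapping']:
expected = [{'device_name': '/dev/sdb',
             'volume_id': 'vol-0001',
             'volume_size': '50',
             'delete_on_termination': '1'}]
# An entry whose type starts with 'snap' (for example 'snap-0001:snapshot')
# sets 'snapshot_id' instead of 'volume_id'.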

View File

@ -0,0 +1,64 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import powervc.common.client.service as service
from powervc.common.client.config import CONF as CONF
from powervc.common.client.config import OS_OPTS as OS_OPTS
from powervc.common.client.config import PVC_OPTS as PVC_OPTS
from powervc.common.constants import SERVICE_TYPES as SERVICE_TYPES
"""sample useage
New PowerVC v1 glance client:
pvc_glance_v1 = factory.POWERVC.get_client(
str(constants.SERVICE_TYPES.image), 'v1')
New PowerVC glance client for latest known version:
pvc_lastest_glance = factory.POWERVC.get_client(
str(constants.SERVICE_TYPES.image))
List the known versions of the PowerVC cinder service:
pvc_cinder_versions = factory.POWERVC.get_versions(
str(constants.SERVICE_TYPES.volume))
List the services types on the local openstack host:
known_lcl_service_types = factory.LOCAL.get_service_types()
Get a reference to keystone client for PowerVC:
pvc_keystone = factory.POWERVC.keystone
"""
# global access to local openstack and powervc services
LOCAL = None
POWERVC = None
if LOCAL is None:
keystone = service.KeystoneService(str(SERVICE_TYPES.identity),
CONF['openstack']['keystone_version'],
OS_OPTS['auth_url'], OS_OPTS,
None).new_client()
LOCAL = service.ClientServiceCatalog(OS_OPTS, keystone)
if POWERVC is None:
keystone_opts = PVC_OPTS.copy()
keystone_opts['stale_duration']\
= CONF['powervc']['expiration_stale_duration']
keystone = service.KeystoneService(str(SERVICE_TYPES.identity),
CONF['powervc']['keystone_version'],
PVC_OPTS['auth_url'], keystone_opts,
None).new_client()
POWERVC = service.ClientServiceCatalog(PVC_OPTS, keystone)

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,27 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
def patch_client(service_wrapper, client):
"""Patch the _cs_request method of the cinder client and inject
a keystone managed token and management url. This allows us
to ensure a valid token is maintained and also support keystone
v3 apis.
"""
org = client.client._cs_request
def _authd_cs_request(url, method, **kwargs):
# patch cinders HTTPClient to use our keystone for tokens
# and support for non standard URLs
client.client.auth_token = service_wrapper.keystone.auth_token
client.client.management_url = service_wrapper.management_url
return org(url, method, **kwargs)
client.client._cs_request = _authd_cs_request

View File

@ -0,0 +1,117 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import warlock
def patch_client(service_wrapper, client):
"""
Patch the _http_request method of the glance client and inject
a keystone managed token and management url. This allows us
to ensure a valid token is maintained and also support keystone
v3 apis.
"""
http_client = client
if hasattr(client, 'http_client'):
http_client = client.http_client
org_http_request = http_client._http_request
def _patched_http_request(url, method, **kwargs):
# patch glance HTTPClient to use our keystone for tokens
# and support for non standard URLs
if http_client.endpoint_path and\
not http_client.endpoint_path.endswith('/'):
http_client.endpoint_path += '/'
http_client.auth_token = service_wrapper.keystone.auth_token
if url.startswith('/'):
url = url[1:]
return org_http_request(url, method, **kwargs)
http_client._http_request = _patched_http_request
def _patched_raw_request(method, url, **kwargs):
'''
Patch the http raw_request method to fix a problem. If there is no
image data set the content-type in the headers to application/json.
Failure to do so can lead to errors during image updates and creates.
'''
kwargs.setdefault('headers', {})
if 'body' in kwargs:
if kwargs['body'] is None:
kwargs['headers'].setdefault('Content-Type',
'application/json')
else:
kwargs['headers'].setdefault('Content-Type',
'application/octet-stream')
if (hasattr(kwargs['body'], 'read')
and method.lower() in ('post', 'put')):
# We use 'Transfer-Encoding: chunked' because
# body size may not always be known in advance.
kwargs['headers']['Transfer-Encoding'] = 'chunked'
else:
kwargs['headers'].setdefault('Content-Type',
'application/json')
return _patched_http_request(url, method, **kwargs)
http_client.raw_request = _patched_raw_request
"""
Patch v2 glanceclient controller for update image
"""
ver = str(client).split('.')[1]
if ver != 'v2':
# if not v2 client, nothing else to do
return
org_image_controller = client.images
def _patched_image_update(image_id, remove_props=None, **kwargs):
"""
Update attributes of an image.
This is patched to fix an issue. The Content-Type should reflect v2.1
since that is the version of the patch schema that is used.
:param image_id: ID of the image to modify.
:param remove_props: List of property names to remove
:param **kwargs: Image attribute names and their new values.
"""
image = org_image_controller.get(image_id)
for (key, value) in kwargs.items():
try:
setattr(image, key, value)
except warlock.InvalidOperation as e:
raise TypeError(unicode(e))
if remove_props is not None:
cur_props = image.keys()
new_props = kwargs.keys()
#NOTE(esheffield): Only remove props that currently exist on the
# image and are NOT in the properties being updated / added
props_to_remove = set(cur_props).intersection(
set(remove_props).difference(new_props))
for key in props_to_remove:
delattr(image, key)
url = '/v2/images/%s' % image_id
hdrs = {'Content-Type': 'application/openstack-images-v2.1-json-patch'}
http_client.raw_request('PATCH', url,
headers=hdrs,
body=image.patch)
#NOTE(bcwaldon): calling image.patch doesn't clear the changes, so
# we need to fetch the image again to get a clean history. This is
# an obvious optimization for warlock
return org_image_controller.get(image_id)
org_image_controller.update = _patched_image_update

View File

@ -0,0 +1,25 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
def patch_client(service_wrapper, client):
"""Patch the authenticate_and_fetch_endpoint_url method to inject
our own managed keystone token and endpoint.
"""
org_auth_and_fetch = client.httpclient.authenticate_and_fetch_endpoint_url
def _patched_auth_and_fetch():
# inject our keystone managed token
client.httpclient.auth_token = service_wrapper.keystone.auth_token
client.httpclient.endpoint_url = service_wrapper.management_url
return org_auth_and_fetch()
client.httpclient.authenticate_and_fetch_endpoint_url = \
_patched_auth_and_fetch

View File

@ -0,0 +1,24 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
def patch_client(service_wrapper, client):
""" wrapper the _cs_request call in an authenticated version
of it so we can reuse our keystone connection
"""
org_cs_request = client.client._cs_request
def _authd_cs_request(url, method, **kwargs):
client.client.auth_token = service_wrapper.keystone.auth_token
client.client.management_url = service_wrapper.management_url
return org_cs_request(url, method, **kwargs)
client.client._cs_request = _authd_cs_request

View File

@ -0,0 +1,422 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import urlparse
import re
import powervc.common.client.delegate as delegate
from glanceclient.openstack.common import importutils
from powervc.common.constants import SERVICE_TYPES as SERVICE_TYPES
from powervc.common import netutils
class AbstractService(object):
"""a stub over a service endpoint which permits consumers
to create openstack python clients directly from this object.
"""
def __init__(self, svc_type, version, url, base_args, keystone):
self.svc_type = svc_type
self.version = version
self.url = url
self.base_args = base_args.copy()
self.keystone = keystone
self.base_name = SERVICE_TYPES[svc_type].to_codename()
self.client_version = version = version.replace('.', '_')
self.clazz = self._lookup_client()
self.extension = self._lookup_extension()
self.management_url = url
def _extend(self, client, client_extension=None, *extension_args):
if self.extension is None and client_extension is None:
return client
delegates = []
if client_extension is not None:
delegates.append(client_extension(client, *extension_args))
if self.extension is not None:
delegates.append(self.extension(client, *extension_args))
delegates.append(client)
# extend the base client using a mixin type delegate
return delegate.new_composite_deletgate(delegates)
def _patch(self, client):
try:
# if applicable patch the client
module = (importutils.
import_module("powervc.common.client.patch.%s" %
(self.base_name)))
module.patch_client(self, client)
except ImportError:
pass
return client
def _lookup_client(self):
return importutils.import_class("%sclient.%s.client.Client" %
(self.base_name,
self.get_client_version()))
def _lookup_extension(self):
try:
return (importutils.
import_class("powervc.common.client.extensions.%s.Client" %
(self.base_name)))
except ImportError:
return None
def _chomp_version(self, version):
match = re.search('(v[0-9])[_]*[0-9]*', version, re.IGNORECASE)
if match:
version = match.group(1)
return version
def _init_std_client(self):
return self._patch(self.clazz(self.base_args['username'],
self.base_args['password'],
self.base_args['tenant_name'],
self.base_args['auth_url'],
self.base_args['insecure'],
cacert=self.base_args['cacert']))
def new_client(self, client_extension=None, *extension_args):
"""build and return a new python client for this service
:param client_extension: the optional subclass of
powervc.common.client.extensions.base to extend the python client with.
:param extension_args: optional arguments to pass to the client
extension when it is created.
"""
return self._extend(self._init_std_client(), client_extension,
*extension_args)
def get_client_version(self):
"""returns the version of the client for this service
"""
return self.client_version
class KeystoneService(AbstractService):
"""wrappers keystone service endpoint
"""
def __init__(self, *kargs):
super(KeystoneService, self).__init__(*kargs)
def new_client(self, client_extension=None, *extension_args):
return self._extend(self.clazz(**self.base_args), client_extension,
*extension_args)
def get_client_version(self):
if self.client_version == 'v3_0':
return 'v3'
return self.client_version
class CinderService(AbstractService):
"""wrappers cinder service endpoint
"""
def __init__(self, *kargs):
super(CinderService, self).__init__(*kargs)
def get_client_version(self):
return self._chomp_version(self.client_version)
class NovaService(AbstractService):
"""wrappers nova service endpoint
"""
def __init__(self, *kargs):
super(NovaService, self).__init__(*kargs)
def get_client_version(self):
if re.search('v2', self.client_version) is not None:
return 'v1_1'
return self.client_version
class GlanceService(AbstractService):
"""wrappers glance service endpoint
"""
def __init__(self, *kargs):
super(GlanceService, self).__init__(*kargs)
def new_client(self, client_extension=None, *extension_args):
url = self.url
if not url.endswith('/'):
url += '/'
return (self.
_extend(self.
_patch(self.clazz(url, token=self.keystone.auth_token,
insecure=self.base_args['insecure'],
cacert=self.base_args['cacert'])),
client_extension, *extension_args))
def get_client_version(self):
return self._chomp_version(self.client_version)
class NeutronService(AbstractService):
"""wrappers neutron service endpoint
"""
def __init__(self, *kargs):
super(NeutronService, self).__init__(*kargs)
def new_client(self, client_extension=None, *extension_args):
return self._extend(self._patch(self.clazz(
username=self.base_args['username'],
tenant_name=self.base_args['tenant_name'],
password=self.base_args['password'],
auth_url=self.base_args['auth_url'],
endpoint_url=self.management_url,
insecure=self.base_args['insecure'],
token=self.keystone.auth_token,
ca_cert=self.base_args['cacert'])),
client_extension, *extension_args)
def get_client_version(self):
if self.client_version.startswith('v1'):
return 'v2_0'
return self.client_version
class ClientServiceCatalog(object):
"""provides a simple catalog of openstack services
for a single host and permits consumers to query
those services based on service types, versions
as well as create new python clients from the service
directly.
"""
def __init__(self, base_client_opts, keystone):
self.base_opts = base_client_opts
self.keystone = keystone
# validate authN
self.token = self.keystone.auth_token
self.host = urlparse.urlsplit(self.base_opts['auth_url']).hostname
self.endpoints = {}
self.blacklist = [str(SERVICE_TYPES.s3), str(SERVICE_TYPES.ec2),
str(SERVICE_TYPES.ttv)]
self._discover_services()
def new_client(self, svc_type, client_extension=None, *extension_args):
"""creates a new python client for the given service type
using the most recent version of the service in the catalog.
:param svc_type: the service type to create a client for
:param client_extension: the optional extension to decorate
the base client with
:param extension_args: optional arguments to pass to the client
extension when it is created.
"""
service_versions = self.get_services(svc_type)
if service_versions:
return service_versions[0].new_client(client_extension,
*extension_args)
return None
def get_services(self, svc_type, version_filter=None):
"""queries this catalogs services based on service type
and version filter.
:param svc_type: the type of service to query.
:param version_filter: a filter string to indicate the
service version the caller wants. if None the highest
version of the service is returned.
"""
if svc_type not in self.endpoints:
return None
versions = self.endpoints[svc_type]
if version_filter is None:
return versions[max(versions, key=str)]
for version in versions.keys():
if version.find(version_filter) > -1:
return versions[version]
return None
def get_versions(self, svc_type):
"""return a list of the versions for the given service type
:param svc_type: the type of service to query
"""
if svc_type not in self.endpoints:
return None
return self.endpoints[svc_type].keys()
def get_version(self, svc_type, version_filter=None):
"""query a service to determine if a given version exists.
:param svc_type: the service type to query.
:param version_filter: a string to search for in the version.
if None the most recent version of the service type is returned.
"""
if svc_type not in self.endpoints:
return None
for version in self.endpoints[svc_type].keys():
if not version_filter or version.find(version_filter) > -1:
return version
return None
def get_service_types(self):
"""returns a list of all service types in this catalog.
"""
return self.endpoints.keys()
def get_token(self):
"""returns a keystone token for the host this catalog
belongs to.
"""
return self.keystone.auth_token
def get_client(self, svc_type, version_filter=None, client_extension=None,
*extension_args):
"""creates a new python cient for the given service type
and version.
:param svc_type: the service type to create a client for.
:param version_filter: a string to search for in the version
the caller wants. if None the most recent version is used.
:param client_extension: the optional class to extend
the client with
"""
services = self.get_services(svc_type, version_filter)
if not services:
return None
return services[0].new_client(client_extension, *extension_args)
def _parse_link_href(self, links):
hrefs = []
for link_meta in links:
if link_meta['rel'] == 'self':
href = self._filter_host(link_meta['href'])
hrefs.append(href)
return hrefs
def _filter_host(self, loc):
# endpoint urls from base api query will often
# return localhost in the url; resolve those
return loc.replace('localhost',
self.host).replace('127.0.0.1',
self.host).replace('0.0.0.0',
self.host)
def _parse_version_meta(self, ver, ver_map=None):
if ver_map is None:
ver_map = {}
ver_map[ver['id']] = self._parse_link_href(ver['links'])
return ver_map
def _parse_version(self, response_json, url):
if response_json is not None:
if 'version' in response_json:
return {response_json['version']['id']:
[self._filter_host(url)]}
elif 'versions' in response_json:
services = {}
versions = response_json['versions']
if 'values' in versions:
versions = versions['values']
for version_meta in versions:
if 'status' in version_meta and \
version_meta['status'] == 'CURRENT':
ver = version_meta['id']
if ver not in services:
services[ver] = []
services[ver].append(self._filter_host(url))
return services
return None
def _parse_version_from_url(self, url):
for seg in reversed(url.split('/')):
match = re.search('^(v[0-9][.]?[0-9]?$)', seg, re.IGNORECASE)
if match:
return match.group(0)
return None
def _build_wrappered_services(self, version_map, svc_type):
services = {}
for version in version_map.keys():
wrappers = []
for s_url in version_map[version]:
if (svc_type == str(SERVICE_TYPES.compute) or
svc_type == str(SERVICE_TYPES.computev3)):
wrappers.append(NovaService(svc_type, version,
s_url, self.base_opts,
self.keystone))
elif svc_type == str(SERVICE_TYPES.image):
wrappers.append(GlanceService(svc_type, version,
s_url, self.base_opts,
self.keystone))
elif svc_type == str(SERVICE_TYPES.identity):
# keystone is a special case as the auth url given
# in the base opts may not match the auth url from
# the catalog
keystone_opts = self.base_opts.copy()
keystone_opts['auth_url'] = s_url
wrappers.append(KeystoneService(svc_type, version,
s_url, keystone_opts,
self.keystone))
elif svc_type == str(SERVICE_TYPES.volume):
wrappers.append(CinderService(svc_type, version,
s_url, self.base_opts,
self.keystone))
elif svc_type == str(SERVICE_TYPES.network):
wrappers.append(NeutronService(svc_type, version,
s_url, self.base_opts,
self.keystone))
services[version] = wrappers
return services
def _query_endpoint(self, url):
# query the endpoint to get version info
client = netutils.JSONRESTClient(self.get_token())
urldata = urlparse.urlsplit(url)
host = urldata.scheme + '://' + urldata.netloc
segments = filter(lambda x: x != '', urldata.path.split('/'))
if not segments:
segments = ['']
# chomp uri until we find base of endpoint
for segment in segments[:] or ['']:
endpoint_url = "%s/%s/" % (host, '/'.join(segments))
segments.pop()
response = None
try:
response = client.get(endpoint_url)
except Exception:
continue
versions = self._parse_version(response, url)
if versions is not None:
return versions
return {'v1': [url]}
def _build_endpoint_services(self, url, svc_type):
# try to parse from the url
ver = self._parse_version_from_url(url)
if ver is not None:
return self._build_wrappered_services({ver: [url]}, svc_type)
versions = self._query_endpoint(url)
return self._build_wrappered_services(versions, svc_type)
def _normalize_catalog_entry(self, entry):
for key in entry.keys():
if re.search('url', key, re.IGNORECASE):
entry[key] = self._filter_host(entry[key])
if self.keystone.version == 'v2.0':
# keystone v2.0 entries differ from v3; normalize
entry['url'] = entry['publicURL']
return entry
def _discover_services(self):
public_eps = (self.keystone.
service_catalog.get_endpoints(endpoint_type='publicURL'))
self.endpoints = {}
for svc_type in public_eps.keys():
if svc_type in self.blacklist:
continue
for entry in public_eps[svc_type]:
entry = self._normalize_catalog_entry(entry)
self.endpoints[svc_type] = \
self._build_endpoint_services(entry['url'], svc_type)
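A hedged usage sketch (not part of the original module): `opts` and `keystone`
stand for the connection options dict and an authenticated keystone client that
the surrounding driver code is expected to supply.

# Assumptions: opts carries auth_url/username/password/tenant_name/insecure/
# cacert entries and keystone is an already authenticated keystone client.
catalog = ClientServiceCatalog(opts, keystone)
volume_versions = catalog.get_versions(str(SERVICE_TYPES.volume))
nova_client = catalog.get_client(str(SERVICE_TYPES.compute), 'v1_1')
known_types = catalog.get_service_types()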

View File

@ -0,0 +1,107 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""Config file utility
"""
import constants
from oslo.config import cfg
CONF = cfg.CONF
def parse_power_config(argv, base_project, base_prog=None):
"""
Loads configuration information from powervc.conf as well as a project
specific file. The expectation is that all PowerVC config options live in
the common powervc.conf file, while base_project represents an OpenStack
component configuration such as nova.conf or cinder.conf. A base_prog file
name can optionally be specified as well; that is a specific file name to
use from the specified OpenStack component. This function should only be
called once, in the startup path of a component (probably as soon as
possible, since many modules will have a dependency on the config options).
"""
# Ensure that we only try to load the config once. Loading it a second
# time will result in errors.
if hasattr(parse_power_config, 'power_config_loaded'):
return
if base_project and base_project.startswith('powervc-'):
default_files = cfg.find_config_files(project='powervc',
prog=base_project)
else:
default_files = cfg.find_config_files(project=base_project,
prog=(base_project
if base_prog is None
else base_prog))
default_files.extend(cfg.find_config_files(project='powervc',
prog='powervc'))
# reduce duplicates
default_files = list(set(default_files))
CONF(argv[1:], default_config_files=default_files)
parse_power_config.power_config_loaded = True
FILE_OPTIONS = {
'': [],
'openstack': [
# Keystone info
cfg.StrOpt('auth_url', default='http://localhost:5000/v2.0/'),
cfg.StrOpt('admin_user'),
cfg.StrOpt('admin_password', secret=True),
cfg.StrOpt('admin_tenant_name'),
cfg.StrOpt('connection_cacert', default=None),
cfg.BoolOpt('http_insecure', default=False),
cfg.StrOpt('keystone_version', default="v3"),
cfg.StrOpt('region_name', default=None),
# Hosting OS Qpid connection info
cfg.StrOpt('qpid_hostname'),
cfg.IntOpt('qpid_port', default=5672),
cfg.StrOpt('qpid_username', default='anonymous'),
cfg.StrOpt('qpid_password', secret=True, default=''),
cfg.StrOpt('qpid_protocol', default='tcp')],
'powervc': [
# Keystone info
cfg.StrOpt('auth_url', default='http://localhost:5000/v2.0/'),
cfg.StrOpt('admin_user'),
cfg.StrOpt('admin_password', secret=True),
cfg.StrOpt('admin_tenant_name'),
cfg.StrOpt('connection_cacert', default=None),
cfg.StrOpt('powervc_default_image_name',
default='PowerVC Default Image'),
cfg.BoolOpt('http_insecure', default=False),
cfg.StrOpt('keystone_version', default="v3"),
cfg.IntOpt('expiration_stale_duration', default=3600),
# Hosting OS Qpid connection info
cfg.StrOpt('qpid_hostname'),
cfg.IntOpt('qpid_port', default=5672),
cfg.StrOpt('qpid_username', default='anonymous'),
cfg.StrOpt('qpid_password', secret=True, default=''),
cfg.StrOpt('qpid_protocol', default='tcp'),
# manager
cfg.StrOpt('powervc_manager',
default='powervc.compute.manager.PowerVCCloudManager'),
# driver
cfg.StrOpt('powervc_driver',
default='powervc.virt.powervc.driver.PowerVCDriver'),
cfg.MultiStrOpt('storage_connectivity_group'),
# Hosting OS staging project name. This project must exist in the
# hosting OS
cfg.StrOpt('staging_project_name',
default=constants.DEFAULT_STAGING_PROJECT_NAME),
cfg.StrOpt('staging_user',
default=constants.DEFAULT_STAGING_USER_NAME)]
}
for section in FILE_OPTIONS:
for option in FILE_OPTIONS[section]:
if section:
CONF.register_opt(option, group=section)
else:
CONF.register_opt(option)
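A hedged startup sketch (not part of the original module), assuming this module
is importable as powervc.common.config; the component name 'cinder' is
illustrative.

import sys

from powervc.common import config

# Parse powervc.conf plus the component's own config file exactly once.
config.parse_power_config(sys.argv, 'cinder')

qpid_host = config.CONF.powervc.qpid_hostname
staging_project = config.CONF.powervc.staging_project_name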

View File

@ -0,0 +1,82 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
All Common PowerVC Driver Constants
"""
# The user domain default value
DEFAULT_USER_DOMAIN_NAME = 'Default'
# The project domain default value
DEFAULT_PROJECT_DOMAIN_NAME = 'Default'
# The default staging project name
DEFAULT_STAGING_PROJECT_NAME = 'Public'
# The default staging user name
DEFAULT_STAGING_USER_NAME = 'admin'
# The property key used to store a PowerVC resource UUID in
# a hosting OS resource.
POWERVC_UUID_KEY = 'powervc_uuid'
# The property key used to mark a powervc image with the
# corresponding powervc driver image uuid.
LOCAL_UUID_KEY = 'powervcdriver_uuid'
# OpenStack instance identifier
LOCAL_OS = 'local'
POWERVC_OS = 'powervc'
# AMQP topic for the communication between nova and neutron
PVC_TOPIC = 'powervcrpc'
# Storage Type that SCG can access
SCG_SUPPORTED_STORAGE_TYPE = 'fc'
class ServiceType(object):
"""Wrappers service type to project codename.
"""
def __init__(self, svc_type, codename):
self.svc_type = svc_type
self.codename = codename
def __str__(self):
return self.svc_type
def to_codename(self):
"""Returns the codename of this service.
"""
return self.codename
class ServiceTypes(object):
"""The service types known to this infrastructure which can be
referenced using attr based notation.
"""
def __init__(self):
self.volume = ServiceType('volume', 'cinder')
self.compute = ServiceType('compute', 'nova')
self.network = ServiceType('network', 'neutron')
self.identity = ServiceType('identity', 'keystone')
self.computev3 = ServiceType('computev3', 'nova')
self.image = ServiceType('image', 'glance')
self.s3 = ServiceType('s3', 'nova')
self.ec2 = ServiceType('ec2', 'nova')
self.ttv = ServiceType('ttv', 'ttv')
def __getitem__(self, name):
if name in self.__dict__:
return self.__dict__[name]
return None
SERVICE_TYPES = ServiceTypes()
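A short illustration (not part of the original module) of how SERVICE_TYPES
behaves with attribute and item access.

volume = SERVICE_TYPES.volume
assert str(volume) == 'volume'
assert volume.to_codename() == 'cinder'
assert SERVICE_TYPES['network'].to_codename() == 'neutron'
assert SERVICE_TYPES['unknown-service'] is None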

View File

@ -0,0 +1,73 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
PowerVC Driver Common Exceptions
"""
from powervc.common.gettextutils import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class CommonException(Exception):
"""
PowerVC Driver Common Exception
To correctly use this class, inherit from it and define a 'message'
property. That message will get printed with the keyword arguments
provided to the constructor.
"""
message = _('An unknown exception occurred')
def __init__(self, message=None, *args, **kwargs):
if not message:
message = self.message
try:
message = message % kwargs
except Exception:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise
else:
# at least get the core message out if something happened
pass
super(CommonException, self).__init__(message)
class StorageConnectivityGroupNotFound(CommonException):
"""
Exception thrown when the PowerVC Storage Connectivity Group specified
cannot be found.
:param scg: The PowerVC Storage Connectivity Group name or id
"""
message = _('The PowerVC Storage Connectivity Group \'%(scg)s\' was not '
'found.')
class StagingProjectNotFound(CommonException):
"""
Exception thrown when the staging project specified in the conf cannot be
found.
:param name: The name of the staging project which was not found.
"""
message = _('The staging project \'%(name)s\' was not found.')
class StagingUserNotFound(CommonException):
"""
Exception thrown when the staging user specified in the conf cannot be
found.
:param name: The name of the staging user which was not found.
"""
message = _('The staging user \'%(name)s\' was not found.')
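A hedged sketch (not part of the original module) showing how a subclass is
defined and how keyword arguments are folded into the message; the
VolumeNotFound class is purely illustrative.

class VolumeNotFound(CommonException):
    """Illustrative subclass only; not part of the PowerVC driver."""
    message = _('The volume \'%(volume_id)s\' was not found.')

try:
    raise StorageConnectivityGroupNotFound(scg='My SCG')
except CommonException as e:
    # Prints: The PowerVC Storage Connectivity Group 'My SCG' was not found.
    print(e)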

View File

@ -0,0 +1,17 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import gettext
t = gettext.translation('powervc-driver-common', fallback=True)
def _(msg):
return t.ugettext(msg)

View File

@ -0,0 +1,499 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
This module contains Qpid connection utilities that can be used to connect
to a Qpid message broker and listen for notifications.
Examples:
# Import common messaging module
from powervc.common import messaging
# Connect to host OS Qpid broker and handle instance update notifications.
conn = messaging.LocalConnection(
reconnect_handler=self.handle_qpid_reconnect)
listener = conn.create_listener('nova', 'notifications.info')
listener.register_handler('compute.instance.update',
self._handle_instance_update)
conn.start()
# Connect to PowerVC Qpid broker and handle two event types with a single
# handler function.
conn = messaging.PowerVCConnection()
listener = conn.create_listener('nova', 'notifications.info')
listener.register_handler(['compute.instance.create.start',
'compute.instance.create.end'],
self._handle_instance_create)
conn.start()
# Connect to PowerVC Qpid broker and handle any instance notifications.
conn = messaging.PowerVCConnection()
listener = conn.create_listener('nova', 'notifications.info')
listener.register_handler('compute.instance.*',
self._handle_instance_notifications)
conn.start()
"""
import sys
import threading
import traceback
import fnmatch
import json
from time import sleep
from qpid.messaging import Connection
from qpid.messaging.exceptions import ConnectionError
from oslo.config import cfg
from powervc.common.gettextutils import _
CONF = cfg.CONF
def log(log, level, msg):
"""
Log a message.
:param: log The log to write to.
:param: level The logging level for the message
:param: msg The message to log
"""
if not log:
return
if level == 'critical':
log.critical(msg)
elif level == 'error':
log.error(msg)
elif level == 'warn':
log.warn(msg)
elif level == 'info':
log.info(msg)
elif level == 'debug':
log.debug(msg)
class QpidConnection(object):
"""
This class represents a connection to a Qpid broker. A QpidConnection must
be created in order to send or receive AMQP messages using a Qpid broker.
"""
def __init__(self, url, username, password, transport='tcp',
reconnection_interval=60, reconnect_handler=None,
context=None, log=None):
"""
Create a new connection to a Qpid message broker in order to send or
receive AMQP messages.
:param: url URL for the Qpid connection, e.g. 9.10.49.164:5672
:param: username Qpid username
:param: password Qpid password
:param: transport Transport mechanism, one of tcp, tcp+tls,
or ssl (alias for tcp+tls).
:param: reconnection_interval Interval in seconds between reconnect
attempts.
:param: reconnect_handler The function to call upon reconnecting to
the Qpid broker after connection was lost and
then reestablished. This function will be called after the
connections is reestablished but before the listeners are
started up again. It is not passed any parameters.
:param: context The security context
:param: log The logging module used for logging messages. If not
provided then no logging will be done.
"""
self.url = url
self.username = username
self.password = password
self.context = context
self.log = log.getLogger(__name__) if log else None
self.transport = transport
self.reconnection_interval = reconnection_interval
self.reconnect_handler = reconnect_handler
self._listeners = []
self._is_connected = False
def create_listener(self, exchange, topic):
"""
Create a new listener on the given exchange for the given topic.
:param: exchange The name of the Qpid exchange, e.g. 'nova'
:param: topic The topic to listen for, e.g. 'notifications.info'
:returns: A new QpidListener that will listen for messages on the
given exchange and topic.
"""
listener = QpidListener(self, exchange, topic)
self._listeners.append(listener)
return listener
def start(self, is_reconnect=False):
"""
Initiate the Qpid connection and start up any listeners.
:param: is_reconnect True if this method is called as part of a
reconnect attempt, False otherwise
:raise: ConnectionError if a connection cannot be established
"""
# If the Qpid broker URL is not specified (or just the hostname is not
# specified) then we can't make a connection.
if not self.url or self.url.startswith(':'):
log(self.log, 'warn', _('Qpid broker not specified, cannot start '
'connection.'))
return
if not self._is_connected:
self.conn = Connection(self.url, username=self.username,
password=self.password,
transport=self.transport)
try:
self.conn.open()
except ConnectionError as e:
log(self.log, 'critical', _('Cannot connect to Qpid message '
'broker: %s') % (e.message))
# close this connection when encounter connection error
# otherwise, it will leave an ESTABLISHED connection
# to qpid server forever.
if self.conn is not None:
self.conn.close()
raise e
self._is_connected = True
if is_reconnect and self.reconnect_handler:
self.reconnect_handler()
for listener in self._listeners:
listener._start(self.conn)
log(self.log, 'info', _('Connected to Qpid message broker: '
'%s@%s') % (self.username, self.url))
def _reconnect(self):
"""
Attempt to reconnect to the Qpid message broker in intervals until the
connection comes back.
"""
self.conn = None
class ReconnectionThread(threading.Thread):
def __init__(self, qpid_connection):
super(ReconnectionThread, self).__init__(
name='ReconnectionThread')
self.qpid_connection = qpid_connection
def run(self):
while not self.qpid_connection._is_connected:
try:
self.qpid_connection.start(is_reconnect=True)
except ConnectionError:
sleep(self.qpid_connection.reconnection_interval)
reconnection_thread = ReconnectionThread(self)
reconnection_thread.start()
def set_reconnect_handler(self, reconnect_handler):
"""
Set the function to call upon reconnecting to the Qpid broker after
connection is lost and then reestablished.
:param: reconnect_handler The function to call upon reconnecting.
"""
self.reconnect_handler = reconnect_handler
class PowerVCConnection(QpidConnection):
"""
This class represents a connection to the PowerVC Qpid broker as defined
in the configuration property files.
"""
def __init__(self, reconnect_handler=None, context=None, log=None):
"""
Create a new connection to the PowerVC Qpid message broker in order
to send or receive AMQP messages.
:param: reconnect_handler The function to call upon reconnecting to
the Qpid broker after connection was lost and
then reestablished. This function will be called after the
connection is reestablished but before the listeners are
started up again. It is not passed any parameters.
:param: context The security context
:param: log The logging module used for logging messages. If not
provided then no logging will be done.
"""
if CONF.powervc.qpid_protocol == 'ssl':
transport = 'ssl'
else:
transport = 'tcp'
super(PowerVCConnection,
self).__init__('%s:%d' % (CONF.powervc.qpid_hostname,
CONF.powervc.qpid_port),
CONF.powervc.qpid_username,
CONF.powervc.qpid_password,
reconnect_handler=reconnect_handler,
context=context, log=log,
transport=transport)
class LocalConnection(QpidConnection):
"""
This class represents a connection to the local OS Qpid broker as defined
in the configuration property files.
"""
def __init__(self, reconnect_handler=None, context=None, log=None):
"""
Create a new connection to the local OS Qpid message broker in order
to send or receive AMQP messages.
:param: reconnect_handler The function to call upon reconnecting to
the Qpid broker after connection was lost and
then reestablished. This function will be called after the
connection is reestablished but before the listeners are
started up again. It is not passed any parameters.
:param: context The security context
:param: log The logging module used for logging messages. If not
provided then no logging will be done.
"""
if CONF.openstack.qpid_protocol == 'ssl':
transport = 'ssl'
else:
transport = 'tcp'
super(LocalConnection,
self).__init__('%s:%d' % (CONF.openstack.qpid_hostname,
CONF.openstack.qpid_port),
CONF.openstack.qpid_username,
CONF.openstack.qpid_password,
reconnect_handler=reconnect_handler,
context=context, log=log,
transport=transport)
class QpidListener(object):
'''
This class is used to listen for AMQP message notifications. It should
probably not be instantiated directly. First create a QpidConnection and
then add a QpidListener to the connection using the
QpidConnection.create_listener() method.
'''
def __init__(self, qpid_connection, exchange, topic):
"""
Create a new QpidListener object to listen for AMQP messages.
:param: qpid_connection The QpidConnection object used for connecting
to the Qpid message broker.
:param: exchange The name of the Qpid exchange, e.g. 'nova'
:param: topic The topic to listen for, e.g. 'notifications.info'
"""
self.qpid_connection = qpid_connection
self.exchange = exchange
self.topic = topic
self._handler_map = {}
self._count_since_acknowledge = 0
def register_handler(self, event_type, handler):
"""
Register a handler function for one or more message notification event
types. The handler function will be called when a message is
received that matches the event type. The handler function will be
passed two arguments: the security context and a dictionary containing
the message attributes. The message attributes include: event_type,
timestamp, message_id, priority, publisher_id, payload.
The following wildcards are allowed when registering an event type
handler (see the documentation for fnmatch):
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any character not in seq
For example, registering the following event type handler will cause
the handler function to be called for any event type starting with
'compute.instance.'.
listener = conn.register_handler('compute.instance.*',
self.handle_instance_messages)
If a single notification event type matches multiple registered
handlers, each matching handler will be called. The order in which the
handlers are called is not guaranteed. If the execution order is
important for the multiple handlers of a single event type then ensure
that only a single handler will be called for the event type and
perform the multiple operations in the single handler.
:param: event_type The event type or list of event types to associate
with the handler
:param: handler The handler function to handle a message with the given
event type
"""
if not isinstance(event_type, list):
event_type = [event_type]
for et in event_type:
self._handler_map[et] = handler
def unregister_handler(self, event_type):
"""
Stop handling the given message notification event type.
:param: event_type The event type to unregister
"""
try:
self._handler_map.pop(event_type)
except KeyError:
log(self.qpid_connection.log, 'warn',
_('There is no handler for this event type: %s') % event_type)
def _start(self, connection):
"""
Start listening for messages. This method should probably not be called
directly. After creating a QpidConnection and adding listeners using
the create_listener() method, use the QpidConnection.start() method to
start listening for messages. The QpidConnection will start up all of
the listeners.
:param: connection The qpid.messaging.endpoints.Connection object used
to establish the connection to the message broker.
"""
self.session = connection.session('%s/%s' %
(self.exchange, self.topic))
addr_opts = {
"create": "always",
"node": {
"type": "topic",
"x-declare": {
"durable": True,
"auto-delete": True
},
},
}
connection_info = "%s / %s ; %s" % (self.exchange, self.topic,
json.dumps(addr_opts))
self.receiver = self.session.receiver(connection_info)
log(self.qpid_connection.log, 'debug',
_('QpidListener session info: %s') % (json.dumps(connection_info)))
"""
A listener blocks while it waits for the next message on the queue,
so we initiate a thread to run the listening function.
"""
t = threading.Thread(target=self._listen)
t.start()
def _has_more_messages(self):
'''
Determine if there are any new messages in the queue.
:returns: True if there are messages on the queue, False otherwise
'''
return bool(self.receiver)
def _next_message(self):
'''
Wait for the next message on the queue.
:returns: The raw message object from the message queue
'''
return self.receiver.fetch()
def _acknowledge(self):
'''
Acknowledge a message has been received.
'''
self.session.acknowledge(sync=False)
def _get_handlers(self, event_type):
"""
Get a list of all the registered handlers that match the given event
type.
"""
handlers = []
for event_type_pattern in self._handler_map:
if fnmatch.fnmatch(event_type, event_type_pattern):
handlers.append(self._handler_map.get(event_type_pattern))
return handlers
def _dispatch(self, message):
'''
Dispatch a message to its specific handler.
:param: message A dictionary containing the OpenStack message
notification attributes (event_type, timestamp,
message_id, priority, publisher_id, payload)
'''
event_type = message.get('event_type')
handlers = self._get_handlers(event_type)
log_ = self.qpid_connection.log
self._count_since_acknowledge += 1
try:
if handlers:
log(log_, 'debug', _('Dispatching message to handlers'))
                log(log_, 'info', _('Qpid listener received message of '
                                    'event type: %s') % message['event_type'])
for handler in handlers:
handler(self.qpid_connection.context, message)
        except Exception as e:
log(log_, 'error', _('Error handling message: %s: %s. Message: '
'%s.') % (Exception, e, message))
# Print stack trace
exc_type, exc_value, exc_traceback = sys.exc_info()
log(log_, 'error', _('error type %s') % (exc_type))
log(log_, 'error', _('error object %s') % (exc_value))
log(log_, 'error', ''.join(traceback.format_tb(exc_traceback)))
finally:
if self._count_since_acknowledge > 100:
self._count_since_acknowledge = 0
self._acknowledge()
def _resolve_message(self, raw_message):
'''
Resolves the given raw message obtained from the Qpid message queue
into a message that can be dispatched to a handler function.
:param: raw_message A raw message obtained from the Qpid message
queue
:returns: A dictionary containing the following keys:
event_type, timestamp, message_id, priority, publisher_id, payload
'''
content = raw_message.content
message = dict()
for attr in ['event_type', 'timestamp', 'message_id', 'priority',
'publisher_id', 'payload']:
message[attr] = content.get(attr)
log(self.qpid_connection.log, 'debug', _('Qpid listener received '
'message: %s') % (message))
return message
def _listen(self):
'''
Handle messages when they arrive on the message queue.
'''
while True:
try:
if self._has_more_messages():
raw_message = self._next_message()
message = self._resolve_message(raw_message)
self._dispatch(message)
else:
break
            except ConnectionError as e:
log(self.qpid_connection.log, 'warning',
_("Connection error: %s") % (e))
self.qpid_connection._is_connected = False
self.qpid_connection._reconnect()
break
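# Illustrative usage sketch: wiring a LocalConnection to a QpidListener as the
# docstrings above describe. The exchange name, topic, handler body and the
# create_listener(exchange, topic) signature are assumptions made only for
# this example; nothing here is executed at import time.
def _example_listener_setup(context):
    def handle_volume_events(ctx, message):
        # message carries event_type, timestamp, message_id, priority,
        # publisher_id and payload
        print(message['event_type'])

    conn = LocalConnection(context=context)
    listener = conn.create_listener('cinder', 'notifications.info')
    listener.register_handler('volume.*', handle_volume_events)
    conn.start()
    return conn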

View File

@ -0,0 +1,115 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import json
import socket
import urllib2
import urlparse
def is_ipv4_address(ip_or_host):
"""Determines if a netloc is an IPv4 address.
:param ip_or_host: the host/ip to check
"""
try:
socket.inet_aton(ip_or_host)
return True
    except Exception:
return False
def hostname_url(url):
"""Converts the URL into its FQHN form.
This requires DNS to be setup on the OS or the hosts table
to be updated.
:param url: the url to convert to FQHN form
"""
frags = urlparse.urlsplit(url)
if is_ipv4_address(frags.hostname) is True:
return url
try:
fqhn, alist, ip = socket.gethostbyaddr(frags.hostname)
    except Exception:
        # likely no DNS configured, return initial url
return url
port_str = ''
if frags.port is not None:
port_str = ':' + str(frags.port)
return frags.scheme + '://' + fqhn + port_str + frags.path
def extract_url_segment(url, needles):
"""searches the url segments for the 1st occurence
of an element in the list of search keys.
:param url: the url or uri to search
:param needles: the keys to search for
"""
for seg in reversed(url.split('/')):
if seg in needles:
return seg
return None
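# Illustrative sketch of the helpers above; the host names and URLs are
# hypothetical placeholders and nothing here runs at import time.
def _example_url_helpers():
    # With DNS configured, a short host name could be expanded to its fully
    # qualified form, e.g. 'https://powervc1.example.com:443/powervc';
    # otherwise (or for a plain IPv4 address) the input is returned unchanged.
    url = hostname_url('https://powervc1:443/powervc')
    # Segments are searched right-to-left, so this returns 'v2'.
    version = extract_url_segment('/powervc/openstack/volume/v2', ['v1', 'v2'])
    return url, version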
class JSONRESTClient(object):
"""a simple json rest client
"""
def __init__(self, token):
self.token = token
def get(self, url):
"""perform a http GET on the url
:param url: the url to GET
"""
return self._rest_call(url)
def post(self, url, json_body):
"""perform a http POST on the url
:param url: the url to POST
:param json_body: the body to POST
"""
return self._rest_call(url, 'POST', json_body)
def put(self, url, json_body):
"""perform a http PUT on the url
:param url: the url to PUT
:param json_body: the body to PUT
"""
return self._rest_call(url, 'PUT', json_body)
def delete(self, url):
"""perform an http DELETE on the url
:param url: the url to DELETE
"""
return self._rest_call(url, 'DELETE')
def _rest_call(self, url, method='GET', json_body=None):
request = urllib2.Request(url)
request.add_header('Content-Type', 'application/json;charset=utf8')
request.add_header('Accept', 'application/json')
request.add_header('User-Agent', 'python-client')
if self.token:
request.add_header('X-Auth-Token', self.token)
if json_body:
request.add_data(json.dumps(json_body))
request.get_method = lambda: method
try:
response = urllib2.urlopen(request)
except urllib2.HTTPError as e:
if e.code == 300:
return json.loads(e.read())
raise e
return json.loads(response.read())
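# Illustrative sketch of how JSONRESTClient is meant to be used; the token and
# URLs are hypothetical placeholders and nothing here runs at import time.
# Responses are the decoded JSON bodies.
def _example_rest_usage():
    client = JSONRESTClient('example-auth-token')
    # GET returns the parsed JSON response
    volumes = client.get('https://powervc.example.com/volume/v1/volumes')
    # POST and PUT take a dict that is serialized to JSON as the request body
    client.post('https://powervc.example.com/volume/v1/volumes',
                {'volume': {'size': 1, 'display_name': 'example'}})
    return volumes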

View File

@ -0,0 +1,829 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import logging
import exception
import os
import sys
import threading
import time
from eventlet.semaphore import Semaphore
from glanceclient.openstack.common import importutils
from powervc.common import config
from powervc.common import constants
from powervc.common.exception import StorageConnectivityGroupNotFound
from powervc.common.gettextutils import _
LOG = logging.getLogger(__name__)
CONF = config.CONF
DEFAULT_TTL = 600
class TimeLivedCache(object):
"""
The base class to provide the functionality of a timed cache.
The default refresh time is 10 mins.
"""
def __init__(self, ttl=DEFAULT_TTL):
self._cache = {}
self._last_updated = -1
self._lock = threading.Lock()
self.ttl = ttl
def _cache_resources(self):
"""
Refreshes the cached values if the cached time has expired,
or if there are no cached values.
"""
now = round(time.time())
if now - self._last_updated < self.ttl and len(self._cache) != 0:
return
with self._lock:
if now - self._last_updated < self.ttl:
return
self._cache = self._get_cache()
LOG.debug(_("Updated %s at %s. Last update: %s") %
(str(self), now, self._last_updated))
self._last_updated = now
def _get_cache(self):
tmp_cache = {}
resources = self._get_resources()
if resources:
for resource in resources:
tmp_cache[self._id_for_resource(resource)] = resource
return tmp_cache
def list(self):
"""
Returns the cached values
"""
self._cache_resources()
return self._cache.values()
def _id_for_resource(self, resource):
raise NotImplementedError()
def _get_resources(self):
raise NotImplementedError()
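# Minimal illustrative subclass showing the two hooks a concrete cache must
# supply (_get_resources and _id_for_resource). The fetch callable and the
# resources' 'id' attribute are assumptions made only for this example.
class _ExampleResourceCache(TimeLivedCache):
    def __init__(self, fetch_resources, ttl=DEFAULT_TTL):
        super(_ExampleResourceCache, self).__init__(ttl)
        # Callable returning the current list of resources from the backend
        self._fetch_resources = fetch_resources

    def _get_resources(self):
        # Invoked by _cache_resources() when the TTL has expired
        return self._fetch_resources()

    def _id_for_resource(self, resource):
        # Key each cached entry by the resource's (assumed) 'id' attribute
        return resource.id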
class GreenTimeLivedCache(TimeLivedCache):
"""
Extend the TimeLivedCache to use green thread.
"""
def __init__(self, ttl=DEFAULT_TTL):
super(GreenTimeLivedCache, self).__init__(ttl)
# Replace with the semaphore.
self._lock = Semaphore()
class VolumeCache(GreenTimeLivedCache):
"""
Caches the volumes
"""
def __init__(self, driver, ttl=DEFAULT_TTL):
assert driver
self._driver = driver
super(VolumeCache, self).__init__(ttl)
def _get_resources(self):
return self._driver.cache_volume_data()
def _get_cache(self):
return self._get_resources()
def set_by_id(self, pvc_id, local_id):
with self._lock:
self._cache[pvc_id] = local_id
def get_by_id(self, pvc_id, default=None):
self._cache_resources()
if (len(self._cache) != 0):
if pvc_id in self._cache:
LOG.info(_("Found volume id equals: '%s'" % pvc_id))
return self._cache[pvc_id]
LOG.info(_("No volume found which equals: '%s'" % pvc_id))
return default
class SCGCache(GreenTimeLivedCache):
"""
Caches the SCGs.
"""
def __init__(self, nova, ttl=DEFAULT_TTL):
assert nova
self._nova = nova
super(SCGCache, self).__init__(ttl)
def __str__(self):
return _("Storage Connectivity Group Cache")
def _id_for_resource(self, resource):
return resource.display_name
def _get_resources(self):
"""
Calls the api to get all SCGs
"""
return self._nova.storage_connectivity_groups.list(detailed=True)
def by_name(self, name, default=None):
"""
Returns the SCG by name
"""
self._cache_resources()
if (len(self._cache) != 0):
if name in self._cache:
LOG.info(_("Found scg which name equals: '%s'" % name))
return self._cache[name]
LOG.info(_("No scg found which equals name: '%s'" % name))
return default
def by_id(self, scg_id, default=None):
"""
Returns the SCG by id
"""
self._cache_resources()
if (len(self._cache) != 0):
for scg in self.list():
if scg.id == scg_id:
LOG.info(_("Found scg which equals id: '%s'" % scg_id))
return scg
LOG.info(_("No scg found which equals id: '%s'" % scg_id))
return default
__lock = threading.Lock()
__utils = None
def get_utils():
"""
Returns a singleton Utils object
"""
global __lock
global __utils
if __utils is not None:
return __utils
with __lock:
if __utils is not None:
return __utils
__utils = Utils()
return __utils
class Utils(object):
"""
This Utils class leverages the pvcnovaclient and pvccinderclient
to retrieve the Storage Connectivity Group, Storage Providers and
Storage Templates information, etc.
Usage sample:
username = 'root'
password = 'passw0rd'
tenant = 'ibm-default'
auth_url = 'https://z3-9-5-127-193.rch.nimbus.kstart.ibm.com/\
powervc/openstack/admin/v3'
cacert = '/home/osadmin/z3-9-5-127-193.rch.nimbus.kstart.ibm.com'
utils = utils.Utils(username=username,
api_key=password,
project_id=tenant,
auth_url=auth_url,
insecure=False,
cacert=cacert)
sps = utils.get_scg_accessible_storage_providers()
sts = utils.get_scg_accessible_storage_templates()
volumes = utils.get_scg_accessible_volumes()
"""
def __init__(self):
factory = importutils.import_module('powervc.common.client.factory')
self._novaclient = factory.POWERVC.new_client(
str(constants.SERVICE_TYPES.compute))
self._cinderclient = factory.POWERVC.new_client(
str(constants.SERVICE_TYPES.volume))
self._localkeystoneclient = factory.LOCAL.new_client(
str(constants.SERVICE_TYPES.identity))
self.scg_cache = self.get_scg_cache(self._novaclient)
def get_scg_cache(self, novaclient):
"""
Return the SCGCache object.
"""
return SCGCache(novaclient)
def get_all_scgs(self):
"""
Get all Storage Connectivity Groups from PowerVC
:returns: A list of all Storage Connectivity Groups on PowerVC
"""
return self.scg_cache.list()
def get_our_scg_list(self):
"""
If SCG names are specified in our configuration, see if the scgs exist.
If they do not exist, raise an exception. If they exist, return the scg
list for the name specified. If no SCG name is specified, return
[] for the scg list.
:returns: The StorageConnectivityGroup object list if found, else []
:raise StorageConnectivityGroupNotFound: if the Storage Connectivity
Groups could not be found on PowerVC
"""
our_scg_list = []
scg_to_use_list = CONF['powervc'].storage_connectivity_group
for scg_to_use in scg_to_use_list:
if scg_to_use:
scg = self.scg_cache.by_name(scg_to_use)
if scg is not None:
LOG.debug(_('PowerVC Storage Connectivity Group \'%s\' '
'found.'), scg.display_name)
our_scg = scg
our_scg_list.append(our_scg)
else:
# If a SCG is specified and it's not found on the PowerVC,
# raise an exception.
LOG.error(_('The PowerVC Storage Connectivity Group'
' \'%s\' was not found.'), scg_to_use)
raise StorageConnectivityGroupNotFound(scg=scg_to_use)
else:
LOG.error(_('No Storage Connectivity Group is specified in '
'the configuration settings.'))
return our_scg_list
def validate_scgs(self):
"""
Validate the SCG name specified in the configuration,
Return validated SCG list if successful
Return [] if SCGs are not specified in the configuration file OR
SCG specified is not found in PowerVC.
"""
validated_scgs = []
try:
validated_scgs = self.get_our_scg_list()
except StorageConnectivityGroupNotFound:
return []
return validated_scgs
def get_scg_by_scgName(self, scg_name):
"""
Get the SCG by scgName
"""
return self.scg_cache.by_name(scg_name)
def get_scg_by_scgUUID(self, scg_uuid):
"""
Get the SCG by uuid
"""
return self.scg_cache.by_id(scg_uuid)
def get_scg_id_by_scgName(self, scg_name):
"""
Get the SCG_ID by scg_name
"""
if scg_name == "":
return ""
# If no scg_name is found, None is returned.
scg = self.get_scg_by_scgName(scg_name)
if scg is not None:
return scg.id
return ""
def get_multi_scg_accessible_servers(self, scg_uuid_list, scg_name_list,
detailed=True, search_opts=None):
"""
Get accessible virtual servers by specified SCG UUID list
or SCG Name list,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
If none of SCG UUID and Name specified, get all servers
"""
class WrapServer():
def __init__(self, server):
self.server = server
def __eq__(self, other):
if isinstance(other, WrapServer):
return self.server.id == other.server.id
else:
return False
def __hash__(self):
return hash(self.server.id)
wrap_servers = set()
if scg_uuid_list:
for scg_uuid in scg_uuid_list:
scg_servers = self.get_scg_accessible_servers(scg_uuid,
None,
detailed,
search_opts)
wrap_scg_servers = [WrapServer(scg_server)
for scg_server in scg_servers]
wrap_servers.update(wrap_scg_servers)
return [wrap_server.server for wrap_server in wrap_servers]
if not scg_name_list:
scg_name_list = CONF.powervc.storage_connectivity_group
if scg_name_list:
for scg_name in scg_name_list:
scg_servers = self.get_scg_accessible_servers(None,
scg_name,
detailed,
search_opts)
wrap_scg_servers = [WrapServer(scg_server)
for scg_server in scg_servers]
wrap_servers.update(wrap_scg_servers)
return [wrap_server.server for wrap_server in wrap_servers]
def get_scg_accessible_servers(self, scgUUID=None, scgName=None,
detailed=True, search_opts=None):
"""
Get accessible virtual servers by specified SCG UUID or scgName,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
If none of SCG UUID and Name specified, get all servers
"""
scg = None
# If no scgUUID specified.
if not scgUUID:
if scgName:
# If scgName specified, then search by scgName
scg = self.get_scg_by_scgName(scgName)
else:
# If scgName not specified, return None
scg = None
else:
LOG.debug("Specified scgUUID: '%s'" % scgUUID)
# retrieve scg by scgUUID
scg = self.scg_cache.by_id(scgUUID)
if not scg:
            # If no scg, then it's an IVM-based PowerVC;
            # return all servers
return self._novaclient.manager.list_all_servers(detailed, search_opts)
# accessible_storage_servers to return
accessible_storage_servers = []
all_servers = self._novaclient.manager.list_all_servers(detailed, search_opts)
# Filter the servers for the SCG
for server in all_servers:
server_scg = getattr(server, 'storage_connectivity_group_id', None)
if server_scg and server_scg == scg.id:
accessible_storage_servers.append(server)
elif server_scg is None:
# onboarding VMs
accessible_storage_servers.append(server)
LOG.info("All accessible_storage_servers: %s" %
accessible_storage_servers)
return accessible_storage_servers
def get_multi_scg_accessible_storage_providers(self,
scg_uuid_list,
scg_name_list,
detailed=True,
search_opts=None):
"""
Get accessible storage providers by specified SCG UUID list
or SCG Name list,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
"""
class WrapProvider():
def __init__(self, provider):
self.provider = provider
def __eq__(self, other):
if isinstance(other, WrapProvider):
return self.provider.id == other.provider.id
else:
return False
def __hash__(self):
return hash(self.provider.id)
wrap_providers = set()
if scg_uuid_list:
for scg_uuid in scg_uuid_list:
scg_providers = self.get_scg_accessible_storage_providers(
scg_uuid, None, detailed, search_opts)
wrap_scg_providers = [WrapProvider(scg_provider)
for scg_provider in scg_providers]
wrap_providers.update(wrap_scg_providers)
return [wrap_provider.provider for wrap_provider in wrap_providers]
if not scg_name_list:
scg_name_list = CONF.powervc.storage_connectivity_group
if scg_name_list:
for scg_name in scg_name_list:
scg_providers = self.get_scg_accessible_storage_providers(
None, scg_name, detailed, search_opts)
wrap_scg_providers = [WrapProvider(scg_provider)
for scg_provider in scg_providers]
wrap_providers.update(wrap_scg_providers)
return [wrap_provider.provider for wrap_provider in wrap_providers]
def get_scg_accessible_storage_providers(self, scgUUID=None, scgName=None,
detailed=True, search_opts=None):
"""
Get accessible storage providers by specified SCG UUID or scgName,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
If none of SCG UUID and Name specified, get the first SCG from powerVC
"""
scg = None
# If no scgUUID specified.
if not scgUUID:
if scgName:
# If scgName specified, then search by scgName
scg = self.get_scg_by_scgName(scgName)
else:
# If scgName not specified, return None
scg = None
else:
LOG.debug(_("Specified scgUUID: '%s'" % scgUUID))
# retrieve scg by scgUUID
scg = self.scg_cache.by_id(scgUUID)
if not scg:
            # If no scg, then it's an IVM-based PowerVC;
            # return all storage providers
return (self._cinderclient.storage_providers.
list_all_providers(detailed, search_opts))
# accessible_storage_providers to return
accessible_storage_providers = []
# retrieve fc_storage_access
fc_storage_access = getattr(scg, 'fc_storage_access', False) or False
LOG.info(_("scg['fc_storage_access']: '%s'" % fc_storage_access))
# retrieve provider_id in vios_cluster
provider_id = None
vios_cluster = getattr(scg, 'vios_cluster', {})
if vios_cluster:
provider_id = vios_cluster.get('provider_id', '')
LOG.info(_("scg['vios_cluster']['provider_id']: '%s'" %
(provider_id)))
# retrieve all the storage-providers
storage_providers = (self._cinderclient.storage_providers.
list_all_providers(detailed, search_opts))
LOG.info(_("storage_providers: %s" % storage_providers))
# Loop over the storage providers, if the 'storage_hostname' matches
# SCG['vios_cluster']['provider_id'], or if SCG['fc_storage_access']
# is "True" AND the provider's storage_type is "fc", then add to list
for storage_provider in storage_providers:
storage_hostname = getattr(storage_provider,
'storage_hostname', '')
storage_type = getattr(storage_provider,
'storage_type', '')
LOG.info(_("storage_provider['storage_hostname']: '%s'" %
(storage_hostname)))
if storage_hostname and storage_hostname == provider_id:
LOG.info(_("Add to accessible_storage_providers: %s" %
(storage_provider)))
accessible_storage_providers.append(storage_provider)
elif fc_storage_access and (constants.SCG_SUPPORTED_STORAGE_TYPE ==
storage_type):
LOG.info(_("Add to accessible_storage_providers: %s" %
(storage_provider)))
accessible_storage_providers.append(storage_provider)
# TODO as currently provider_id and storage_type are not
# implemented
else:
accessible_storage_providers.append(storage_provider)
LOG.info(_("All accessible_storage_providers: %s" %
(accessible_storage_providers)))
return accessible_storage_providers
def get_multi_scg_accessible_storage_templates(self,
scg_uuid_list,
scg_name_list):
"""
Get accessible storage templates by specified SCG UUID list
or SCG Name list,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
"""
class WrapType():
def __init__(self, volume_type):
self.type = volume_type
def __eq__(self, other):
if isinstance(other, WrapType):
return self.type.id == other.type.id
else:
return False
def __hash__(self):
return hash(self.type.id)
wrap_types = set()
if scg_uuid_list:
for scg_uuid in scg_uuid_list:
scg_types = self.get_scg_accessible_storage_templates(
scg_uuid, None)
wrap_scg_types = [WrapType(scg_type) for scg_type in scg_types]
wrap_types.update(wrap_scg_types)
return [wrap_type.type for wrap_type in wrap_types]
if not scg_name_list:
scg_name_list = CONF.powervc.storage_connectivity_group
if scg_name_list:
for scg_name in scg_name_list:
scg_types = self.get_scg_accessible_storage_templates(
None, scg_name)
wrap_scg_types = [WrapType(scg_type) for scg_type in scg_types]
wrap_types.update(wrap_scg_types)
return [wrap_type.type for wrap_type in wrap_types]
def get_scg_accessible_storage_templates(self, scgUUID=None, scgName=None):
"""
Get accessible storage templates by specified SCG UUID or scgName,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
If none of SCG UUID and Name specified, get the first SCG from powerVC
"""
scg = None
# If no scgUUID specified.
if not scgUUID:
if scgName:
# If scgName specified, then search by scgName
scg = self.get_scg_by_scgName(scgName)
else:
# If scgName not specified, get the SCG from the value
# configured in powervc.conf
scg = self.get_configured_scg()
else:
LOG.debug(_("Specified scgUUID: '%s'" % scgUUID))
# retrieve scg by scgUUID
scg = self.scg_cache.by_id(scgUUID)
if not scg:
            # If no scg, then it's an IVM-based PowerVC;
            # return all storage templates
return (self._cinderclient.volume_types.
list_all_storage_templates())
# accessible_storage_templates to return
accessible_storage_templates = []
        # filter out all the accessible storage template UUIDs
volume_types = scg.list_all_volume_types()
volume_type_ids = []
for vol_type in volume_types:
volume_type_ids.append(vol_type.__dict__.get("id"))
all_volume_types = \
self._cinderclient.volume_types.list_all_storage_templates()
for storage_template in all_volume_types:
if(storage_template.__dict__.get("id") in volume_type_ids):
accessible_storage_templates.append(storage_template)
LOG.info(_('accessible_storage_templates: %s' %
(accessible_storage_templates)))
return accessible_storage_templates
def get_multi_scg_accessible_volumes(self,
scg_uuid_list,
scg_name_list,
detailed=True,
search_opts=None):
"""
Get accessible storage providers by specified SCG UUID list
or SCG Name list,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
"""
class WrapVolume():
def __init__(self, volume):
self.volume = volume
def __eq__(self, other):
if isinstance(other, WrapVolume):
return self.volume.id == other.volume.id
else:
return False
def __hash__(self):
return hash(self.volume.id)
wrap_volumes = set()
if scg_uuid_list:
for scg_uuid in scg_uuid_list:
scg_volumes = self.get_scg_accessible_volumes(scg_uuid,
None,
detailed,
search_opts)
wrap_scg_volumes = [WrapVolume(scg_volume)
for scg_volume in scg_volumes]
wrap_volumes.update(wrap_scg_volumes)
return [wrap_volume.volume for wrap_volume in wrap_volumes]
if not scg_name_list:
scg_name_list = CONF.powervc.storage_connectivity_group
if scg_name_list:
for scg_name in scg_name_list:
scg_volumes = self.get_scg_accessible_volumes(None,
scg_name,
detailed,
search_opts)
wrap_scg_volumes = [WrapVolume(scg_volume)
for scg_volume in scg_volumes]
wrap_volumes.update(wrap_scg_volumes)
return [wrap_volume.volume for wrap_volume in wrap_volumes]
def get_scg_accessible_volumes(self, scgUUID=None, scgName=None,
detailed=True, search_opts=None):
"""
Get SCG accessible volumes providers by specified SCG UUID or scgName,
If both SCG UUID and SCG Name are specified specified, UUID is prior,
If none of SCG UUID and Name specified, get the first SCG from powerVC
"""
scg = None
# If no scgUUID specified.
if not scgUUID:
if scgName:
# If scgName specified, then search by scgName
scg = self.get_scg_by_scgName(scgName)
else:
# If scgName not specified, get the SCG from the value
# configured in powervc.conf
scg = self.get_configured_scg()
else:
LOG.debug(_("Specified scgUUID: '%s'" % scgUUID))
# retrieve scg by scgUUID
scg = self.scg_cache.by_id(scgUUID)
if not scg:
            # If no scg, then it's an IVM-based PowerVC;
            # return all volumes
return (self._cinderclient.volumes.list_all_volumes())
# accessible_storage_volumes to return
accessible_storage_volumes = []
volumes = scg.list_all_volumes()
volume_ids = []
for vol in volumes:
volume_ids.append(vol.__dict__.get("id"))
all_volumes = \
self._cinderclient.volumes.list_all_volumes(detailed, search_opts)
for storage_volume in all_volumes:
if(storage_volume.__dict__.get("id") in volume_ids):
metadata = storage_volume.__dict__.get("metadata")
if(metadata is not None):
is_boot_volume = metadata.get("is_boot_volume")
# Filter out the boot volumes
if(is_boot_volume != "True"):
accessible_storage_volumes.append(storage_volume)
else:
accessible_storage_volumes.append(storage_volume)
LOG.info(_('accessible_storage_volumes: %s' % (
accessible_storage_volumes)))
return accessible_storage_volumes
def get_image_scgs(self, imageUUID):
"""
Get the Storage Connectivity Groups for the specified image.
:param: imageUUID The UUID of the image
:returns: The Storage Connectivity Groups for the specified image or an
empty list if none are found.
"""
if imageUUID is not None:
return self._novaclient.storage_connectivity_groups.list_for_image(
imageUUID)
else:
return []
def get_scg_image_ids(self, scgUUID):
"""
Get the SCGImage identifiers for the specified Storage Connectivity
Group.
        :param: scgUUID The UUID of the StorageConnectivityGroup
:returns: The list of SCGImage identifiers for the specified Storage
Connectivity Group or an empty list if none are found.
"""
if scgUUID is not None:
return self._novaclient.scg_images.list_ids(scgUUID)
else:
return []
def get_local_staging_project_id(self):
"""
Get the local hosting OS staging project Id. If a staging
        project name is not found, an exception.StagingProjectNotFound
exception will be raised. If no staging project is specified in
the conf, the default value will be used as specified in constants.
:returns: The local hosting OS staging project Id
"""
ks_client = self._localkeystoneclient
stagingname = CONF.powervc.staging_project_name or \
constants.DEFAULT_STAGING_PROJECT_NAME
try:
for tenant in ks_client.tenants.list():
projectname = tenant.name
projectid = tenant.id
if projectname == stagingname:
LOG.debug(_('The staging_project_name %s has id %s'),
stagingname, projectid)
return projectid
except Exception as e:
LOG.debug(_('An error occurred getting the tenant list: %s.'), e)
LOG.debug(_('Unable to find staging project: %s'), stagingname)
raise exception.StagingProjectNotFound(name=stagingname)
def get_local_staging_user_id(self):
"""
Get the local hosting OS staging user Id which defaults to
        constants.DEFAULT_STAGING_USER_NAME if not set in the conf.
If a staging user name is not found, a StagingUserNotFound
exception will be raised.
:returns: The local hosting OS staging user Id
"""
ks_client = self._localkeystoneclient
staginguser = CONF.powervc.staging_user or \
constants.DEFAULT_STAGING_USER_NAME
try:
for user in ks_client.users.list():
username = user.name
userid = user.id
if staginguser == username:
LOG.debug(_('The staging_user %s has id %s'),
staginguser, userid)
return userid
except Exception as e:
LOG.debug(_('An error occurred getting the user list: %s'), e)
LOG.debug(_('Unable to find staging user: %s'), staginguser)
raise exception.StagingUserNotFound(name=staginguser)
def import_relative_module(relative_import_str, import_str):
"""
    Imports a module relative to another. Can be used when more
    than one module of the given name exists in the Python path,
    to resolve any discrepancy between multiple paths.
:param relative_import_str: a module import string which
neighbors the actual import. for example 'glanceclient'.
:param import_str: the module import string. for example
'tests.utils'
example:
utils = import_relative_module('glanceclient', 'tests.utils')
fapi = utils.FakeAPI(...)
"""
mod = importutils.import_module(relative_import_str)
mpath = os.path.dirname(os.path.dirname(os.path.realpath(mod.__file__)))
    if sys.path[0] is not mpath:
sys.path.insert(0, mpath)
return importutils.import_module(import_str)
class StagingCache(object):
"""
Provides a lazy cache around the local staging user and project.
Consumers can use the staging_user_and_project property to retrieve the
(user_id, project_id) pair for the staging user. These values are
lazily fetched at most once
"""
def __init__(self):
super(StagingCache, self).__init__()
self.utils = get_utils()
self.staging_user = None
self.staging_project = None
@property
def is_valid(self):
uid, pid = self.get_staging_user_and_project()
return uid is not None and pid is not None
def get_staging_user_and_project(self, raise_on_invalid=False):
try:
if not self.staging_user:
self.staging_user = self.utils.get_local_staging_user_id()
if not self.staging_project:
self.staging_project = \
self.utils.get_local_staging_project_id()
return (self.staging_user, self.staging_project)
except exception.StagingProjectNotFound as e:
if raise_on_invalid:
raise e
return (None, None)
except exception.StagingUserNotFound as e:
if raise_on_invalid:
raise e
return (None, None)
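# Illustrative sketch of consuming the lazy staging cache above; the variable
# names are hypothetical and nothing here runs at import time.
def _example_staging_lookup():
    cache = StagingCache()
    if not cache.is_valid:
        return None
    # Both values are fetched lazily and memoized on first access
    user_id, project_id = cache.get_staging_user_and_project()
    return user_id, project_id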

192
common-powervc/run_tests.sh Executable file
View File

@ -0,0 +1,192 @@
#!/bin/bash
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run PowerVC Common test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run flake8"
echo " -8, --8 Just run flake8, don't show PEP8 text for each error"
echo " -P, --no-pep8 Don't run flake8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo " --standard-threads Don't do the eventlet threading monkeypatch."
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
function process_option {
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_flake8=1;;
-8|--8) short_flake8=1;;
-P|--no-pep8) no_flake8=1;;
-c|--coverage) coverage=1;;
--standard-threads)
export STANDARD_THREADS=1
;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
venv=.venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
noseargs=
noseopts=
wrapper=""
just_flake8=0
short_flake8=0
no_flake8=0
coverage=0
recreate_db=1
update=0
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=common-powervc"
fi
function run_tests {
# Just run the test suites in current environment
${wrapper} $NOSETESTS
# If we get some short import error right away, print the error log directly
RESULT=$?
if [ "$RESULT" -ne "0" ];
then
ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
if [ "$ERRSIZE" -lt "40" ];
then
cat run_tests.log
fi
fi
return $RESULT
}
function run_flake8 {
FLAGS=--show-pep8
if [ $# -gt 0 ] && [ 'short' == ''$1 ]
then
FLAGS=''
fi
echo "Running flake8 ..."
# Just run flake8 in current environment
#echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt
${wrapper} flake8 $FLAGS powervc | tee pep8.txt
RESULT=${PIPESTATUS[0]}
return $RESULT
}
NOSETESTS="nosetests $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_flake8 -eq 1 ]; then
run_flake8
RESULT=$?
echo "RESULT $RESULT"
exit $RESULT
fi
if [ $short_flake8 -eq 1 ]; then
run_flake8 short
RESULT=$?
exit $RESULT
fi
run_tests
RESULT=$?
# NOTE(sirp): we only want to run flake8 when we're running the full-test
# suite, not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and arguments
# (noseargs).
if [ -z "$noseargs" ]; then
if [ $no_flake8 -eq 0 ]; then
run_flake8
TMP_RESULT=$?
RESULT=$(($TMP_RESULT + $RESULT))
fi
fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
${wrapper} coverage html -d covhtml -i
fi
exit $RESULT

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,459 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
from cinderclient.tests.v1 import fakes
from cinderclient.tests.v1.test_volumes import VolumesTest
from cinderclient.tests.v1.test_types import TypesTest
from cinderclient.tests import utils
from cinderclient.v1.volumes import Volume
from cinderclient.v1.volume_types import VolumeType
from cinderclient.v1.volume_types import VolumeTypeManager
from powervc.common.client.extensions import cinder as ext_cinder
from powervc.common.client import delegate
from powervc.common import utils as commonutils
import mock
import sys
"""
This module similarly extends the current cinder client test cases, and it
also provides examples of how to override an existing method when we need
to test something unique to PowerVC.
The methods that are currently overridden expect the same results as the base
class test cases and are only provided as examples.
For a PowerVC-specific data model, just override the parent fake data
structure and the corresponding test case logic that verifies the
functions.
To run the testcases, alternatively:
1. Right click the TestCinderClient.py --> Run As --> Python unit-test
or
2. Refer to this link for detail UT running information:
https://jazz04.rchland.ibm.com:9443/jazz/service/ +
com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/ +
itemName/com.ibm.team.workitem.Attachment/67843
All the testcases should be run successfully.
"""
class PVCFakeClient(fakes.FakeClient):
"""
    This PVCFakeClient class extends the current cinder FakeClient
    and pvccinderclient.CinderClient,
    aiming to set the self.client variable to PVCFakeHTTPClient
"""
def __init__(self, *args, **kwargs):
fakes.FakeClient.__init__(self, *args, **kwargs)
self.client = PVCFakeHTTPClient(**kwargs)
sys.modules['powervc.common.client.factory'] = mock.MagicMock()
class PVCFakeHTTPClient(fakes.FakeHTTPClient):
"""
This PVCFakeHTTPClient class extends the current cinder FakeHTTPClient.
For all the HTTP requests in this class, it returns a fake json data
as specified beforehand instead of requesting to a real environment.
Ex, to test if json data from powerVC volume RESTAPI:
1. Add expected powerVC volumes json raw data into
get_volumes_detail() method
2. Add get_volumes_{volume_id} method to return the volume
3. Add post_volumes_{volume_id}_action to handle post logic
4. Add testcase and new added methods will be called
"""
def __init__(self, **kwargs):
fakes.FakeHTTPClient.__init__(self, **kwargs)
def get_volumes_pvcvolume(self, **kw):
r = {'volume': self.get_volumes_detail()[2]['volumes'][1]}
return (200, {}, r)
def get_volumes_detail(self, **kw):
"""
        Override the parent method to return a new PowerVC-specific volume.
        The structure here is the same as the OpenStack one, as an example.
"""
return (200, {}, {"volumes": [
{'id': 1234,
'name': 'sample-volume for cinder',
'attachments': [{'server_id': 12234}]},
{'id': 'pvcvolume',
'name': 'pvc sample-volume for cinder',
'attachments': [{'server_id': 54321}]}
]})
def post_volumes_pvcvolume_action(self, body, **kw):
"""
        Add this method to handle PowerVC volume post actions.
        The logic here is the same as the OpenStack one, as an example.
"""
_body = None
resp = 202
assert len(list(body.keys())) == 1
action = list(body.keys())[0]
if action == 'os-attach':
assert sorted(list(body[action])) == ['instance_uuid',
'mode',
'mountpoint']
elif action == 'os-detach':
assert body[action] is None
elif action == 'os-reserve':
assert body[action] is None
elif action == 'os-unreserve':
assert body[action] is None
elif action == 'os-initialize_connection':
assert list(body[action].keys()) == ['connector']
return (202, {}, {'connection_info': 'foos'})
elif action == 'os-terminate_connection':
assert list(body[action].keys()) == ['connector']
elif action == 'os-begin_detaching':
assert body[action] is None
elif action == 'os-roll_detaching':
assert body[action] is None
elif action == 'os-reset_status':
assert 'status' in body[action]
else:
raise AssertionError("Unexpected action: %s" % action)
return (resp, {}, _body)
def get_storage_providers_2(self, **kw):
"""
        To get a fake detailed storage provider
"""
return (200, {}, {"storage_provider":
{
"backend_type": "svc",
"volume_count": "null",
"service": {
"host_display_name": "shared_v7000_1",
"host": "shared_v7000_1",
"id": 4
},
"backend_id": "00000200A0204C30",
"health_status": {
"health_value": "OK"
},
"free_capacity_gb": 873.5,
"total_capacity_gb": 1115.5,
"storage_hostname": "shared_v7000_1",
"id": 2,
"backend_state": "running"
}})
def get_storage_providers_detail(self, **kw):
"""
        To return fake detailed storage providers
"""
return (200, {}, {"storage_providers": [
{
"backend_type": "svc",
"volume_count": "null",
"service": {
"host_display_name": "shared_v7000_1",
"host": "shared_v7000_1",
"id": 4
},
"backend_id": "00000200A0204C30",
"health_status": {
"health_value": "OK"
},
"free_capacity_gb": 873.5,
"total_capacity_gb": 1115.5,
"storage_hostname": "shared_v7000_1",
"id": 2,
"backend_state": "running"
},
{
"backend_type": "fc",
"volume_count": "null",
"service": {
"host_display_name": "shared_v7000_1",
"host": "shared_v7000_1",
"id": 4
},
"backend_id": "00000200A0204C31",
"health_status": {
"health_value": "OK"
},
"free_capacity_gb": 73.5,
"total_capacity_gb": 115.5,
"storage_hostname": "shared_v7000_2",
"id": 3,
"backend_state": "running"
}
]})
def get_types(self, **kw):
return (200, {}, {
"volume_types": [
{
"extra_specs": {
"drivers:storage_pool": "P-NGP01-pool",
"capabilities:volume_backend_name": "shared_v7000_1",
"drivers:rsize": "-1"
},
"name": "shared_v7000_1-default",
"id": "6627888e-9f59-4996-8c22-5d528c3273f0"
},
{
"extra_specs": {},
"name": "dm-crypt",
"id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89bb"
},
{
"extra_specs": {},
"name": "LUKS",
"id": "291f81a2-591b-4164-b2b2-829abc935573"
}
]
})
class PVCCinderVolumesTest(VolumesTest):
"""
This PVCCinderVolumesTest class extends the current cinder
VolumesTest class to provide volume related UT cases.
"""
volume_list = [
{
'id': 1234,
'name': 'sample-volume for cinder',
'attachments': [{'server_id': 12234}]},
{
'id': 'pvcvolume',
'name': 'pvc sample-volume for cinder',
'attachments': [{'server_id': 54321}]
}]
def setUp(self):
super(PVCCinderVolumesTest, self).setUp()
# get cinder client
cinder_fakeclient = PVCFakeClient('r', 'p')
        # delegate to the cinder extension class
cinder_client = delegate.new_composite_deletgate(
[ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])
self.cs = cinder_client
def tearDown(self):
super(PVCCinderVolumesTest, self).tearDown()
def test_pvcvolume_attach(self):
"""
        Add this method to test whether the PowerVC volume attach works.
        The logic here is the same as the OpenStack one, as an example.
"""
v = self.cs.volumes.get('pvcvolume')
self.cs.volumes.attach(v, 1, '/dev/vdc')
self.cs.assert_called('POST',
'/volumes/pvcvolume/action')
def test_list_all_volumes(self):
        results = self.cs.volumes.list_all_volumes()
        self.cs.assert_called('GET', '/volumes/detail')
        self.assertEqual(results[0].id, 1234)
        self.assertEqual(results[1].name, 'pvc sample-volume for cinder')
def test_list_volumes_1(self):
returnvalues = [Volume(self, res, loaded=True)
for res in self.volume_list if res]
commonutils.get_utils().get_multi_scg_accessible_volumes = \
mock.MagicMock(return_value=returnvalues)
result = self.cs.volumes.list()
self.assertEquals(result[0].id, 1234)
self.assertEquals(result[1].name, "pvc sample-volume for cinder")
def test_list_volumes_2(self):
returnvalues = [Volume(self, res, loaded=True)
for res in self.volume_list if res]
commonutils.get_utils().get_scg_accessible_volumes = \
mock.MagicMock(return_value=returnvalues)
result = self.cs.volumes.list(True, None, 'SCGUUID', None)
self.assertEquals(result[0].name, "sample-volume for cinder")
class PVCCinderTypesTest(TypesTest):
"""
This PVCCinderTypesTest class extends the current cinder
TypesTest class to provide volume Type related UT cases.
"""
volumes_type_list = [
{
"extra_specs": {
"drivers:storage_pool": "P-NGP01-pool",
"capabilities:volume_backend_name": "shared_v7000_1",
"drivers:rsize": "-1"
},
"name": "shared_v7000_1-default",
"id": "6627888e-9f59-4996-8c22-5d528c3273f"
},
{
"extra_specs": {},
"name": "dm-crypt",
"id": "a3ae95f6-4aab-4446-b1d2-0fc2f60a89b"
},
{
"extra_specs": {},
"name": "LUKS",
"id": "291f81a2-591b-4164-b2b2-829abc93557"
}]
def setUp(self):
super(PVCCinderTypesTest, self).setUp()
# get cinder client
cinder_fakeclient = PVCFakeClient('r', 'p')
        # delegate to the cinder extension class
cinder_client = delegate.new_composite_deletgate(
[ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])
self.cs = cinder_client
def tearDown(self):
super(PVCCinderTypesTest, self).tearDown()
def test_list_all_storage_templates(self):
        result = self.cs.volume_types.list_all_storage_templates()
        self.assertEqual(result[0].name, "shared_v7000_1-default")
def test_list_storage_templates_1(self):
returnvalues = [VolumeType(VolumeTypeManager, res, loaded=True)
for res in self.volumes_type_list if res]
commonutils.get_utils().get_multi_scg_accessible_storage_templates = \
mock.MagicMock(return_value=returnvalues)
result = self.cs.volume_types.list()
self.assertEquals(result[0].id, "6627888e-9f59-4996-8c22-5d528c3273f")
self.assertEquals(result[1].name, "dm-crypt")
self.assertEquals(result[2].name, "LUKS")
def test_list_storage_templates_2(self):
data = self.volumes_type_list[2]
returnvalues = [VolumeType(VolumeTypeManager, res, loaded=True)
for res in [data] if res]
commonutils.get_utils().get_scg_accessible_storage_templates = \
mock.MagicMock(return_value=returnvalues)
result = self.cs.volume_types.list("SCGUUID", None)
self.assertEquals(result[0].name, "LUKS")
class PVCStorageProvidersTest(utils.TestCase):
"""
Class PVCStorageProvidersTest is used to provide
Storage Providers related UT cases.
"""
expected_sp = [
dict(
backend_type="svc",
volume_count="null",
service=dict(
host_display_name="shared_v7000_1",
host="shared_v7000_1",
id=4),
backend_id="00000200A0204C30",
health_status=dict(health_value="OK"),
free_capacity_gb=873.5,
total_capacity_gb=1115.5,
storage_hostname="shared_v7000_1",
id=2,
backend_state="running",
storage_type="fc")]
def setUp(self):
super(PVCStorageProvidersTest, self).setUp()
# get cinder client
cinder_fakeclient = PVCFakeClient('r', 'p')
        # delegate to the cinder extension class
cinder_client = delegate.new_composite_deletgate(
[ext_cinder.Client(cinder_fakeclient), cinder_fakeclient])
self.cs = cinder_client
def tearDown(self):
super(PVCStorageProvidersTest, self).tearDown()
def compare_to_expected(self, expected, hyper):
for key, value in expected.items():
self.assertEqual(getattr(hyper, key), value)
def test_get_detail_SPs(self):
expected = [
dict(id=2,
backend_type="svc",
backend_id="00000200A0204C30",
free_capacity_gb=873.5,
total_capacity_gb=1115.5,
storage_hostname="shared_v7000_1",
backend_state="running"),
dict(id=3,
backend_type="fc",
backend_id="00000200A0204C31",
free_capacity_gb=73.5,
total_capacity_gb=115.5,
storage_hostname="shared_v7000_2",
backend_state="running")]
result = self.cs.storage_providers.list_all_providers()
self.cs.assert_called('GET', '/storage-providers/detail')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_get_storage_provider(self):
expected = dict(id=2,
backend_type="svc",
backend_id="00000200A0204C30",
free_capacity_gb=873.5,
total_capacity_gb=1115.5,
storage_hostname="shared_v7000_1",
backend_state="running")
result = self.cs.storage_providers.get(2)
self.cs.assert_called('GET',
'/storage-providers/2')
self.compare_to_expected(expected, result)
def test_list_SP_1(self):
expected = self.expected_sp
returnvalue = [ext_cinder.StorageProvider(None, expected[0], True)]
commonutils.get_utils().get_scg_accessible_storage_providers = \
mock.MagicMock(return_value=returnvalue)
result = self.cs.storage_providers.list(True, None, "SCGUUID", None)
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_list_SP_2(self):
expected = self.expected_sp
returnvalue = [ext_cinder.StorageProvider(None, expected[0], True)]
commonutils.get_utils().get_multi_scg_accessible_storage_providers = \
mock.MagicMock(return_value=returnvalue)
result = self.cs.storage_providers.list()
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)

View File

@ -0,0 +1,53 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import unittest
from powervc.common.client import delegate
class FakeDelegator1(object):
def x(self):
return 'x'
class FakeDelegator2(object):
def y(self):
return 'y'
class FakeContext(object):
def __init__(self):
self.auth_token = 'Context Auth Token'
self.project_id = 'Project Id'
class FakeKeyStone(object):
def __init__(self):
self.auth_token = 'KeyStone Auth Token'
class DelegateTest(unittest.TestCase):
def test_new_composite_deletgate(self):
d1 = FakeDelegator1()
d2 = FakeDelegator2()
dele = delegate.new_composite_deletgate([d1, d2])
self.assertEqual(dele.x(), 'x')
self.assertEqual(dele.y(), 'y')
def test_context_dynamic_auth_token(self):
ctx = FakeContext()
keystone = FakeKeyStone()
dele_ctx_keystone = delegate.context_dynamic_auth_token(ctx, keystone)
self.assertEqual(dele_ctx_keystone.auth_token, 'KeyStone Auth Token')
self.assertEqual(dele_ctx_keystone.project_id, 'Project Id')
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,177 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import unittest
import mox
from mock import MagicMock
import glanceclient.v1.images as imagesV1
import glanceclient.v1.image_members as membersV1
from glanceclient.openstack.common import gettextutils
gettextutils.install('common-glance-client-ut')
import powervc.common.utils as common_utils
utils = common_utils.import_relative_module('glanceclient', 'tests.utils')
test_images = common_utils.import_relative_module('glanceclient',
'tests.v1.test_images')
test_image_members = common_utils.import_relative_module(
'glanceclient',
'tests.v1.test_image_members')
from powervc.common.client.extensions.glance import Client as PVCGlanceClient
class FakeGlanceClient(object):
"""
Fake client to populate the pvcglanceclient.Client
"""
def __init__(self, images, members):
self.images = images
self.image_members = members
self.image_tags = MagicMock()
class TestPVCGlanceClient(unittest.TestCase):
def setUp(self):
# prepare the fake api
images_api = utils.FakeAPI(test_images.fixtures) # @UndefinedVariable
images_manager = imagesV1.ImageManager(images_api)
members_api = utils.FakeAPI( # @UndefinedVariable
test_image_members.fixtures
)
members_manager = membersV1.ImageMemberManager(members_api)
# create mock object
self.moxer = mox.Mox()
client = self.moxer.CreateMockAnything()
self.pvc_gc = PVCGlanceClient(client)
# append the fake api to mock object
self.pvc_gc.client.images = images_manager
self.pvc_gc.client.image_members = members_manager
self.pvc_gc.client.image_tags = MagicMock()
def test_listImages(self):
self.moxer.ReplayAll()
images = self.pvc_gc.listImages()
self.moxer.VerifyAll()
self.assertEqual(images[0].id, 'a')
self.assertEqual(images[0].name, 'image-1')
self.assertEqual(images[1].id, 'b')
self.assertEqual(images[1].name, 'image-2')
def test_getImage(self):
self.moxer.ReplayAll()
image = self.pvc_gc.getImage('1')
self.moxer.VerifyAll()
self.assertEqual(image.id, '1')
self.assertEqual(image.name, 'image-1')
def test_deleteImage(self):
self.moxer.ReplayAll()
self.pvc_gc.deleteImage('1')
expect = [
('DELETE', '/v1/images/1', {}, None),
]
self.moxer.VerifyAll()
self.assertEqual(self.pvc_gc.
client.
images.
api.calls,
expect)
def test_listImageMembers(self):
self.moxer.ReplayAll()
image_id = '1'
image_members = self.pvc_gc.listImageMembers(image_id)
self.moxer.VerifyAll()
self.assertEqual(image_members[0].image_id, '1')
self.assertEqual(image_members[0].member_id, '1')
def test_deleteImageMember(self):
self.moxer.ReplayAll()
image_id = '1'
member_id = '1'
self.pvc_gc.deleteImageMember(image_id, member_id)
expect = [
('DELETE',
'/v1/images/{image}/members/{mem}'.
format(image='1',
mem='1'),
{},
None)]
self.moxer.VerifyAll()
self.assertEqual(self.pvc_gc.client.image_members.
api.calls,
expect)
def test_getImageFile(self):
self.pvc_gc.client.images.data = MagicMock(return_value='FILE')
ret = self.pvc_gc.getImageFile('image_id')
self.pvc_gc.client.images.data.assert_called_once_with('image_id')
self.assertEqual(ret, 'FILE')
def test_updateImage(self):
self.pvc_gc.client.images.update = MagicMock(return_value='updated')
ret = self.pvc_gc.updateImage('image_id')
self.pvc_gc.client.images.update.assert_called_once_with('image_id')
self.assertEqual(ret, 'updated')
def test_updateImageMember(self):
self.pvc_gc.client.image_members.update =\
MagicMock(return_value='member updated')
ret = self.pvc_gc.updateImageMember('image_id',
'member_id',
'member_status')
self.pvc_gc.client.image_members.update.\
assert_called_once_with('image_id',
'member_id',
'member_status')
self.assertEqual(ret, 'member updated')
def test_createImageMember(self):
self.pvc_gc.client.image_members.create =\
MagicMock(return_value='member created')
ret = self.pvc_gc.createImageMember('image_id', 'member_id')
self.pvc_gc.client.image_members.create.\
assert_called_once_with('image_id',
'member_id')
self.assertEqual(ret, 'member created')
def test_updateImageTag(self):
self.pvc_gc.client.image_tags.update =\
MagicMock(return_value='tag updated')
self.pvc_gc.client_version = 2
ret = self.pvc_gc.updateImageTag('image_id', 'tag_value')
self.pvc_gc.client.image_tags.update.\
assert_called_once_with('image_id',
'tag_value')
self.assertEqual(ret, 'tag updated')
def test_deleteImageTag(self):
self.pvc_gc.client.image_tags.delete =\
MagicMock(return_value='tag deleted')
self.pvc_gc.client_version = 2
ret = self.pvc_gc.deleteImageTag('image_id', 'tag_value')
self.pvc_gc.client.image_tags.delete.\
assert_called_once_with('image_id',
'tag_value')
self.assertEqual(ret, 'tag deleted')
def tearDown(self):
pass
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,561 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import unittest
from mock import MagicMock
from mock import patch
import novaclient.tests.v1_1.test_servers as servers_testbox
import novaclient.tests.v1_1.test_flavors as flavors_testbox
import novaclient.tests.v1_1.test_hypervisors as hypervisors_testbox
from novaclient.tests.v1_1 import fakes
from novaclient.v1_1 import servers
from novaclient.v1_1 import flavors
from novaclient.tests import utils
from powervc.common.client.extensions import nova as ext_nova
from powervc.common.client import delegate
from powervc.common import utils as comm_utils
"""
This module similarly extends the current nova client test cases, and it
also provides examples of how to override an existing method when we need
to test something unique to PowerVC.
The methods that are currently overridden expect the same results as the base
class test cases and are only provided as examples.
For a PowerVC-specific data model, just override the parent fake data
structure and the corresponding test case logic that verifies the
functions.
To run the testcases, alternatively:
1. Right click the TestNovaClient.py --> Run As --> Python unit-test
or
2. Refer to this link for detail UT running information:
https://jazz04.rchland.ibm.com:9443/jazz/service/ +
com.ibm.team.workitem.common.internal.rest.IAttachmentRestService/ +
itemName/com.ibm.team.workitem.Attachment/67843
All the testcases should be run successfully.
"""
class PVCFakeClient(fakes.FakeClient):
"""
This PVCFakeClient class extends the current nova FakeClient,
aiming to set the self.client variable to PVCFakeHTTPClient
"""
def __init__(self, *args, **kwargs):
fakes.FakeClient.__init__(self, *args, **kwargs)
self.client = PVCFakeHTTPClient(**kwargs)
class PVCFakeHTTPClient(fakes.FakeHTTPClient):
"""
This PVCFakeHTTPClient class extends the current nova FakeHTTPClient.
For all the HTTP requests in this class, it returns fake JSON data
specified beforehand instead of issuing requests to a real environment.
"""
def __init__(self, **kwargs):
fakes.FakeHTTPClient.__init__(self, **kwargs)
def get_servers(self, **kw):
"""
Override the parent method to return PowerVC-specific servers.
"""
return (200, {}, {"servers": [
{'id': 1234, 'name': 'sample-server'},
{'id': 5678, 'name': 'powerVC sample-server'}
]})
def get_servers_detail(self, **kw):
"""
Override the parent method to return PowerVC-specific server
details.
"""
return (200, {}, {"servers": [
{
"id": 1234,
"name": "sample-server",
"image": {
"id": 2,
"name": "sample image",
},
"flavor": {
"id": 1,
"name": "256 MB Server",
},
"hostId": "e4d909c290d0fb1ca068ffaddf22cbd0",
"status": "BUILD",
"progress": 60,
"addresses": {
"public": [{
"version": 4,
"addr": "1.2.3.4",
}, {
"version": 4,
"addr": "5.6.7.8",
}],
"private": [{
"version": 4,
"addr": "10.11.12.13",
}],
},
"metadata": {
"Server Label": "Web Head 1",
"Image Version": "2.1"
},
"OS-EXT-SRV-ATTR:host": "computenode1",
"security_groups": [{
'id': 1, 'name': 'securitygroup1',
'description': 'FAKE_SECURITY_GROUP',
'tenant_id': '4ffc664c198e435e9853f2538fbcd7a7'
}],
"OS-EXT-MOD:some_thing": "mod_some_thing_value"},
{
"id": 5678,
"name": "powerVC sample-server",
"image": {
"id": 2,
"name": "sample image",
},
"flavor": {
"id": 1,
"name": "256 MB Server",
},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"addresses": {
"public": [{
"version": 4,
"addr": "4.5.6.7",
}, {
"version": 4,
"addr": "5.6.9.8",
}],
"private": [{
"version": 4,
"addr": "10.13.12.13",
}],
},
"metadata": {
"Server Label": "DB 1"
},
"OS-EXT-SRV-ATTR:host": "computenode2",
},
{
"id": 9012,
"name": "sample-server3",
"image": "",
"flavor": {
"id": 1,
"name": "256 MB Server",
},
"hostId": "9e107d9d372bb6826bd81d3542a419d6",
"status": "ACTIVE",
"addresses": {
"public": [{
"version": 4,
"addr": "4.5.6.7",
}, {
"version": 4,
"addr": "5.6.9.8",
}],
"private": [{
"version": 4,
"addr": "10.13.12.13",
}],
},
"metadata": {
"Server Label": "DB 1"
}
}
]})
def get_flavors_detail(self, **kw):
"""
Override the parent method to return PowerVC-specific flavor
details.
"""
return (200, {}, {'flavors': [
{'id': 1, 'name': '256 MB Server', 'ram': 256, 'disk': 10,
'OS-FLV-EXT-DATA:ephemeral': 10,
'os-flavor-access:is_public': True,
'links': {}},
{'id': 2, 'name': '128 MB Server', 'ram': 512, 'disk': 0,
'OS-FLV-EXT-DATA:ephemeral': 20,
'os-flavor-access:is_public': False,
'links': {}},
{'id': 'aa1', 'name': 'PowerVC 128 MB Server', 'ram': 5120,
'disk': 5678, 'OS-FLV-EXT-DATA:ephemeral': 0,
'os-flavor-access:is_public': True,
'links': {}}
]})
def get_os_hypervisors(self, **kw):
"""
Override the parent method to return PowerVC-specific hypervisor
details.
"""
return (200, {}, {"hypervisors": [
{'id': 1234, 'hypervisor_hostname': 'hyper1'},
{'id': 5678, 'hypervisor_hostname': 'hyper2'},
]})
def get_storage_connectivity_groups_f4b541cb_f418_4b4b_83b9_a8148650d4e9(
self, **kw):
"""
Return a fake detailed storage_connectivity_group.
"""
return (200, {}, {"storage_connectivity_group":
{
"auto_add_vios": True,
"fc_storage_access": True,
"display_name": "Auto-SCG for Registered SAN",
"host_list": [
{
"name": "ngp01_02_vios_1",
"vios_list": [
{
"lpar_id": 1,
"name": "10-F715A",
"id": "ngp01_02_vios_1##1"
}
]
},
{
"name": "ngp01_03_vios_1",
"vios_list": [
{
"lpar_id": 1,
"name": "10-F76CA",
"id": "ngp01_03_vios_1##1"
}
]
}
],
"created_at": "2013-08-23 14:56:11.787465",
"enabled": True,
"auto_defined": True,
"id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"
}})
def get_storage_connectivity_groups(self, **kw):
"""
Return a fake list of storage_connectivity_groups.
"""
return (200, {}, {"storage_connectivity_groups": [
{
"display_name": "Auto-SCG for Registered SAN",
"id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"
},
{
"display_name": "SCG sample",
"id": "sdfb541cb-f418-4b4b-3129-a814865023fs"
}]})
def get_storage_connectivity_groups_detail(self, **kw):
"""
Return a fake detailed list of storage_connectivity_groups.
"""
return (200, {}, {"storage_connectivity_groups": [
{
"auto_add_vios": True,
"fc_storage_access": True,
"display_name": "Auto-SCG for Registered SAN",
"host_list": [
{
"name": "ngp01_02_vios_1",
"vios_list": [
{
"lpar_id": 1,
"name": "10-F715A",
"id": "ngp01_02_vios_1##1"
}
]
},
{
"name": "ngp01_03_vios_1",
"vios_list": [
{
"lpar_id": 1,
"name": "10-F76CA",
"id": "ngp01_03_vios_1##1"
}
]
}
],
"created_at": "2013-08-23 14:56:11.787465",
"enabled": True,
"auto_defined": True,
"id": "f4b541cb-f418-4b4b-83b9-a8148650d4e9"},
{
"auto_add_vios": True,
"fc_storage_access": True,
"display_name": "SCG Sample",
"host_list": [
{
"name": "ngp01_02_vios_1",
"vios_list": [
{
"lpar_id": 1,
"name": "10-F715A",
"id": "ngp01_02_vios_1##1"
}
]
}, {
"name": "ngp01_03_vios_1",
"vios_list": [
{
"lpar_id": 1,
"name": "10-F76CA",
"id": "ngp01_03_vios_1##1"
}
]
}
],
"created_at": "2013-08-23 14:56:11.787465",
"enabled": True,
"auto_defined": True,
"id": "sdfb541cb-f418-4b4b-3129-a814865023fs"
}
]})
class PVCNovaServersTest(servers_testbox.ServersTest):
"""
This PVCNovaServersTest class extends the current nova
ServersTest class to provide servers related UT cases.
"""
def setUp(self):
super(PVCNovaServersTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def tearDown(self):
super(PVCNovaServersTest, self).tearDown()
def test_list(self):
comm_utils.get_utils = MagicMock()
comm_utils.get_utils().get_multi_scg_accessible_servers = MagicMock()
self.cs.manager.list()
comm_utils.get_utils().get_multi_scg_accessible_servers.\
assert_called_once_with(None, None, True, None)
def test_list_servers(self):
"""
Override this method to test listing PowerVC servers.
The logic here is the same as OpenStack, provided as an example.
"""
sl = self.cs.manager.list_all_servers()
print sl
self.cs.assert_called('GET', '/servers/detail')
[self.assertTrue(isinstance(s, servers.Server)) for s in sl]
def test_list_instance_storage_viable_hosts(self):
with patch('novaclient.base.getid') as mock:
mock.return_value = 'server_id'
mock('server')
self.cs.manager.api.client.get = MagicMock(
return_value=('head', 'body'))
ret = self.cs.manager.list_instance_storage_viable_hosts('server')
self.cs.manager.api.client.get.assert_called_once_with(
'/storage-viable-hosts?instance_uuid=server_id')
self.assertEqual(ret, 'body')
class PVCNovaFlavorsTest(flavors_testbox.FlavorsTest):
"""
This PVCNovaFlavorsTest class extends the current nova
FlavorsTest class to provide flavors related UT cases.
"""
def setUp(self):
super(PVCNovaFlavorsTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def tearDown(self):
super(PVCNovaFlavorsTest, self).tearDown()
def test_get_flavor_details_alphanum_id(self):
"""
Override this method to test getting a specific PowerVC
flavor. The logic here is the same as OpenStack, provided as an example.
"""
f = self.cs.flavors.get('aa1')
self.cs.assert_called('GET', '/flavors/aa1')
self.assertTrue(isinstance(f, flavors.Flavor))
# Verify the preset value
self.assertEqual(f.ram, 5120)
self.assertEqual(f.disk, 5678)
self.assertEqual(f.ephemeral, 0)
self.assertEqual(f.is_public, True)
class PVCNovaHypervisorsTest(hypervisors_testbox.HypervisorsTest):
"""
This PVCNovaHypervisorsTest class extends the current nova
HypervisorsTest class to provide hypervisors related UT cases.
"""
def setUp(self):
super(PVCNovaHypervisorsTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def tearDown(self):
super(PVCNovaHypervisorsTest, self).tearDown()
def test_hypervisor_detail(self):
"""
Override this method to test that listing PowerVC hypervisors
works.
The logic here is the same as OpenStack, provided as an example.
"""
expected = [
dict(id=1234,
service=dict(id=1, host='compute1'),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper1",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100),
dict(id=2,
service=dict(id=2, host="compute2"),
vcpus=4,
memory_mb=10 * 1024,
local_gb=250,
vcpus_used=2,
memory_mb_used=5 * 1024,
local_gb_used=125,
hypervisor_type="xen",
hypervisor_version=3,
hypervisor_hostname="hyper2",
free_ram_mb=5 * 1024,
free_disk_gb=125,
current_workload=2,
running_vms=2,
cpu_info='cpu_info',
disk_available_least=100)]
result = self.cs.hypervisors.list()
print result
self.cs.assert_called('GET', '/os-hypervisors/detail')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
class PVCSCGTest(utils.TestCase):
def setUp(self):
super(PVCSCGTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def compare_to_expected(self, expected, hyper):
for key, value in expected.items():
self.assertEqual(getattr(hyper, key), value)
def test_get_detail_SCGs(self):
expected = [
dict(id="f4b541cb-f418-4b4b-83b9-a8148650d4e9",
auto_add_vios=True,
fc_storage_access=True,
display_name="Auto-SCG for Registered SAN",
enabled=True,
auto_defined=True),
dict(id="sdfb541cb-f418-4b4b-3129-a814865023fs",
auto_add_vios=True,
fc_storage_access=True,
display_name="SCG Sample",
enabled=True,
auto_defined=True)]
result = self.cs.storage_connectivity_groups.list()
self.cs.assert_called('GET', '/storage-connectivity-groups/detail')
for idx, hyper in enumerate(result):
self.compare_to_expected(expected[idx], hyper)
def test_get_SCGs(self):
expected = dict(id="f4b541cb-f418-4b4b-83b9-a8148650d4e9",
auto_add_vios=True,
fc_storage_access=True,
display_name="Auto-SCG for Registered SAN",
enabled=True,
auto_defined=True)
result = self.cs.storage_connectivity_groups.\
get('f4b541cb-f418-4b4b-83b9-a8148650d4e9')
self.cs.assert_called('GET',
'/storage-connectivity-groups/' +
'f4b541cb-f418-4b4b-83b9-a8148650d4e9')
self.compare_to_expected(expected, result)
class SCGImageManagerTest(unittest.TestCase):
def setUp(self):
super(SCGImageManagerTest, self).setUp()
nova_fakeclient = PVCFakeClient('r', 'p', 's',
'http://localhost:5000/')
# delegate to nova extension class
nova_client = delegate.new_composite_deletgate(
[ext_nova.Client(nova_fakeclient), nova_fakeclient])
self.cs = nova_client
def test_list(self):
with patch('novaclient.base.Manager._list') as mock:
mock.return_value = ['image1', 'image2', 'image3']
ret = self.cs.scg_images.list('scgUUID')
mock.assert_called_once_with(
'/storage-connectivity-groups/scgUUID/images', 'images')
self.assertEqual(ret, ['image1', 'image2', 'image3'])
def test_list_ids(self):
class FakeImage(object):
def __init__(self, image_id):
self.id = image_id
self.cs.scg_images.list = MagicMock(
return_value=[FakeImage(1), FakeImage(2), FakeImage(3)])
ret = self.cs.scg_images.list_ids('scgUUID')
self.assertEqual(ret, [1, 2, 3])

View File

@ -0,0 +1,228 @@
# This file contains configuration properties that affect how the powervc driver functions and how it
# communicates with the PowerVC server. Most properties have default values based on a default
# PowerVC configuration. However, some connection properties, such as PowerVC host name
# do not have default values and must be configured prior to running the powervc driver. These
# properties are marked with the text INPUT REQUIRED. Also you may have to change other
# properties depending on your environment and how your PowerVC server is configured. For
# more information, see the Smart Cloud Entry Administration Guide.
[DEFAULT]
# The following group of properties needs to be configured
# in order for the PowerVC Driver to be able to authenticate with keystone
# and obtain information from it, that might be required to run background
# tasks (such as discovering a new image), or simply to connect to a
# secured Glance.
# When running secured Glance, make sure the 'auth_strategy' property in
# nova.conf is set to 'keystone'.
# Log info messages
verbose = true
[openstack]
# Authentication url to authenticate with keystone (string value)
auth_url = http://localhost:5000/v2.0
# v2.0 or v3
keystone_version = v2.0
# Tenant name for connecting to keystone in admin context (string value)
admin_tenant_name = demo
# Username for connecting to keystone in admin context (string value)
admin_user = demo
# Password for connecting to keystone in admin context (string value)
admin_password = openstack
# For local SSL connections, specify the path and filename of the cacert file
#connection_cacert =
http_insecure = True
#
# Qpid connection information
#
# Qpid broker hostname (string value)
qpid_hostname = host
# Qpid broker port (integer value)
qpid_port = 5675
# Username for qpid connection (string value)
qpid_username =
# Password for qpid connection (string value)
qpid_password =
# Transport to use, either 'tcp'(default) or 'ssl'
qpid_protocol = tcp
[powervc]
# Full class name for the manager for PowerVC Manager Service (string value)
powervc_manager = powervc.nova.driver.compute.manager.PowerVCCloudManager
# Full class name for the driver for PowerVC Driver Service (string value)
powervc_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver
#
# Connection information for PowerVC.
#
# Authentication url of the PowerVC to connect to
# INPUT REQUIRED
# Provide 'host' portion by updating it to the hostname of the PowerVC system
#auth_url = https://host/powervc/openstack/identity/v3
# v2.0 or v3
keystone_version = v3
# Username for PowerVC connection (string value)
admin_user = root
# Password for PowerVC connection (string value)
admin_password = passw0rd
# Tenant name for PowerVC connection (string value)
admin_tenant_name = ibm-default
# For PowerVC SSL connections, specify the path and filename of the cacert file
# INPUT REQUIRED
# Provide the cacert file by copying it from its install location on the
# PowerVC host (e.g. /etc/pki/tls/certs/powervc.crt) to the local hosting
# Openstack system.
#connection_cacert =
# Value of insecure option for PowerVC connections (Default=True)
# INPUT REQUIRED
# Change to False when using a secure connection and providing a cacert file.
http_insecure = True
# Value of authorization token expiration stale duration (Default=3600)
# INPUT REQUIRED
# Due to a PowerVC requirement, all REST API clients need to refresh the
# authorization token at least 1 hour before expiration
expiration_stale_duration = 3600
# The names of the storage connectivity groups supported by our driver
# INPUT REQUIRED
# Provide the PowerVC storage connectivity group (SCG) names by getting the name
# from the PowerVC system, or using the PowerVC default SCG of "Any host, all VIOS".
# If you want to specify more than one SCG, just add another
# storage_connectivity_group line for each additional SCG
# Note: The value of this property must exactly match the value as specified on the
# PowerVC server, including case, punctuation, and spaces.
storage_connectivity_group = Any host, all VIOS
#storage_connectivity_group =
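# For example, to specify two groups, repeat the property on separate lines
# (the second group name below is purely hypothetical and must match an SCG
# defined on your PowerVC server):
# storage_connectivity_group = Any host, all VIOS
# storage_connectivity_group = Production SCG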
#
# Qpid connection information for PowerVC
#
# Qpid broker hostname (string value)
# INPUT REQUIRED
# Change 'host' to the hostname of the PowerVC system
qpid_hostname = host
# Qpid broker port (integer value)
# uncomment following line for non-ssl
# qpid_port = 5672
qpid_port = 5679
# Username for qpid connection (string value)
#qpid_username = powervc_qpid
# Password for qpid connection (string value)
# INPUT REQUIRED
# Provide the qpid connection password from the PowerVC system
# by using the cat command on the pw.file in the directory where
# PowerVC is installed (e.g. cat /opt/ibm/powervc/data/pw.file)
qpid_password =
# Transport to use, either 'tcp'(default) or 'ssl'
# uncomment following line for non-ssl
# qpid_protocol = tcp
qpid_protocol = ssl
#
# Sync variables
#
# The name of the staging project (string value)
# If not set defaults to 'Public'. If set the named project should exist and
# be accessible by the staging_user.
staging_project_name = Public
# The name of the staging user (string value)
# If not set defaults to 'admin'. If set the user should exist and
# have access to the project identified by staging_project_name.
staging_user = admin
# The prefix that will be added to the flavor name from PowerVC
# and stored (string value). This should be unique for every
# connection to help distinguish the flavors
flavor_prefix = PVC-
# This is a list of PowerVC flavor names that should be synced.
# If no flavor name is specified, then all flavors are synced.
flavor_white_list =
# This is a list of PowerVC flavor names that should not be synced.
flavor_black_list =
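# For example (hypothetical flavor names, assuming a comma-separated list):
# flavor_white_list = m1.small,m1.medium
# flavor_black_list = m1.xlarge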
# The periodic flavor sync interval in seconds.
flavor_sync_interval = 300
# Instance periodic sync interval specified in seconds
instance_sync_interval = 20
# How many instance sync intervals between full instance syncs. Only instances
# known to be out of sync are synced on the interval except after this many
# intervals when all instances are synced.
full_instance_sync_frequency = 30
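# For example, with the defaults above (instance_sync_interval = 20 and
# full_instance_sync_frequency = 30), a full instance sync happens roughly
# every 20 x 30 = 600 seconds.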
# Image periodic sync interval specified in seconds. This is the time from the end
# of one successful image periodic sync operation to the start of the next.
image_periodic_sync_interval_in_seconds = 300
# The time in seconds between image sync retry attempts if an error was
# encountered during an image sync operation
image_sync_retry_interval_time_in_seconds = 60
# The maximum number of images to return. The default is 500 images. If your PowerVC
# has more than 500 images, this limit should be increased to include all images.
image_limit = 500
# Volume periodic sync interval specified in seconds
volume_sync_interval = 20
# How many volume sync intervals between full volume syncs.
# Only volumes known to be out of sync are synced on the interval
# except after this many intervals when all volumes are synced.
full_volume_sync_frequency = 30
# Volume type periodic sync interval specified in seconds
volume_type_sync_interval = 20
# How many volume type sync intervals between full volume type syncs.
# Only volumes known to be out of sync are synced on the interval
# except after this many intervals when all volumes are synced.
full_volume_type_sync_frequency = 30
# Ignore delete errors so an exception is not thrown during a
# delete. When set to true, this allows the volume to be deleted
# on the hosting OS even if an exception occurs. When set to false,
# exceptions during delete prevent the volume from being deleted
# on the hosting OS.
volume_driver_ignore_delete_error = False
# The number of times to check whether attaching/detaching the volume succeeded
volume_max_try_times = 12
# Minimum delay interval and initial delay, in seconds, for long-running tasks.
longrun_loop_interval = 7
longrun_initial_delay = 10

View File

@ -0,0 +1,228 @@
# This file contains configuration properties that affect how the powervc driver functions and how it
# communicates with the PowerVC server. Most properties have default values based on a default
# PowerVC configuration. However, some connection properties, such as PowerVC host name
# do not have default values and must be configured prior to running the powervc driver. These
# properties are marked with the text INPUT REQUIRED. Also you may have to change other
# properties depending on your environment and how your PowerVC server is configured. For
# more information, see the Smart Cloud Entry Administration Guide.
[DEFAULT]
# The following group of properties needs to be configured
# in order for the PowerVC Driver to be able to authenticate with keystone
# and obtain information from it, that might be required to run background
# tasks (such as discovering a new image), or simply to connect to a
# secured Glance.
# When running secured Glance, make sure the 'auth_strategy' property in
# nova.conf is set to 'keystone'.
# Log info messages
verbose = true
[openstack]
# Authentication url to authenticate with keystone (string value)
auth_url = http://localhost:5000/v2.0
# v2.0 or v3
keystone_version = v2.0
# Tenant name for connecting to keystone in admin context (string value)
admin_tenant_name = demo
# Username for connecting to keystone in admin context (string value)
admin_user = demo
# Password for connecting to keystone in admin context (string value)
admin_password = openstack
# For local SSL connections, specify the path and filename of the cacert file
#connection_cacert =
http_insecure = True
#
# Qpid connection information
#
# Qpid broker hostname (string value)
qpid_hostname = monica
# Qpid broker port (integer value)
qpid_port = 5675
# Username for qpid connection (string value)
qpid_username =
# Password for qpid connection (string value)
qpid_password =
# Transport to use, either 'tcp'(default) or 'ssl'
qpid_protocol = tcp
[powervc]
# Full class name for the manager for PowerVC Manager Service (string value)
powervc_manager = powervc.nova.driver.compute.manager.PowerVCCloudManager
# Full class name for the driver for PowerVC Driver Service (string value)
powervc_driver = powervc.nova.driver.virt.powervc.driver.PowerVCDriver
#
# Connection information for PowerVC.
#
# Authentication url of the PowerVC to connect to
# INPUT REQUIRED
# Provide 'host' portion by updating it to the hostname of the PowerVC system
auth_url = https://host/powervc/openstack/identity/v3
# v2.0 or v3
keystone_version = v3
# Username for PowerVC connection (string value)
admin_user = root
# Password for PowerVC connection (string value)
admin_password = passw0rd
# Tenant name for PowerVC connection (string value)
admin_tenant_name = ibm-default
# For PowerVC SSL connections, specify the path and filename of the cacert file
# INPUT REQUIRED
# Provide the cacert file by copying it from its install location on the
# PowerVC host (e.g. /etc/pki/tls/certs/powervc.crt) to the local hosting
# Openstack system.
#connection_cacert =
# Value of insecure option for PowerVC connections (Default=True)
# INPUT REQUIRED
# Change to False when using a secure connection and providing a cacert file.
http_insecure = True
# Value of authorization token expiration stale duration (Default=3600)
# INPUT REQUIRED
# Due to a PowerVC requirement, all REST API clients need to refresh the
# authorization token at least 1 hour before expiration
expiration_stale_duration = 3600
# The names of the storage connectivity groups supported by our driver
# INPUT REQUIRED
# Provide the PowerVC storage connectivity group (SCG) names by getting the name
# from the PowerVC system, or using the PowerVC default SCG of "Any host, all VIOS".
# If you want to specify more than one SCG, just add another
# storage_connectivity_group line for each additional SCG
# Note: The value of this property must exactly match the value as specified on the
# PowerVC server, including case, punctuation, and spaces.
storage_connectivity_group = Any host, all VIOS
#storage_connectivity_group =
#
# Qpid connection information for PowerVC
#
# Qpid broker hostname (string value)
# INPUT REQUIRED
# Change 'host' to the hostname of the PowerVC system
qpid_hostname = host
# Qpid broker port (integer value)
# uncomment following line for non-ssl
# qpid_port = 5672
qpid_port = 5678
# Username for qpid connection (string value)
qpid_username = powervc_qpid_2
# Password for qpid connection (string value)
# INPUT REQUIRED
# Provide the qpid connection password from the PowerVC system
# by using the cat command on the pw.file in the directory where
# PowerVC is installed (e.g. cat /opt/ibm/powervc/data/pw.file)
qpid_password =
# Transport to use, either 'tcp'(default) or 'ssl'
# uncomment following line for non-ssl
# qpid_protocol = tcp
qpid_protocol = ssl
#
# Sync variables
#
# The name of the staging project (string value)
# If not set defaults to 'Public'. If set the named project should exist and
# be accessible by the staging_user.
staging_project_name = Public
# The name of the staging user (string value)
# If not set defaults to 'admin'. If set the user should exist and
# have access to the project identified by staging_project_name.
staging_user = admin
# The prefix that will be added to the flavor name from PowerVC
# and stored (string value). This should be unique for every
# connection to help distinguish the flavors
flavor_prefix = PVC-
# This is a list of PowerVC flavor names that should be synced.
# If no flavor name is specified, then all flavors are synced.
flavor_white_list =
# This is a list of PowerVC flavor names that should not be synced.
flavor_black_list =
# The periodic flavor sync interval in seconds.
flavor_sync_interval = 300
# Instance periodic sync interval specified in seconds
instance_sync_interval = 20
# How many instance sync intervals between full instance syncs. Only instances
# known to be out of sync are synced on the interval except after this many
# intervals when all instances are synced.
full_instance_sync_frequency = 30
# Image periodic sync interval specified in seconds. This is the time from the end
# of one successful image periodic sync operation to the start of the next.
image_periodic_sync_interval_in_seconds = 300
# The time in seconds between image sync retry attempts if an error was
# encountered during an image sync operation
image_sync_retry_interval_time_in_seconds = 60
# The maximum number of images to return. The default is 500 images. If your PowerVC
# has more than 500 images, this limit should be increased to include all images.
image_limit = 500
# Volume periodic sync interval specified in seconds
volume_sync_interval = 20
# How many volume sync intervals between full volume syncs.
# Only volumes known to be out of sync are synced on the interval
# except after this many intervals when all volumes are synced.
full_volume_sync_frequency = 30
# Volume type periodic sync interval specified in seconds
volume_type_sync_interval = 20
# How many volume type sync intervals between full volume type syncs.
# Only volumes known to be out of sync are synced on the interval
# except after this many intervals when all volumes are synced.
full_volume_type_sync_frequency = 30
# Ignore delete errors so an exception is not thrown during a
# delete. When set to true, this allows the volume to be deleted
# on the hosting OS even if an exception occurs. When set to false,
# exceptions during delete prevent the volume from being deleted
# on the hosting OS.
volume_driver_ignore_delete_error = False
# The number of times to check whether attaching/detaching the volume succeeded
volume_max_try_times = 12
# Minimum delay interval and initial delay, in seconds, for long-running tasks.
longrun_loop_interval = 7
longrun_initial_delay = 10

View File

@ -0,0 +1,62 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import mock
import testtools
import os
from powervc.common import config
class PVCConfigTest(testtools.TestCase):
def setUp(self):
super(PVCConfigTest, self).setUp()
def tearDown(self):
super(PVCConfigTest, self).tearDown()
del config.parse_power_config.power_config_loaded
def test_parse_config_1(self):
p1 = mock.patch('oslo.config.cfg.find_config_files',
new=mock.MagicMock(return_value=["%s%s%s" %
(os.path.dirname(__file__),
os.sep,
"powervc_test_1.conf")]))
try:
p1.start()
config.parse_power_config([], "powervc-baseproject", None)
# default value
self.assertEqual(config.CONF.powervc.auth_url,
"http://localhost:5000/v2.0/")
# value in file
self.assertEqual(config.CONF.powervc.qpid_port, 5679)
finally:
p1.stop()
def test_parse_config_2(self):
p2 = mock.patch('oslo.config.cfg.find_config_files',
new=mock.MagicMock(side_effect=[["%s%s%s" %
(os.path.dirname(__file__),
os.sep,
"powervc_test_1.conf")],
["%s%s%s" %
(os.path.dirname(__file__),
os.sep,
"powervc_test_2.conf")]]))
try:
p2.start()
config.parse_power_config([], "baseproject", None)
# extend value in second file
self.assertEqual(config.CONF.powervc.qpid_username,
"powervc_qpid_2")
finally:
p2.stop()

View File

@ -0,0 +1,64 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import unittest
from powervc.common.messaging import QpidConnection
class QpidTest(unittest.TestCase):
def setUp(self):
super(QpidTest, self).setUp()
self.conn = QpidConnection(url='127.0.0.1:5989',
username='test_username',
password='test_passwd',
transport='tcp',
reconnection_interval=60,
reconnect_handler=None,
context=None,
log=None)
def test_create_listener(self):
self.listener = self.conn.\
create_listener('test_exchange', 'test_topic')
self.assertNotEqual(self.listener, None)
self.assertEqual([self.listener], self.conn._listeners)
def test_register_handler(self):
def _fake_handler():
pass
if not hasattr(self, 'listener'):
self.listener = self.conn.\
create_listener('test_exchange', 'test_topic')
self.listener.register_handler('foo.bar.*', _fake_handler)
self.assertEqual(self.listener._handler_map['foo.bar.*'],
_fake_handler)
def test_unregister_handler(self):
def _fake_handler():
pass
if not hasattr(self, 'listener'):
self.listener = self.conn.\
create_listener('test_exchange', 'test_topic')
self.listener.register_handler('foo.bar.*', _fake_handler)
self.assertEqual(self.listener._handler_map['foo.bar.*'],
_fake_handler)
self.listener.unregister_handler('foo.bar.*')
self.assertEqual(self.listener._handler_map,
{})
def tearDown(self):
unittest.TestCase.tearDown(self)
if __name__ == "__main__":
unittest.main()

View File

@ -0,0 +1,45 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import testtools
from powervc.common import netutils
class PVCNetUtilsTest(testtools.TestCase):
def setUp(self):
super(PVCNetUtilsTest, self).setUp()
def tearDown(self):
super(PVCNetUtilsTest, self).tearDown()
def test_is_ipv4_address_1(self):
isipv4_address = netutils.is_ipv4_address("localhost")
self.assertFalse(isipv4_address)
def test_is_ipv4_address_2(self):
isipv4_address = netutils.is_ipv4_address("127.0.0.1")
self.assertTrue(isipv4_address)
def test_hostname_url_1(self):
url = netutils.hostname_url("http://127.0.0.1:5000/v2.0")
self.assertEqual(url, "http://127.0.0.1:5000/v2.0")
def test_hostname_url_2(self):
url = netutils\
.hostname_url("https://9.110.75.155/powervc/openstack/identity/v3")
self.assertEqual(url,
"https://9.110.75.155/powervc/openstack/identity/v3")
def test_hostname_url_3(self):
url = netutils.hostname_url("http://random_host:5000/v2.0")
self.assertEqual(url, "http://random_host:5000/v2.0")

File diff suppressed because it is too large

19
glance-powervc/.project Normal file
View File

@ -0,0 +1,19 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>glance-powervc</name>
<comment></comment>
<projects>
<project>common-powervc</project>
<project>glance</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

View File

@ -0,0 +1,11 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/glance-powervc</path>
<path>/glance-powervc/bin</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>

View File

@ -0,0 +1,62 @@
#!/usr/bin/env python
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""Starter script for the PowerVC Driver ImageManager Service."""
import eventlet
import os
import socket
import sys
import traceback
eventlet.patcher.monkey_patch(os=False, socket=True, time=True)
# FIXME: Is there a way to keep multiple instances from running at the same time?
# FIXME: Haven't really looked too close at this yet. It may need more work.
# If ../powervc/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
# TODO RYKAL
# This should go in the base __init__ folder I think
from glance.openstack.common import gettextutils
gettextutils.install('glance')
from powervc.glance.common import config
from glance.openstack.common import log
from glance.openstack.common import service
from glance.openstack.common import importutils
CONF = config.CONF
LOG = log.getLogger(__name__)
if __name__ == '__main__':
try:
# Obtain glance opts from glance-api.conf
config.parse_config(sys.argv, 'glance', 'glance-api')
log.setup('powervc')
LOG.info(_('Launching PowerVC Driver ImageManager service...'))
manager = importutils.import_object(
'powervc.glance.manager.manager.PowerVCImageManager')
launcher = service.ServiceLauncher()
launcher.launch_service(manager)
LOG.info(_('PowerVC Driver ImageManager service started'))
launcher.wait()
LOG.info(_('PowerVC Driver ImageManager service ended'))
except Exception:
traceback.print_exc()
raise

View File

@ -0,0 +1,102 @@
#!/bin/sh
#
# openstack-glance-powervc OpenStack PowerVC Glance Driver
#
# chkconfig: - 98 02
# description: Provides PowerVC manage-to support.
### BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Stop: 0 1 6
# Short-Description: OpenStack PowerVC Glance Driver
# Description:
### END INIT INFO
. /etc/rc.d/init.d/functions
suffix=powervc
prog=openstack-glance-powervc
exec="/opt/ibm/openstack/powervc-driver/bin/glance-$suffix"
config="/etc/powervc/powervc.conf"
pidfile="/var/run/$suffix/glance-$suffix.pid"
logfile="/var/log/$suffix/glance-$suffix.log"
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
start() {
[ -x $exec ] || exit 5
[ -f $config ] || exit 6
echo -n $"Starting $prog: "
daemon --user powervc --pidfile $pidfile "$exec --config-file $config --logfile $logfile &>/dev/null & echo \$! > $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,45 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
PowerVC Driver ImageManager Configuration
"""
from oslo.config import cfg
import powervc.common.config as common_config
from powervc.glance.common import constants
CONF = common_config.CONF
# PowerVC Driver ImageManager specific configuration
image_opts = [
# The image periodic sync interval in seconds. Default is 300.
cfg.IntOpt('image_periodic_sync_interval_in_seconds',
default=constants.IMAGE_PERIODIC_SYNC_INTERVAL_IN_SECONDS),
# In case of error, the image sync retry interval time in seconds. Default
# is 60.
cfg.IntOpt('image_sync_retry_interval_time_in_seconds',
default=constants.IMAGE_SYNC_RETRY_INTERVAL_TIME_IN_SECONDS),
# The maximum number of images to read for each query request. Default is
# 500.
cfg.IntOpt('image_limit', default=constants.IMAGE_LIMIT)
]
CONF.register_opts(image_opts, group='powervc')
# Import glance opts
CONF.import_opt('owner_is_tenant', 'glance.api.middleware.context')
def parse_config(*args, **kwargs):
common_config.parse_power_config(*args, **kwargs)

View File

@ -0,0 +1,99 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
All PowerVC Driver ImageManager Constants
"""
import powervc.common.constants as consts
# Maximum size of a property that can be handled by the v1 Image APIs
MAX_HEADER_LEN_V1 = 8192
# Interval in seconds between periodic image syncs
IMAGE_PERIODIC_SYNC_INTERVAL_IN_SECONDS = 300
# The maximum number of images to return with the v1 image list API call. The
# default is 500 images. If the PowerVC has more than 500 images, this limit
# should be increased to include all images.
IMAGE_LIMIT = 500
# The v2 URI path value
V2_URI_PATH = 'v2.0'
# The image client service type
CLIENT_SERVICE_TYPE = 'image'
# The image client endpoint type to use
CLIENT_ENDPOINT_TYPE = 'publicURL'
# Image location path
IMAGE_LOCATION_PATH = 'v2/images/'
# List of image create parameters to filter out
IMAGE_CREATE_PARAMS_FILTER = ['id']
# List of image update parameters to filter out
IMAGE_UPDATE_PARAMS_FILTER = ['owner', 'location']
# List of image properties which should have HTML/XML entities unescaped
IMAGE_UNESCAPE_PROPERTIES = ['configuration_strategy']
# List of v2image update parameters to filter out
v2IMAGE_UPDATE_PARAMS_FILTER = IMAGE_UPDATE_PARAMS_FILTER + ['deleted', 'size', 'checksum']
# List of image properties to filter out during an update
IMAGE_UPDATE_PROPERTIES_FILTER = [consts.POWERVC_UUID_KEY,
consts.LOCAL_UUID_KEY]
# Timestamp format of image updated_at field
IMAGE_TIMESTAMP_FORMAT = '%Y-%m-%dT%H:%M:%S'
# The expiration period for image events in hours
EVENT_TUPLE_EXPIRATION_PERIOD_IN_HOURS = 1
# The number of seconds in an hour
SECONDS_IN_HOUR = 3600
# PowerVC identifier
POWER_VC = 'pvc'
# Local hosting OS identifier
LOCAL = 'local'
# Event queue event constants
EVENT_TYPE = 'type'
EVENT_CONTEXT = 'context'
EVENT_MESSAGE = 'message'
# Event queue event types
LOCAL_IMAGE_EVENT = LOCAL
PVC_IMAGE_EVENT = POWER_VC
PERIODIC_SCAN_EVENT = 'periodic'
STARTUP_SCAN_EVENT = 'startup'
# Image notification event exchange
IMAGE_EVENT_EXCHANGE = 'glance'
# Image notification event topic
IMAGE_EVENT_TOPIC = 'notifications.info'
# Image notification event types
IMAGE_EVENT_TYPE_ALL = 'image.*'
IMAGE_EVENT_TYPE_ACTIVATE = 'image.activate'
IMAGE_EVENT_TYPE_CREATE = 'image.create'
IMAGE_EVENT_TYPE_UPDATE = 'image.update'
IMAGE_EVENT_TYPE_DELETE = 'image.delete'
# Constants used by the ImageSyncController
SYNC_PASSED = 1
SYNC_FAILED = -1
IMAGE_SYNC_RETRY_INTERVAL_TIME_IN_SECONDS = 60
IMAGE_SYNC_CHECK_INTERVAL_TIME_IN_SECONDS = 1

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

File diff suppressed because it is too large

192
glance-powervc/run_tests.sh Executable file
View File

@ -0,0 +1,192 @@
#!/bin/bash
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -eu
function usage {
echo "Usage: $0 [OPTION]..."
echo "Run PowerVC Glance test suite(s)"
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -r, --recreate-db Recreate the test database (deprecated, as this is now the default)."
echo " -n, --no-recreate-db Don't recreate the test database."
echo " -x, --stop Stop running tests after the first error or failure."
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -u, --update Update the virtual environment with any newer package versions"
echo " -p, --pep8 Just run flake8"
echo " -8, --8 Just run flake8, don't show PEP8 text for each error"
echo " -P, --no-pep8 Don't run flake8"
echo " -c, --coverage Generate coverage report"
echo " -h, --help Print this usage message"
echo " --hide-elapsed Don't print the elapsed time for each test along with slow test list"
echo " --standard-threads Don't do the eventlet threading monkeypatch."
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
echo " If no virtualenv is found, the script will ask if you would like to create one. If you "
echo " prefer to run tests NOT in a virtual environment, simply pass the -N option."
exit
}
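# Typical invocations, sketched from the options above:
#   ./run_tests.sh -V        # run the suite inside a virtualenv, creating it if needed
#   ./run_tests.sh -p        # only run the flake8 checks
#   ./run_tests.sh -N -c     # use the local environment and generate a coverage report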
function process_option {
case "$1" in
-h|--help) usage;;
-V|--virtual-env) always_venv=1; never_venv=0;;
-N|--no-virtual-env) always_venv=0; never_venv=1;;
-r|--recreate-db) recreate_db=1;;
-n|--no-recreate-db) recreate_db=0;;
-f|--force) force=1;;
-u|--update) update=1;;
-p|--pep8) just_flake8=1;;
-8|--8) short_flake8=1;;
-P|--no-pep8) no_flake8=1;;
-c|--coverage) coverage=1;;
--standard-threads)
export STANDARD_THREADS=1
;;
-*) noseopts="$noseopts $1";;
*) noseargs="$noseargs $1"
esac
}
venv=.venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
force=0
noseargs=
noseopts=
wrapper=""
just_flake8=0
short_flake8=0
no_flake8=0
coverage=0
recreate_db=1
update=0
for arg in "$@"; do
process_option $arg
done
# If enabled, tell nose to collect coverage data
if [ $coverage -eq 1 ]; then
noseopts="$noseopts --with-coverage --cover-package=glance-powervc"
fi
function run_tests {
# Just run the test suites in current environment
${wrapper} $NOSETESTS
# If we get some short import error right away, print the error log directly
RESULT=$?
if [ "$RESULT" -ne "0" ];
then
ERRSIZE=`wc -l run_tests.log | awk '{print \$1}'`
if [ "$ERRSIZE" -lt "40" ];
then
cat run_tests.log
fi
fi
return $RESULT
}
function run_flake8 {
FLAGS=--show-pep8
if [ $# -gt 0 ] && [ 'short' == ''$1 ]
then
FLAGS=''
fi
echo "Running flake8 ..."
# Just run flake8 in current environment
#echo ${wrapper} flake8 $FLAGS powervc | tee pep8.txt
${wrapper} flake8 $FLAGS powervc | tee pep8.txt
RESULT=${PIPESTATUS[0]}
return $RESULT
}
NOSETESTS="nosetests $noseopts $noseargs"
if [ $never_venv -eq 0 ]
then
# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi
if [ $update -eq 1 ]; then
echo "Updating virtualenv..."
python tools/install_venv.py
fi
if [ -e ${venv} ]; then
wrapper="${with_venv}"
else
if [ $always_venv -eq 1 ]; then
# Automatically install the virtualenv
python tools/install_venv.py
wrapper="${with_venv}"
else
echo -e "No virtual environment found...create one? (Y/n) \c"
read use_ve
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
# Install the virtualenv and run the test suite in it
python tools/install_venv.py
wrapper=${with_venv}
fi
fi
fi
fi
# Delete old coverage data from previous runs
if [ $coverage -eq 1 ]; then
${wrapper} coverage erase
fi
if [ $just_flake8 -eq 1 ]; then
run_flake8
RESULT=$?
echo "RESULT $RESULT"
exit $RESULT
fi
if [ $short_flake8 -eq 1 ]; then
run_flake8 short
RESULT=$?
exit $RESULT
fi
run_tests
RESULT=$?
# NOTE(sirp): we only want to run flake8 when we're running the full-test
# suite, not when we're running tests individually. To handle this, we need to
# distinguish between options (noseopts), which begin with a '-', and arguments
# (noseargs).
if [ -z "$noseargs" ]; then
if [ $no_flake8 -eq 0 ]; then
run_flake8
TMP_RESULT=$?
RESULT=$(($TMP_RESULT + $RESULT))
fi
fi
if [ $coverage -eq 1 ]; then
echo "Generating coverage report in covhtml/"
${wrapper} coverage html -d covhtml -i
fi
exit $RESULT

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,55 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
Simple Glance client test. This should be more like JUnit tests someday. For
now it's just some random tests
"""
from keystoneclient.v3 import client as v3client
from keystoneclient.v2_0 import client
from glanceclient.v2 import client as gc
keystone_client = client.Client(username='root', password='passw0rd',
tenant_name='ibm-default', insecure=True,
auth_url='https://9.5.125.55/powervc/open'
'stack/identity/v2.0')
keystonev3_client = v3client.Client(username='root', password='passw0rd',
project_name='ibm-default', insecure=True,
auth_url='https://9.5.125.55/powervc/'
'openstack/identity/v3')
token = keystone_client.auth_token
tokenv3 = keystonev3_client.auth_token # Keystone v3 test, but not used
print token
glanceUrl = None
if keystone_client.auth_ref:
if keystone_client.auth_ref.service_catalog:
glanceUrl_ret = keystone_client.\
auth_ref.service_catalog.\
get_urls(service_type='image', endpoint_type='publicURL')
if glanceUrl_ret:
glanceUrl = glanceUrl_ret[0]
print glanceUrl
glance_client = gc.Client(endpoint=glanceUrl, token=token, insecure=True)
###print glance_client.images.list()
print '=' * 8, 'PowerVC Images using the v2 Glance API', '=' * 8
v2pvc_images = glance_client.images
for v2image in v2pvc_images.list():
print '-' * 40
print 'Image ', v2image.get('name')
for v2imagekey in v2image.keys():
print v2imagekey, '=', v2image.get(v2imagekey)
print '-' * 40

View File

@ -0,0 +1,254 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import sys
import itertools
import time
import traceback
from oslo.config import cfg
from glance.openstack.common import gettextutils
gettextutils.install('glance')
import glance.openstack.common.log as logging
from glance.common import config as logging_config
from glanceclient.v1 import images as v1images
from powervc.common import config
from powervc.glance.common import constants
# PowerVC Driver ImageManager specific configuration
image_opts = [
# The image periodic sync interval in seconds
cfg.IntOpt('image_periodic_sync_interval_in_seconds',
default=constants.IMAGE_PERIODIC_SYNC_INTERVAL_IN_SECONDS)
]
CONF = config.CONF
CONF.register_opts(image_opts, 'powervc')
LOG = logging.getLogger(__name__)
config.parse_power_config(sys.argv, 'glance')
from powervc.common import messaging
from powervc.common import constants as consts
import powervc.common.client.factory as clients
def test_image_events(wait_forever=True):
def local_reconnect():
LOG.debug(_('Re-established connection to local hosting OS '
'Qpid broker'))
local_conn = messaging.LocalConnection(log=logging,
reconnect_handler=local_reconnect)
# local_conn = messaging.QpidConnection('localhost:5672', \
# 'admin', 'ICA1NTQxNzI5ODgK')
# conn = messaging.QpidConnection('localhost:5672', 'admin', 'openstack1')
local_listener = local_conn.create_listener('glance', 'notifications.info')
local_listener.register_handler('image.*',
handle_local_image_notifications)
local_conn.start()
# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \
# 'anonymous', '')
def pvc_reconnect():
LOG.debug(_('Re-established connection to PowerVC Qpid broker'))
pvc_conn = messaging.PowerVCConnection(log=logging,
reconnect_handler=pvc_reconnect)
# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \
# 'root', 'passw0rd')
pvc_listener = pvc_conn.create_listener('glance', 'notifications.info')
pvc_listener.register_handler('image.*',
handle_pvc_image_notifications)
pvc_conn.start()
print 'Monitoring hosting OS and PowerVC for Image notifications...'
while wait_forever:
time.sleep(5)
def test_pvc_image_events(wait_forever=True):
# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \
# 'anonymous', '')
def pvc_reconnect():
LOG.debug(_('Re-established connection to PowerVC Qpid broker'))
pvc_conn = messaging.PowerVCConnection(log=logging,
reconnect_handler=pvc_reconnect)
# pvc_conn = messaging.QpidConnection('9.5.125.55:5672', \
# 'root', 'passw0rd')
pvc_listener = pvc_conn.create_listener('glance', 'notifications.info')
pvc_listener.register_handler('image.*',
handle_pvc_image_notifications)
pvc_conn.start()
print 'Monitoring PowerVC for Image notifications...'
while wait_forever:
time.sleep(5)
def handle_local_image_notifications(context, message):
print '=' * 80
print 'LOCAL:', str(context)
print 'LOCAL:', str(message)
image = message.get('payload') # should be the v1 image as a dict
dump_image(image)
print '=' * 80
def handle_pvc_image_notifications(context, message):
print '=' * 80
print 'PVC:', str(context)
print 'PVC:', str(message)
image = message.get('payload') # should be the v1 image as a dict
dump_image(image)
print '=' * 80
def dump_image(image_dict):
for v1imagekey in image_dict.keys():
print v1imagekey, '=', image_dict.get(v1imagekey)
props = image_dict.get('properties')
if props:
for v1imageprop in props.keys():
print 'property: ', v1imageprop, '=',\
props.get(v1imageprop)
def test_update_local_image(image_id):
params = {}
filters = {}
filters['is_public'] = False
params['filters'] = filters
local_v1client = \
clients.LOCAL.get_client(str(consts.SERVICE_TYPES.image), 'v1')
v1local_images = local_v1client.images
image = \
get_v1image_from_id(image_id, itertools.chain(
v1local_images.list(), v1local_images.list(**params)))
if image:
field_dict, patch_dict = get_v1image_update_fields(image)
if 'is_public' in field_dict.keys():
public = field_dict['is_public']
field_dict['is_public'] = not public
v1local_images.update(image, **field_dict)
if len(patch_dict) > 0:
local_v2client = \
clients.LOCAL.get_client(str(consts.SERVICE_TYPES.image), 'v2')
v2local_images = local_v2client.images
v2local_images.update(image.id, **patch_dict)
print 'Image', image.name, 'updated.'
else:
print 'Image', image_id, 'not found!'
def get_v1image_update_fields(image):
"""
Get the properties for an image update
:param: image The image to pull properties from to be used
for an image update operation.
:returns: A tuple containing the dict of the
properties to use for an image update operation,
and the dict of the properties that are too
large to be processed by the v1 Image APIs. Those
properties should be updated using the
v2 Image PATCH API.
"""
field_dict = {}
patch_dict = {}
props = image.properties
if props and props is not None:
patch_dict = remove_large_properties(props)
image.properties = props
image_dict = image.to_dict()
for imagekey in image_dict.keys():
if imagekey in v1images.UPDATE_PARAMS and \
imagekey not in constants.IMAGE_UPDATE_PARAMS_FILTER:
field_value = image_dict.get(imagekey)
if field_value is not None:
if len(str(field_value)) < constants.MAX_HEADER_LEN_V1:
field_dict[imagekey] = field_value
else:
patch_dict[imagekey] = field_value
return field_dict, patch_dict
def remove_large_properties(properties):
"""
Remove any properties that are too large to be processed by
the v1 APIs and return them in a dict to the caller. The properties
passed in are also modified.
:param: properties. The properties dict to remove large properties
from. Large properties are removed from the original
properties dict
:returns: A dict containing properties that are too large to
be processed by v1 Image APIs
"""
too_large_properties = {}
if properties and properties is not None:
for propkey in properties.keys():
propvalue = properties.get(propkey)
if propvalue and propvalue is not None:
if properties.get(propkey) and (len(str(propvalue)) >=
constants.MAX_HEADER_LEN_V1):
too_large_properties[propkey] = properties.pop(propkey)
return too_large_properties
def test_delete_local_image(image_id):
pass
def get_v1image_from_id(image_id, v1images):
"""
Get a v1 Image from an image id.
:param: image_id The image id
:param: v1images The image manager used to obtain images from the
v1 glance client
:returns: The image for the specified id or None if not found.
"""
for image in v1images:
if image and image.id == image_id:
return image
return None
"""
Main test entry point
"""
if __name__ == '__main__':
try:
# turn off debug logging
# CONF.debug = False
logging_config.setup_logging()
logging.setup('powervc')
# test getting the staging project id
# test_image_events(wait_forever=True)
test_pvc_image_events(wait_forever=True)
# image_id = '3060d198-c951-4693-9b1d-6314ac0539bf'
# test_update_local_image(image_id)
# test_delete_local_image(image_id)
print 'Tests done!'
except Exception:
traceback.print_exc()
raise

21
neutron-powervc/.project Normal file
View File

@ -0,0 +1,21 @@
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
<name>neutron-powervc</name>
<comment></comment>
<projects>
<project>common-powervc</project>
<project>neutron</project>
<project>neutron-client</project>
<project>oslo</project>
</projects>
<buildSpec>
<buildCommand>
<name>org.python.pydev.PyDevBuilder</name>
<arguments>
</arguments>
</buildCommand>
</buildSpec>
<natures>
<nature>org.python.pydev.pythonNature</nature>
</natures>
</projectDescription>

View File

@ -0,0 +1,10 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?>
<pydev_project>
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
<path>/neutron-powervc</path>
</pydev_pathproperty>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>

View File

@ -0,0 +1,120 @@
#!/usr/bin/env python
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import sys
import traceback
import os
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before neutron agent '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
eventlet.patcher.monkey_patch(os=False, thread=False)
from oslo.config import cfg
from neutron import context
from neutron.common import config as logging_config
from neutron.openstack.common import log as logging
from powervc.common import config
from powervc.neutron.api.client_rpc import RpcClient
LOG = logging.getLogger(__name__)
VALID_COMMANDS = ['net-list', 'subnet-list', 'port-list',
'net-show', 'subnet-show', 'port-show',
'get-local-net', 'get-pvc-net']
def usage():
print 'Usage neutron-powervc'
print ''
print 'Commands:'
print ' net-list List networks mapped in the DB'
print ' net-show <sync_key> Retrieve a specific network mapping'
print ''
print ' subnet-list List subnets mapped in the DB'
print ' subnet-show <sync_key> Retrieve a specific subnet mapping'
print ''
print ' port-list List ports mapped in the DB'
print ' port-show <sync_key> Retrieve a specific port mapping'
print ''
print ' get-local-net <pvc id> Retrieve local id for PVC network id'
print ' get-pvc-net <local id> Retrieve PVC id for local network id'
print ''
sys.exit(0)
def main():
try:
if len(sys.argv) == 1:
usage()
cmd = sys.argv[1]
opt = None
if len(sys.argv) == 3:
opt = sys.argv[2]
if cmd not in VALID_COMMANDS:
            print 'Invalid request:', cmd
usage()
argv = [sys.argv[0]]
config.parse_power_config(argv, 'powervc-neutron')
# logging_config.setup_logging(cfg.CONF)
LOG.debug(_('Create RPC interface'))
ctx = context.get_admin_context_without_session()
rpc = RpcClient(ctx)
if cmd == 'net-list':
LOG.debug(_('Calling RPC method'))
rpc.get_networks()
elif cmd == 'subnet-list':
LOG.debug(_('Calling RPC method'))
rpc.get_subnets()
elif cmd == 'port-list':
LOG.debug(_('Calling RPC method'))
rpc.get_ports()
elif cmd == 'net-show':
if not opt:
usage()
LOG.debug(_('Calling RPC method'))
rpc.get_network(opt)
elif cmd == 'subnet-show':
if not opt:
usage()
LOG.debug(_('Calling RPC method'))
rpc.get_subnet(opt)
elif cmd == 'port-show':
if not opt:
usage()
LOG.debug(_('Calling RPC method'))
rpc.get_port(opt)
elif cmd == 'get-local-net':
if not opt:
usage()
LOG.debug(_('Calling RPC method'))
rpc.get_local_network_uuid(opt)
elif cmd == 'get-pvc-net':
if not opt:
usage()
LOG.debug(_('Calling RPC method'))
rpc.get_pvc_network_uuid(opt)
else:
usage()
except Exception:
traceback.print_exc()
raise
if __name__ == "__main__":
main()

View File

@ -0,0 +1,53 @@
#!/usr/bin/env python
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
import os
import sys
# NOTE(mikal): All of this is because if dnspython is present in your
# environment then eventlet monkeypatches socket.getaddrinfo() with an
# implementation which doesn't work for IPv6. What we're checking here is
# that the magic environment variable was set when the import happened.
if ('eventlet' in sys.modules and
os.environ.get('EVENTLET_NO_GREENDNS', '').lower() != 'yes'):
raise ImportError('eventlet imported before neutron agent '
'(env var set to %s)'
% os.environ.get('EVENTLET_NO_GREENDNS'))
os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
import eventlet
eventlet.patcher.monkey_patch(os=False, thread=False)
# If ../powervc/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python.
POSSIBLE_TOPDIR = os.path.normpath(os.path.join(
os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(POSSIBLE_TOPDIR, 'powervc', '__init__.py')):
sys.path.insert(0, POSSIBLE_TOPDIR)
# TODO RYKAL
# This should go in the base __init__ folder I think
from neutron.openstack.common import gettextutils
gettextutils.install('neutron')
from neutron.common import config as logging_config
from powervc.common import config
config.parse_power_config(sys.argv, 'powervc-neutron')
logging_config.setup_logging(config.CONF)
from powervc.neutron.agent.neutron_powervc_agent import main
main()

View File

@ -0,0 +1,58 @@
[DEFAULT]
debug = False
verbose = True
# The messaging module to use, defaults to kombu.
# rpc_backend = neutron.openstack.common.rpc.impl_kombu
# AMQP password
# rabbit_password = openstack1
# AMQP host
# rabbit_host = localhost
# Size of RPC thread pool
# rpc_thread_pool_size = 64
# Size of RPC connection pool
# rpc_conn_pool_size = 30
# Seconds to wait for a response from call or multicall
# rpc_response_timeout = 60
# Seconds to wait before a cast expires (TTL). Only supported by impl_zmq.
# rpc_cast_timeout = 30
# Modules of exceptions that are permitted to be recreated
# upon receiving exception data from an rpc call.
# allowed_rpc_exception_modules = neutron.openstack.common.exception, nova.exception
# AMQP exchange to connect to if using RabbitMQ or QPID
control_exchange = nova
# QPID
# rpc_backend=neutron.openstack.common.rpc.impl_qpid
# Qpid broker hostname
# qpid_hostname = localhost
# Qpid broker port
# qpid_port = 5672
# Username for qpid connection
# qpid_username = qpidclient
# Password for qpid connection
# qpid_password = openstack1
# Space separated list of SASL mechanisms to use for auth
# qpid_sasl_mechanisms = ''
# Seconds between connection keepalive heartbeats
# qpid_heartbeat = 60
# Transport to use, either 'tcp' or 'ssl'
# qpid_protocol = tcp
# Disable Nagle algorithm
# qpid_tcp_nodelay = True
[AGENT]
# Agent's polling interval in seconds
polling_interval = 60
# (ListOpt) Comma-separated list of PowerVC network names to be mapped to
# local OS as networks. If the network does not exist in the local OS, it
# will be automatically created. PowerVC networks listed here do not have
# to exist prior to starting up the powervc_neutron_agent. Wildcard
# characters ('*') can be specified. By default, all PowerVC networks will
# be available in the local OS.
# map_powervc_networks = *
[DATABASE]
# Database where agent will store mapping data
connection = mysql://root:openstack1@localhost/powervc?charset=utf8

View File

@ -0,0 +1,104 @@
#!/bin/sh
#
# openstack-neutron-powervc OpenStack PowerVC Neutron Driver
#
# chkconfig: - 98 02
# description: Provides PowerVC manage-to support.
### BEGIN INIT INFO
# Provides:
# Required-Start: $remote_fs $network $syslog
# Required-Stop: $remote_fs $syslog
# Default-Stop: 0 1 6
# Short-Description: OpenStack PowerVC Neutron Driver
# Description:
### END INIT INFO
. /etc/rc.d/init.d/functions
suffix=powervc
prog=openstack-neutron-powervc
exec="/opt/ibm/openstack/powervc-driver/bin/neutron-$suffix-agent"
config="/etc/$suffix/$suffix-neutron.conf"
powervcconf="/etc/$suffix/$suffix.conf"
pidfile="/var/run/$suffix/neutron-$suffix.pid"
logfile="/var/log/$suffix/neutron-$suffix.log"
[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
lockfile=/var/lock/subsys/$prog
start() {
[ -x $exec ] || exit 5
[ -f $config ] || exit 6
[ -f $powervcconf ] || exit 6
echo -n $"Starting $prog: "
daemon --user powervc --pidfile $pidfile "$exec --config-file $config --config-file $powervcconf --logfile $logfile &>/dev/null & echo \$! > $pidfile"
retval=$?
echo
[ $retval -eq 0 ] && touch $lockfile
return $retval
}
stop() {
echo -n $"Stopping $prog: "
killproc -p $pidfile $prog
retval=$?
echo
[ $retval -eq 0 ] && rm -f $lockfile
return $retval
}
restart() {
stop
start
}
reload() {
restart
}
force_reload() {
restart
}
rh_status() {
status -p $pidfile $prog
}
rh_status_q() {
rh_status >/dev/null 2>&1
}
case "$1" in
start)
rh_status_q && exit 0
$1
;;
stop)
rh_status_q || exit 0
$1
;;
restart)
$1
;;
reload)
rh_status_q || exit 7
$1
;;
force-reload)
force_reload
;;
status)
rh_status
;;
condrestart|try-restart)
rh_status_q || exit 0
restart
;;
*)
echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload}"
exit 2
esac
exit $?

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

File diff suppressed because it is too large

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,117 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
from oslo.config import cfg
from prettytable import PrettyTable
from neutron.openstack.common.rpc import proxy
from neutron.openstack.common import log as logging
from powervc.common.gettextutils import _
LOG = logging.getLogger(__name__)
LIST_COLUMNS = ['status', 'local_id', 'pvc_id', 'sync_key']
#==============================================================================
# RPC client
#==============================================================================
class RpcClient(proxy.RpcProxy):
BASE_RPC_API_VERSION = '1.0'
def __init__(self, context):
LOG.debug(_('__init__'))
self.topic = 'powervcrpc'
self.context = context
self.host = cfg.CONF.host
super(RpcClient, self).__init__(
topic=self.topic, default_version=self.BASE_RPC_API_VERSION)
def _print_table(self, result):
if result and len(result) > 0:
pt = PrettyTable(LIST_COLUMNS)
for obj in result:
row = []
for col in LIST_COLUMNS:
row.append(obj.get(col))
pt.add_row(row)
print pt
def _print_object(self, result):
if result:
pt = PrettyTable(['Field', 'Value'])
pt.align['Field'] = 'l'
pt.align['Value'] = 'l'
for field in result.keys():
row = [field, result.get(field)]
pt.add_row(row)
print pt
else:
print 'Result from RPC call: ', result
def get_local_network_uuid(self, network_id):
LOG.debug(_('get_local_network_uuid'))
result = self.call(self.context,
self.make_msg('get_local_network_uuid',
network_id=network_id),
topic=self.topic)
print 'Result from RPC call:', result
def get_pvc_network_uuid(self, network_id):
LOG.debug(_('get_pvc_network_uuid'))
result = self.call(self.context,
self.make_msg('get_pvc_network_uuid',
network_id=network_id),
topic=self.topic)
print 'Result from RPC call:', result
def get_network(self, opt):
LOG.debug(_('get_network: %s'), opt)
result = self.call(self.context,
self.make_msg('get_network', sync_key=opt),
topic=self.topic)
self._print_object(result)
def get_networks(self):
LOG.debug(_('get_networks'))
result = self.call(self.context,
self.make_msg('get_networks'),
topic=self.topic)
self._print_table(result)
def get_subnet(self, opt):
LOG.debug(_('get_subnet: %s'), opt)
result = self.call(self.context,
self.make_msg('get_subnet', sync_key=opt),
topic=self.topic)
self._print_object(result)
def get_subnets(self):
LOG.debug(_('get_subnets'))
result = self.call(self.context,
self.make_msg('get_subnets'),
topic=self.topic)
self._print_table(result)
def get_port(self, opt):
LOG.debug(_('get_port: %s'), opt)
result = self.call(self.context,
self.make_msg('get_port', sync_key=opt),
topic=self.topic)
self._print_object(result)
def get_ports(self):
LOG.debug(_('get_ports'))
result = self.call(self.context,
self.make_msg('get_ports'),
topic=self.topic)
self._print_table(result)

View File

@ -0,0 +1,115 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
from neutron.openstack.common import log as logging
from neutron.openstack.common.rpc import dispatcher
from powervc.common.constants import LOCAL_OS
from powervc.common.constants import POWERVC_OS
from powervc.common.gettextutils import _
from powervc.neutron.common import utils
from powervc.neutron.db import powervc_db_v2
LOG = logging.getLogger(__name__)
#==============================================================================
# RPC callback
#==============================================================================
class PVCRpcCallbacks(object):
"""
RPC callbacks for nova driver calling this agent.
MUST set topic=powervc at both sides.
"""
# Set RPC API version to 1.0 by default.
RPC_API_VERSION = '1.0'
def __init__(self, neutron_agent):
super(PVCRpcCallbacks, self).__init__()
self.agent = neutron_agent
self.db = powervc_db_v2.PowerVCAgentDB()
def create_rpc_dispatcher(self):
return dispatcher.RpcDispatcher([self])
def get_local_network_uuid(self, context, network_id):
LOG.info(_("Neutron Agent RPC: get_local_network_uuid:"))
LOG.info(_("- pvc_net_id: %s"), network_id)
local_net_id = utils.translate_net_id(self.db, network_id, LOCAL_OS)
LOG.info(_("- local_net_id: %s"), local_net_id)
return local_net_id
def get_pvc_network_uuid(self, context, network_id):
LOG.info(_("Neutron Agent RPC: get_pvc_network_uuid:"))
LOG.info(_("- local_net_id: %s"), network_id)
pvc_net_id = utils.translate_net_id(self.db, network_id, POWERVC_OS)
LOG.info(_("- pvc_net_id: %s"), pvc_net_id)
return pvc_net_id
def get_network(self, context, sync_key):
LOG.info(_("Neutron Agent RPC: get_network:"))
LOG.info(_("- sync_key: %s"), sync_key)
net = self.db.get_network(sync_key=sync_key)
LOG.info(_("- net: %s"), net)
return net
def get_networks(self, context):
LOG.info(_("Neutron Agent RPC: get_networks:"))
nets = self.db.get_networks()
LOG.info(_("- nets: %s"), nets)
return nets
def get_subnet(self, context, sync_key):
LOG.info(_("Neutron Agent RPC: get_subnet:"))
LOG.info(_("- sync_key: %s"), sync_key)
subnet = self.db.get_subnet(sync_key=sync_key)
LOG.info(_("- subnet: %s"), subnet)
return subnet
def get_subnets(self, context):
LOG.info(_("Neutron Agent RPC: get_subnets:"))
subnets = self.db.get_subnets()
LOG.info(_("- subnets: %s"), subnets)
return subnets
def get_port(self, context, sync_key):
LOG.info(_("Neutron Agent RPC: get_port:"))
LOG.info(_("- sync_key: %s"), sync_key)
port = self.db.get_port(sync_key=sync_key)
LOG.info(_("- port: %s"), port)
return port
def get_ports(self, context):
LOG.info(_("Neutron Agent RPC: get_ports:"))
ports = self.db.get_ports()
LOG.info(_("- ports: %s"), ports)
return ports
def set_device_id_on_port_by_pvc_instance_uuid(self,
context,
device_id,
pvc_ins_uuid):
"""
Query the ports by pvc instance uuid, and set its
local instance id(device_id).
"""
LOG.info(_("Neutron Agent RPC: "
"set_device_id_on_port_by_pvc_instance_uuid:"))
LOG.info(_("- device_id: %s"), device_id)
LOG.info(_("- pvc_ins_uuid: %s"), pvc_ins_uuid)
local_ids = self.agent.\
set_device_id_on_port_by_pvc_instance_uuid(self.db,
device_id,
pvc_ins_uuid)
LOG.info(_("- local_ids: %s"), local_ids)
return local_ids

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,253 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
Module to contain all of the local OS routines
"""
'''
Created on Aug 1, 2013
@author: John Kasperski
'''
from neutron.openstack.common import log as logging
from powervc.common import messaging
from powervc.common.client import factory
from powervc.common.constants import SERVICE_TYPES
from powervc.common.constants import LOCAL_OS
from powervc.common.gettextutils import _
from powervc.neutron.client import neutron_client_bindings
from powervc.neutron.common import constants
from powervc.neutron.common import utils
from powervc.neutron.db import powervc_db_v2
LOG = logging.getLogger(__name__)
class Client(neutron_client_bindings.Client):
"""Local OS access methods"""
def __init__(self, client, agent):
if not client:
return
self.os = LOCAL_OS
self.db = powervc_db_v2.PowerVCAgentDB()
self.agent = agent
super(Client, self).__init__(client, self.os)
self._create_amqp_listeners()
# A cache to save image uuids on power.
self.power_image_cache = []
# Save nova/glance client
self.nova = None
self.glance = None
def _create_amqp_listeners(self):
"""Listen for AMQP messages from the local OS"""
LOG.debug(_('Creating AMQP listeners'))
def reconnect():
LOG.info(_('Re-established connection to local OS Qpid broker'))
self.agent.queue_event(self.os, constants.EVENT_FULL_SYNC, None)
connection = messaging.LocalConnection(log=logging,
reconnect_handler=reconnect)
listener = connection.create_listener(constants.QPID_EXCHANGE,
constants.QPID_TOPIC)
listener.register_handler(constants.EVENT_NETWORK_CREATE,
self._handle_network_create)
listener.register_handler(constants.EVENT_NETWORK_UPDATE,
self._handle_network_update)
listener.register_handler(constants.EVENT_NETWORK_DELETE,
self._handle_network_delete)
listener.register_handler(constants.EVENT_SUBNET_CREATE,
self._handle_subnet_create)
listener.register_handler(constants.EVENT_SUBNET_UPDATE,
self._handle_subnet_update)
listener.register_handler(constants.EVENT_SUBNET_DELETE,
self._handle_subnet_delete)
listener.register_handler(constants.EVENT_PORT_CREATE,
self._handle_port_create)
listener.register_handler(constants.EVENT_PORT_UPDATE,
self._handle_port_update)
listener.register_handler(constants.EVENT_PORT_DELETE,
self._handle_port_delete)
connection.start()
def _handle_network_create(self, context, message):
event, payload = self._extact_event_payload(message)
network = payload.get('network')
network_id = network.get('id')
if not utils.is_network_mappable(network):
LOG.info(_("Network %s is not mappable"), network_id)
return
db_net = self.db.get_network(local_id=network_id)
if db_net:
LOG.info(_("DB entry for network %s already exists"), network_id)
return
self.agent.queue_event(self.os, event, network)
def _handle_network_update(self, context, message):
event, payload = self._extact_event_payload(message)
network = payload.get('network')
self.agent.queue_event(self.os, event, network)
def _handle_network_delete(self, context, message):
event, payload = self._extact_event_payload(message)
network_id = payload.get('network_id')
self.agent.queue_event(self.os, event, network_id)
def _handle_subnet_create(self, context, message):
event, payload = self._extact_event_payload(message)
subnet = payload.get('subnet')
subnet_id = subnet.get('id')
if not utils.is_subnet_mappable(subnet):
LOG.info(_("Subnet %s is not mappable"), subnet_id)
return
db_sub = self.db.get_subnet(local_id=subnet_id)
if db_sub:
LOG.info(_("DB entry for subnet %s already exists"), subnet_id)
return
self.agent.queue_event(self.os, event, subnet)
def _handle_subnet_update(self, context, message):
event, payload = self._extact_event_payload(message)
subnet = payload.get('subnet')
self.agent.queue_event(self.os, event, subnet)
def _handle_subnet_delete(self, context, message):
event, payload = self._extact_event_payload(message)
subnet_id = payload.get('subnet_id')
self.agent.queue_event(self.os, event, subnet_id)
def _handle_port_create(self, context, message):
event, payload = self._extact_event_payload(message)
port = payload.get('port')
port_id = port.get('id')
if not utils.is_port_mappable(port):
LOG.info(_("Port %s is not mappable"), port_id)
return
db_port = self.db.get_port(local_id=port_id)
if db_port:
LOG.info(_("DB entry for port %s already exists"), port_id)
return
self.agent.queue_event(self.os, event, port)
def _handle_port_update(self, context, message):
event, payload = self._extact_event_payload(message)
port = payload.get('port')
self.agent.queue_event(self.os, event, port)
def _handle_port_delete(self, context, message):
event, payload = self._extact_event_payload(message)
port_id = payload.get('port_id')
self.agent.queue_event(self.os, event, port_id)
#==============================================================================
# Local OS - Utility routines using other clients (Nova, Glance)
#==============================================================================
def get_power_vm_mapping(self):
"""
Return dict with PowerVC to local instance uuid mappings
"""
vm_map = {}
if not self.nova:
self.nova = factory.LOCAL.get_client(str(SERVICE_TYPES.compute))
try:
local_instances = self.nova.manager.list_all_servers()
except Exception as e:
LOG.exception(_("Exception occurred getting servers: %s"), e)
return vm_map
for inst in local_instances:
metadata = inst._info.get(constants.METADATA)
if metadata:
pvc_id = metadata.get(constants.PVC_ID)
if pvc_id:
vm_map[pvc_id] = inst._info.get('id')
return vm_map
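    # Illustrative note (not from the original source): the returned mapping
    # has the shape {<PowerVC instance uuid>: <local instance uuid>}, built
    # from each local server whose metadata carries a 'pvc_id' entry. For
    # example, a local server 'bbbb-2222' with metadata
    # {'pvc_id': 'aaaa-1111'} contributes vm_map['aaaa-1111'] = 'bbbb-2222'.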
def is_instance_valid(self, uuid):
"""
Check if this VM instance is still valid. Call nova client
to retrieve the VM information.
"""
# Verify uuid is valid
if not uuid or len(uuid) == 0:
return False
# Check to see if this is a reserved port that we created while we
# are waiting for the PowerVC side to go away
if uuid.startswith(constants.RSVD_PORT_PREFIX):
return False
if not self.nova:
self.nova = factory.LOCAL.get_client(str(SERVICE_TYPES.compute))
try:
inst = self.nova.manager.get(uuid)
except Exception as e:
"""
If the instance can not be found, exception will be thrown. These
exceptions should be caught and not break the agent.
"""
LOG.exception(_("Exception occurred getting server %s: %s"),
uuid, e)
return False
if inst:
return True
return False
def is_instance_on_power(self, uuid):
"""
Return True if an instance is hosted on power.
"""
# Verify uuid is valid
if not uuid or len(uuid) == 0:
return False
if not self.nova:
self.nova = factory.LOCAL.get_client(str(SERVICE_TYPES.compute))
try:
inst = self.nova.manager.get(uuid)
except Exception as e:
"""
If the instance can not be found, exception will be thrown. These
exceptions should be caught and not break the agent.
"""
LOG.exception(_("Exception occurred getting server %s: %s"),
uuid, e)
return False
if inst:
metadata = inst._info[constants.METADATA]
if constants.PVC_ID in metadata:
# Return true if we have pvc_id for this instance.
return True
else:
img_uuid = inst.image.get('id', '')
if img_uuid in self.power_image_cache:
return True
else:
# Check if the image is hosted on power.
if not self.glance:
self.glance = factory.LOCAL.\
get_client(str(SERVICE_TYPES.image))
try:
img = self.glance.getImage(img_uuid)
except Exception as e:
LOG.exception(_("Exception occurred getting image "
"%s: %s"), img_uuid, e)
return False
if constants.POWERVM == img.get(constants.HYPERVISOR_TYPE,
''):
self.power_image_cache.append(img_uuid)
return True
return False
# Return false if we can't find this instance locally.
return False

View File

@ -0,0 +1,328 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
Module to contain all of the base Neutron client interfaces
"""
'''
Created on Aug 1, 2013
@author: John Kasperski
'''
from neutron.openstack.common import log as logging
from neutronclient.common import exceptions
import powervc.common.client.extensions.base as base
from powervc.common.constants import POWERVC_OS
from powervc.common.gettextutils import _
from powervc.neutron.common import constants
from powervc.neutron.common import utils
from powervc.neutron.db import powervc_db_v2
LOG = logging.getLogger(__name__)
class Client(base.ClientExtension):
"""Neutron Client access methods"""
def __init__(self, client, os):
super(Client, self).__init__(client)
self.os = os
self.db = powervc_db_v2.PowerVCAgentDB()
self.client = client
def _extact_event_payload(self, message):
event = message.get('event_type')
payload = message.get('payload')
LOG.info(_("Handling AMQP message from %s: %s"), self.os, event)
return (event, payload)
def create_network(self, net):
body = {}
for field in constants.NETWORK_CREATE_FIELDS:
if field in net:
body[field] = net[field]
request = {}
request['network'] = body
try:
LOG.info(_("Create %s network: %s"), self.os, body)
response = self.client.create_network(request)
if response and 'network' in response:
return response.get('network')
return None
except exceptions.NeutronClientException as e:
LOG.exception(_("Error creating network: %s\nError message: %s"),
body, e)
return None
def create_subnet(self, sub):
net_id = utils.translate_net_id(self.db, sub.get('network_id'),
self.os)
if not net_id:
return None
body = {}
body['network_id'] = net_id
for field in constants.SUBNET_CREATE_FIELDS:
if field in sub:
body[field] = sub[field]
request = {}
request['subnet'] = body
try:
LOG.info(_("Create %s subnet: %s"), self.os, body)
response = self.client.create_subnet(request)
if response and 'subnet' in response:
return response.get('subnet')
return None
except exceptions.NeutronClientException as e:
LOG.exception(_("Error creating subnet: %s\nError message: %s"),
body, e)
return None
def create_port(self, port):
net_id = utils.translate_net_id(self.db, port.get('network_id'),
self.os)
if not net_id:
return None
body = {}
body['network_id'] = net_id
body['fixed_ips'] = []
for field in constants.PORT_CREATE_FIELDS:
if field in port:
body[field] = port[field]
if self.os == POWERVC_OS:
body['device_owner'] = constants.POWERVC_DEVICE_OWNER
elif port.get('device_id'):
# If we are creating a local port and the PowerVC port has a
# device id, then set the device id of the new local port to be
# "pvc:" + PowerVC device id.
body['device_id'] = constants.RSVD_PORT_PREFIX + port['device_id']
fixed_ips = port.get('fixed_ips')
if not fixed_ips:
return None
for ip in fixed_ips:
ip_addr = ip.get('ip_address')
if not ip_addr or ':' in ip_addr:
continue
sub_id = utils.translate_subnet_id(self.db, ip.get('subnet_id'),
self.os)
if not sub_id:
LOG.warning(_("%s subnet does not exist for: %s"),
self.os, ip_addr)
continue
new_ip = {}
new_ip['ip_address'] = ip_addr
new_ip['subnet_id'] = sub_id
body['fixed_ips'].append(new_ip)
if len(body['fixed_ips']) == 0:
return None
request = {}
request['port'] = body
try:
LOG.info(_("Create %s port: %s"), self.os, body)
response = self.client.create_port(request)
if response and 'port' in response:
return response.get('port')
return None
except exceptions.NeutronClientException as e:
LOG.exception(_("Error creating port: %s\nError message: %s"),
body, e)
return None
def delete_network(self, net_id):
try:
LOG.info(_("Delete %s network: %s"), self.os, net_id)
return self.client.delete_network(net_id)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error deleting network: %s"), e)
return e
def delete_subnet(self, sub_id):
try:
LOG.info(_("Delete %s subnet: %s"), self.os, sub_id)
return self.client.delete_subnet(sub_id)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error deleting subnet: %s"), e)
return e
def delete_port(self, port_id):
try:
LOG.info(_("Delete %s port: %s"), self.os, port_id)
return self.client.delete_port(port_id)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error deleting port: %s"), e)
return e
def get_networks(self):
response = self.client.list_networks()
if 'networks' in response:
net_list = response['networks']
networks = {}
for net in net_list:
if utils.is_network_mappable(net):
net_id = net['id']
networks[net_id] = net
return networks
return {}
def get_subnets(self):
response = self.client.list_subnets()
if 'subnets' in response:
sub_list = response['subnets']
subnets = {}
for sub in sub_list:
if utils.is_subnet_mappable(sub):
sub_id = sub['id']
subnets[sub_id] = sub
return subnets
return {}
def get_ports(self):
response = self.client.list_ports()
if 'ports' in response:
port_list = response['ports']
ports = {}
for port in port_list:
if utils.is_port_mappable(port):
port_id = port['id']
ports[port_id] = port
return ports
return {}
def get_ports_on_network(self, net_id):
response = self.client.list_ports(network_id=net_id)
if 'ports' in response:
return response['ports']
return []
def get_ports_on_subnet(self, net_id, subnet_id):
port_list = self.get_ports_on_network(net_id)
if len(port_list) == 0:
return []
ports = []
for port in port_list:
fixed_ips = port.get('fixed_ips')
if not fixed_ips:
continue
for ip in fixed_ips:
if ip.get('subnet_id') == subnet_id:
ports.append(port)
break
return ports
def get_network(self, net_id, log_error=False):
try:
response = self.client.show_network(net_id)
if 'network' in response:
return response['network']
return None
except exceptions.NeutronClientException as e:
if log_error:
LOG.exception(_("Error retrieving network: %s"), e)
return None
def get_subnet(self, sub_id, log_error=False):
try:
response = self.client.show_subnet(sub_id)
if 'subnet' in response:
return response['subnet']
return None
except exceptions.NeutronClientException as e:
if log_error:
LOG.exception(_("Error retrieving subnet: %s"), e)
return None
def get_port(self, port_id, log_error=False):
try:
response = self.client.show_port(port_id)
if 'port' in response:
return response['port']
return None
except exceptions.NeutronClientException as e:
if log_error:
LOG.exception(_("Error retrieving port: %s"), e)
return None
def set_port_device_id(self, port, device_id):
body = {}
body['device_id'] = device_id
request = {}
request['port'] = body
try:
LOG.info(_("Update %s port: %s"), self.os, body)
return self.client.update_port(port['id'], request)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error updating port: %s"), e)
return None
def update_network(self, net_dest, net_src):
body = {}
request = None
for field in constants.NETWORK_UPDATE_FIELDS:
if net_src[field] != net_dest[field]:
body[field] = net_src[field]
if not request:
request = {}
request['network'] = body
if request:
try:
LOG.info(_("Update %s network: %s"), self.os, body)
return self.client.update_network(net_dest['id'], request)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error updating network: %s"), e)
return None
return None
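    # Illustrative example (not in the original source): if only the network
    # name differs between net_dest and net_src, the request body sent is
    # {'network': {'name': <net_src name>}}; when none of the
    # NETWORK_UPDATE_FIELDS differ, no request is built and None is returned.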
def update_subnet(self, sub_dest, sub_src):
body = {}
request = None
for field in constants.SUBNET_UPDATE_FIELDS:
if sub_src[field] != sub_dest[field]:
body[field] = sub_src[field]
if not request:
request = {}
request['subnet'] = body
if request:
try:
LOG.info(_("Update %s subnet: %s"), self.os, body)
return self.client.update_subnet(sub_dest['id'], request)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error updating subnet: %s"), e)
return None
return None
def update_port(self, port_dest, port_src):
body = {}
request = None
for field in constants.PORT_UPDATE_FIELDS:
if port_src[field] != port_dest[field]:
body[field] = port_src[field]
if not request:
request = {}
request['port'] = body
if request:
try:
LOG.info(_("Update %s port: %s"), self.os, body)
return self.client.update_port(port_dest['id'], request)
except exceptions.NeutronClientException as e:
LOG.exception(_("Error updating port: %s"), e)
return None
return None
def get_ports_by_instance_uuid(self, ins_id):
"""
Query all network ports by an instance id.
"""
response = self.client.list_ports(device_id=ins_id)
if 'ports' in response:
return response['ports']
return []

View File

@ -0,0 +1,145 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
"""
Module to contain all of the PowerVC routines
"""
'''
Created on Aug 1, 2013
@author: John Kasperski
'''
from neutron.openstack.common import log as logging
from powervc.common import messaging
from powervc.common.constants import POWERVC_OS
from powervc.common.gettextutils import _
from powervc.neutron.client import neutron_client_bindings
from powervc.neutron.common import constants
from powervc.neutron.common import utils
from powervc.neutron.db import powervc_db_v2
LOG = logging.getLogger(__name__)
class Client(neutron_client_bindings.Client):
"""PowerVC access methods"""
def __init__(self, client, agent):
if not client:
return
self.os = POWERVC_OS
self.db = powervc_db_v2.PowerVCAgentDB()
self.agent = agent
super(Client, self).__init__(client, self.os)
self._create_amqp_listeners()
def _create_amqp_listeners(self):
"""Listen for AMQP messages from PowerVC"""
LOG.debug(_('Creating AMQP listeners'))
def reconnect():
LOG.info(_('Re-established connection to PowerVC Qpid broker'))
self.agent.queue_event(self.os, constants.EVENT_FULL_SYNC, None)
connection = messaging.PowerVCConnection(log=logging,
reconnect_handler=reconnect)
listener = connection.create_listener(constants.QPID_EXCHANGE,
constants.QPID_TOPIC)
listener.register_handler(constants.EVENT_NETWORK_CREATE,
self._handle_network_create)
listener.register_handler(constants.EVENT_NETWORK_UPDATE,
self._handle_network_update)
listener.register_handler(constants.EVENT_NETWORK_DELETE,
self._handle_network_delete)
listener.register_handler(constants.EVENT_SUBNET_CREATE,
self._handle_subnet_create)
listener.register_handler(constants.EVENT_SUBNET_UPDATE,
self._handle_subnet_update)
listener.register_handler(constants.EVENT_SUBNET_DELETE,
self._handle_subnet_delete)
listener.register_handler(constants.EVENT_PORT_CREATE,
self._handle_port_create)
listener.register_handler(constants.EVENT_PORT_UPDATE,
self._handle_port_update)
listener.register_handler(constants.EVENT_PORT_DELETE,
self._handle_port_delete)
connection.start()
def _handle_network_create(self, context, message):
event, payload = self._extact_event_payload(message)
network = payload.get('network')
network_id = network.get('id')
if not utils.is_network_mappable(network):
LOG.info(_("Network %s is not mappable"), network_id)
return
db_net = self.db.get_network(pvc_id=network_id)
if db_net:
LOG.info(_("DB entry for network %s already exists"), network_id)
return
self.agent.queue_event(self.os, event, network)
def _handle_network_update(self, context, message):
event, payload = self._extact_event_payload(message)
network = payload.get('network')
self.agent.queue_event(self.os, event, network)
def _handle_network_delete(self, context, message):
event, payload = self._extact_event_payload(message)
network_id = payload.get('network_id')
self.agent.queue_event(self.os, event, network_id)
def _handle_subnet_create(self, context, message):
event, payload = self._extact_event_payload(message)
subnet = payload.get('subnet')
subnet_id = subnet.get('id')
if not utils.is_subnet_mappable(subnet):
LOG.info(_("Subnet %s is not mappable"), subnet_id)
return
db_sub = self.db.get_subnet(pvc_id=subnet_id)
if db_sub:
LOG.info(_("DB entry for subnet %s already exists"), subnet_id)
return
self.agent.queue_event(self.os, event, subnet)
def _handle_subnet_update(self, context, message):
event, payload = self._extact_event_payload(message)
subnet = payload.get('subnet')
self.agent.queue_event(self.os, event, subnet)
def _handle_subnet_delete(self, context, message):
event, payload = self._extact_event_payload(message)
subnet_id = payload.get('subnet_id')
self.agent.queue_event(self.os, event, subnet_id)
def _handle_port_create(self, context, message):
event, payload = self._extact_event_payload(message)
port = payload.get('port')
port_id = port.get('id')
if not utils.is_port_mappable(port):
LOG.info(_("Port %s is not mappable"), port_id)
return
db_port = self.db.get_port(pvc_id=port_id)
if db_port:
LOG.info(_("DB entry for port %s already exists"), port_id)
return
self.agent.queue_event(self.os, event, port)
def _handle_port_update(self, context, message):
event, payload = self._extact_event_payload(message)
port = payload.get('port')
self.agent.queue_event(self.os, event, port)
def _handle_port_delete(self, context, message):
event, payload = self._extact_event_payload(message)
port_id = payload.get('port_id')
self.agent.queue_event(self.os, event, port_id)

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

View File

@ -0,0 +1,107 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
'''
Created on Aug 2, 2013
@author: John Kasperski
'''
#==============================================================================
# Device owner value for Neutron ports we create
#==============================================================================
POWERVC_DEVICE_OWNER = 'network:IBM SmartCloud'
RSVD_PORT_PREFIX = 'pvc:'
#==============================================================================
# Mapping enum values
#==============================================================================
OBJ_TYPE_NETWORK = 'Network'
OBJ_TYPE_SUBNET = 'Subnet'
OBJ_TYPE_PORT = 'Port'
STATUS_CREATING = 'Creating'
STATUS_ACTIVE = 'Active'
STATUS_DELETING = 'Deleting'
MAX_UPDATE_DATA_LENGTH = 512
#==============================================================================
# Neutron network fields (that we care about)
#==============================================================================
NETWORK_CREATE_FIELDS = ['name',
'shared',
'provider:network_type',
'provider:segmentation_id',
'provider:physical_network']
NETWORK_UPDATE_FIELDS = ['name',
'shared']
#==============================================================================
# Neutron subnet fields (that we care about)
#==============================================================================
SUBNET_CREATE_FIELDS = ['name',
'ip_version',
'cidr',
'gateway_ip',
'dns_nameservers',
'allocation_pools',
'enable_dhcp']
SUBNET_UPDATE_FIELDS = ['name',
'gateway_ip',
'dns_nameservers',
'enable_dhcp']
#==============================================================================
# Neutron port fields (that we care about)
#==============================================================================
PORT_CREATE_FIELDS = ['name',
'mac_address',
'device_owner']
PORT_UPDATE_FIELDS = ['name']
#==============================================================================
# Qpid message handling
#==============================================================================
QPID_EXCHANGE = 'neutron'
QPID_TOPIC = 'notifications.info'
EVENT_END_THREAD = 'thread.end'
EVENT_FULL_SYNC = 'full.sync'
EVENT_NETWORK_CREATE = 'network.create.end'
EVENT_NETWORK_UPDATE = 'network.update.end'
EVENT_NETWORK_DELETE = 'network.delete.end'
EVENT_SUBNET_CREATE = 'subnet.create.end'
EVENT_SUBNET_UPDATE = 'subnet.update.end'
EVENT_SUBNET_DELETE = 'subnet.delete.end'
EVENT_PORT_CREATE = 'port.create.end'
EVENT_PORT_UPDATE = 'port.update.end'
EVENT_PORT_DELETE = 'port.delete.end'
# Event queue event constants
EVENT_OS = 'os'
EVENT_TYPE = 'type'
EVENT_OBJECT = 'obj'
# metadata key for pvc uuid
METADATA = 'metadata'
PVC_ID = 'pvc_id'
# power image hypervisor type
POWERVM = 'powervm'
HYPERVISOR_TYPE = 'hypervisor_type'

View File

@ -0,0 +1,278 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""
'''
Created on Aug 2, 2013
@author: John Kasperski
'''
import fnmatch
from powervc.common.constants import LOCAL_OS
from powervc.common.constants import POWERVC_OS
from powervc.neutron.common import constants
from oslo.config import cfg
import json
from neutron.openstack.common import log as logging
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
#==============================================================================
# Utility routines
#==============================================================================
def _compare_objects(local_obj, pvc_obj, db_obj,
update_fields, default_target):
for field in update_fields:
if pvc_obj.get(field) != local_obj.get(field):
update_data = db_obj.get('update_data')
if not update_data or len(update_data) == 0:
return default_target
            try:
                update_dict = json.loads(update_data)
            except ValueError:
                update_dict = None
if not update_dict:
return default_target
db_field = update_dict.get(field)
if db_field != pvc_obj.get(field):
return LOCAL_OS
else:
return POWERVC_OS
return None
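# Illustrative trace (not part of the original module), assuming the corrected
# json handling above. With NETWORK_UPDATE_FIELDS = ['name', 'shared']:
#
#   local_net = {'name': 'oldname', 'shared': False}
#   pvc_net   = {'name': 'newname', 'shared': False}
#   db_net    = {'update_data': json.dumps({'name': 'oldname',
#                                           'shared': False})}
#
# 'name' differs between the two sides; the stored snapshot still holds the
# local value, so the PowerVC copy is the one that changed and
# _compare_objects(...) returns LOCAL_OS. If update_data were empty or not
# valid JSON, the supplied default_target would be returned instead.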
def compare_networks(local_net, pvc_net, db_net, default_target):
return _compare_objects(local_net, pvc_net, db_net,
constants.NETWORK_UPDATE_FIELDS, default_target)
def compare_subnets(local_sub, pvc_sub, db_sub, default_target):
return _compare_objects(local_sub, pvc_sub, db_sub,
constants.SUBNET_UPDATE_FIELDS, default_target)
def compare_ports(local_port, pvc_port, db_port, default_target):
return _compare_objects(local_port, pvc_port, db_port,
constants.PORT_UPDATE_FIELDS, default_target)
def _equal_objects(obj1, obj2, update_fields):
for field in update_fields:
if obj1.get(field) != obj2.get(field):
return False
return True
def equal_networks(net1, net2):
return _equal_objects(net1, net2, constants.NETWORK_UPDATE_FIELDS)
def equal_subnets(sub1, sub2):
return _equal_objects(sub1, sub2, constants.SUBNET_UPDATE_FIELDS)
def equal_ports(port1, port2):
return _equal_objects(port1, port2, constants.PORT_UPDATE_FIELDS)
def extract_ids_from_entry(obj):
pvc_id = obj.get('pvc_id')
local_id = obj.get('local_id')
return (pvc_id, local_id)
def extract_subnets_from_port(port):
subnets = []
fixed_ips = port.get('fixed_ips')
if not fixed_ips:
return []
for ip in fixed_ips:
subnet = ip.get('subnet_id')
if subnet and len(subnet) > 0:
subnets.append(subnet)
return subnets
def gen_network_sync_key(net):
result = ''
if 'provider:network_type' in net:
result += net['provider:network_type']
if 'provider:segmentation_id' in net:
if net['provider:segmentation_id']:
result += '_' + str(net['provider:segmentation_id'])
if 'provider:physical_network' in net:
if net['provider:physical_network']:
result += '_' + net['provider:physical_network']
return result
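# Illustrative only (not in the original source): a 'vlan' network with
# segmentation id 42 on physical network 'default' yields the sync key
# 'vlan_42_default'.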
def gen_subnet_sync_key(sub, db_net):
return sub['cidr'] + '_' + db_net['pvc_id']
def gen_port_sync_key(port, db_net):
result = ''
fixed_ips = port.get('fixed_ips')
if not fixed_ips:
return False
for ip in fixed_ips:
ipaddr = ip.get('ip_address')
if ipaddr and '.' in ipaddr:
if len(result) == 0:
result += ipaddr
else:
result += '_' + ipaddr
return result + '_' + db_net['pvc_id']
def _gen_object_update_data(obj, update_fields):
data = {}
for field in update_fields:
data[field] = obj.get(field)
result = json.dumps(data)
if len(result) > constants.MAX_UPDATE_DATA_LENGTH:
return None
return result
def gen_network_update_data(net):
return _gen_object_update_data(net, constants.NETWORK_UPDATE_FIELDS)
def gen_subnet_update_data(sub):
return _gen_object_update_data(sub, constants.SUBNET_UPDATE_FIELDS)
def gen_port_update_data(port):
return _gen_object_update_data(port, constants.PORT_UPDATE_FIELDS)
def _get_map_white_list():
"""
    Get the PowerVC network white list. Wrapped in a function so it is easy
    to mock in tests.
"""
return CONF.AGENT.map_powervc_networks
def network_has_subnet(net):
"""
    Check if a network has a subnet. PowerVC networks that do not have
    a subnet are considered DHCP networks, which are not supported.
"""
subnets = net.get('subnets')
if not subnets or len(subnets) == 0:
return False
return True
def is_network_mappable(net):
"""
    Check if the network can be synced
"""
if 'provider:network_type' in net:
network_type = net['provider:network_type']
if network_type != 'vlan':
return False
if 'provider:physical_network' in net:
physical_network = net['provider:physical_network']
if physical_network != 'default':
return False
return True
def network_has_mappable_subnet(client, net):
"""
    Check if a network has a mappable subnet. A mappable subnet is defined
    by is_subnet_mappable().
"""
subnets_id = net.get('subnets')
if subnets_id:
for sub_id in subnets_id:
subnet = client.get_subnet(sub_id)
if subnet and is_subnet_mappable(subnet):
return True
return False
def is_network_in_white_list(net):
"""
Check if a network's name is in the white list.
"""
whitelist = _get_map_white_list()
if whitelist:
"""
The following wildcards are allowed when
the network name matches a pattern in the white list.
(see the documentation for fnmatch):
* matches everything
? matches any single character
[seq] matches any character in seq
[!seq] matches any character not in seq
"""
for pat in whitelist:
if pat == '*':
return True
elif net.get('name') and fnmatch.fnmatch(net.get('name'), pat):
return True
# No match found.
return False
else:
# No network is allowed to sync.
return False
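# Illustrative only (not in the original source): with
# map_powervc_networks = ['net_*', 'VLAN?'], networks named 'net_10' and
# 'VLAN1' match the white list, while 'public' does not; a single '*' entry
# matches every PowerVC network.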
def is_subnet_mappable(sub):
if 'ip_version' in sub:
if sub['ip_version'] == 6:
return False
if 'enable_dhcp' in sub:
if sub['enable_dhcp']:
return False
return True
def is_port_mappable(port):
fixed_ips = port.get('fixed_ips')
if not fixed_ips:
return False
for ip in fixed_ips:
ipaddr = ip.get('ip_address')
if ipaddr and '.' in ipaddr:
return True
return False
def translate_net_id(db, net_id, target_os):
if target_os == LOCAL_OS:
db_net = db.get_network(pvc_id=net_id)
if db_net:
return db_net.get('local_id')
else:
db_net = db.get_network(local_id=net_id)
if db_net:
return db_net.get('pvc_id')
return None
def translate_subnet_id(db, sub_id, target_os):
if target_os == LOCAL_OS:
db_sub = db.get_subnet(pvc_id=sub_id)
if db_sub:
return db_sub.get('local_id')
else:
db_sub = db.get_subnet(local_id=sub_id)
if db_sub:
return db_sub.get('pvc_id')
return None

View File

@ -0,0 +1,9 @@
COPYRIGHT = """
*************************************************************
Licensed Materials - Property of IBM
OCO Source Materials
(C) Copyright IBM Corp. 2013 All Rights Reserved
*************************************************************
"""

Some files were not shown because too many files have changed in this diff