Add gpfs volume driver configuration flags

Add support for gpfs volume driver configuration flags.  Add
initialization of gpfs_mount_point_base directory with proper
owner and permission settings.

Use the Havana location of the gpfs driver.  In Icehouse, this will
be updated to reflect the change in driver path under the "ibm" subdirectory.

Partially implements: blueprint gpfs-storage-support
Change-Id: I002fc73c1d1fa89d8cb56d8da048426701251a46
Author: Bill Owen  2014-03-12 15:45:16 -07:00
parent 653b93326d
commit c25f1a9024
5 changed files with 127 additions and 0 deletions
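For context, a minimal wrapper-recipe sketch of how a deployer could enable this driver with the new attributes. The GPFS filesystem path is an example only, and the Icehouse driver class path in the comment is an assumption based on the note above, not part of this change.

# Hypothetical wrapper recipe: select the GPFS driver (Havana class path) and
# supply the mandatory mount point before converging the volume recipe.
# Under Icehouse the class is expected to move under the "ibm" subdirectory
# (e.g. cinder.volume.drivers.ibm.gpfs.GPFSDriver -- assumption).
node.set['openstack']['block-storage']['volume']['driver'] =
  'cinder.volume.drivers.gpfs.GPFSDriver'
node.set['openstack']['block-storage']['gpfs']['gpfs_mount_point_base'] =
  '/gpfs/fs1/cinder-volumes' # example GPFS path

# The volume recipe then creates gpfs_mount_point_base owned by the cinder
# user and group with mode 0755, as shown in the recipe hunk below.
include_recipe 'openstack-block-storage::volume'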

@@ -152,6 +152,14 @@ Cinder attributes
* `openstack['block-storage']['vmware']['vmware_image_transfer_timeout_secs']` - Timeout in seconds for VMDK volume transfer between Cinder and Glance. (integer value, default 7200)
* `openstack['block-storage']['vmware']['vmware_max_objects_retrieval']` - Max number of objects to be retrieved per batch. (integer value, default 100)
### IBM GPFS attributes ###
* `openstack['block-storage']['gpfs']['gpfs_mount_point_base']` - Path to directory in GPFS filesystem where volume files are located (string value)
* `openstack['block-storage']['gpfs']['gpfs_images_dir']` - Path to directory in GPFS filesystem where Glance images are located (string value)
* `openstack['block-storage']['gpfs']['gpfs_images_share_mode']` - Type of image copy to use, either "copy_on_write" or "copy" (string value)
* `openstack['block-storage']['gpfs']['gpfs_sparse_volumes']` - Create volumes as sparse or fully allocated files (boolean value, default true)
* `openstack['block-storage']['gpfs']['gpfs_max_clone_depth']` - Maximum clone indirections allowed when creating volume file snapshots or clones; zero indicates unlimited clone depth (integer value, default 8)
* `openstack['block-storage']['gpfs']['gpfs_storage_pool']` - GPFS storage pool that volumes are assigned to (string value)
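A role sketch that overrides the GPFS attributes above is shown here; the role name and GPFS paths are hypothetical, and the remaining values are the cookbook defaults.

# roles/cinder-gpfs-volume.rb (hypothetical role)
name 'cinder-gpfs-volume'
description 'Cinder volume node backed by the IBM GPFS driver'
override_attributes(
  'openstack' => {
    'block-storage' => {
      'volume' => { 'driver' => 'cinder.volume.drivers.gpfs.GPFSDriver' },
      'gpfs' => {
        'gpfs_mount_point_base'  => '/gpfs/fs1/cinder-volumes', # example path
        'gpfs_images_dir'        => '/gpfs/fs1/glance-images',  # example path
        'gpfs_images_share_mode' => 'copy_on_write',
        'gpfs_sparse_volumes'    => true,
        'gpfs_storage_pool'      => 'system'
      }
    }
  }
)
run_list 'recipe[openstack-block-storage::volume]'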
Testing
=======

@@ -164,6 +164,14 @@ default['openstack']['block-storage']['vmware']['vmware_volume_folder'] = 'cinder'
default['openstack']['block-storage']['vmware']['vmware_image_transfer_timeout_secs'] = 7200
default['openstack']['block-storage']['vmware']['vmware_max_objects_retrieval'] = 100
# IBM GPFS Support
default['openstack']['block-storage']['gpfs']['gpfs_mount_point_base'] = nil
default['openstack']['block-storage']['gpfs']['gpfs_images_dir'] = nil
default['openstack']['block-storage']['gpfs']['gpfs_images_share_mode'] = 'copy_on_write'
default['openstack']['block-storage']['gpfs']['gpfs_sparse_volumes'] = true
default['openstack']['block-storage']['gpfs']['gpfs_max_clone_depth'] = 8
default['openstack']['block-storage']['gpfs']['gpfs_storage_pool'] = 'system'
# logging attribute
default['openstack']['block-storage']['syslog']['use'] = false
default['openstack']['block-storage']['syslog']['facility'] = 'LOG_LOCAL2'

@@ -118,6 +118,14 @@ when 'cinder.volume.drivers.storwize_svc.StorwizeSVCDriver'
    group node['openstack']['block-storage']['group']
  end

when 'cinder.volume.drivers.gpfs.GPFSDriver'
  directory node['openstack']['block-storage']['gpfs']['gpfs_mount_point_base'] do
    mode '0755'
    owner node['openstack']['block-storage']['user']
    group node['openstack']['block-storage']['group']
    recursive true
  end

when 'cinder.volume.drivers.lvm.LVMISCSIDriver'
  if node['openstack']['block-storage']['volume']['create_volume_group']
    volume_size = node['openstack']['block-storage']['volume']['volume_group_size']

@@ -79,5 +79,56 @@ describe 'openstack-block-storage::volume' do
      expect(@chef_run).to render_file(file).with_content('include /var/lib/cinder/volumes/*')
      expect(@chef_run).not_to render_file(file).with_content('include /etc/tgt/conf.d/*.conf')
    end

    describe 'IBM GPFS volume driver' do
      before do
        @chef_run = ::ChefSpec::Runner.new ::REDHAT_OPTS do |n|
          n.set['openstack']['block-storage']['volume']['driver'] = 'cinder.volume.drivers.gpfs.GPFSDriver'
          n.set['openstack']['block-storage']['gpfs']['gpfs_mount_point_base'] = 'volumes'
        end
        @conf = '/etc/cinder/cinder.conf'
        @chef_run.converge 'openstack-block-storage::volume'
      end

      it 'verifies gpfs_mount_point_base' do
        expect(@chef_run).to render_file(@conf).with_content(
          /^gpfs_mount_point_base = volumes$/)
      end

      it 'verifies gpfs_images_dir' do
        @chef_run.node.set['openstack']['block-storage']['gpfs']['gpfs_images_dir'] = 'images'
        expect(@chef_run).to render_file(@conf).with_content(
          /^gpfs_images_dir = images$/)
      end

      it 'verifies gpfs_images_share_mode is default' do
        expect(@chef_run).to render_file(@conf).with_content(
          /^gpfs_images_share_mode = copy_on_write$/)
      end

      it 'verifies gpfs_sparse_volumes is default' do
        expect(@chef_run).to render_file(@conf).with_content(
          /^gpfs_sparse_volumes = true$/)
      end

      it 'verifies gpfs_max_clone_depth is default' do
        expect(@chef_run).to render_file(@conf).with_content(
          /^gpfs_max_clone_depth = 8$/)
      end

      it 'verifies gpfs_storage_pool is default' do
        expect(@chef_run).to render_file(@conf).with_content(
          /^gpfs_storage_pool = system$/)
      end

      it 'verifies gpfs volume directory is created with owner and mode set correctly' do
        expect(@chef_run).to create_directory('volumes').with(
          owner: 'cinder',
          group: 'cinder',
          mode: '0755'
        )
      end
    end
  end
end

@@ -906,3 +906,55 @@ vmware_image_transfer_timeout_secs = <%= node['openstack']['block-storage']['vmware']['vmware_image_transfer_timeout_secs'] %>
# less than the configured value. (integer value)
vmware_max_objects_retrieval = <%= node['openstack']['block-storage']['vmware']['vmware_max_objects_retrieval'] %>
<% end %>
######## defined in cinder.openstack.common.cfg:CommonConfigOpts ########
<% if node["openstack"]["block-storage"]["volume"]["driver"] == "cinder.volume.drivers.gpfs.GPFSDriver" %>
# Specifies the path of the GPFS directory where Block Storage
# volume and snapshot files are stored. (string value)
gpfs_mount_point_base = <%= node["openstack"]["block-storage"]["gpfs"]["gpfs_mount_point_base"] %>
<% if node['openstack']['block-storage']['gpfs']['gpfs_images_dir'] %>
# Specifies the path of the Image service repository in GPFS.
# Leave undefined if not storing images in GPFS. (string
# value)
gpfs_images_dir = <%= node['openstack']['block-storage']['gpfs']['gpfs_images_dir'] %>
<% end %>
# Specifies the type of image copy to be used. Set this when
# the Image service repository also uses GPFS so that image
# files can be transferred efficiently from the Image service
# to the Block Storage service. There are two valid values:
# "copy" specifies that a full copy of the image is made;
# "copy_on_write" specifies that copy-on-write optimization
# strategy is used and unmodified blocks of the image file are
# shared efficiently. (string value)
gpfs_images_share_mode = <%= node['openstack']['block-storage']['gpfs']['gpfs_images_share_mode'] %>
# Specifies an upper limit on the number of indirections
# required to reach a specific block due to snapshots or
# clones. A lengthy chain of copy-on-write snapshots or
# clones can have a negative impact on performance, but
# improves space utilization. 0 indicates unlimited clone
# depth. (integer value)
gpfs_max_clone_depth = <%= node['openstack']['block-storage']['gpfs']['gpfs_max_clone_depth'] %>
# Specifies that volumes are created as sparse files which
# initially consume no space. If set to False, the volume is
# created as a fully allocated file, in which case, creation
# may take a significantly longer time. (boolean value)
gpfs_sparse_volumes = <%= node['openstack']['block-storage']['gpfs']['gpfs_sparse_volumes'] %>
# Specifies the storage pool that volumes are assigned to. By
# default, the system storage pool is used. (string value)
gpfs_storage_pool = <%= node['openstack']['block-storage']['gpfs']['gpfs_storage_pool'] %>
<% end %>
san_ip=<%= node["openstack"]["block-storage"]["storwize"]["san_ip"] %>
#### (StrOpt) IP address of SAN controller
san_login=<%= node["openstack"]["block-storage"]["storwize"]["san_login"] %>
#### (StrOpt) Username for SAN controller
san_private_key=<%= node["openstack"]["block-storage"]["storwize"]["san_private_key"] %>
#### (StrOpt) Filename of private key to use for SSH authentication
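With the cookbook defaults shown above and a hypothetical gpfs_mount_point_base of /gpfs/fs1/cinder-volumes, the GPFS block of this template would render roughly as follows (comment lines omitted; gpfs_images_dir is skipped because its default is unset):

gpfs_mount_point_base = /gpfs/fs1/cinder-volumes
gpfs_images_share_mode = copy_on_write
gpfs_max_clone_depth = 8
gpfs_sparse_volumes = true
gpfs_storage_pool = system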