Moving to upstream ceph
Remove fuel-ceph module
Add upstream puppet-ceph instead
Add new function for converting osd_devices_list to hash
Add noop tests

Related-Blueprint: fuel-upstream-ceph
Change-Id: I75aa14cbc20cc22df38ab9193505af221d95e1af
parent cf36967458
commit 206192232c
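The commit message above mentions a new function for converting osd_devices_list to a hash; that function itself is not visible in this diff. As a rough sketch only — the name `osd_devices_hash` and its exact behavior are assumptions, not taken from the actual change — a parser for the `DISK[:JOURNAL]` entries used by the removed module could look like:

```ruby
# Hypothetical sketch: convert the space-separated $::osd_devices_list
# fact value ("DISK[:JOURNAL] ...") into a device => journal hash.
def osd_devices_hash(osd_devices_list)
  osd_devices_list.to_s.split(' ').each_with_object({}) do |entry, hash|
    device, journal = entry.split(':', 2)
    hash[device] = journal # nil when no journal device is given
  end
end

osd_devices_hash('/dev/sdb1:/dev/sdb2 /dev/sdc1')
# => {"/dev/sdb1"=>"/dev/sdb2", "/dev/sdc1"=>nil}
```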
@ -1,9 +0,0 @@
fixtures:
  symlinks:
    ceph: "#{source_dir}"
    firewall: "#{source_dir}/../firewall"
    stdlib: "#{source_dir}/../stdlib"
    nova: "#{source_dir}/../nova"
    openstacklib: "#{source_dir}/../openstacklib"
    inifile: "#{source_dir}/../inifile"
    oslo: "#{source_dir}/../oslo"
@ -1 +0,0 @@
spec/fixtures
@ -1,2 +0,0 @@
--color
-f doc
@ -1,17 +0,0 @@
source 'https://rubygems.org'

group :development, :test do
  gem 'rake', :require => false
  gem 'rspec', '~>3.3', :require => false
  gem 'rspec-puppet', '~>2.1.0', :require => false
  gem 'puppetlabs_spec_helper', :require => false
  gem 'puppet-lint', '~> 1.1'
end

if puppetversion = ENV['PUPPET_GEM_VERSION']
  gem 'puppet', puppetversion, :require => false
else
  gem 'puppet', '<4.0', :require => false
end

# vim:ft=ruby
@ -1,476 +0,0 @@
Fuel Puppet module for Ceph
===============================

About
-----

This is a Puppet module to install a Ceph cluster inside of OpenStack. This
module has been developed specifically to work with Fuel for OpenStack.

* Puppet: http://www.puppetlabs.com/
* Ceph: http://ceph.com/
* Fuel: http://fuel.mirantis.com/

Status
------

Currently working with Ceph 0.61.

Developed and tested with:

* CentOS 6.4, Ubuntu 12.04
* Puppet 2.7.19
* Ceph 0.61.8

Known Issues
------------

**Glance**

There are currently issues with glance 2013.1.2 (grizzly) that cause ``glance
image-create`` with ``--location`` to not function; see
https://bugs.launchpad.net/glance/+bug/1215682

**RadosGW, Keystone and Python 2.6**

RadosGW (RGW) will work with Keystone token formats UUID or PKI. While RGW
prefers PKI tokens, Python 2.6 distributions currently may not work correctly
with PKI tokens. As such, Keystone integration will default to UUID, but you
can adjust this as desired; see the ```rgw_use_pki``` option.

Features
--------

* Ceph package
* Ceph Monitors
* Ceph OSDs
* Ceph MDS (present, but un-supported)
* Ceph Object Gateway (radosgw)
  * OpenStack Keystone integration


Using
-----

To deploy a Ceph cluster you need at least one monitor and two OSD devices. If
you are deploying Ceph outside of Fuel, see the example/site.pp for the
parameters that you will need to adjust.

This module requires the puppet agents to have ``pluginsync = true``.

Understanding the example Puppet manifest
-----------------------------------------

This section should be re-written.

This parameter defines the names of the ceph pools we want to pre-create. By
default, ``volumes`` and ``images`` are necessary to set up the OpenStack hooks.

```puppet
node 'default' {
  ...
}
```

This section configures components for all nodes of Ceph and OpenStack.

```puppet
class { 'ceph::deploy':
  auth_supported   => 'cephx',
  osd_journal_size => '2048',
  osd_mkfs_type    => 'xfs',
}
```

In this section you can change the authentication type, the journal size
(in MB), and the type of filesystem.

Verifying the deployment
------------------------

You can issue ``ceph -s`` or ``ceph health`` (terse) to check the current
status of the cluster. The output of ``ceph -s`` should include:

* ``monmap``: this should contain the correct number of monitors
* ``osdmap``: this should contain the correct number of osd instances (one per
  node per volume)

```
root@fuel-ceph-02:~# ceph -s
   health HEALTH_OK
   monmap e1: 2 mons at {fuel-ceph-01=10.0.0.253:6789/0,fuel-ceph-02=10.0.0.252:6789/0}, election epoch 4, quorum 0,1 fuel-ceph-01,fuel-ceph-02
   osdmap e23: 4 osds: 4 up, 4 in
    pgmap v275: 448 pgs: 448 active+clean; 9518 bytes data, 141 MB used, 28486 MB / 28627 MB avail
   mdsmap e4: 1/1/1 up {0=fuel-ceph-02.local.try=up:active}
```

Here are some errors that may be reported.

``ceph -s`` returned ``health HEALTH_WARN``:

```
root@fuel-ceph-01:~# ceph -s
   health HEALTH_WARN 63 pgs peering; 54 pgs stuck inactive; 208 pgs stuck unclean; recovery 2/34 degraded (5.882%)
...
```

``ceph`` commands return key errors:

```
[root@controller-13 ~]# ceph -s
2013-08-22 00:06:19.513437 7f79eedea760 -1 monclient(hunting): ERROR: missing keyring, cannot use cephx for authentication
2013-08-22 00:06:19.513466 7f79eedea760 -1 ceph_tool_common_init failed.
```

Check the links in ``/root/ceph*.keyring``. There should be one for each of
admin, osd, and mon. If any are missing this could be the cause.

Try to run ``ceph-deploy gatherkeys {mon-server-name}``. If this doesn't work
then there may have been an issue starting the cluster.

Check for running ceph processes with ``ps axu | grep ceph``. If there is a
python process running for ``ceph-create-keys`` then there is likely a problem
with the MON processes talking to each other.
* Check each mon's network and firewall. The monitor defaults to port 6789.
* If public_network is defined in ceph.conf, mon_host and DNS names **MUST**
  be inside the public_network or ceph-deploy won't create mons.

Missing OSD instances
---------------------

By default there should be one OSD instance per volume per OSD node listed
in the configuration. If one or more of them is missing you might have a
problem with the initialization of the disks. Properly working block devices
will be mounted for you.

Common issues:

* the disk or volume is in use
* the disk partition didn't refresh in the kernel

Check the osd tree:

```
# ceph osd tree

# id    weight  type name       up/down reweight
-1      6       root default
-2      2               host controller-1
0       1                       osd.0   up      1
3       1                       osd.3   up      1
-3      2               host controller-2
1       1                       osd.1   up      1
4       1                       osd.4   up      1
-4      2               host controller-3
2       1                       osd.2   up      1
5       1                       osd.5   up      1
```

Ceph pools
----------

By default we create two pools, ``images`` and ``volumes``; there should also
be defaults of ``data``, ``metadata``, and ``rbd``. ``ceph osd lspools`` can
show the current pools:

```
# ceph osd lspools
0 data,1 metadata,2 rbd,3 images,4 volumes,
```

Testing OpenStack
-----------------

### Glance

To test Glance, upload an image to Glance to see if it is saved in Ceph:

```shell
source ~/openrc
glance image-create --name cirros --container-format bare \
  --disk-format qcow2 --is-public yes --location \
  https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
```

**Note: ``--location`` is currently broken in Glance (see Known Issues above);
use the following instead:**

```
source ~/openrc
wget https://launchpad.net/cirros/trunk/0.3.0/+download/cirros-0.3.0-x86_64-disk.img
glance image-create --name cirros --container-format bare \
  --disk-format qcow2 --is-public yes < cirros-0.3.0-x86_64-disk.img
```

This will return something like:

```
+------------------+--------------------------------------+
| Property         | Value                                |
+------------------+--------------------------------------+
| checksum         | None                                 |
| container_format | bare                                 |
| created_at       | 2013-08-22T19:54:28                  |
| deleted          | False                                |
| deleted_at       | None                                 |
| disk_format      | qcow2                                |
| id               | f52fb13e-29cf-4a2f-8ccf-a170954907b8 |
| is_public        | True                                 |
| min_disk         | 0                                    |
| min_ram          | 0                                    |
| name             | cirros                               |
| owner            | baa3187b7df94d9ea5a8a14008fa62f5     |
| protected        | False                                |
| size             | 0                                    |
| status           | active                               |
| updated_at       | 2013-08-22T19:54:30                  |
+------------------+--------------------------------------+
```

Then check rbd:

```shell
rbd ls images
```

```shell
rados -p images df
```

### Cinder

To test Cinder, we will create a small volume and check that it was saved in
Ceph:

```shell
source openrc
cinder create 1
```

This will instruct Cinder to create a 1 GiB volume; it should respond with
something similar to:

```
+---------------------+--------------------------------------+
| Property            | Value                                |
+---------------------+--------------------------------------+
| attachments         | []                                   |
| availability_zone   | nova                                 |
| bootable            | false                                |
| created_at          | 2013-08-30T00:01:39.011655           |
| display_description | None                                 |
| display_name        | None                                 |
| id                  | 78bf2750-e99c-4c52-b5ca-09764af367b5 |
| metadata            | {}                                   |
| size                | 1                                    |
| snapshot_id         | None                                 |
| source_volid        | None                                 |
| status              | creating                             |
| volume_type         | None                                 |
+---------------------+--------------------------------------+
```

Then we can check the status of the volume using its ``id`` with
``cinder show <id>``:

```
cinder show 78bf2750-e99c-4c52-b5ca-09764af367b5
+------------------------------+--------------------------------------+
| Property                     | Value                                |
+------------------------------+--------------------------------------+
| attachments                  | []                                   |
| availability_zone            | nova                                 |
| bootable                     | false                                |
| created_at                   | 2013-08-30T00:01:39.000000           |
| display_description          | None                                 |
| display_name                 | None                                 |
| id                           | 78bf2750-e99c-4c52-b5ca-09764af367b5 |
| metadata                     | {}                                   |
| os-vol-host-attr:host        | controller-19.domain.tld             |
| os-vol-tenant-attr:tenant_id | b11a96140e8e4522b81b0b58db6874b0     |
| size                         | 1                                    |
| snapshot_id                  | None                                 |
| source_volid                 | None                                 |
| status                       | available                            |
| volume_type                  | None                                 |
+------------------------------+--------------------------------------+
```

Since the volume's ``status`` is ``available``, it should have been created in
Ceph. We can check this with ``rbd ls volumes``:

```shell
rbd ls volumes
volume-78bf2750-e99c-4c52-b5ca-09764af367b5
```

### Rados GW

First confirm that the cluster is ```HEALTH_OK``` using ```ceph -s``` or
```ceph health detail```. If the cluster isn't healthy most of these tests
will not function.

#### Checking on the Rados GW service

***Note: on RedHat distros, mod_fastcgi's /etc/httpd/conf.d/fastcgi.conf must
have ```FastCgiWrapper Off``` or rados calls will return 500 errors.***

RadosGW relies on the service ```radosgw``` (Debian) / ```ceph-radosgw```
(RHEL) running and creating a socket for the web server's script service to
talk to. If the radosgw service is not running, or not staying running, then
we need to inspect it closer.

The service script for radosgw might exit 0 without starting the service. The
easy way to test this is to simply run ```service ceph-radosgw restart```; if
the service script cannot stop the service, it wasn't running in the first
place.

We can also check whether the radosgw service might be running with
```ps axu | grep radosgw```, but this might also show the web server's script
server processes as well.

Most commands from ```radosgw-admin``` will work whether or not the
```radosgw``` service is running.

#### Swift testing

##### Simple authentication for RadosGW

Create a new user:

```shell
radosgw-admin user create --uid=test --display-name="bob" --email="bob@mail.ru"
```

```
{ "user_id": "test",
  "display_name": "bob",
  "email": "bob@mail.ru",
  "suspended": 0,
  "max_buckets": 1000,
  "auid": 0,
  "subusers": [],
  "keys": [
    { "user": "test",
      "access_key": "CVMC8OX9EMBRE2F5GA8C",
      "secret_key": "P3H4Ilv8Lhx0srz8ALO\/7udwkJd6raIz11s71FIV"}],
  "swift_keys": [],
  "caps": []}
```

Swift auth works with subusers; in OpenStack terms this would be tenant:user,
so we need to mimic the same:

```shell
radosgw-admin subuser create --uid=test --subuser=test:swift --access=full
```

```
{ "user_id": "test",
  "display_name": "bob",
  "email": "bob@mail.ru",
  "suspended": 0,
  "max_buckets": 1000,
  "auid": 0,
  "subusers": [
    { "id": "test:swift",
      "permissions": "full-control"}],
  "keys": [
    { "user": "test",
      "access_key": "CVMC8OX9EMBRE2F5GA8C",
      "secret_key": "P3H4Ilv8Lhx0srz8ALO\/7udwkJd6raIz11s71FIV"}],
  "swift_keys": [],
  "caps": []}
```

Generate the secret key.
___Note that ```--gen-secret``` is required in (at least) cuttlefish and newer.___

```shell
radosgw-admin key create --subuser=test:swift --key-type=swift --gen-secret
```

```
{ "user_id": "test",
  "display_name": "bob",
  "email": "bob@mail.ru",
  "suspended": 0,
  "max_buckets": 1000,
  "auid": 0,
  "subusers": [
    { "id": "test:swift",
      "permissions": "full-control"}],
  "keys": [
    { "user": "test",
      "access_key": "CVMC8OX9EMBRE2F5GA8C",
      "secret_key": "P3H4Ilv8Lhx0srz8ALO\/7udwkJd6raIz11s71FIV"}],
  "swift_keys": [
    { "user": "test:swift",
      "secret_key": "hLyMvpVNPez7lBqFlLjcefsZnU0qlCezyE2IDRsp"}],
  "caps": []}
```

Some test commands:

```shell
swift -A http://localhost:6780/auth/1.0 -U test:swift -K "eRYvzUr6vubg93dMRMk60RWYiGdJGvDk3lnwi4cl" post test
swift -A http://localhost:6780/auth/1.0 -U test:swift -K "eRYvzUr6vubg93dMRMk60RWYiGdJGvDk3lnwi4cl" upload test myfile
swift -A http://localhost:6780/auth/1.0 -U test:swift -K "eRYvzUr6vubg93dMRMk60RWYiGdJGvDk3lnwi4cl" list test
```

##### Keystone integration

We will start with a simple test: we should be able to use the Keystone openrc
credentials and start using the swift client as if we were actually using
Swift.

```shell
source openrc
swift post test
swift list test
```

```
test
```


Clean up ceph to re-run
=======================

Sometimes it is necessary to reset the Ceph cluster rather than rebuilding
everything from scratch.

Set ``all`` to contain all monitors, OSDs, and computes you want to
re-initialize.

```shell
export all="compute-4 controller-1 controller-2 controller-3"
for node in $all
do
  ssh $node 'service ceph -a stop ;
  umount /var/lib/ceph/osd/ceph*';
done;
ceph-deploy purgedata $all;
ceph-deploy purge $all;
yum install -y ceph-deploy;
rm ~/ceph* ;
ceph-deploy install $all
```


Copyright and License
---------------------

Copyright: (C) 2013 [Mirantis](https://www.mirantis.com/) Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
@ -1,11 +0,0 @@
require 'rubygems'
require 'puppetlabs_spec_helper/rake_tasks'
require 'puppet-lint/tasks/puppet-lint'
PuppetLint.configuration.send('disable_80chars')
PuppetLint.configuration.send('disable_2sp_soft_tabs')
PuppetLint.configuration.send('disable_arrow_alignment')
PuppetLint.configuration.send('disable_autoloader_layout')
PuppetLint.configuration.send('disable_selector_inside_resource')

# Disable check due to upstream bug: https://github.com/rodjek/puppet-lint/issues/170
PuppetLint.configuration.send('disable_class_parameter_defaults')
@ -1,111 +0,0 @@
# Global settings
Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ] }

# Hostnames MUST match either cluster_network or public_network, or
# ceph will not set up correctly.

# primary_mon defines the node from which ceph-deploy will pull its config
# on any following nodes. All nodes must have an ssh key and root (or sudo)
# access to this host. ceph-deploy new will only be run from here.
$primary_mon = 'controller-1.domain.tld'

# public_network is necessary to add monitors atomically; the monitor nodes
# will also bind to this address.
$public_network = '192.168.0.0/24'

# cluster_network is necessary to ensure that OSDs bind to the expected interface.
$cluster_network = '10.0.0.0/24'

# osd_devices is used in ceph::osd to activate the disk and join it to the
# cluster.
# It may be <device_name|mounted_path>[:journal_device|journal_path]...
$osd_devices = split($::osd_devices_list, ' ')

########
# RadosGW
########
# set use_rgw to configure RadosGW items
$use_rgw = false

# rgw_ip is the IP address the web server binds to and listens on; default is 0.0.0.0
#$rgw_ip = '10.109.10.3'

# rgw_port: if you are running other services on this web server you need to
# run rgw on an alternate port; default is 6780
#$rgw_port = 6780

# rgw_use_keystone will configure the keystone parts
#$rgw_use_keystone = true

# rgw_use_pki: if true, attempt to sign the keystone certs and enable PKI
# token verification. If false, will default to values that should work with
# UUID. This requires keystone.conf to use token_format = PKI and
# keystone-manage pki_setup to have been run. This also assumes that rados is
# being installed on the keystone server; otherwise you will need to move the
# keys yourself into /etc/keystone/ssl.
#$rgw_use_pki = false

# rgw_keystone_url is the IP and port for the keystone server; this will work
# on management or admin URLs (internal:5000 or internal:35357)
#$rgw_keystone_url = 192.168.1.20:5000

# rgw_keystone_admin_token will be the token to perform admin functions in
# keystone. This is commonly inside /root/openrc on controllers
#$rgw_keystone_admin_token = 'CPj09fj'


node 'default' {
  class {'ceph':
    # General settings
    cluster_node_address => $cluster_node_address, # This should be the cluster service address
    primary_mon          => $primary_mon,          # This should be the first controller
    osd_devices          => split($::osd_devices_list, ' '),
    use_ssl              => false,
    use_rgw              => $use_rgw,

    # ceph.conf Global settings
    auth_supported            => 'cephx',
    osd_journal_size          => '2048',
    osd_mkfs_type             => 'xfs',
    osd_pool_default_size     => '2',
    osd_pool_default_min_size => '1',
    # TODO: calculate PG numbers
    osd_pool_default_pg_num   => '100',
    osd_pool_default_pgp_num  => '100',
    cluster_network           => $cluster_network,
    public_network            => $public_network,

    # RadosGW settings
    rgw_host                         => $::osfamily ? { 'Debian' => $::hostname, default => $::fqdn },
    rgw_ip                           => $rgw_ip,
    rgw_port                         => $rgw_port,
    rgw_keyring_path                 => '/etc/ceph/keyring.radosgw.gateway',
    rgw_socket_path                  => '/tmp/radosgw.sock',
    rgw_log_file                     => '/var/log/ceph/radosgw.log',
    rgw_use_keystone                 => true,
    rgw_use_pki                      => false,
    rgw_keystone_url                 => $rgw_keystone_url,
    rgw_keystone_admin_token         => $rgw_keystone_admin_token,
    rgw_keystone_token_cache_size    => '10',
    rgw_keystone_accepted_roles      => '_member_, Member, admin, swiftoperator',
    rgw_keystone_revocation_interval => $::ceph::rgw_use_pki ? { false => 1000000, default => 60 },
    rgw_data                         => '/var/lib/ceph/radosgw',
    rgw_dns_name                     => "*.${::domain}",
    rgw_print_continue               => false,
    rgw_nss_db_path                  => '/etc/ceph/nss',

    # Cinder settings
    volume_driver      => 'cinder.volume.drivers.rbd.RBDDriver',
    glance_api_version => '2',
    cinder_user        => 'volumes',
    cinder_pool        => 'volumes',
    # TODO: generate rbd_secret_uuid
    rbd_secret_uuid    => 'a5d0dd94-57c4-ae55-ffe0-7e3732a24455',

    # Glance settings
    glance_backend        => 'ceph',
    glance_user           => 'images',
    glance_pool           => 'images',
    show_image_direct_url => 'True',
  }
}
@ -1,9 +0,0 @@
Facter.add("ceph_conf") do

  setcode do

    File.exists? '/etc/ceph/ceph.conf'

  end

end
@ -1,9 +0,0 @@
Facter.add("cinder_conf") do

  setcode do

    File.exists? '/etc/cinder/cinder.conf'

  end

end
@ -1,9 +0,0 @@
Facter.add("glance_api_conf") do

  setcode do

    File.exists? '/etc/glance/glance-api.conf'

  end

end
@ -1,9 +0,0 @@
Facter.add("keystone_conf") do

  setcode do

    File.exists? '/etc/keystone/keystone.conf'

  end

end
@ -1,9 +0,0 @@
Facter.add("nova_compute") do

  setcode do

    File.exists? '/etc/nova/nova-compute.conf'

  end

end
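The five config-file facts above are identical except for the fact name and the file path. A minimal consolidated sketch (an alternative shape, not part of this change) that would produce the same facts:

```ruby
# Sketch: generate the five config-file presence facts from one table.
{
  'ceph_conf'       => '/etc/ceph/ceph.conf',
  'cinder_conf'     => '/etc/cinder/cinder.conf',
  'glance_api_conf' => '/etc/glance/glance-api.conf',
  'keystone_conf'   => '/etc/keystone/keystone.conf',
  'nova_compute'    => '/etc/nova/nova-compute.conf',
}.each do |fact_name, path|
  Facter.add(fact_name) do
    # File.exist? is the non-deprecated spelling of File.exists? used above
    setcode { File.exist?(path) }
  end
end
```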
@ -1,27 +0,0 @@
Puppet::Type.type(:ceph_conf).provide(
  :ini_setting,
  :parent => Puppet::Type.type(:ini_setting).provider(:ruby)
) do

  def section
    resource[:name].split('/', 2).first
  end

  def setting
    resource[:name].split('/', 2).last
  end

  # Ceph-deploy 1.2.3 uses ' = ' not '='
  def separator
    ' = '
  end

  def self.file_path
    '/etc/ceph/ceph.conf'
  end

  # this needs to be removed. This has been replaced with the class method
  def file_path
    self.class.file_path
  end

end
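The provider above maps a resource title to an INI section and setting by splitting on the first `/`. In plain Ruby, the split behaves as follows (the rendered ceph.conf lines are an illustration of the ` = ` separator, not output captured from this module):

```ruby
name = 'global/osd_journal_size'
name.split('/', 2).first # => "global"           (the [section] header)
name.split('/', 2).last  # => "osd_journal_size" (the setting key)
# With ' = ' as the separator, a value of '2048' would render as:
#   [global]
#   osd_journal_size = 2048
```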
@ -1,42 +0,0 @@
Puppet::Type.newtype(:ceph_conf) do

  ensurable

  newparam(:name, :namevar => true) do
    desc 'Section/setting name to manage from ./ceph.conf'
    newvalues(/\S+\/\S+/)
  end

  newproperty(:value) do
    desc 'The value of the setting to be defined.'
    munge do |value|
      value = value.to_s.strip
      value.capitalize! if value =~ /^(true|false)$/i
      value
    end

    def is_to_s( currentvalue )
      if resource.secret?
        return '[old secret redacted]'
      else
        return currentvalue
      end
    end

    def should_to_s( newvalue )
      if resource.secret?
        return '[new secret redacted]'
      else
        return newvalue
      end
    end
  end

  newparam(:secret, :boolean => true) do
    desc 'Whether to hide the value from Puppet logs. Defaults to `false`.'

    newvalues(:true, :false)

    defaultto false
  end
end
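The `munge` block in the `value` property above normalizes boolean-ish strings to a canonical capitalization; a quick plain-Ruby check of its effect:

```ruby
# Same logic as the munge block, run over a few sample values.
['true', 'FALSE', '1'].map do |v|
  v = v.to_s.strip
  v.capitalize! if v =~ /^(true|false)$/i
  v
end
# => ["True", "False", "1"]
```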
@ -1,100 +0,0 @@
# create new conf on primary Ceph MON, pull conf on all other nodes
class ceph::conf (
  $mon_addr      = $::ceph::mon_addr,
  $node_hostname = $::ceph::node_hostname,

) {
  if $node_hostname == $::ceph::primary_mon {

    exec {'ceph-deploy new':
      command   => "ceph-deploy new ${node_hostname}:${mon_addr}",
      cwd       => '/etc/ceph',
      logoutput => true,
      creates   => '/etc/ceph/ceph.conf',
    }

    # link is necessary to work around http://tracker.ceph.com/issues/6281
    file {'/root/ceph.conf':
      ensure => link,
      target => '/etc/ceph/ceph.conf',
    }

    file {'/root/ceph.mon.keyring':
      ensure => link,
      target => '/etc/ceph/ceph.mon.keyring',
    }

    ceph_conf {
      'global/auth_supported':            value => $::ceph::auth_supported;
      'global/osd_journal_size':          value => $::ceph::osd_journal_size;
      'global/osd_mkfs_type':             value => $::ceph::osd_mkfs_type;
      'global/osd_pool_default_size':     value => $::ceph::osd_pool_default_size;
      'global/osd_pool_default_min_size': value => $::ceph::osd_pool_default_min_size;
      'global/osd_pool_default_pg_num':   value => $::ceph::osd_pool_default_pg_num;
      'global/osd_pool_default_pgp_num':  value => $::ceph::osd_pool_default_pgp_num;
      'global/cluster_network':           value => $::ceph::cluster_network;
      'global/public_network':            value => $::ceph::public_network;
      'global/log_to_syslog':             value => $::ceph::use_syslog;
      'global/log_to_syslog_level':       value => $::ceph::syslog_log_level;
      'global/log_to_syslog_facility':    value => $::ceph::syslog_log_facility;
      'global/osd_max_backfills':         value => $::ceph::osd_max_backfills;
      'global/osd_recovery_max_active':   value => $::ceph::osd_recovery_max_active;
      'client/rbd_cache':                 value => $::ceph::rbd_cache;
      'client/rbd_cache_writethrough_until_flush': value => $::ceph::rbd_cache_writethrough_until_flush;
    }

    Exec['ceph-deploy new'] ->
    File['/root/ceph.conf'] -> File['/root/ceph.mon.keyring'] ->
    Ceph_conf <||>

  } else {

    exec {'ceph-deploy config pull':
      command   => "ceph-deploy --overwrite-conf config pull ${::ceph::primary_mon}",
      cwd       => '/etc/ceph',
      creates   => '/etc/ceph/ceph.conf',
      tries     => 5,
      try_sleep => 2,
    }

    file {'/root/ceph.conf':
      ensure => link,
      target => '/etc/ceph/ceph.conf',
    }

    exec {'ceph-deploy gatherkeys remote':
      command   => "ceph-deploy gatherkeys ${::ceph::primary_mon}",
      creates   => ['/root/ceph.bootstrap-mds.keyring',
                    '/root/ceph.bootstrap-osd.keyring',
                    '/root/ceph.client.admin.keyring',
                    '/root/ceph.mon.keyring',],
      tries     => 5,
      try_sleep => 2,
    }

    file {'/etc/ceph/ceph.client.admin.keyring':
      ensure => file,
      source => '/root/ceph.client.admin.keyring',
      mode   => '0600',
      owner  => 'root',
      group  => 'root',
    }

    exec {'ceph-deploy init config':
      command => "ceph-deploy --overwrite-conf config push ${::hostname}",
      creates => '/etc/ceph/ceph.conf',
    }

    ceph_conf {
      'global/cluster_network': value => $::ceph::cluster_network;
      'global/public_network':  value => $::ceph::public_network;
    }

    Exec['ceph-deploy config pull'] ->
    File['/root/ceph.conf'] ->
    Ceph_conf[['global/cluster_network', 'global/public_network']] ->
    Exec['ceph-deploy gatherkeys remote'] ->
    File['/etc/ceph/ceph.client.admin.keyring'] ->
    Exec['ceph-deploy init config']
  }
}
@ -1,14 +0,0 @@
# Enable RBD backend for ephemeral volumes
class ceph::ephemeral (
  $rbd_secret_uuid     = $::ceph::rbd_secret_uuid,
  $libvirt_images_type = $::ceph::libvirt_images_type,
  $pool                = $::ceph::compute_pool,
) {

  nova_config {
    'libvirt/images_type':      value => $libvirt_images_type;
    'libvirt/inject_key':       value => false;
    'libvirt/inject_partition': value => '-2';
    'libvirt/images_rbd_pool':  value => $pool;
  }
}
@ -1,156 +0,0 @@
# ceph configuration and resource relations
# TODO: split ceph module to submodules instead of using case with roles

class ceph (
  # General settings
  $mon_hosts            = undef,
  $mon_ip_addresses     = undef,
  $cluster_node_address = $::ipaddress, # This should be the cluster service address
  $primary_mon          = $::hostname,  # This should be the first controller
  $mon_addr             = $::ipaddress, # This needs to be replaced with the address we want to bind the mon to (if this is a mon)
  $node_hostname        = $::hostname,
  $osd_devices          = split($::osd_devices_list, ' '),
  $use_ssl              = false,
  $use_rgw              = false,

  # ceph.conf Global settings
  $auth_supported            = 'cephx',
  $osd_journal_size          = '2048',
  $osd_mkfs_type             = 'xfs',
  $osd_pool_default_size     = undef,
  $osd_pool_default_min_size = '1',
  $osd_pool_default_pg_num   = undef,
  $osd_pool_default_pgp_num  = undef,
  $cluster_network           = undef,
  $public_network            = undef,

  # ceph.conf osd settings
  $osd_max_backfills       = '1',
  $osd_recovery_max_active = '1',

  # RBD client settings
  $rbd_cache                          = true,
  $rbd_cache_writethrough_until_flush = true,

  # RadosGW settings
  $rgw_host                         = $::hostname,
  $rgw_ip                           = '0.0.0.0',
  $rgw_port                         = '6780',
  $swift_endpoint_port              = '8080',
  $rgw_keyring_path                 = '/etc/ceph/keyring.radosgw.gateway',
  $rgw_socket_path                  = '/tmp/radosgw.sock',
  $rgw_frontends                    = 'fastcgi socket_port=9000 socket_host=127.0.0.1',
  $rgw_log_file                     = '/var/log/ceph/radosgw.log',
  $rgw_use_keystone                 = true,
  $rgw_use_pki                      = false,
  $rgw_keystone_url                 = "${cluster_node_address}:35357", #"fix my formatting
  $rgw_keystone_admin_token         = undef,
  $rgw_keystone_token_cache_size    = '10',
  $rgw_keystone_accepted_roles      = '_member_, Member, admin, swiftoperator',
  $rgw_keystone_revocation_interval = $::ceph::rgw_use_pki ? { false => 1000000, default => 60 },
  $rgw_s3_auth_use_keystone         = false,
  $rgw_data                         = '/var/lib/ceph/radosgw',
  $rgw_dns_name                     = "*.${::domain}",
  $rgw_print_continue               = true,
  $rgw_nss_db_path                  = '/etc/ceph/nss',

  $rgw_large_pool_name    = '.rgw',
  $rgw_large_pool_pg_nums = '512',

  # Cinder settings
  $volume_driver      = 'cinder.volume.drivers.rbd.RBDDriver',
  $glance_api_version = '2',
  $cinder_user        = 'volumes',
  $cinder_pool        = 'volumes',
  # TODO: generate rbd_secret_uuid
  $rbd_secret_uuid    = 'a5d0dd94-57c4-ae55-ffe0-7e3732a24455',

  # Cinder Backup settings
  $cinder_backup_user = 'backups',
  $cinder_backup_pool = 'backups',

  # Glance settings
  $glance_backend        = 'ceph',
  $glance_user           = 'images',
  $glance_pool           = 'images',
  $show_image_direct_url = 'True',

  # Compute settings
  $compute_user        = 'compute',
  $compute_pool        = 'compute',
  $libvirt_images_type = 'rbd',
  $ephemeral_ceph      = false,

  # Log settings
  $use_syslog          = false,
  $syslog_log_facility = 'daemon',
  $syslog_log_level    = 'info',
) {

  Exec {
    path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
    cwd  => '/root',
  }

  # the list includes all roles that require ceph.conf
  if roles_include(['primary-controller', 'controller', 'ceph-mon', 'ceph-osd', 'compute', 'cinder']) {

    validate_array($mon_hosts)
    validate_array($mon_ip_addresses)

    include ceph::ssh
    include ceph::params
    include ceph::conf
    Class[['ceph::ssh', 'ceph::params']] -> Class['ceph::conf']
  }

  if roles_include(['primary-controller', 'controller', 'ceph-mon', 'ceph-osd']) {
    service { 'ceph':
      ensure     => 'running',
      name       => $::ceph::params::service_name,
      enable     => true,
      hasrestart => true,
      require    => Class['ceph::conf']
    }
    Package<| title == 'ceph' |> ~> Service['ceph']
    if !defined(Service['ceph']) {
      notify{ "Module ${module_name} cannot notify service ceph on packages update": }
    }
  }

  if roles_include(['primary-controller', 'controller', 'ceph-mon']) {
    include ceph::mon

    Class['ceph::conf'] -> Class['ceph::mon'] ->
    Service['ceph']

    if ($::ceph::use_rgw) {
      include ceph::radosgw
      Class['ceph::mon'] ->
      Class['ceph::radosgw']
      if defined(Class['::keystone']) {
        Class['::keystone'] -> Class['ceph::radosgw']
      }
      Ceph_conf <||> ~> Service['ceph']
    }
  }

  if roles_include('ceph-osd') {
    if ! empty($osd_devices) {
      include ceph::osds
      if roles_include(['ceph-mon']) {
        Class['ceph::mon'] -> Class['ceph::osds']
      }
      Class['ceph::conf'] -> Class['ceph::osds']
      Ceph_conf <||> ~> Service['ceph']

      # set the recommended value according to http://tracker.ceph.com/issues/10988
      sysctl::value { 'kernel.pid_max':
        value => '4194303',
      }

      Sysctl::Value <| |> -> Service['ceph']
    }
  }

}
@ -1,59 +0,0 @@
# setup Ceph monitors
class ceph::mon (
  $mon_hosts        = $::ceph::mon_hosts,
  $mon_ip_addresses = $::ceph::mon_ip_addresses,
  $mon_addr         = $::ceph::mon_addr,
  $node_hostname    = $::ceph::node_hostname,
) {

  firewall {'010 ceph-mon allow':
    chain  => 'INPUT',
    dport  => 6789,
    proto  => 'tcp',
    action => accept,
  }

  exec {'ceph-deploy mon create':
    command   => "ceph-deploy mon create ${node_hostname}:${mon_addr}",
    logoutput => true,
    unless    => "ceph mon dump | grep -qE '^[0-9]+: +${mon_addr}:.* mon\\.${node_hostname}\$'",
  }

  exec {'Wait for Ceph quorum':
    # This is necessary to prevent a race: the mon must establish a quorum
    # before it can generate keys; this has been observed to take up to
    # 15 seconds. Keys must exist prior to other commands running.
    command     => "ceph mon stat | grep -q 'quorum.*${node_hostname}'",
    tries       => 12,
    try_sleep   => 5,
    refreshonly => true,
  }

  exec {'ceph-deploy gatherkeys':
    command   => "ceph-deploy gatherkeys ${node_hostname}",
    unless    => ['test -f /root/ceph.bootstrap-mds.keyring \
                   -a -f /root/ceph.bootstrap-osd.keyring \
                   -a -f /root/ceph.client.admin.keyring',
                 ],
    try_sleep => 5,
    tries     => 6,
  }

  Firewall['010 ceph-mon allow'] ->
  Exec['ceph-deploy mon create'] ~>
  Exec['Wait for Ceph quorum'] ->
  Exec['ceph-deploy gatherkeys']

  if $node_hostname == $::ceph::primary_mon {

    # After the primary monitor has established a quorum, it is safe to
    # add other monitors to ceph.conf. All other Ceph nodes will get
    # these settings via 'ceph-deploy config pull' in ceph::conf.
    ceph_conf {
      'global/mon_host':            value => join($mon_ip_addresses, ' ');
      'global/mon_initial_members': value => join($mon_hosts, ' ');
    }

    Ceph_conf[['global/mon_host', 'global/mon_initial_members']] ->
    Exec['Wait for Ceph quorum']
  }
}
@ -1,36 +0,0 @@
# configure the nova_compute parts if present
class ceph::nova_compute (
  $rbd_secret_uuid = $::ceph::rbd_secret_uuid,
  $user            = $::ceph::compute_user,
  $compute_pool    = $::ceph::compute_pool,
  $secret_xml      = '/root/.secret_attrs.xml',
) {

  include ::nova::params

  file { $secret_xml:
    mode    => '0400',
    content => template('ceph/secret.erb')
  }

  ensure_resource('service', 'libvirt', {
    ensure => 'running',
    name   => $::nova::params::libvirt_service_name,
  })

  exec {'Set Ceph RBD secret for Nova':
    # TODO: clean this command up
    command => "virsh secret-define --file ${secret_xml} && \
                virsh secret-set-value --secret ${rbd_secret_uuid} \
                --base64 $(ceph auth get-key client.${user})",
    unless  => "virsh secret-list | fgrep -qw ${rbd_secret_uuid}",
  }

  nova_config {
    'libvirt/rbd_secret_uuid': value => $rbd_secret_uuid;
    'libvirt/rbd_user':        value => $user;
  }

  File[$secret_xml] ->
  Service['libvirt'] -> Exec['Set Ceph RBD secret for Nova']
}
@ -1,34 +0,0 @@
# == Class: ceph::osds
#
# Prepare and bring online the OSD devices
#
# ==== Parameters
#
# [*devices*]
#   (optional) Array. This is the list of OSD devices identified by the facter.
#
class ceph::osds (
  $devices = $::ceph::osd_devices,
){

  exec { 'udevadm trigger':
    command   => 'udevadm trigger',
    returns   => 0,
    logoutput => true,
  } ->

  exec {'ceph-disk activate-all':
    command   => 'ceph-disk activate-all',
    returns   => [0, 1],
    logoutput => true,
  } ->

  firewall { '011 ceph-osd allow':
    chain  => 'INPUT',
    dport  => '6800-7100',
    proto  => 'tcp',
    action => accept,
  } ->

  ceph::osds::osd{ $devices: }
}
@ -1,40 +0,0 @@
# == Define: ceph::osds::osd
#
# Prepare and activate OSD devices on the node
#
define ceph::osds::osd () {

  # ${name} format is DISK[:JOURNAL]
  $params = split($name, ':')
  $data_device_name   = $params[0]
  $deploy_device_name = "${::hostname}:${name}"

  exec { "ceph-deploy osd prepare ${deploy_device_name}":
    # ceph-deploy osd prepare ensures there is a filesystem on the
    # disk according to the args passed to ceph.conf (above).
    #
    # It has a long timeout because the format can take forever. A
    # reasonable amount of time would be around 300 times the length of
    # $osd_nodes. Right now it's 0 to prevent puppet from aborting it.
    command   => "ceph-deploy osd prepare ${deploy_device_name}",
    returns   => 0,
    timeout   => 0, # TODO: make this something reasonable
    tries     => 2, # This is necessary because of a race with the mon creating keys
    try_sleep => 1,
    logoutput => true,
    unless    => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, active' -e '${data_device_name} ceph data, prepared'",
  }

  exec { "ceph-deploy osd activate ${deploy_device_name}":
    command   => "ceph-deploy osd activate ${deploy_device_name}",
    try_sleep => 10,
    tries     => 3,
    logoutput => true,
    timeout   => 0,
    onlyif    => "ceph-disk list | fgrep -q -e '${data_device_name} ceph data, prepared'",
  }

  Exec <| title == 'ceph-deploy gatherkeys' |> ->
  Exec["ceph-deploy osd prepare ${deploy_device_name}"] ->
  Exec["ceph-deploy osd activate ${deploy_device_name}"]
}
@ -1,57 +0,0 @@
# These are per-OS parameters and should be considered static
class ceph::params {

  case $::osfamily {
    'RedHat': {
      $service_name         = 'ceph'
      $service_nova_compute = 'openstack-nova-compute'
      # RadosGW
      $service_httpd   = 'httpd'
      $package_httpd   = 'httpd'
      $user_httpd      = 'apache'
      $package_libnss  = 'nss-tools'
      $service_radosgw = 'ceph-radosgw'
      $package_radosgw = 'ceph-radosgw'
      $package_modssl  = 'mod_ssl'
      $package_fastcgi = 'mod_fastcgi'
      $dir_httpd_conf  = '/etc/httpd/conf/'
      $dir_httpd_sites = '/etc/httpd/conf.d/'
      $dir_httpd_ssl   = '/etc/httpd/ssl/'
      $dir_httpd_log   = '/var/log/httpd/'

      package { ['ceph', 'redhat-lsb-core','ceph-deploy',]:
        ensure => installed,
      }

      file {'/etc/sudoers.d/ceph':
        content => "# This is required for ceph-deploy\nDefaults !requiretty\n"
      }
    }

    'Debian': {
      $service_name         = 'ceph-all'
      $service_nova_compute = 'nova-compute'
      # RadosGW
      $service_httpd   = 'apache2'
      $package_httpd   = 'apache2'
      $user_httpd      = 'www-data'
      $package_libnss  = 'libnss3-tools'
      $service_radosgw = 'radosgw'
      $package_radosgw = 'radosgw'
      $package_fastcgi = 'libapache2-mod-fastcgi'
      $package_modssl  = undef
      $dir_httpd_conf  = '/etc/httpd/conf/'
      $dir_httpd_sites = '/etc/apache2/sites-available/'
      $dir_httpd_ssl   = '/etc/apache2/ssl/'
      $dir_httpd_log   = '/var/log/apache2/'

      package { ['ceph','ceph-deploy', ]:
        ensure => installed,
      }
    }

    default: {
      fail("Unsupported osfamily: ${::osfamily} operatingsystem: ${::operatingsystem}, module ${module_name} only supports osfamily RedHat and Debian")
    }
  }
}
@ -1,45 +0,0 @@
# create a Ceph pool with an associated Cephx user and ACL

define ceph::pool (
  # Cephx user and ACL
  $user = $name,
  $acl  = "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${name}'",

  # Unix user and group for the keyring file
  $keyring_owner = $user,
  $keyring_group = $keyring_owner,

  # Pool settings
  $pg_num  = $::ceph::osd_pool_default_pg_num,
  $pgp_num = $::ceph::osd_pool_default_pgp_num,
) {

  exec {"Create ${name} pool":
    command => "ceph osd pool create ${name} ${pg_num} ${pgp_num}",
    unless  => "rados lspools | grep -q '^${name}$'",
  }

  exec {"Create ${user} Cephx user and ACL":
    command => "ceph auth get-or-create client.${user} ${acl}",
    unless  => "ceph auth list | grep -q '^client.${user}$'"
  }

  $keyring = "/etc/ceph/ceph.client.${user}.keyring"

  exec {"Populate ${user} keyring":
    command => "ceph auth get-or-create client.${user} > ${keyring}",
    creates => $keyring,
  }

  file {$keyring:
    ensure => file,
    mode   => '0640',
    owner  => $keyring_owner,
    group  => $keyring_group,
  }

  Exec["Create ${name} pool"] ->
  Exec["Create ${user} Cephx user and ACL"] ->
  Exec["Populate ${user} keyring"] ->
  File[$keyring]
}
@ -1,251 +0,0 @@

# deploys Ceph radosgw as an Apache FastCGI application
class ceph::radosgw (
  $rgw_id      = 'radosgw.gateway',
  $rgw_user    = $::ceph::params::user_httpd,
  $use_ssl     = $::ceph::use_ssl,
  $primary_mon = $::ceph::primary_mon,

  # RadosGW settings
  $rgw_host            = $::ceph::rgw_host,
  $rgw_ip              = $::ceph::rgw_ip,
  $rgw_port            = $::ceph::rgw_port,
  $swift_endpoint_port = $::ceph::swift_endpoint_port,
  $rgw_keyring_path    = $::ceph::rgw_keyring_path,
  $rgw_socket_path     = $::ceph::rgw_socket_path,
  $rgw_frontends       = $::ceph::rgw_frontends,
  $rgw_log_file        = $::ceph::rgw_log_file,
  $rgw_data            = $::ceph::rgw_data,
  $rgw_dns_name        = $::ceph::rgw_dns_name,
  $rgw_print_continue  = $::ceph::rgw_print_continue,

  # rgw Keystone settings
  $rgw_use_pki                      = $::ceph::rgw_use_pki,
  $rgw_use_keystone                 = $::ceph::rgw_use_keystone,
  $rgw_keystone_url                 = $::ceph::rgw_keystone_url,
  $rgw_keystone_admin_token         = $::ceph::rgw_keystone_admin_token,
  $rgw_keystone_token_cache_size    = $::ceph::rgw_keystone_token_cache_size,
  $rgw_keystone_accepted_roles      = $::ceph::rgw_keystone_accepted_roles,
  $rgw_keystone_revocation_interval = $::ceph::rgw_keystone_revocation_interval,
  $rgw_s3_auth_use_keystone         = $::ceph::rgw_s3_auth_use_keystone,
  $rgw_nss_db_path                  = $::ceph::rgw_nss_db_path,
  $rgw_large_pool_name              = $::ceph::rgw_large_pool_name,
  $rgw_large_pool_pg_nums           = $::ceph::rgw_large_pool_pg_nums,

  # rgw Log settings
  $use_syslog      = $::ceph::use_syslog,
  $syslog_facility = $::ceph::syslog_log_facility,
  $syslog_level    = $::ceph::syslog_log_level,
) {

  $keyring_path     = "/etc/ceph/keyring.${rgw_id}"
  $radosgw_auth_key = "client.${rgw_id}"
  $dir_httpd_root   = '/var/www/radosgw'
  $dir_httpd_log    = $::ceph::params::dir_httpd_log

  package { [
    $::ceph::params::package_radosgw,
    $::ceph::params::package_fastcgi,
    $::ceph::params::package_libnss,
  ]:
    ensure => 'installed',
  }

  # check that the httpd package/service is defined
  if !defined(Package['httpd']) {
    package { 'httpd':
      ensure => 'installed',
      name   => $::ceph::params::package_httpd,
    }
  }

  if !defined(Service['httpd']) {
    service { 'httpd':
      ensure => 'running',
      name   => $::ceph::params::service_httpd,
      enable => true,
    }
  }

  firewall {'012 RadosGW allow':
    chain  => 'INPUT',
    dport  => [ $rgw_port, $swift_endpoint_port ],
    proto  => 'tcp',
    action => accept,
  }

  # All files need to be owned by the rgw / http user.
  File {
    owner => $rgw_user,
    group => $rgw_user,
  }

  ceph_conf {
    "client.${rgw_id}/host":               value => $rgw_host;
    "client.${rgw_id}/keyring":            value => $keyring_path;
    "client.${rgw_id}/rgw_socket_path":    value => $rgw_socket_path;
    "client.${rgw_id}/rgw_frontends":      value => $rgw_frontends;
    "client.${rgw_id}/user":               value => $rgw_user;
    "client.${rgw_id}/rgw_data":           value => $rgw_data;
    "client.${rgw_id}/rgw_dns_name":       value => $rgw_dns_name;
    "client.${rgw_id}/rgw_print_continue": value => $rgw_print_continue;
  }

  if ($use_ssl) {

    $httpd_ssl = $::ceph::params::dir_httpd_ssl
    exec {'copy OpenSSL certificates':
      command => "scp -r ${rgw_nss_db_path}/* ${primary_mon}:${rgw_nss_db_path} && \
                  ssh ${primary_mon} '/etc/init.d/radosgw restart'",
    }
    exec {"generate SSL certificate on ${name}":
      command => "openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout ${httpd_ssl}apache.key -out ${httpd_ssl}apache.crt -subj '/C=RU/ST=Russia/L=Saratov/O=Fuel/OU=CA/CN=localhost'",
      returns => [0,1],
    }
  }

  if ($rgw_use_keystone) {

    ceph_conf {
      "client.${rgw_id}/rgw_keystone_url":                 value => $rgw_keystone_url;
      "client.${rgw_id}/rgw_keystone_admin_token":         value => $rgw_keystone_admin_token;
      "client.${rgw_id}/rgw_keystone_accepted_roles":      value => $rgw_keystone_accepted_roles;
      "client.${rgw_id}/rgw_keystone_token_cache_size":    value => $rgw_keystone_token_cache_size;
      "client.${rgw_id}/rgw_keystone_revocation_interval": value => $rgw_keystone_revocation_interval;
    }

    if ($rgw_s3_auth_use_keystone) {
      ceph_conf {
        "client.${rgw_id}/rgw_s3_auth_use_keystone": value => $rgw_s3_auth_use_keystone;
      }
    } else {
      ceph_conf {
        "client.${rgw_id}/rgw_s3_auth_use_keystone": ensure => 'absent'
      }
    }

    if ($rgw_use_pki) {

      ceph_conf {
        "client.${rgw_id}/nss db path": value => $rgw_nss_db_path;
      }

      # This creates the signing certs used by radosgw to check cert revocation
      # status from keystone
      exec {'create nss db signing certs':
        command => "openssl x509 -in /etc/keystone/ssl/certs/ca.pem -pubkey | \
                    certutil -d ${rgw_nss_db_path} -A -n ca -t 'TCu,Cu,Tuw' && \
                    openssl x509 -in /etc/keystone/ssl/certs/signing_cert.pem -pubkey | \
                    certutil -A -d ${rgw_nss_db_path} -n signing_cert -t 'P,P,P'",
        user    => $rgw_user,
      }

      Exec["ceph-create-radosgw-keyring-on ${name}"] ->
      Exec['create nss db signing certs']

    } # END rgw_use_pki
  } # END rgw_use_keystone

  if ($::osfamily == 'Debian'){

    file {'/etc/apache2/sites-enabled/rgw.conf':
      ensure => link,
      target => "${::ceph::params::dir_httpd_sites}/rgw.conf",
    }

    Package[$::ceph::params::package_fastcgi] ->
    File["${::ceph::params::dir_httpd_sites}/rgw.conf"] ->
    File['/etc/apache2/sites-enabled/rgw.conf'] ~>
    Service<| title == 'httpd' |>

  } # END osfamily Debian

  if ! $use_syslog {
    ceph_conf {
      "client.${rgw_id}/log_file":      value => $rgw_log_file;
      "client.${rgw_id}/log_to_syslog": value => $use_syslog;
    }
    file { $rgw_log_file:
      ensure => present,
      mode   => '0755',
    }
  } else {
    file { $rgw_log_file:
      ensure => absent,
    }
  }

  file { [
    $::ceph::params::dir_httpd_ssl,
    "${rgw_data}/ceph-${rgw_id}",
    $rgw_data,
    $dir_httpd_root,
    $rgw_nss_db_path,
  ]:
    ensure  => 'directory',
    mode    => '0755',
    recurse => true,
  }

  file { "${::ceph::params::dir_httpd_sites}/rgw.conf":
    content => template('ceph/rgw.conf.erb'),
  }

  file { "${dir_httpd_root}/s3gw.fcgi":
    content => template('ceph/s3gw.fcgi.erb'),
    mode    => '0755',
  }

  file {"${::ceph::params::dir_httpd_sites}/fastcgi.conf":
    content => template('ceph/fastcgi.conf.erb'),
    mode    => '0755',
  }

  exec { "ceph create ${radosgw_auth_key}":
    command => "ceph auth get-or-create ${radosgw_auth_key} osd 'allow rwx' mon 'allow rw'",
    unless  => "ceph auth list | fgrep -qx ${radosgw_auth_key}",
  }

  exec { "Populate ${radosgw_auth_key} keyring":
    command => "ceph auth get-or-create ${radosgw_auth_key} > ${keyring_path}",
    creates => $keyring_path,
  }

  exec { "Create ${rgw_large_pool_name} pool":
    command => "ceph -n ${radosgw_auth_key} osd pool create ${rgw_large_pool_name} ${rgw_large_pool_pg_nums} ${rgw_large_pool_pg_nums}",
    unless  => "rados lspools | grep '^${rgw_large_pool_name}$'",
  }

  file { $keyring_path: mode => '0640', }

  file {"${rgw_data}/ceph-${rgw_id}/done":
    ensure => present,
    mode   => '0644',
  }

  Ceph_conf <||> ->
  Package<| title == 'httpd' |> ->
  Package[ [
    $::ceph::params::package_radosgw,
    $::ceph::params::package_fastcgi,
    $::ceph::params::package_libnss,
  ] ] ->
  File[ [
    "${::ceph::params::dir_httpd_sites}/rgw.conf",
    "${::ceph::params::dir_httpd_sites}/fastcgi.conf",
    "${dir_httpd_root}/s3gw.fcgi",
    $::ceph::params::dir_httpd_ssl,
    "${rgw_data}/ceph-${rgw_id}",
    $rgw_data,
    $dir_httpd_root,
    $rgw_nss_db_path,
    $rgw_log_file,
  ] ] ->
  Exec["ceph create ${radosgw_auth_key}"] ->
  Exec["Populate ${radosgw_auth_key} keyring"] ->
  File["${rgw_data}/ceph-${rgw_id}/done"] ->
  File[$keyring_path] ->
  Exec["Create ${rgw_large_pool_name} pool"] ->
  Firewall['012 RadosGW allow'] ~>
  Service <| title == 'httpd' |>
}
@ -1,26 +0,0 @@
# generate and install SSH keys for Ceph
class ceph::ssh {

  $ssh_config  = '/root/.ssh/config'
  $private_key = '/var/lib/astute/ceph/ceph'
  $public_key  = '/var/lib/astute/ceph/ceph.pub'

  install_ssh_keys {'root_ssh_keys_for_ceph':
    ensure           => present,
    user             => 'root',
    private_key_path => $private_key,
    public_key_path  => $public_key,
    private_key_name => 'id_rsa',
    public_key_name  => 'id_rsa.pub',
    authorized_keys  => 'authorized_keys',
  }

  if !defined(File[$ssh_config]) {
    file { $ssh_config :
      mode    => '0600',
      content => "Host *\n  StrictHostKeyChecking no\n  UserKnownHostsFile=/dev/null\n",
    }
  }

  Install_ssh_keys['root_ssh_keys_for_ceph'] -> File[$ssh_config]
}
@ -1,37 +0,0 @@
require 'spec_helper'

describe 'ceph::nova_compute', :type => :class do

  let(:facts) do
    {
      :osfamily        => 'Debian',
      :operatingsystem => 'Ubuntu',
    }
  end

  context 'ceph::nova_compute with defaults' do
    let (:params) do
      {
        :secret_xml      => '/root/.secret_attrs.xml',
        :rbd_secret_uuid => 'a6c179f4-0e40-4d97-8d20-74fad3935e8a',
        :user            => 'compute',
      }
    end

    it { should contain_file(params[:secret_xml]) }
    it { should contain_exec('Set Ceph RBD secret for Nova').that_requires('Service[libvirt]') }

    it { should contain_service('libvirt').with(
      :ensure => 'running',
    )}

    it { should contain_nova_config('libvirt/rbd_secret_uuid').with(
      :value => params[:rbd_secret_uuid],
    )}

    it { should contain_nova_config('libvirt/rbd_user').with(
      :value => params[:user],
    )}
  end

end
@ -1,33 +0,0 @@
require 'spec_helper'

describe 'ceph::osds', :type => :class do

  context 'Simple ceph::osds class test' do
    let (:params) {{ :devices => ['/dev/vdc', '/dev/vdd' ] }}

    it { should contain_exec('udevadm trigger') }
    it { should contain_exec('ceph-disk activate-all').that_requires('Exec[udevadm trigger]') }
    it { should contain_firewall('011 ceph-osd allow').that_requires('Exec[ceph-disk activate-all]') }
    it { should contain_ceph__osds__osd('/dev/vdc').that_requires('Firewall[011 ceph-osd allow]') }
    it { should contain_ceph__osds__osd('/dev/vdd').that_requires('Firewall[011 ceph-osd allow]') }
  end


  context 'Class ceph::osds without devices' do
    let (:params) {{ :devices => nil }}

    it { should contain_firewall('011 ceph-osd allow') }
    it { should_not contain_ceph__osds__osd }
  end

  context 'Class ceph::osds with devices and journals' do
    let (:params) {{ :devices => ['/dev/sdc1:/dev/sdc2', '/dev/sdd1:/dev/sdd2'] }}

    it { should contain_firewall('011 ceph-osd allow') }
    it { should contain_ceph__osds__osd('/dev/sdc1:/dev/sdc2') }
    it { should contain_ceph__osds__osd('/dev/sdd1:/dev/sdd2') }
  end

end

# vim: set ts=2 sw=2 et :
@ -1,57 +0,0 @@
require 'spec_helper'

describe 'ceph::osds::osd', :type => :define do
  let :facts do
    { :hostname => 'test.example' }
  end

  context 'Simple test' do
    let(:title) { '/dev/svv' }

    it { should contain_exec("ceph-deploy osd prepare test.example:/dev/svv").with(
      'command'   => 'ceph-deploy osd prepare test.example:/dev/svv',
      'returns'   => 0,
      'timeout'   => 0,
      'tries'     => 2,
      'try_sleep' => 1,
      'logoutput' => true,
      'unless'    => "ceph-disk list | fgrep -q -e '/dev/svv ceph data, active' -e '/dev/svv ceph data, prepared'",
      )
    }
    it { should contain_exec("ceph-deploy osd activate test.example:/dev/svv").with(
      'command'   => 'ceph-deploy osd activate test.example:/dev/svv',
      'try_sleep' => 10,
      'tries'     => 3,
      'logoutput' => true,
      'timeout'   => 0,
      'onlyif'    => "ceph-disk list | fgrep -q -e '/dev/svv ceph data, prepared'",
      )
    }
  end

  context 'Simple test with journal' do
    let(:title) { '/dev/sdd:/dev/journal' }
    it { should contain_exec("ceph-deploy osd prepare test.example:/dev/sdd:/dev/journal").with(
      'command'   => 'ceph-deploy osd prepare test.example:/dev/sdd:/dev/journal',
      'returns'   => 0,
      'timeout'   => 0,
      'tries'     => 2,
      'try_sleep' => 1,
      'logoutput' => true,
      'unless'    => "ceph-disk list | fgrep -q -e '/dev/sdd ceph data, active' -e '/dev/sdd ceph data, prepared'",
      )
    }
    it { should contain_exec("ceph-deploy osd activate test.example:/dev/sdd:/dev/journal").with(
      'command'   => 'ceph-deploy osd activate test.example:/dev/sdd:/dev/journal',
      'try_sleep' => 10,
      'tries'     => 3,
      'logoutput' => true,
      'timeout'   => 0,
      'onlyif'    => "ceph-disk list | fgrep -q -e '/dev/sdd ceph data, prepared'",
      )
    }
  end

end

# vim: set ts=2 sw=2 et :
@@ -1,192 +0,0 @@
#! /usr/bin/env ruby -S rspec
require 'spec_helper'

describe "ceph::facter::osd_devices_list", :type => :fact do

  it "should exist" do
    expect(Facter.fact(:osd_devices_list).name).to eq(:osd_devices_list)
  end

  context "with typical block device names" do
    context "OSD without journal"
    before :all do
      Facter::Util::Resolution.stubs(:exec).with(%q{lsblk -nr -o KNAME,TYPE}).returns(
        "sda disk
sda1 part
sda2 part
sdb disk
sdb1 part
hda disk
hda12 part
dm-0 lvm
dm-1 lvm
dm-2 lvm"
      )
      Dir.stubs(:glob).with("/dev/sda?*").returns(["/dev/sda1", "/dev/sda2"])
      Dir.stubs(:glob).with("/dev/sdb?*").returns(["/dev/sdb1"])
      Dir.stubs(:glob).with("/dev/hda?*").returns(["/dev/hda12"])
      Dir.stubs(:glob).with("/dev/vda?*").returns([])
      # Partition GUID code: EBD0A0A2-B9E5-4433-87C0-68B6B72699C7 (Microsoft basic data)
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 1 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: EBD0A0A2-B9E5-4433-87C0-68B6B72699C7 (Microsoft basic data)
Partition unique GUID: 8DA1A912-0CBD-4E0D-9C02-A754D651A15C")
      # Partition GUID code: 0FC63DAF-8483-4772-8E79-3D69D8477DE4 (Linux filesystem)
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 2 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 0FC63DAF-8483-4772-8E79-3D69D8477DE4 (Linux filesystem)
Partition unique GUID: 91DF4C37-DEC2-45D1-B977-DC73FD5AA195")
      # OSD
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 1 /dev/sdb}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 12 /dev/hda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/sdb1 /proc/mounts}).returns("0\n")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/hda12 /proc/mounts}).returns("1\n")
    end

    it "should return umounted osd device without journal" do
      expect(Facter.fact(:osd_devices_list).value).to eq("/dev/sdb1")
    end

    after :all do
      Dir.unstub(:glob)
      Facter::Util::Resolution.unstub(:exec)
      Facter.flush
    end
  end

  context "OSD with journal" do
    before :all do
      Facter::Util::Resolution.stubs(:exec).with(%q{lsblk -nr -o KNAME,TYPE}).returns(
        "sda disk
sda1 part
sda2 part
sda3 part
sda4 part
dm-0 lvm
dm-1 lvm
dm-2 lvm"
      )
      Dir.stubs(:glob).with("/dev/sda?*").returns(["/dev/sda1", "/dev/sda2", "/dev/sda3", "/dev/sda4"])
      # OSD with journals
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 1 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 2 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 45B0969E-9B03-4F30-B4C6-B4B80CEFF106 (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 3 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 4 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 45B0969E-9B03-4F30-B4C6-B4B80CEFF106 (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")

      Facter::Util::Resolution.stubs(:exec).with(%q{udevadm info -q property -n /dev/sda2}).returns("DEVLINKS=/dev/disk/by-id/ata-ST1000DM003-1ER162_Z4Y18F8B-part2 /dev/disk/by-id/wwn-0x5000c5007906728b-part2 /dev/disk/by-uuid/d62a043d-586f-461e-b333-d822d8014301\nDEVNAME=/dev/sda2")
      Facter::Util::Resolution.stubs(:exec).with(%q{udevadm info -q property -n /dev/sda4}).returns("DEVLINKS=/dev/disk/by-id/ata-ST1000DM003-1ER162_Z4Y18F8B-part4 /dev/disk/by-id/wwn-0x5000c5007906728b-part4 /dev/disk/by-uuid/5E9645E89645C0EF\nDEVNAME=/dev/sda4")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/sda1 /proc/mounts}).returns("0\n")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/sda2 /proc/mounts}).returns("0\n")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/sda3 /proc/mounts}).returns("0\n")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/sda4 /proc/mounts}).returns("0\n")
    end

    it "should return 2 osd devices with journal" do
      expect(Facter.fact(:osd_devices_list).value).to eq("/dev/sda1:/dev/disk/by-id/ata-ST1000DM003-1ER162_Z4Y18F8B-part2 /dev/sda3:/dev/disk/by-id/ata-ST1000DM003-1ER162_Z4Y18F8B-part4")
    end
    after :all do
      Dir.unstub(:glob)
      Facter::Util::Resolution.unstub(:exec)
      Facter.flush
    end
  end


  context "no OSD devices" do
    before :all do
      Facter::Util::Resolution.stubs(:exec).with(%q{lsblk -nr -o KNAME,TYPE}).returns(
        "sda disk
sda1 part
sda2 part
dm-0 lvm
dm-1 lvm
dm-2 lvm"
      )
      Dir.stubs(:glob).with("/dev/sda?*").returns(["/dev/sda1", "/dev/sda2"])
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 1 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: EBD0A0A2-B9E5-4433-87C0-68B6B72699C7 (Microsoft basic data)
Partition unique GUID: 8DA1A912-0CBD-4E0D-9C02-A754D651A15C")
      # Partition GUID code: 0FC63DAF-8483-4772-8E79-3D69D8477DE4 (Linux filesystem)
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 2 /dev/sda}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 0FC63DAF-8483-4772-8E79-3D69D8477DE4 (Linux filesystem)
Partition unique GUID: 91DF4C37-DEC2-45D1-B977-DC73FD5AA195")
    end

    it "should return nil if no devices were detected" do
      expect(Facter.fact(:osd_devices_list).value).to be_empty
    end

    after :all do
      Dir.unstub(:glob)
      Facter::Util::Resolution.unstub(:exec)
      Facter.flush
    end
  end

  context "with special block device names" do
    before :all do
      Facter::Util::Resolution.stubs(:exec).with(%q{lsblk -nr -o KNAME,TYPE}).returns(
        "cciss!c0d0 disk
cciss/c0d0p1 part
nvme0n1 disk
nvme0n1p1 part
dm-0 lvm
dm-1 lvm
dm-2 lvm"
      )

      Dir.stubs(:glob).with("/dev/cciss/c0d0?*").returns(["/dev/cciss/c0d0p1"])
      Dir.stubs(:glob).with("/dev/nvme0n1?*").returns(["/dev/nvme0n1p1"])
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 1 /dev/cciss/c0d0}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{sgdisk -i 1 /dev/nvme0n1}).returns("
You will need to delete this partition or resize it in another utility.
Partition GUID code: 4FBD7E29-9D25-41B8-AFD0-062C0CEFF05D (Unknown)
Partition unique GUID: 4400F9A1-DE34-44EB-AE42-61578FFF31D5
")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/cciss/c0d0p1 /proc/mounts}).returns("0\n")
      Facter::Util::Resolution.stubs(:exec).with(%q{grep -c /dev/nvme0n1p1 /proc/mounts}).returns("0\n")
    end

    it "should return two osd devices without journals" do
      expect(Facter.fact(:osd_devices_list).value).to eq("/dev/cciss/c0d0p1 /dev/nvme0n1p1")
    end
    after :all do
      Dir.unstub(:glob)
      Facter::Util::Resolution.unstub(:exec)
      Facter.flush
    end

  end
end

# vim: set ts=2 sw=2 et :
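The fact exercised above returns a single space-separated string, with an optional `:journal` suffix per device. As a quick illustration, the two value shapes look like this (the literals are taken straight from the expectations in the spec above, not from a deployment):

```puppet
# Value shapes of $::osd_devices_list, as asserted by the removed spec.
$without_journals = '/dev/cciss/c0d0p1 /dev/nvme0n1p1'
$with_journals    = '/dev/sda1:/dev/disk/by-id/ata-ST1000DM003-1ER162_Z4Y18F8B-part2 /dev/sda3:/dev/disk/by-id/ata-ST1000DM003-1ER162_Z4Y18F8B-part4'
```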
@@ -1,9 +0,0 @@
require 'rubygems'
require 'puppetlabs_spec_helper/module_spec_helper'

fixture_path = File.expand_path(File.join(__FILE__, '..', 'fixtures'))

RSpec.configure do |c|
  c.module_path = File.join(fixture_path, 'modules')
  c.manifest_dir = File.join(fixture_path, 'manifests')
end
@@ -1,25 +0,0 @@
# This file is managed by Puppet

# WARNING: this is a kludge:
## The User/Group for httpd need to be set before we can load mod_fastcgi,
## but /etc/httpd/conf.d/fastcgi.conf on RHEL gets loaded before
## /etc/httpd/conf/httpd.conf, so we need to set them here :(
## mod_fcgid does not have this bug,
## but it does not handle child PHP processes appropriately per
## http://serverfault.com/questions/303535/a-single-php-fastcgi-process-blocks-all-other-php-requests/305093#305093

User apache
Group apache

LoadModule fastcgi_module modules/mod_fastcgi.so

# dir for IPC socket files
FastCgiIpcDir /var/run/mod_fastcgi

# wrap all fastcgi script calls in suexec
# Must be off in RHEL
FastCgiWrapper Off

# global FastCgiConfig can be overridden by FastCgiServer options in vhost config
FastCgiConfig -idle-timeout 20 -maxClassProcesses 1
@@ -1,19 +0,0 @@
Listen <%= @rgw_ip %>:<%= @rgw_port %>

<VirtualHost <%= @rgw_ip %>:<%= @rgw_port %>>
  ServerName <%= @fqdn %>
  DocumentRoot <%= @dir_httpd_root %>

  RewriteEngine On
  RewriteRule .* - [E=HTTP_AUTHORIZATION:%{HTTP:Authorization},L]

  SetEnv proxy-nokeepalive 1
  ProxyPass / fcgi://127.0.0.1:9000/

  ## Logging
  ErrorLog "<%= @dir_httpd_log %>radosgw_error.log"
  CustomLog "<%= @dir_httpd_log %>radosgw_access.log" forwarded

  AllowEncodedSlashes On
  ServerSignature Off
</VirtualHost>
@@ -1,2 +0,0 @@
#!/bin/sh
exec /usr/bin/radosgw -c /etc/ceph/ceph.conf -n client.radosgw.gateway
@@ -95,3 +95,8 @@ mod 'sahara',
mod 'swift',
  :git => 'https://github.com/fuel-infra/puppet-swift.git',
  :ref => 'master'

# Pull in puppet-ceph
mod 'ceph',
  :git => 'https://github.com/fuel-infra/puppet-ceph.git',
  :ref => 'master'
@@ -74,7 +74,7 @@
  type: puppet
  version: 2.1.0
  role: [primary-controller, controller]
  requires: [post_deployment_start, enable_rados]
  requires: [post_deployment_start, ceph-radosgw]
  required_for: [post_deployment_end]
  condition:
    yaql_exp: >
@@ -310,6 +310,7 @@ class openstack_tasks::keystone::keystone {
    token_caching           => $token_caching,
    cache_backend           => $cache_backend,
    revoke_driver           => $revoke_driver,
    enable_pki_setup        => true,
    admin_endpoint          => $admin_url,
    memcache_dead_retry     => '60',
    memcache_socket_timeout => '1',
@@ -0,0 +1,19 @@
module Puppet::Parser::Functions
  newfunction(:osd_devices_hash, :type => :rvalue,
              :doc => <<-EOS
Returns the hash of osd devices for create_resources puppet function
EOS
  ) do |arguments|

    raise(Puppet::ParseError, "Wrong number of arguments (#{arguments.length} for 1).") if arguments.size != 1
    raise(Puppet::ParseError, "Argument should be a String.") if !arguments[0].is_a?(String)

    devices_array = arguments[0].split(" ").map{|value| value.split(":")}
    devices_hash = devices_array.inject({}) do |memo, (key, value)|
      memo[key] = {'journal' => value}
      memo
    end
    return devices_hash
  end
end
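To illustrate the conversion this function performs, here is a hedged sketch: the input string below is a hypothetical example (the real input is the `osd_devices_list` fact), while the `ceph::osds` usage mirrors the ceph-osd manifest later in this commit.

```puppet
# osd_devices_hash() splits the string on spaces, then splits each entry on
# ':' into device => journal. A device with no ':' suffix gets no journal
# (Ruby nil, effectively undef in the manifest).
$osds = osd_devices_hash('/dev/sdb1:/dev/sdc1 /dev/sdd1')
# => { '/dev/sdb1' => { 'journal' => '/dev/sdc1' },
#      '/dev/sdd1' => { 'journal' => undef } }

# The resulting hash feeds straight into the upstream module:
class { '::ceph::osds':
  args => $osds,
}
```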
@@ -2,20 +2,25 @@ class osnailyfacter::ceph::ceph_compute {

  notice('MODULAR: ceph/ceph_compute.pp')

  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $storage_hash = hiera_hash('storage', {})
  $use_neutron = hiera('use_neutron')
  $public_vip = hiera('public_vip')
  $use_syslog = hiera('use_syslog', true)
  $syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
  $keystone_hash = hiera_hash('keystone', {})
  # Cinder settings
  $cinder_pool = 'volumes'
  # Glance settings
  $glance_pool = 'images'
  # Nova Compute settings
  $compute_user = 'compute'
  $compute_pool = 'compute'
  $storage_hash = hiera('storage', {})
  $mon_key = pick($storage_hash['mon_key'], 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw==')
  $fsid = pick($storage_hash['fsid'], '066F558C-6789-4A93-AAF1-5AF1BA01A3AD')
  $cinder_pool = 'volumes'
  $glance_pool = 'images'
  $compute_user = 'compute'
  $compute_pool = 'compute'
  $secret = $mon_key
  $per_pool_pg_nums = $storage_hash['per_pool_pg_nums']
  $compute_pool_pg_num = pick($per_pool_pg_nums[$compute_pool], '1024')
  $compute_pool_pgp_num = pick($per_pool_pg_nums[$compute_pool], '1024')

  prepare_network_config(hiera_hash('network_scheme', {}))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')

  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $mon_ips = join(values($mon_address_map), ',')
  $mon_hosts = join(keys($mon_address_map), ',')

  if !($storage_hash['ephemeral_ceph']) {
    $libvirt_images_type = 'default'

@@ -23,14 +28,6 @@ class osnailyfacter::ceph::ceph_compute {
    $libvirt_images_type = 'rbd'
  }

  if ($storage_hash['images_ceph']) {
    $glance_backend = 'ceph'
  } elsif ($storage_hash['images_vcenter']) {
    $glance_backend = 'vmware'
  } else {
    $glance_backend = 'swift'
  }

  if ($storage_hash['volumes_ceph'] or
      $storage_hash['images_ceph'] or
      $storage_hash['objects_ceph'] or

@@ -42,64 +39,36 @@ class osnailyfacter::ceph::ceph_compute {
  }

  if $use_ceph {
    $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
    $primary_mons = keys($ceph_primary_monitor_node)
    $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']

    prepare_network_config(hiera_hash('network_scheme', {}))
    $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
    $ceph_public_network = get_network_role_property('ceph/public', 'network')

    $per_pool_pg_nums = $storage_hash['per_pool_pg_nums']

    class { '::ceph':
      primary_mon              => $primary_mon,
      mon_hosts                => keys($mon_address_map),
      mon_ip_addresses         => values($mon_address_map),
      cluster_node_address     => $public_vip,
      osd_pool_default_size    => $storage_hash['osd_pool_size'],
      osd_pool_default_pg_num  => $storage_hash['pg_num'],
      osd_pool_default_pgp_num => $storage_hash['pg_num'],
      use_rgw                  => false,
      glance_backend           => $glance_backend,
      cluster_network          => $ceph_cluster_network,
      public_network           => $ceph_public_network,
      use_syslog               => $use_syslog,
      syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
      syslog_log_facility      => $syslog_log_facility_ceph,
      rgw_keystone_admin_token => $keystone_hash['admin_token'],
      ephemeral_ceph           => $storage_hash['ephemeral_ceph']
      fsid                => $fsid,
      mon_initial_members => $mon_hosts,
      mon_host            => $mon_ips,
      cluster_network     => $ceph_cluster_network,
      public_network      => $ceph_public_network,
    }

    service { $::ceph::params::service_nova_compute :}

    class { 'ceph::ephemeral':
      libvirt_images_type => $libvirt_images_type,
      pool                => $compute_pool,
    }

    Class['ceph::conf'] ->
    Class['ceph::ephemeral'] ~>
    Service[$::ceph::params::service_nova_compute]

    ceph::pool { $compute_pool:
      user          => $compute_user,
      acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}'",
      keyring_owner => 'nova',
      pg_num        => pick($per_pool_pg_nums[$compute_pool], '1024'),
      pgp_num       => pick($per_pool_pg_nums[$compute_pool], '1024'),
      pg_num        => $compute_pool_pg_num,
      pgp_num       => $compute_pool_pgp_num,
    }

    include ::ceph::nova_compute
    ceph::key { "client.${compute_user}":
      user    => 'nova',
      group   => 'nova',
      secret  => $secret,
      cap_mon => 'allow r',
      cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}, allow rwx pool=${compute_pool}",
      inject  => true,
    }

    Class['::ceph::conf'] ->
    class {'::osnailyfacter::ceph_nova_compute':
      user                => $compute_user,
      compute_pool        => $compute_pool,
      libvirt_images_type => $libvirt_images_type,
    }

    Class['ceph'] ->
    Ceph::Pool[$compute_pool] ->
    Class['::ceph::nova_compute'] ~>
    Service[$::ceph::params::service_nova_compute]

    Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
      cwd => '/root',
    }

    Class['osnailyfacter::ceph_nova_compute']
  }
}
@@ -1,67 +1,108 @@
class osnailyfacter::ceph::ceph_osd {

  notice('MODULAR: ceph/ceph_osd.pp')
  # TODO(bogdando) add monit ceph-osd services monitoring, if required
  notice('MODULAR: ceph-osd.pp')

  # Pulling hiera
  $storage_hash = hiera_hash('storage', {})
  $verbose = pick($storage_hash['verbose'], true)
  $storage_hash = hiera('storage', {})
  $admin_key = pick($storage_hash['admin_key'], 'AQCTg71RsNIHORAAW+O6FCMZWBjmVfMIPk3MhQ==')
  $bootstrap_osd_key = pick($storage_hash['bootstrap_osd_key'], 'AQABsWZSgEDmJhAAkAGSOOAJwrMHrM5Pz5On1A==')
  $fsid = pick($storage_hash['fsid'], '066F558C-6789-4A93-AAF1-5AF1BA01A3AD')
  $osd_pool_default_size = $storage_hash['osd_pool_size']
  $osd_pool_default_pg_num = $storage_hash['pg_num']
  $osd_pool_default_pgp_num = $storage_hash['pg_num']
  $osd_pool_default_min_size = pick($storage_hash['osd_pool_default_min_size'], '1')
  $osd_journal_size = pick($storage_hash['osd_journal_size'], '2048')
  $debug = pick($storage_hash['debug'], hiera('debug', true))
  $network_scheme = hiera_hash('network_scheme', {})
  $use_syslog = hiera('use_syslog', true)
  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons = keys($ceph_primary_monitor_node)
  $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']
  prepare_network_config($network_scheme)
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')
  $ceph_tuning_settings_hash = hiera_hash('ceph_tuning_settings', {})
  $ceph_tuning_settings = hiera('ceph_tuning_settings', {})
  $ssl_hash = hiera_hash('use_ssl', {})

  $filestore_xattr_use_omap = pick($storage_hash['filestore_xattr_use_omap'], true)
  $osd_recovery_max_active = pick($storage_hash['osd_recovery_max_active'], '1')
  $osd_max_backfills = pick($storage_hash['osd_max_backfills'], '1')
  $rbd_cache_writethrough_until_flush = pick($storage_hash['rbd_cache_writethrough_until_flush'], true)
  $rbd_cache = pick($storage_hash['rbd_cache'], true)
  $log_to_syslog = hiera('use_syslog', true)
  $log_to_syslog_level = pick($storage_hash['ceph_syslog_level'], 'info')
  $log_to_syslog_facility = pick($storage_hash['ceph_syslog_facility'], 'LOG_LOCAL0')

  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')

  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $mon_host = join(values($mon_address_map), ',')
  $mon_initial_members = join(keys($mon_address_map), ',')

  class { '::ceph':
    primary_mon              => $primary_mon,
    mon_hosts                => keys($mon_address_map),
    mon_ip_addresses         => values($mon_address_map),
    osd_pool_default_size    => $storage_hash['osd_pool_size'],
    osd_pool_default_pg_num  => $storage_hash['pg_num'],
    osd_pool_default_pgp_num => $storage_hash['pg_num'],
    use_rgw                  => false,
    glance_backend           => $glance_backend,
    cluster_network          => $ceph_cluster_network,
    public_network           => $ceph_public_network,
    use_syslog               => $use_syslog,
    syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
    syslog_log_facility      => hiera('syslog_log_facility_ceph','LOG_LOCAL0'),
    ephemeral_ceph           => $storage_hash['ephemeral_ceph'],
    fsid                      => $fsid,
    mon_initial_members       => $mon_initial_members,
    mon_host                  => $mon_host,
    cluster_network           => $ceph_cluster_network,
    public_network            => $ceph_public_network,
    osd_pool_default_size     => $osd_pool_default_size,
    osd_pool_default_pg_num   => $osd_pool_default_pg_num,
    osd_pool_default_pgp_num  => $osd_pool_default_pgp_num,
    osd_pool_default_min_size => $osd_pool_default_min_size,
    osd_journal_size          => $osd_journal_size,
  }

  if $ceph_tuning_settings_hash != {} {
    ceph_conf {
  ceph_config {
    'global/filestore_xattr_use_omap'           : value => $filestore_xattr_use_omap;
    'global/osd_recovery_max_active'            : value => $osd_recovery_max_active;
    'global/osd_max_backfills'                  : value => $osd_max_backfills;
    'client/rbd_cache_writethrough_until_flush' : value => $rbd_cache_writethrough_until_flush;
    'client/rbd_cache'                          : value => $rbd_cache;
    'global/log_to_syslog'                      : value => $log_to_syslog;
    'global/log_to_syslog_level'                : value => $log_to_syslog_level;
    'global/log_to_syslog_facility'             : value => $log_to_syslog_facility;
  }

  Ceph::Key {
    inject => false,
  }

  ceph::key { 'client.admin':
    secret  => $admin_key,
    cap_mon => 'allow *',
    cap_osd => 'allow *',
    cap_mds => 'allow',
  }

  ceph::key {'client.bootstrap-osd':
    keyring_path => '/var/lib/ceph/bootstrap-osd/ceph.keyring',
    secret       => $bootstrap_osd_key,
  }

  $osd_devices_hash = osd_devices_hash($::osd_devices_list)

  class { '::ceph::osds':
    args => $osd_devices_hash,
  } ->

  service {'ceph-osd-all-starter':
    ensure   => running,
    provider => upstart,
  }

  if $ceph_tuning_settings != {} {
    ceph_config {
      'global/debug_default'                    : value => $debug;
      'global/max_open_files'                   : value => $ceph_tuning_settings_hash['max_open_files'];
      'osd/osd_mkfs_type'                       : value => $ceph_tuning_settings_hash['osd_mkfs_type'];
      'osd/osd_mount_options_xfs'               : value => $ceph_tuning_settings_hash['osd_mount_options_xfs'];
      'osd/osd_op_threads'                      : value => $ceph_tuning_settings_hash['osd_op_threads'];
      'osd/filestore_queue_max_ops'             : value => $ceph_tuning_settings_hash['filestore_queue_max_ops'];
      'osd/filestore_queue_committing_max_ops'  : value => $ceph_tuning_settings_hash['filestore_queue_committing_max_ops'];
      'osd/journal_max_write_entries'           : value => $ceph_tuning_settings_hash['journal_max_write_entries'];
      'osd/journal_queue_max_ops'               : value => $ceph_tuning_settings_hash['journal_queue_max_ops'];
      'osd/objecter_inflight_ops'               : value => $ceph_tuning_settings_hash['objecter_inflight_ops'];
      'osd/filestore_queue_max_bytes'           : value => $ceph_tuning_settings_hash['filestore_queue_max_bytes'];
      'osd/filestore_queue_committing_max_bytes': value => $ceph_tuning_settings_hash['filestore_queue_committing_max_bytes'];
      'osd/journal_max_write_bytes'             : value => $ceph_tuning_settings_hash['journal_queue_max_bytes'];
      'osd/journal_queue_max_bytes'             : value => $ceph_tuning_settings_hash['journal_queue_max_bytes'];
      'osd/ms_dispatch_throttle_bytes'          : value => $ceph_tuning_settings_hash['ms_dispatch_throttle_bytes'];
      'osd/objecter_infilght_op_bytes'          : value => $ceph_tuning_settings_hash['objecter_infilght_op_bytes'];
      'osd/filestore_max_sync_interval'         : value => $ceph_tuning_settings_hash['filestore_max_sync_interval'];
      'global/max_open_files'                   : value => $ceph_tuning_settings['max_open_files'];
      'osd/osd_mkfs_type'                       : value => $ceph_tuning_settings['osd_mkfs_type'];
      'osd/osd_mount_options_xfs'               : value => $ceph_tuning_settings['osd_mount_options_xfs'];
      'osd/osd_op_threads'                      : value => $ceph_tuning_settings['osd_op_threads'];
      'osd/filestore_queue_max_ops'             : value => $ceph_tuning_settings['filestore_queue_max_ops'];
      'osd/filestore_queue_committing_max_ops'  : value => $ceph_tuning_settings['filestore_queue_committing_max_ops'];
      'osd/journal_max_write_entries'           : value => $ceph_tuning_settings['journal_max_write_entries'];
      'osd/journal_queue_max_ops'               : value => $ceph_tuning_settings['journal_queue_max_ops'];
      'osd/objecter_inflight_ops'               : value => $ceph_tuning_settings['objecter_inflight_ops'];
      'osd/filestore_queue_max_bytes'           : value => $ceph_tuning_settings['filestore_queue_max_bytes'];
      'osd/filestore_queue_committing_max_bytes': value => $ceph_tuning_settings['filestore_queue_committing_max_bytes'];
      'osd/journal_max_write_bytes'             : value => $ceph_tuning_settings['journal_queue_max_bytes'];
      'osd/journal_queue_max_bytes'             : value => $ceph_tuning_settings['journal_queue_max_bytes'];
      'osd/ms_dispatch_throttle_bytes'          : value => $ceph_tuning_settings['ms_dispatch_throttle_bytes'];
      'osd/objecter_infilght_op_bytes'          : value => $ceph_tuning_settings['objecter_infilght_op_bytes'];
      'osd/filestore_max_sync_interval'         : value => $ceph_tuning_settings['filestore_max_sync_interval'];
    }
    # File /root/ceph.conf is a symlink created after /etc/ceph/ceph.conf in the ceph::conf class
    File<| title == '/root/ceph.conf' |> -> Ceph_conf <||>
  }

  # TODO(bogdando) add monit ceph-osd services monitoring, if required

  #################################################################

  # vim: set ts=2 sw=2 et :

}
@@ -1,52 +1,65 @@
class osnailyfacter::ceph::ceph_pools {

  notice('MODULAR: ceph/ceph_pools.pp')
  notice('MODULAR: ceph/ceph_pools')

  $storage_hash = hiera_hash('storage', {})
  $osd_pool_default_pg_num = $storage_hash['pg_num']
  $osd_pool_default_pgp_num = $storage_hash['pg_num']
  # Cinder settings
  $cinder_user = 'volumes'
  $cinder_pool = 'volumes'
  # Cinder Backup settings
  $cinder_backup_user = 'backups'
  $cinder_backup_pool = 'backups'
  # Glance settings
  $glance_user = 'images'
  $glance_pool = 'images'
  $storage_hash = hiera('storage', {})
  $fsid = pick($storage_hash['fsid'], '066F558C-6789-4A93-AAF1-5AF1BA01A3AD')
  $mon_key = pick($storage_hash['mon_key'], 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw==')
  $cinder_user = 'volumes'
  $cinder_pool = 'volumes'
  $cinder_backup_user = 'backups'
  $cinder_backup_pool = 'backups'
  $glance_user = 'images'
  $glance_pool = 'images'

  Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
    cwd => '/root',
  class {'ceph':
    fsid => $fsid
  }

  $per_pool_pg_nums = $storage_hash['per_pool_pg_nums']

  # DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
  ceph::pool {$glance_pool:
    user          => $glance_user,
    acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}'",
    keyring_owner => 'glance',
    pg_num        => pick($per_pool_pg_nums[$glance_pool], '256'),
    pgp_num       => pick($per_pool_pg_nums[$glance_pool], '256'),
  # DO NOT SPLIT ceph auth command lines! See http://tracker.ceph.com/issues/3279
  ceph::pool { $glance_pool:
    pg_num  => pick($per_pool_pg_nums[$glance_pool], '256'),
    pgp_num => pick($per_pool_pg_nums[$glance_pool], '256'),
  }

  ceph::pool {$cinder_pool:
    user          => $cinder_user,
    acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}'",
    keyring_owner => 'cinder',
    pg_num        => pick($per_pool_pg_nums[$cinder_pool], '2048'),
    pgp_num       => pick($per_pool_pg_nums[$cinder_pool], '2048'),
  ceph::key { "client.${glance_user}":
    secret  => $mon_key,
    user    => 'glance',
    group   => 'glance',
    cap_mon => 'allow r',
    cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${glance_pool}",
    inject  => true,
  }

  ceph::pool {$cinder_backup_pool:
    user          => $cinder_backup_user,
    acl           => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rwx pool=${cinder_pool}'",
    keyring_owner => 'cinder',
    pg_num        => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
    pgp_num       => pick($per_pool_pg_nums[$cinder_backup_pool], '512'),
  ceph::pool { $cinder_pool:
    pg_num  => pick($per_pool_pg_nums[$cinder_pool], '256'),
    pgp_num => pick($per_pool_pg_nums[$cinder_pool], '256'),
  }

  Ceph::Pool[$glance_pool] -> Ceph::Pool[$cinder_pool] -> Ceph::Pool[$cinder_backup_pool]
  ceph::key { "client.${cinder_user}":
    secret  => $mon_key,
    user    => 'cinder',
    group   => 'cinder',
    cap_mon => 'allow r',
    cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${cinder_pool}, allow rx pool=${glance_pool}",
    inject  => true,
  }

  ceph::pool { $cinder_backup_pool:
    pg_num  => pick($per_pool_pg_nums[$cinder_backup_pool], '256'),
    pgp_num => pick($per_pool_pg_nums[$cinder_backup_pool], '256'),
  }

  ceph::key { "client.${cinder_backup_user}":
    secret  => $mon_key,
    user    => 'cinder',
    group   => 'cinder',
    cap_mon => 'allow r',
    cap_osd => "allow class-read object_prefix rbd_children, allow rwx pool=${cinder_backup_pool}, allow rwx pool=${cinder_pool}",
    inject  => true,
  }

  if ($storage_hash['volumes_ceph']) {
    include ::cinder::params
@@ -1,64 +0,0 @@
class osnailyfacter::ceph::enable_rados {

  notice('MODULAR: ceph/enable_rados.pp')

  $management_vip = hiera('management_vip', '')
  $service_endpoint = hiera('service_endpoint', '')
  $ssl_hash = hiera_hash('use_ssl', {})

  include ::ceph::params

  $radosgw_service = $::ceph::params::service_radosgw
  $radosgw_override_file = '/etc/init/radosgw-all.override'

  if ($::operatingsystem == 'Ubuntu') {
    # ensure the service is stopped and will not start on boot
    service { 'radosgw':
      enable   => false,
      provider => 'debian',
    }

    service { 'radosgw-all':
      ensure   => running,
      enable   => true,
      provider => 'upstart',
    }

    file {$radosgw_override_file:
      ensure  => present,
      mode    => '0644',
      owner   => 'root',
      group   => 'root',
      content => "start on runlevel [2345]\nstop on starting rc RUNLEVEL=[016]\n",
    }

    Service['radosgw'] ->
    File[$radosgw_override_file] ~>
    Service['radosgw-all'] ->
    ::Osnailyfacter::Wait_for_backend['object-storage']
  }
  else {
    service { $radosgw_service:
      ensure => running,
      enable => true,
    }

    Service[$radosgw_service] -> ::Osnailyfacter::Wait_for_backend['object-storage']
  }

  $rgw_protocol = get_ssl_property($ssl_hash, {}, 'radosgw', 'internal', 'protocol', 'http')
  $rgw_address = get_ssl_property($ssl_hash, {}, 'radosgw', 'internal', 'hostname', [$service_endpoint, $management_vip])
  $rgw_url = "${rgw_protocol}://${rgw_address}:8080"

  $lb_hash = {
    'object-storage' => {
      name     => 'object-storage',
      provider => 'http',
      url      => $rgw_url
    }
  }

  ::osnailyfacter::wait_for_backend {'object-storage':
    lb_hash => $lb_hash
  }
}
@@ -2,20 +2,45 @@ class osnailyfacter::ceph::mon {

  notice('MODULAR: ceph/mon.pp')

  $storage_hash = hiera_hash('storage', {})
  $use_neutron = hiera('use_neutron')
  $public_vip = hiera('public_vip')
  $use_syslog = hiera('use_syslog', true)
  $syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
  $keystone_hash = hiera_hash('keystone', {})
  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $storage_hash = hiera('storage', {})
  $admin_key = pick($storage_hash['admin_key'], 'AQCTg71RsNIHORAAW+O6FCMZWBjmVfMIPk3MhQ==')
  $mon_key = pick($storage_hash['mon_key'], 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw==')
  $bootstrap_osd_key = pick($storage_hash['bootstrap_osd_key'], 'AQABsWZSgEDmJhAAkAGSOOAJwrMHrM5Pz5On1A==')
  $fsid = pick($storage_hash['fsid'], '066F558C-6789-4A93-AAF1-5AF1BA01A3AD')
  $osd_pool_default_size = $storage_hash['osd_pool_size']
  $osd_pool_default_pg_num = $storage_hash['pg_num']
  $osd_pool_default_pgp_num = $storage_hash['pg_num']
  $osd_pool_default_min_size = pick($storage_hash['osd_pool_default_min_size'], '1')
  $osd_journal_size = pick($storage_hash['osd_journal_size'], '2048')

  if ($storage_hash['images_ceph']) {
    $glance_backend = 'ceph'
  } elsif ($storage_hash['images_vcenter']) {
    $glance_backend = 'vmware'
  $filestore_xattr_use_omap = pick($storage_hash['filestore_xattr_use_omap'], true)
  $osd_recovery_max_active = pick($storage_hash['osd_recovery_max_active'], '1')
  $osd_max_backfills = pick($storage_hash['osd_max_backfills'], '1')
  $rbd_cache_writethrough_until_flush = pick($storage_hash['rbd_cache_writethrough_until_flush'], true)
  $rbd_cache = pick($storage_hash['rbd_cache'], true)
  $log_to_syslog = hiera('use_syslog', true)
  $log_to_syslog_level = pick($storage_hash['ceph_syslog_level'], 'info')
  $log_to_syslog_facility = pick($storage_hash['ceph_syslog_facility'], 'LOG_LOCAL0')

  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $primary_mon = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_primary_monitor_node'), 'ceph/public')

  $mon_ips = join(values($mon_address_map), ',')
  $mon_hosts = join(keys($mon_address_map), ',')

  $primary_mon_hostname = join(keys($primary_mon))
  $primary_mon_ip = join(values($primary_mon))

  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')

  if $primary_mon_hostname == $::hostname {
    $mon_initial_members = $primary_mon_hostname
    $mon_host = $primary_mon_ip
  } else {
    $glance_backend = 'swift'
    $mon_initial_members = $mon_hosts
    $mon_host = $mon_ips
  }

  if ($storage_hash['volumes_ceph'] or

@@ -29,33 +54,50 @@ class osnailyfacter::ceph::mon {
  }

  if $use_ceph {
    $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
    $primary_mons = keys($ceph_primary_monitor_node)
    $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']

    prepare_network_config(hiera_hash('network_scheme', {}))
    $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
    $ceph_public_network = get_network_role_property('ceph/public', 'network')
    $mon_addr = get_network_role_property('ceph/public', 'ipaddr')

    class { '::ceph':
      primary_mon              => $primary_mon,
      mon_hosts                => keys($mon_address_map),
      mon_ip_addresses         => values($mon_address_map),
      mon_addr                 => $mon_addr,
      cluster_node_address     => $public_vip,
      osd_pool_default_size    => $storage_hash['osd_pool_size'],
      osd_pool_default_pg_num  => $storage_hash['pg_num'],
      osd_pool_default_pgp_num => $storage_hash['pg_num'],
      use_rgw                  => false,
      glance_backend           => $glance_backend,
      cluster_network          => $ceph_cluster_network,
      public_network           => $ceph_public_network,
      use_syslog               => $use_syslog,
      syslog_log_level         => hiera('syslog_log_level_ceph', 'info'),
      syslog_log_facility      => $syslog_log_facility_ceph,
      rgw_keystone_admin_token => $keystone_hash['admin_token'],
      ephemeral_ceph           => $storage_hash['ephemeral_ceph']
      fsid                      => $fsid,
      mon_initial_members       => $mon_initial_members,
      mon_host                  => $mon_host,
      cluster_network           => $ceph_cluster_network,
      public_network            => $ceph_public_network,
      osd_pool_default_size     => $osd_pool_default_size,
      osd_pool_default_pg_num   => $osd_pool_default_pg_num,
      osd_pool_default_pgp_num  => $osd_pool_default_pgp_num,
      osd_pool_default_min_size => $osd_pool_default_min_size,
      osd_journal_size          => $osd_journal_size,
    }

    ceph_config {
      'global/filestore_xattr_use_omap'           : value => $filestore_xattr_use_omap;
      'global/osd_recovery_max_active'            : value => $osd_recovery_max_active;
      'global/osd_max_backfills'                  : value => $osd_max_backfills;
      'client/rbd_cache_writethrough_until_flush' : value => $rbd_cache_writethrough_until_flush;
      'client/rbd_cache'                          : value => $rbd_cache;
      'global/log_to_syslog'                      : value => $log_to_syslog;
      'global/log_to_syslog_level'                : value => $log_to_syslog_level;
      'global/log_to_syslog_facility'             : value => $log_to_syslog_facility;
    }

    Ceph::Key {
      inject         => true,
      inject_as_id   => 'mon.',
      inject_keyring => "/var/lib/ceph/mon/ceph-${::hostname}/keyring",
    }

    ceph::key { 'client.admin':
      secret  => $admin_key,
      cap_mon => 'allow *',
      cap_osd => 'allow *',
      cap_mds => 'allow',
    }

    ceph::key { 'client.bootstrap-osd':
      secret  => $bootstrap_osd_key,
      cap_mon => 'allow profile bootstrap-osd',
    }

    ceph::mon { $::hostname:
      key => $mon_key,
    }

    if ($storage_hash['volumes_ceph']) {

@@ -74,8 +116,8 @@ class osnailyfacter::ceph::mon {
      hasrestart => true,
    }

    Class['::ceph'] ~> Service['cinder-volume']
    Class['::ceph'] ~> Service['cinder-backup']
    Class['ceph'] ~> Service['cinder-volume']
    Class['ceph'] ~> Service['cinder-backup']
  }

  if ($storage_hash['images_ceph']) {

@@ -87,9 +129,7 @@ class osnailyfacter::ceph::mon {
      hasrestart => true,
    }

    Class['::ceph'] ~> Service['glance-api']
    Class['ceph'] ~> Service['glance-api']
    }

  }

}
@@ -0,0 +1,44 @@
class osnailyfacter::ceph::primary_mon_update {

  notice('MODULAR: ceph/primary_mon_update.pp')

  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $mon_ips = join(values($mon_address_map), ',')
  $mon_hosts = join(keys($mon_address_map), ',')

  $storage_hash = hiera('storage', {})

  if ($storage_hash['volumes_ceph'] or
    $storage_hash['images_ceph'] or
    $storage_hash['objects_ceph'] or
    $storage_hash['ephemeral_ceph']
  ) {
    $use_ceph = true
  } else {
    $use_ceph = false
  }

  if $use_ceph {
    exec {'Wait for Ceph quorum':
      path        => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
      command     => "ceph mon stat | grep -q 'quorum.*${node_hostname}'",
      tries       => 12, # This is necessary to prevent a race: the mon must establish
      # a quorum before it can generate keys; this has been observed to take up to
      # 15 seconds. Keys must exist before other commands run.
      try_sleep   => 5,
      refreshonly => true,
    }

    ceph_config {
      'global/mon_host': value => $mon_ips;
      'global/mon_initial_members': value => $mon_hosts;
    }

    exec {'reload Ceph for HA':
      path    => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
      command => 'service ceph reload',
    }

    Exec['Wait for Ceph quorum'] -> Ceph_config<||> ~> Exec['reload Ceph for HA']
  }
}
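For clarity, a hedged sketch of how the joined monitor strings above are built; the node names and addresses here are hypothetical, since the real map comes from `get_node_to_ipaddr_map_by_network_role()` and hiera:

```puppet
# Hypothetical monitor map and the strings derived from it.
$mon_address_map = { 'node-1' => '192.168.0.3', 'node-2' => '192.168.0.4' }
$mon_ips   = join(values($mon_address_map), ',')  # '192.168.0.3,192.168.0.4'
$mon_hosts = join(keys($mon_address_map), ',')    # 'node-1,node-2'
# The ceph_config resources then write these to global/mon_host and
# global/mon_initial_members in ceph.conf before the reload exec fires.
```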
@@ -2,78 +2,104 @@ class osnailyfacter::ceph::radosgw {

  notice('MODULAR: ceph/radosgw.pp')

  $storage_hash = hiera_hash('storage', {})
  $keystone_hash = hiera_hash('keystone', {})
  $management_vip = hiera('management_vip')
  $service_endpoint = hiera('service_endpoint')
  $radosgw_large_pool_name = '.rgw'
  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $external_lb = hiera('external_lb', false)
  $ssl_hash = hiera_hash('use_ssl', {})
  $gateway_name = 'radosgw.gateway'
  $storage_hash = hiera('storage', {})
  $radosgw_key = pick($storage_hash['radosgw_key'], 'AQCTg71RsNIHORAAW+O6FCMZWBjmVfMIPk3MhQ==')
  $fsid = pick($storage_hash['fsid'], '066F558C-6789-4A93-AAF1-5AF1BA01A3AD')
  $rgw_log_file = '/var/log/ceph/radosgw.log'
  $use_syslog = hiera('use_syslog', true)
  $rgw_large_pool_name = '.rgw'
  $rgw_large_pool_pg_nums = pick($storage_hash['per_pool_pg_nums'][$rgw_large_pool_name], '512')
  $keystone_hash = hiera('keystone', {})
  $rgw_keystone_accepted_roles = pick($storage_hash['radosgw_keystone_accepted_roles'], '_member_, Member, admin, swiftoperator')
  $rgw_keystone_revocation_interval = '1000000'
  $rgw_keystone_token_cache_size = '10'
  $service_endpoint = hiera('service_endpoint')
  $management_vip = hiera('management_vip')

  $ssl_hash = hiera_hash('use_ssl', {})
  $admin_identity_protocol = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'protocol', 'http')
  $admin_identity_address = get_ssl_property($ssl_hash, {}, 'keystone', 'admin', 'hostname', [$service_endpoint, $management_vip])
  $admin_identity_url = "${admin_identity_protocol}://${admin_identity_address}:35357"

  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')

  $mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')
  $mon_ips = join(values($mon_address_map), ',')
  $mon_hosts = join(keys($mon_address_map), ',')

  if $storage_hash['objects_ceph'] {
    $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
    $primary_mons = keys($ceph_primary_monitor_node)
    $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']

    prepare_network_config(hiera_hash('network_scheme', {}))
    $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
    $ceph_public_network = get_network_role_property('ceph/public', 'network')
    $rgw_ip_address = get_network_role_property('ceph/radosgw', 'ipaddr')

    # Listen directives with host required for ip_based vhosts
    class { '::osnailyfacter::apache':
      listen_ports => hiera_array('apache_ports', ['0.0.0.0:80', '0.0.0.0:8888']),
    ceph::key { "client.${gateway_name}":
      keyring_path => "/etc/ceph/client.${gateway_name}",
      secret       => $radosgw_key,
      cap_mon      => 'allow rw',
      cap_osd      => 'allow rwx',
      inject       => true,
    }

    if ($::osfamily == 'Debian'){
      apache::mod {'rewrite': }
      apache::mod {'proxy': }
      apache::mod {'proxy_fcgi': }
    class { 'ceph':
      fsid => $fsid,
    }

    include ::tweaks::apache_wrappers

    include ::ceph::params

    class { '::ceph::radosgw':
      # RadosGW settings
      rgw_host            => $::hostname,
      rgw_ip              => $rgw_ip_address,
      rgw_port            => '6780',
      swift_endpoint_port => '8080',
      rgw_keyring_path    => '/etc/ceph/keyring.radosgw.gateway',
      rgw_socket_path     => '/tmp/radosgw.sock',
      rgw_frontends       => 'fastcgi socket_port=9000 socket_host=127.0.0.1',
      rgw_log_file        => '/var/log/ceph/radosgw.log',
      rgw_data            => '/var/lib/ceph/radosgw',
      rgw_dns_name        => "*.${::domain}",
      rgw_print_continue  => true,
    #######################################
    # Ugly hack to support our ceph package
    #######################################

      #rgw Keystone settings
      rgw_use_pki      => false,
      rgw_use_keystone => true,
    file { '/etc/init/radosgw.conf':
      ensure  => present,
      content => template('osnailyfacter/radosgw-init.erb'),
      before  => Ceph::Rgw[$gateway_name],
    }
    #######################################

    ceph::rgw { $gateway_name:
      frontend_type      => 'apache-proxy-fcgi',
      rgw_print_continue => true,
      keyring_path       => "/etc/ceph/client.${gateway_name}",
      rgw_data           => "/var/lib/ceph/radosgw-${gateway_name}",
      rgw_dns_name       => "*.${::domain}",
      log_file           => undef,
    }

    ceph::rgw::keystone { $gateway_name:
      rgw_keystone_url                 => $admin_identity_url,
      rgw_keystone_admin_token         => $keystone_hash['admin_token'],
      rgw_keystone_token_cache_size    => '10',
      rgw_keystone_accepted_roles      => '_member_, Member, admin, swiftoperator',
      rgw_keystone_revocation_interval => '1000000',
      rgw_s3_auth_use_keystone         => false,
      rgw_nss_db_path                  => '/etc/ceph/nss',
      rgw_large_pool_name              => $radosgw_large_pool_name,
      rgw_large_pool_pg_nums           => pick($storage_hash['per_pool_pg_nums'][$radosgw_large_pool_name], '512'),

      #rgw Log settings
      use_syslog                       => hiera('use_syslog', true),
      syslog_facility                  => hiera('syslog_log_facility_ceph', 'LOG_LOCAL0'),
      syslog_level                     => hiera('syslog_log_level_ceph', 'info'),
      rgw_keystone_token_cache_size    => $rgw_keystone_token_cache_size,
      rgw_keystone_accepted_roles      => $rgw_keystone_accepted_roles,
      rgw_keystone_revocation_interval => $rgw_keystone_revocation_interval,
    }

    Exec { path => [ '/bin/', '/sbin/' , '/usr/bin/', '/usr/sbin/' ],
      cwd => '/root',
    file { "/var/lib/ceph/radosgw/ceph-${gateway_name}":
      ensure => directory,
    }

    ceph::rgw::apache_proxy_fcgi { 'radosgw.gateway':
      docroot              => '/var/www/radosgw',
      rgw_port             => '6780',
      apache_purge_configs => false,
      apache_purge_vhost   => false,
      custom_apache_ports  => hiera_array('apache_ports', ['0.0.0.0:80']),
    }

    if ! $use_syslog {
      ceph_config {
        'client.radosgw.gateway/log_file': value => $rgw_log_file;
        'client.radosgw.gateway/log_to_syslog': value => $use_syslog;
      }
    }

    exec { "Create ${rgw_large_pool_name} pool":
      command => "ceph -n client.radosgw.gateway osd pool create ${rgw_large_pool_name} ${rgw_large_pool_pg_nums} ${rgw_large_pool_pg_nums}",
      path    => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/bin',
      unless  => "rados lspools | grep '^${rgw_large_pool_name}$'",
    }

    Ceph::Key["client.${gateway_name}"] -> Exec["Create ${rgw_large_pool_name} pool"]
  }

}
@@ -0,0 +1,47 @@
# configure the nova_compute parts if present
class osnailyfacter::ceph_nova_compute (
  $rbd_secret_uuid     = 'a5d0dd94-57c4-ae55-ffe0-7e3732a24455',
  $user                = 'compute',
  $compute_pool        = 'compute',
  $secret_xml          = '/root/.secret_attrs.xml',
  $libvirt_images_type = 'rbd',
) {

  include ::nova::params

  service { $::nova::params::compute_service_name: }

  nova_config {
    'libvirt/images_type':      value => $libvirt_images_type;
    'libvirt/inject_key':       value => false;
    'libvirt/inject_partition': value => '-2';
    'libvirt/images_rbd_pool':  value => $compute_pool;
    'libvirt/rbd_secret_uuid':  value => $rbd_secret_uuid;
    'libvirt/rbd_user':         value => $user;
  }

  file { $secret_xml:
    content => template('osnailyfacter/ceph_secret.erb')
  }

  ensure_resource('service', 'libvirt', {
    ensure => 'running',
    name   => $::nova::params::libvirt_service_name,
  })

  exec {'Set Ceph RBD secret for Nova':
    # TODO: clean this command up
    command => "virsh secret-define --file ${secret_xml} && \
                virsh secret-set-value --secret ${rbd_secret_uuid} \
                --base64 $(ceph auth get-key client.${user})",
    path    => '/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin',
    unless  => "virsh secret-list | fgrep -qw ${rbd_secret_uuid}",
  }

  Nova_config<||> ~>
  Service[$::nova::params::compute_service_name]

  File[$secret_xml] ->
  Service['libvirt'] ->
  Exec['Set Ceph RBD secret for Nova']
}
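A minimal declaration sketch for the class above, matching how ceph_compute.pp wires it earlier in this commit; the values shown are simply the class defaults, so in a real deployment they would come from the compute manifest:

```puppet
class { '::osnailyfacter::ceph_nova_compute':
  user                => 'compute',
  compute_pool        => 'compute',
  libvirt_images_type => 'rbd',
}
```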
@@ -6,6 +6,7 @@ class osnailyfacter::firewall::firewall {
  $network_metadata = hiera_hash('network_metadata')
  $ironic_hash = hiera_hash('ironic', {})
  $roles = hiera('roles')
  $storage_hash = hiera('storage', {})

  $aodh_port = 8042
  $ceilometer_port = 8777

@@ -58,6 +59,9 @@ class osnailyfacter::firewall::firewall {
  $swift_proxy_check_port = 49001
  $swift_proxy_port = 8080
  $vxlan_udp_port = 4789
  $ceph_mon_port = 6789
  $ceph_osd_port = '6800-7100'
  $radosgw_port = 6780

  $corosync_networks = get_routable_networks_for_network_role($network_scheme, 'mgmt/corosync')
  $memcache_networks = get_routable_networks_for_network_role($network_scheme, 'mgmt/memcache')

@@ -458,4 +462,39 @@ class osnailyfacter::firewall::firewall {
    }
  }

  if ($storage_hash['volumes_ceph'] or
    $storage_hash['images_ceph'] or
    $storage_hash['objects_ceph'] or
    $storage_hash['ephemeral_ceph']
  ) {
    if member($roles, 'primary-controller') or member($roles, 'controller') {
      firewall {'010 ceph-mon allow':
        chain  => 'INPUT',
        dport  => $ceph_mon_port,
        proto  => 'tcp',
        action => accept,
      }
    }

    if member($roles, 'ceph-osd') {
      firewall { '011 ceph-osd allow':
        chain  => 'INPUT',
        dport  => $ceph_osd_port,
        proto  => 'tcp',
        action => accept,
      }
    }

    if $storage_hash['objects_ceph'] {
      if member($roles, 'primary-controller') or member($roles, 'controller') {
        firewall {'012 RadosGW allow':
          chain  => 'INPUT',
          dport  => [ $radosgw_port, $swift_proxy_port ],
          proto  => 'tcp',
          action => accept,
        }
      }
    }
  }

}
@@ -304,7 +304,7 @@
  role: [primary-controller]
  condition:
    yaql_exp: &ironic_enabled '$.ironic.enabled and changed($.ironic.enabled)'
  requires: [enable_quorum, enable_rados]
  requires: [enable_quorum, ceph-radosgw]
  required_for: [post_deployment_end]
  parameters:
    cmd: ruby /etc/puppet/modules/osnailyfacter/modular/astute/ironic_post_swift_key.rb

@@ -319,7 +319,7 @@
  condition:
    yaql_exp: *ironic_enabled
  required_for: [post_deployment_end]
  requires: [enable_quorum, enable_rados]
  requires: [enable_quorum, ceph-radosgw]
  parameters:
    cmd: ruby /etc/puppet/modules/openstack_tasks/examples/ironic/upload_images.rb {CLUSTER_ID}
    retries: 3
@@ -1 +0,0 @@
class { '::osnailyfacter::ceph::enable_rados' :}
@@ -0,0 +1 @@
include ::osnailyfacter::ceph::primary_mon_update
@@ -42,18 +42,17 @@
  requires: [hosts, firewall]
  cross-depends:
    - name: ceph-mon
    - name: /(primary-)?ceph-radosgw/
  parameters:
    puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/ceph-osd.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600

- id: primary-ceph-radosgw
- id: ceph-radosgw
  version: 2.1.0
  type: puppet
  groups: [primary-controller]
  required_for: [deploy_end, controller_remaining_tasks]
  requires: [apache, ceph-mon, primary-ceph-mon]
  role: [primary-controller, controller]
  required_for: [upload_cirros, post_deployment_end]
  requires: [post_deployment_start]
  condition:
    yaql_exp: &ceph_radosgw >
      changedAny($.storage, $.keystone, $.network_metadata.vips,

@@ -73,31 +72,12 @@
    test_post:
      cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_post.rb

- id: ceph-radosgw
  type: puppet
  version: 2.1.0
  groups: [controller]
  cross-depends:
    - name: primary-ceph-radosgw
  required_for: [deploy_end, controller_remaining_tasks]
  requires: [apache, ceph-mon, primary-ceph-mon]
  condition:
    yaql_exp: *ceph_radosgw
  parameters:
    puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600
    test_pre:
      cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_pre.rb
    test_post:
      cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/radosgw_post.rb

- id: radosgw-keystone
  type: puppet
  version: 2.1.0
  groups: [primary-controller]
  required_for: [primary-ceph-radosgw, ceph-radosgw]
  requires: [primary-keystone, keystone]
  role: [primary-controller]
  required_for: [ceph-radosgw]
  requires: [post_deployment_start]
  condition:
    yaql_exp: >
      changedAny($.storage, $.network_metadata.vips,

@@ -143,6 +123,19 @@
    puppet_modules: /etc/puppet/modules
    timeout: 3600

- id: primary-mon-update
  type: puppet
  version: 2.1.0
  groups: [primary-controller]
  required_for: [deploy_end, controller_remaining_tasks]
  requires: [primary-ceph-mon]
  cross-depends:
    - name: ceph-mon
  parameters:
    puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/primary_mon_update.pp
    puppet_modules: /etc/puppet/modules
    timeout: 3600

#
# POST_DEPLOYMENT Tasks
#

@@ -199,27 +192,11 @@
  condition:
    yaql_exp: *ceph_changed
  requires: [post_deployment_start]
  required_for: [enable_rados, upload_cirros]
  required_for: [ceph-radosgw, upload_cirros]
  parameters:
    cmd: ruby /etc/puppet/modules/osnailyfacter/modular/ceph/ceph_ready_check.rb
    timeout: 1800

- id: enable_rados
  type: puppet
  version: 2.1.0
  role: [primary-controller, controller]
  condition:
    yaql_exp: '$.storage.objects_ceph and changed($.storage.objects_ceph)'
  requires: [post_deployment_start]
  required_for: [upload_cirros, post_deployment_end]
  cross-depended-by:
    - name: upload_cirros
  parameters:
    puppet_manifest: /etc/puppet/modules/osnailyfacter/modular/ceph/enable_rados.pp
    puppet_modules: /etc/puppet/modules
    timeout: 180
    cwd: /

- id: updatedb
  type: puppet
  version: 2.1.0
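Net effect of the task-graph rework above, as I read the hunks (a summary, not part of the change itself): the separate `enable_rados` post-deployment task is dropped, the primary/non-primary radosgw tasks collapse into a single post-deployment `ceph-radosgw` task, and `primary-mon-update` is introduced to refresh mon membership after the primary mon deploys.

```ruby
# Reworked task edges, summarized from the hunks above:
edges = {
  'ceph-radosgw'       => { requires: %w[post_deployment_start],
                            required_for: %w[upload_cirros post_deployment_end] },
  'radosgw-keystone'   => { requires: %w[post_deployment_start],
                            required_for: %w[ceph-radosgw] },
  'primary-mon-update' => { requires: %w[primary-ceph-mon],
                            required_for: %w[deploy_end controller_remaining_tasks] },
}
```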
@@ -0,0 +1,28 @@
#TODO (omolchanov): Remove template once we switch to systemd
description "Ceph radosgw"

start on radosgw
stop on runlevel [!2345] or stopping radosgw-all

respawn
respawn limit 5 30

limit nofile 8096 65536

pre-start script
    set -e
    test -x /usr/bin/radosgw || { stop; exit 0; }
    test -d "/var/lib/ceph/radosgw/${cluster:-ceph}-$id" || { stop; exit 0; }

    install -d -m0755 /var/run/ceph
end script

instance ${cluster:-ceph}/$id
export cluster
export id

# this breaks oneiric
#usage "cluster = name of cluster (defaults to 'ceph'); id = mds instance id"

exec /usr/bin/radosgw --cluster="${cluster:-ceph}" --id "$id" -f
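This is an upstart instance job: one radosgw process per `${cluster}/$id` pair, started via the `radosgw-all` wrapper. A sketch of how the interpolation combines, using Ruby only to spell it out (`radosgw.gateway` is the rgw_id used elsewhere in this change; the rest are the template's defaults):

```ruby
cluster = 'ceph'               # ${cluster:-ceph} default
id      = 'radosgw.gateway'    # rgw_id used in the radosgw task/spec below
instance  = "#{cluster}/#{id}" # upstart instance name, e.g. "ceph/radosgw.gateway"
exec_line = %Q(/usr/bin/radosgw --cluster="#{cluster}" --id #{id} -f)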
@@ -6,49 +6,162 @@ manifest = 'ceph/ceph-osd.pp'

describe manifest do
  shared_examples 'catalog' do
    storage_hash = Noop.hiera_hash 'storage'
    ceph_monitor_nodes = Noop.hiera 'ceph_monitor_nodes'

    let(:facts) {
      Noop.ubuntu_facts.merge({
        :osd_devices_list => '/dev/sdb'
      })
    }

    storage_hash = Noop.hiera 'storage'

    let(:osd_devices_hash) do
      Noop.puppet_function 'osd_devices_hash', '/dev/sdb'
    end

    let(:admin_key) do
      Noop.hiera_structure 'storage/admin_key', 'AQCTg71RsNIHORAAW+O6FCMZWBjmVfMIPk3MhQ=='
    end

    let(:bootstrap_osd_key) do
      Noop.hiera_structure 'storage/bootstrap_osd_key', 'AQABsWZSgEDmJhAAkAGSOOAJwrMHrM5Pz5On1A=='
    end

    let(:fsid) do
      Noop.hiera_structure 'storage/fsid', '066F558C-6789-4A93-AAF1-5AF1BA01A3AD'
    end

    let(:network_scheme) do
      Noop.hiera_hash 'network_scheme'
    end

    let(:prepare_network_config) do
      Noop.puppet_function 'prepare_network_config', network_scheme
    end

    let(:ceph_cluster_network) do
      Noop.puppet_function 'get_network_role_property', 'ceph/replication', 'network'
    end

    let(:ceph_public_network) do
      Noop.puppet_function 'get_network_role_property', 'ceph/public', 'network'
    end

    let(:ceph_monitor_nodes) do
      Noop.hiera_hash('ceph_monitor_nodes')
    end

    let(:mon_address_map) do
      Noop.puppet_function 'get_node_to_ipaddr_map_by_network_role', ceph_monitor_nodes, 'ceph/public'
    end

    let(:mon_host) do
      mon_address_map.values.join(',')
    end

    let(:mon_initial_members) do
      mon_address_map.keys.join(',')
    end

    let(:osd_pool_default_size) do
      storage_hash['osd_pool_size']
    end

    let(:osd_pool_default_pg_num) do
      storage_hash['pg_num']
    end

    let(:osd_pool_default_pgp_num) do
      storage_hash['pg_num']
    end

    if storage_hash['debug']
      debug = storage_hash['debug']
    else
      debug = Noop.hiera 'debug', true
    end

    ceph_tuning_settings = Noop.hiera 'ceph_tuning_settings'

    if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
      it { should contain_class('ceph').with(
        'mon_hosts'                => ceph_monitor_nodes.keys,
        'osd_pool_default_size'    => storage_hash['osd_pool_size'],
        'osd_pool_default_pg_num'  => storage_hash['pg_num'],
        'osd_pool_default_pgp_num' => storage_hash['pg_num'],
        'ephemeral_ceph'           => storage_hash['ephemeral_ceph'],
        )
      }
      it 'should configure ceph' do
        should contain_class('ceph').with(
          'fsid'                      => fsid,
          'mon_initial_members'       => mon_initial_members,
          'mon_host'                  => mon_host,
          'cluster_network'           => ceph_cluster_network,
          'public_network'            => ceph_public_network,
          'osd_pool_default_size'     => osd_pool_default_size,
          'osd_pool_default_pg_num'   => osd_pool_default_pg_num,
          'osd_pool_default_pgp_num'  => osd_pool_default_pgp_num,
          'osd_pool_default_min_size' => '1',
          'osd_journal_size'          => '2048',
        )
      end

      if ceph_tuning_settings
        it 'should set Ceph tuning settings' do
          should contain_ceph_conf('global/debug_default').with(:value => debug)
          should contain_ceph_conf('global/max_open_files').with(:value => ceph_tuning_settings['max_open_files'])
          should contain_ceph_conf('osd/osd_mkfs_type').with(:value => ceph_tuning_settings['osd_mkfs_type'])
          should contain_ceph_conf('osd/osd_mount_options_xfs').with(:value => ceph_tuning_settings['osd_mount_options_xfs'])
          should contain_ceph_conf('osd/osd_op_threads').with(:value => ceph_tuning_settings['osd_op_threads'])
          should contain_ceph_conf('osd/filestore_queue_max_ops').with(:value => ceph_tuning_settings['filestore_queue_max_ops'])
          should contain_ceph_conf('osd/filestore_queue_committing_max_ops').with(:value => ceph_tuning_settings['filestore_queue_committing_max_ops'])
          should contain_ceph_conf('osd/journal_max_write_entries').with(:value => ceph_tuning_settings['journal_max_write_entries'])
          should contain_ceph_conf('osd/journal_queue_max_ops').with(:value => ceph_tuning_settings['journal_queue_max_ops'])
          should contain_ceph_conf('osd/objecter_inflight_ops').with(:value => ceph_tuning_settings['objecter_inflight_ops'])
          should contain_ceph_conf('osd/filestore_queue_max_bytes').with(:value => ceph_tuning_settings['filestore_queue_max_bytes'])
          should contain_ceph_conf('osd/filestore_queue_committing_max_bytes').with(:value => ceph_tuning_settings['filestore_queue_committing_max_bytes'])
          should contain_ceph_conf('osd/journal_max_write_bytes').with(:value => ceph_tuning_settings['journal_max_write_bytes'])
          should contain_ceph_conf('osd/journal_queue_max_bytes').with(:value => ceph_tuning_settings['journal_queue_max_bytes'])
          should contain_ceph_conf('osd/ms_dispatch_throttle_bytes').with(:value => ceph_tuning_settings['ms_dispatch_throttle_bytes'])
          should contain_ceph_conf('osd/objecter_infilght_op_bytes').with(:value => ceph_tuning_settings['objecter_infilght_op_bytes'])
          should contain_ceph_conf('osd/filestore_max_sync_interval').with(:value => ceph_tuning_settings['filestore_max_sync_interval'])
        end
      it 'should add parameters to ceph.conf' do
        should contain_ceph_config('global/filestore_xattr_use_omap').with(:value => true)
        should contain_ceph_config('global/osd_recovery_max_active').with(:value => '1')
        should contain_ceph_config('global/osd_max_backfills').with(:value => '1')
        should contain_ceph_config('client/rbd_cache_writethrough_until_flush').with(:value => true)
        should contain_ceph_config('client/rbd_cache').with(:value => true)
        should contain_ceph_config('global/log_to_syslog').with(:value => true)
        should contain_ceph_config('global/log_to_syslog_level').with(:value => 'info')
        should contain_ceph_config('global/log_to_syslog_facility').with(:value => 'LOG_LOCAL0')
      end

      it 'should add admin key' do
        should contain_ceph__key('client.admin').with(
          'secret'  => admin_key,
          'cap_mon' => 'allow *',
          'cap_osd' => 'allow *',
          'cap_mds' => 'allow',
          'inject'  => false,
        )
      end

      it 'should add osd bootstrap key' do
        should contain_ceph__key('client.bootstrap-osd').with(
          'keyring_path' => '/var/lib/ceph/bootstrap-osd/ceph.keyring',
          'secret'       => bootstrap_osd_key,
          'inject'       => false,
        )
      end

      it 'should configure osd disks' do
        should contain_class('ceph::osds').with(
          'args' => osd_devices_hash,
        )
      end

      it 'should start osd daemons' do
        should contain_service('ceph-osd-all-starter').with(
          'ensure'   => 'running',
          'provider' => 'upstart',
        ).that_requires('Class[ceph::osds]')
      end

      if ceph_tuning_settings != {}
        it 'should set Ceph tuning settings' do
          should contain_ceph_config('global/debug_default').with(:value => debug)
          should contain_ceph_config('global/max_open_files').with(:value => ceph_tuning_settings['max_open_files'])
          should contain_ceph_config('osd/osd_mkfs_type').with(:value => ceph_tuning_settings['osd_mkfs_type'])
          should contain_ceph_config('osd/osd_mount_options_xfs').with(:value => ceph_tuning_settings['osd_mount_options_xfs'])
          should contain_ceph_config('osd/osd_op_threads').with(:value => ceph_tuning_settings['osd_op_threads'])
          should contain_ceph_config('osd/filestore_queue_max_ops').with(:value => ceph_tuning_settings['filestore_queue_max_ops'])
          should contain_ceph_config('osd/filestore_queue_committing_max_ops').with(:value => ceph_tuning_settings['filestore_queue_committing_max_ops'])
          should contain_ceph_config('osd/journal_max_write_entries').with(:value => ceph_tuning_settings['journal_max_write_entries'])
          should contain_ceph_config('osd/journal_queue_max_ops').with(:value => ceph_tuning_settings['journal_queue_max_ops'])
          should contain_ceph_config('osd/objecter_inflight_ops').with(:value => ceph_tuning_settings['objecter_inflight_ops'])
          should contain_ceph_config('osd/filestore_queue_max_bytes').with(:value => ceph_tuning_settings['filestore_queue_max_bytes'])
          should contain_ceph_config('osd/filestore_queue_committing_max_bytes').with(:value => ceph_tuning_settings['filestore_queue_committing_max_bytes'])
          should contain_ceph_config('osd/journal_max_write_bytes').with(:value => ceph_tuning_settings['journal_max_write_bytes'])
          should contain_ceph_config('osd/journal_queue_max_bytes').with(:value => ceph_tuning_settings['journal_queue_max_bytes'])
          should contain_ceph_config('osd/ms_dispatch_throttle_bytes').with(:value => ceph_tuning_settings['ms_dispatch_throttle_bytes'])
          should contain_ceph_config('osd/objecter_infilght_op_bytes').with(:value => ceph_tuning_settings['objecter_infilght_op_bytes'])
          should contain_ceph_config('osd/filestore_max_sync_interval').with(:value => ceph_tuning_settings['filestore_max_sync_interval'])
        end
      end
    end

    test_ubuntu_and_centos manifest
end
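A note on the new `osd_devices_hash` function exercised above: per the change description it converts the `osd_devices_list` fact into the hash that upstream puppet-ceph's `ceph::osds` takes as its `args` parameter. A minimal Ruby sketch of that conversion, assuming a space-separated fact with an optional `device:journal` form — the separator conventions here are my assumptions, not the shipped code:

```ruby
# Hypothetical sketch of osd_devices_hash, NOT the shipped function.
# Assumes each osd_devices_list entry is "/dev/sdX" or "/dev/sdX:/dev/journal_dev".
def osd_devices_hash(devices_list)
  devices_list.split(' ').each_with_object({}) do |entry, hash|
    device, journal = entry.split(':')
    # ceph::osds consumes a { device => options } hash as its 'args' parameter
    hash[device] = journal ? { 'journal' => journal } : {}
  end
end

osd_devices_hash('/dev/sdb /dev/sdc:/dev/sdd1')
# => {"/dev/sdb"=>{}, "/dev/sdc"=>{"journal"=>"/dev/sdd1"}}
```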
@@ -6,12 +6,82 @@ manifest = 'ceph/ceph_compute.pp'

describe manifest do
  shared_examples 'catalog' do
    storage_hash = Noop.hiera_hash 'storage'
    storage_hash = Noop.hiera 'storage'

    if (storage_hash['ephemeral_ceph'])
      libvirt_images_type = 'rbd'
    else
      libvirt_images_type = 'default'
    let(:mon_key) do
      Noop.hiera_structure 'storage/mon_key', 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw=='
    end

    let(:fsid) do
      Noop.hiera_structure 'storage/fsid', '066F558C-6789-4A93-AAF1-5AF1BA01A3AD'
    end

    let(:cinder_pool) do
      'volumes'
    end

    let(:glance_pool) do
      'images'
    end

    let(:compute_pool) do
      'compute'
    end

    let(:compute_user) do
      'compute'
    end

    let(:libvirt_images_type) do
      'rbd'
    end

    let(:secret) do
      Noop.hiera_structure 'storage/mon_key', 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw=='
    end

    let(:per_pool_pg_nums) do
      storage_hash['per_pool_pg_nums']
    end

    let(:compute_pool_pg_nums) do
      Noop.hiera_structure 'storage/per_pool_pg_nums/compute', '1024'
    end

    let(:compute_pool_pgp_nums) do
      Noop.hiera_structure 'storage/per_pool_pg_nums/compute', '1024'
    end

    let(:network_scheme) do
      Noop.hiera_hash 'network_scheme'
    end

    let(:prepare_network_config) do
      Noop.puppet_function 'prepare_network_config', network_scheme
    end

    let(:ceph_cluster_network) do
      Noop.puppet_function 'get_network_role_property', 'ceph/replication', 'network'
    end

    let(:ceph_public_network) do
      Noop.puppet_function 'get_network_role_property', 'ceph/public', 'network'
    end

    let(:ceph_monitor_nodes) do
      Noop.hiera_hash('ceph_monitor_nodes')
    end

    let(:mon_address_map) do
      Noop.puppet_function 'get_node_to_ipaddr_map_by_network_role', ceph_monitor_nodes, 'ceph/public'
    end

    let(:mon_ips) do
      mon_address_map.values.join(',')
    end

    let(:mon_hosts) do
      mon_address_map.keys.join(',')
    end

    if (storage_hash['volumes_ceph'] or
@@ -19,26 +89,39 @@ describe manifest do
        storage_hash['objects_ceph'] or
        storage_hash['ephemeral_ceph']
    )
      it { should contain_class('ceph').with(
        'osd_pool_default_size'    => storage_hash['osd_pool_size'],
        'osd_pool_default_pg_num'  => storage_hash['pg_num'],
        'osd_pool_default_pgp_num' => storage_hash['pg_num'],)
      }
      it { should contain_class('ceph::conf') }
      it 'should deploy ceph' do
        should contain_class('ceph').with(
          'fsid'                => fsid,
          'mon_initial_members' => mon_hosts,
          'mon_host'            => mon_ips,
          'cluster_network'     => ceph_cluster_network,
          'public_network'      => ceph_public_network,
        )
      end

      it { should contain_ceph__pool('compute').with(
        'pg_num'  => storage_hash['per_pool_pg_nums']['compute'],
        'pgp_num' => storage_hash['per_pool_pg_nums']['compute'],)
      }
      it { should contain_class('ceph::ephemeral').with(
        'libvirt_images_type' => libvirt_images_type,)
      }
      it { should contain_ceph__pool('compute').that_requires('Class[ceph::conf]') }
      it { should contain_ceph__pool('compute').that_comes_before('Class[ceph::nova_compute]') }
      it { should contain_class('ceph::nova_compute').that_requires('Ceph::Pool[compute]') }
      it { should contain_exec('Set Ceph RBD secret for Nova').that_requires('Service[libvirt]')}
    else
      it { should_not contain_class('ceph') }
      it 'should configure compute pool' do
        should contain_ceph__pool(compute_pool).with(
          'pg_num'  => compute_pool_pg_nums,
          'pgp_num' => compute_pool_pgp_nums,
        ).that_requires('ceph')
      end

      it 'should configure ceph compute keys' do
        should contain_ceph__key("client.#{compute_user}").with(
          'secret'  => secret,
          'cap_mon' => 'allow r',
          'cap_osd' => "allow class-read object_prefix rbd_children, allow rwx pool=#{cinder_pool}, allow rx pool=#{glance_pool}, allow rwx pool=#{compute_pool}",
          'inject'  => true,
        )
      end

      it 'should contain class osnailyfacter::ceph_nova_compute' do
        should contain_class('osnailyfacter::ceph_nova_compute').with(
          'user'                => compute_user,
          'compute_pool'        => compute_pool,
          'libvirt_images_type' => libvirt_images_type,
        )
      end
    end
  end
  test_ubuntu_and_centos manifest
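The `mon_ips`/`mon_hosts` lets above all derive from one map. An illustration of the shape involved, with made-up hostnames and addresses:

```ruby
# get_node_to_ipaddr_map_by_network_role returns { hostname => ip } for the
# 'ceph/public' role; the specs then join it into the strings ceph consumes.
mon_address_map = { 'node-1' => '192.168.1.4', 'node-2' => '192.168.1.5' } # example data
mon_ips   = mon_address_map.values.join(',') # => "192.168.1.4,192.168.1.5"
mon_hosts = mon_address_map.keys.join(',')   # => "node-1,node-2"
```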
@@ -7,28 +7,100 @@ manifest = 'ceph/ceph_pools.pp'

describe manifest do
  shared_examples 'catalog' do
    storage_hash = Noop.hiera_hash 'storage'
    glance_pool = 'images'
    cinder_pool = 'volumes'
    cinder_backup_pool = 'backups'
    storage_hash = Noop.hiera 'storage'

    let(:mon_key) do
      Noop.hiera_structure 'storage/mon_key', 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw=='
    end

    let(:fsid) do
      Noop.hiera_structure 'storage/fsid', '066F558C-6789-4A93-AAF1-5AF1BA01A3AD'
    end

    let(:cinder_user) do
      'volumes'
    end

    let(:cinder_pool) do
      'volumes'
    end

    let(:cinder_backup_user) do
      'backups'
    end

    let(:cinder_backup_pool) do
      'backups'
    end

    let(:glance_user) do
      'images'
    end

    let(:glance_pool) do
      'images'
    end

    if (storage_hash['images_ceph'] or storage_hash['objects_ceph'])
      it { should contain_ceph__pool("#{glance_pool}").with(
        'acl'     => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=#{glance_pool}'",
        'pg_num'  => storage_hash['per_pool_pg_nums']['images'],
        'pgp_num' => storage_hash['per_pool_pg_nums']['images'],)
      }
      it { should contain_ceph__pool("#{cinder_pool}").with(
        'acl'     => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=#{cinder_pool}, allow rx pool=#{glance_pool}'",
        'pg_num'  => storage_hash['per_pool_pg_nums']['volumes'],
        'pgp_num' => storage_hash['per_pool_pg_nums']['volumes'],)
      }
      it { should contain_ceph__pool("#{cinder_backup_pool}").with(
        'acl'     => "mon 'allow r' osd 'allow class-read object_prefix rbd_children, allow rwx pool=#{cinder_backup_pool}, allow rwx pool=#{cinder_pool}'",
        'pg_num'  => storage_hash['per_pool_pg_nums']['backups'],
        'pgp_num' => storage_hash['per_pool_pg_nums']['backups'],)
      }
      it 'should deploy ceph' do
        should contain_class('ceph').with(
          'fsid' => fsid,
        )
      end

      it 'should configure glance pool' do
        should contain_ceph__pool(glance_pool).with(
          'pg_num'  => storage_hash['per_pool_pg_nums']['images'],
          'pgp_num' => storage_hash['per_pool_pg_nums']['images']
        )
      end

      it 'should configure ceph glance key' do
        should contain_ceph__key("client.#{glance_user}").with(
          'secret'  => mon_key,
          'user'    => 'glance',
          'group'   => 'glance',
          'cap_mon' => 'allow r',
          'cap_osd' => "allow class-read object_prefix rbd_children, allow rwx pool=#{glance_pool}",
          'inject'  => true,
        )
      end

      it 'should configure cinder pool' do
        should contain_ceph__pool(cinder_pool).with(
          'pg_num'  => storage_hash['per_pool_pg_nums']['volumes'],
          'pgp_num' => storage_hash['per_pool_pg_nums']['volumes']
        )
      end

      it 'should configure ceph cinder key' do
        should contain_ceph__key("client.#{cinder_user}").with(
          'secret'  => mon_key,
          'user'    => 'cinder',
          'group'   => 'cinder',
          'cap_mon' => 'allow r',
          'cap_osd' => "allow class-read object_prefix rbd_children, allow rwx pool=#{cinder_pool}, allow rx pool=#{glance_pool}",
          'inject'  => true,
        )
      end

      it 'should configure cinder-backup pool' do
        should contain_ceph__pool(cinder_backup_pool).with(
          'pg_num'  => storage_hash['per_pool_pg_nums']['backups'],
          'pgp_num' => storage_hash['per_pool_pg_nums']['backups']
        )
      end

      it 'should configure ceph cinder-backup key' do
        should contain_ceph__key("client.#{cinder_backup_user}").with(
          'secret'  => mon_key,
          'user'    => 'cinder',
          'group'   => 'cinder',
          'cap_mon' => 'allow r',
          'cap_osd' => "allow class-read object_prefix rbd_children, allow rwx pool=#{cinder_backup_pool}, allow rwx pool=#{cinder_pool}",
          'inject'  => true,
        )
      end

      if storage_hash['volumes_ceph']
        it { should contain_ceph__pool("#{cinder_pool}").that_notifies('Service[cinder-volume]') }
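For readers unfamiliar with the hiera layout: `storage/per_pool_pg_nums` maps pool names to their placement-group counts, and each pool's `pg_num`/`pgp_num` are read straight from it. A sketch with invented values (only the `'compute' => 1024` default actually appears in this change, in the ceph_compute spec above):

```ruby
per_pool_pg_nums = {
  'images'  => 64,   # invented example values
  'volumes' => 256,
  'backups' => 128,
  'compute' => 1024, # default used by the ceph_compute spec
  '.rgw'    => 512,
}
per_pool_pg_nums['images'] # pg_num and pgp_num for the glance pool
```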
@@ -1,60 +0,0 @@
# ROLE: primary-controller
# ROLE: controller

require 'spec_helper'
require 'shared-examples'
manifest = 'ceph/enable_rados.pp'

describe manifest do
  shared_examples 'catalog' do
    it "should contain radosgw service" do

      if facts[:operatingsystem] == 'Ubuntu'
        should contain_service('radosgw').with(
          :enable   => 'false',
          :provider => 'debian'
        )

        should contain_service('radosgw-all').with(
          :ensure   => 'running',
          :enable   => 'true',
          :provider => 'upstart'
        )
      else
        should contain_service('ceph-radosgw').with(
          :ensure => 'running',
          :enable => 'true'
        )
      end
    end

    it "should create radosgw init override file on Ubuntu" do
      if facts[:operatingsystem] == 'Ubuntu'
        should contain_file("/etc/init/radosgw-all.override").with(
          :ensure  => 'present',
          :mode    => '0644',
          :owner   => 'root',
          :group   => 'root',
          :content => "start on runlevel [2345]\nstop on starting rc RUNLEVEL=[016]\n"
        )
      end
    end

    it "should wait until radosgw get ready" do
      ssl_hash = Noop.hiera_hash('use_ssl', {})
      service_endpoint = Noop.hiera('service_endpoint', '')
      management_vip = Noop.hiera('management_vip', '')
      rgw_protocol = Noop.puppet_function 'get_ssl_property', ssl_hash, {}, 'radosgw', 'internal', 'protocol', 'http'
      rgw_address = Noop.puppet_function 'get_ssl_property', ssl_hash, {}, 'radosgw', 'internal', 'hostname', [service_endpoint, management_vip]
      rgw_url = "#{rgw_protocol}://#{rgw_address}:8080"

      should contain_haproxy_backend_status('object-storage').with(
        :url      => rgw_url,
        :provider => 'http'
      )
    end
  end

  test_ubuntu_and_centos manifest
end
@@ -7,24 +7,157 @@ manifest = 'ceph/mon.pp'

describe manifest do
  shared_examples 'catalog' do
    storage_hash = Noop.hiera_hash 'storage'
    ceph_monitor_nodes = Noop.hiera 'ceph_monitor_nodes'

    storage_hash = Noop.hiera 'storage'

    let(:mon_key) do
      Noop.hiera_structure 'storage/mon_key', 'AQDesGZSsC7KJBAAw+W/Z4eGSQGAIbxWjxjvfw=='
    end

    let(:bootstrap_osd_key) do
      Noop.hiera_structure 'storage/bootstrap_osd_key', 'AQABsWZSgEDmJhAAkAGSOOAJwrMHrM5Pz5On1A=='
    end

    let(:admin_key) do
      Noop.hiera_structure 'storage/admin_key', 'AQCTg71RsNIHORAAW+O6FCMZWBjmVfMIPk3MhQ=='
    end

    let(:fsid) do
      Noop.hiera_structure 'storage/fsid', '066F558C-6789-4A93-AAF1-5AF1BA01A3AD'
    end

    let(:osd_pool_default_size) do
      storage_hash['osd_pool_size']
    end

    let(:osd_pool_default_pg_num) do
      storage_hash['pg_num']
    end

    let(:osd_pool_default_pgp_num) do
      storage_hash['pg_num']
    end

    let(:osd_pool_default_min_size) do
      '1'
    end

    let(:osd_journal_size) do
      '2048'
    end

    let(:network_scheme) do
      Noop.hiera_hash 'network_scheme'
    end

    let(:prepare_network_config) do
      Noop.puppet_function 'prepare_network_config', network_scheme
    end

    let(:ceph_cluster_network) do
      Noop.puppet_function 'get_network_role_property', 'ceph/replication', 'network'
    end

    let(:ceph_public_network) do
      Noop.puppet_function 'get_network_role_property', 'ceph/public', 'network'
    end

    ceph_monitor_nodes = Noop.hiera_hash('ceph_monitor_nodes')
    mon_address_map = Noop.puppet_function 'get_node_to_ipaddr_map_by_network_role', ceph_monitor_nodes, 'ceph/public'
    ceph_primary_monitor_node = Noop.hiera_hash('ceph_primary_monitor_node')
    primary_mon = Noop.puppet_function 'get_node_to_ipaddr_map_by_network_role', ceph_primary_monitor_node, 'ceph/public'
    mon_ips = mon_address_map.values.join(',')
    mon_hosts = mon_address_map.keys.join(',')
    primary_mon_ip = primary_mon.values.join
    primary_mon_hostname = primary_mon.keys.join

    if (storage_hash['volumes_ceph'] or
        storage_hash['images_ceph'] or
        storage_hash['objects_ceph'] or
        storage_hash['ephemeral_ceph']
    )
      it { should contain_class('ceph').with(
        'mon_hosts'                => ceph_monitor_nodes.keys,
        'osd_pool_default_size'    => storage_hash['osd_pool_size'],
        'osd_pool_default_pg_num'  => storage_hash['pg_num'],
        'osd_pool_default_pgp_num' => storage_hash['pg_num'],
        'ephemeral_ceph'           => storage_hash['ephemeral_ceph'],
        )
      }
    else
      it { should_not contain_class('ceph') }
      describe 'should configure primary ceph mon' do
        let(:facts) {
          Noop.ubuntu_facts.merge({
            :hostname => primary_mon.keys[0]
          })
        }

        it 'should deploy primary ceph mon' do
          should contain_class('ceph').with(
            'fsid'                      => fsid,
            'mon_initial_members'       => primary_mon_hostname,
            'mon_host'                  => primary_mon_ip,
            'cluster_network'           => ceph_cluster_network,
            'public_network'            => ceph_public_network,
            'osd_pool_default_size'     => osd_pool_default_size,
            'osd_pool_default_pg_num'   => osd_pool_default_pg_num,
            'osd_pool_default_pgp_num'  => osd_pool_default_pgp_num,
            'osd_pool_default_min_size' => osd_pool_default_min_size,
            'osd_journal_size'          => osd_journal_size,
          )
        end
      end

      describe 'should configure non-primary ceph mon' do
        let(:facts) {
          Noop.ubuntu_facts.merge({
            :hostname => 'non-primary-node'
          })
        }

        it 'should deploy non-primary ceph mon' do
          should contain_class('ceph').with(
            'fsid'                      => fsid,
            'mon_initial_members'       => mon_hosts,
            'mon_host'                  => mon_ips,
            'cluster_network'           => ceph_cluster_network,
            'public_network'            => ceph_public_network,
            'osd_pool_default_size'     => osd_pool_default_size,
            'osd_pool_default_pg_num'   => osd_pool_default_pg_num,
            'osd_pool_default_pgp_num'  => osd_pool_default_pgp_num,
            'osd_pool_default_min_size' => osd_pool_default_min_size,
            'osd_journal_size'          => osd_journal_size,
          )
        end
      end

      it 'should add parameters to ceph.conf' do
        should contain_ceph_config('global/filestore_xattr_use_omap').with(:value => true)
        should contain_ceph_config('global/osd_recovery_max_active').with(:value => '1')
        should contain_ceph_config('global/osd_max_backfills').with(:value => '1')
        should contain_ceph_config('client/rbd_cache_writethrough_until_flush').with(:value => true)
        should contain_ceph_config('client/rbd_cache').with(:value => true)
        should contain_ceph_config('global/log_to_syslog').with(:value => true)
        should contain_ceph_config('global/log_to_syslog_level').with(:value => 'info')
        should contain_ceph_config('global/log_to_syslog_facility').with(:value => 'LOG_LOCAL0')
      end

      it 'should add admin key' do
        should contain_ceph__key('client.admin').with(
          'secret'  => admin_key,
          'cap_mon' => 'allow *',
          'cap_osd' => 'allow *',
          'cap_mds' => 'allow',
          'inject'  => true,
        )
      end

      it 'should add bootstrap osd key' do
        should contain_ceph__key('client.bootstrap-osd').with(
          'secret'  => bootstrap_osd_key,
          'cap_mon' => 'allow profile bootstrap-osd',
        )
      end

      if storage_hash['volumes_ceph']
        it { should contain_service('cinder-volume').that_subscribes_to('Class[ceph]') }
        it { should contain_service('cinder-backup').that_subscribes_to('Class[ceph]') }
      end

      if storage_hash['images_ceph']
        it { should contain_service('glance-api').that_subscribes_to('Class[ceph]') }
      end
    end
  end
@@ -0,0 +1,38 @@
require 'spec_helper'
require 'shared-examples'
manifest = 'ceph/primary_mon_update.pp'

describe manifest do
  shared_examples 'catalog' do

    ceph_monitor_nodes = Noop.hiera_hash('ceph_monitor_nodes')
    mon_address_map = Noop.puppet_function 'get_node_to_ipaddr_map_by_network_role', ceph_monitor_nodes, 'ceph/public'
    mon_ips = mon_address_map.values.join(',')
    mon_hosts = mon_address_map.keys.join(',')

    storage_hash = Noop.hiera 'storage'

    if (storage_hash['volumes_ceph'] or
        storage_hash['images_ceph'] or
        storage_hash['objects_ceph'] or
        storage_hash['ephemeral_ceph']
    )

      it 'should wait for ceph to be ready' do
        should contain_exec('Wait for Ceph quorum')
      end

      it 'should add parameters to ceph.conf' do
        should contain_ceph_config('global/mon_host').with(:value => mon_ips)
        should contain_ceph_config('global/mon_initial_members').with(:value => mon_hosts)
      end

      it 'should reload Ceph' do
        should contain_exec('reload Ceph for HA')
      end
    end
  end
  test_ubuntu_and_centos manifest

end
@@ -7,61 +7,83 @@ manifest = 'ceph/radosgw.pp'

describe manifest do
  shared_examples 'catalog' do
    storage_hash = Noop.hiera_hash 'storage'
    ceph_monitor_nodes = Noop.hiera 'ceph_monitor_nodes'

    radosgw_enabled = Noop.hiera_structure('storage/objects_ceph', false)
    ssl_hash = Noop.hiera_structure('use_ssl', {})
    storage_hash = Noop.hiera_hash 'storage'

    rgw_large_pool_name = '.rgw'
    rgw_large_pool_pg_nums = storage_hash['per_pool_pg_nums'][rgw_large_pool_name]
    rgw_id = 'radosgw.gateway'
    radosgw_auth_key = "client.#{rgw_id}"

    let(:gateway_name) {
      'radosgw.gateway'
    }

    let(:fsid) {
      Noop.hiera_structure 'storage/fsid', '066F558C-6789-4A93-AAF1-5AF1BA01A3AD'
    }

    let(:ssl_hash) { Noop.hiera_hash 'use_ssl', {} }

    let(:admin_auth_protocol) {
    let(:service_endpoint) {
      Noop.hiera_structure 'service_endpoint'
    }

    let(:admin_identity_protocol) {
      Noop.puppet_function 'get_ssl_property', ssl_hash, {}, 'keystone',
        'admin', 'protocol', 'http'
    }

    let(:admin_auth_address) {
    let(:admin_identity_address) {
      Noop.puppet_function 'get_ssl_property', ssl_hash, {}, 'keystone', 'admin',
        'hostname',
        [Noop.hiera('service_endpoint', Noop.hiera('management_vip'))]
    }

    let(:admin_url) {
      "#{admin_auth_protocol}://#{admin_auth_address}:35357"
    let(:admin_identity_url) {
      "#{admin_identity_protocol}://#{admin_identity_address}:35357"
    }

    if storage_hash['objects_ceph']
      rgw_large_pool_name = '.rgw'
      rgw_large_pool_pg_nums = storage_hash['per_pool_pg_nums'][rgw_large_pool_name]
      rgw_id = 'radosgw.gateway'
      radosgw_auth_key = "client.#{rgw_id}"
      let(:radosgw_key) do
        Noop.hiera_structure 'storage/radosgw_key', 'AQCTg71RsNIHORAAW+O6FCMZWBjmVfMIPk3MhQ=='
      end

      it 'should configure apache mods' do
        if facts[:osfamily] == 'Debian'
          should contain_apache__mod('rewrite')
          should contain_apache__mod('proxy')
          should contain_apache__mod('proxy_fcgi')
        else
          should contain_apache__mod('rewrite')
          should_not contain_apache__mod('proxy')
          should_not contain_apache__mod('proxy_fcgi')
        end
      if radosgw_enabled
        it 'should add radosgw key' do
          should contain_ceph__key("client.#{gateway_name}").with(
            'secret'  => radosgw_key,
            'cap_mon' => 'allow rw',
            'cap_osd' => 'allow rwx',
            'inject'  => true,
          )
        end

        it { should contain_class('ceph::radosgw').with(
          'rgw_frontends'    => 'fastcgi socket_port=9000 socket_host=127.0.0.1',
          'rgw_keystone_url' => admin_url,
          )
        }

        it { should contain_service('httpd').with(
          :hasrestart => true,
          :restart    => 'sleep 30 && apachectl graceful || apachectl restart',
        it 'should deploy ceph' do
          should contain_class('ceph').with(
            'fsid' => fsid,
          )
        }
      end

      it 'should contain ceph::rgw' do
        should contain_ceph__rgw(gateway_name).with(
          'frontend_type' => 'apache-proxy-fcgi',
        )
      end

      it 'should configure radosgw keystone' do
        should contain_ceph__rgw__keystone(gateway_name).with(
          'rgw_keystone_url' => admin_identity_url,
        )
      end

      it { should contain_exec("Create #{rgw_large_pool_name} pool").with(
        :command => "ceph -n #{radosgw_auth_key} osd pool create #{rgw_large_pool_name} #{rgw_large_pool_pg_nums} #{rgw_large_pool_pg_nums}",
        :unless  => "rados lspools | grep '^#{rgw_large_pool_name}$'"
        )
      }

    end
  end
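The `admin_identity_url` let above resolves to the Keystone admin endpoint. With example values it assembles like this (the address is invented; the protocol is get_ssl_property's 'http' fallback):

```ruby
admin_identity_protocol = 'http'        # get_ssl_property fallback
admin_identity_address  = '192.168.0.2' # service_endpoint / management_vip fallback (invented)
admin_identity_url = "#{admin_identity_protocol}://#{admin_identity_address}:35357"
# => "http://192.168.0.2:35357" — passed to ceph::rgw::keystone as rgw_keystone_url
```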
@@ -10,7 +10,9 @@ describe manifest do
  shared_examples 'catalog' do
    storage_hash = Noop.hiera_hash 'storage'

    if (storage_hash['images_ceph'] or storage_hash['objects_ceph'] or storage_hash['objects_ceph'])
    if (storage_hash['volumes_ceph'] or
        storage_hash['images_ceph'] or
        storage_hash['objects_ceph'])
      it { should contain_exec('Ensure /var/lib/ceph in the updatedb PRUNEPATH').with(
        :path    => [ '/usr/bin', '/bin' ],
        :command => "sed -i -Ee 's|(PRUNEPATHS *= *\"[^\"]*)|\\1 /var/lib/ceph|' /etc/updatedb.conf",
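What the sed in this exec does, replayed in Ruby for clarity (the sample line is invented):

```ruby
line = 'PRUNEPATHS="/tmp /var/spool"' # sample /etc/updatedb.conf line
line.sub(/(PRUNEPATHS *= *"[^"]*)/) { "#{$1} /var/lib/ceph" }
# => "PRUNEPATHS=\"/tmp /var/spool /var/lib/ceph\"" — locate's indexer will skip OSD data
```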
@@ -50,6 +50,7 @@ describe manifest do
    end

    node_name = Noop.hiera('node_name')
    storage_hash = Noop.hiera 'storage'
    network_metadata = Noop.hiera_hash 'network_metadata', {}
    roles = network_metadata['nodes'][node_name]['node_roles']
    mongodb_port = Noop.hiera('mongodb_port', '27017')
@@ -235,6 +236,47 @@ describe manifest do
        end
      end
    end

    if (storage_hash['volumes_ceph'] or
        storage_hash['images_ceph'] or
        storage_hash['objects_ceph'] or
        storage_hash['ephemeral_ceph']
    )
      if Noop.puppet_function 'member', roles, 'primary-controller' or Noop.puppet_function 'member', roles, 'controller'
        it 'should configure firewall' do
          should contain_firewall('010 ceph-mon allow').with(
            'chain'  => 'INPUT',
            'dport'  => '6789',
            'proto'  => 'tcp',
            'action' => 'accept',
          )
        end
      end

      if Noop.puppet_function 'member', roles, 'ceph-osd'
        it 'should configure firewall' do
          should contain_firewall('011 ceph-osd allow').with(
            'chain'  => 'INPUT',
            'dport'  => '6800-7100',
            'proto'  => 'tcp',
            'action' => 'accept',
          )
        end
      end

      if storage_hash['objects_ceph']
        if Noop.puppet_function 'member', roles, 'primary-controller' or Noop.puppet_function 'member', roles, 'controller'
          it 'should configure firewall' do
            should contain_firewall('012 RadosGW allow').with(
              'chain'  => 'INPUT',
              'dport'  => [ '6780', '8080' ],
              'proto'  => 'tcp',
              'action' => 'accept',
            )
          end
        end
      end
    end
  end

  test_ubuntu_and_centos manifest