Merge ceph charm into ceph-mon
Squashed commit of the following:

commit 9b832d9391f9fea2d1491d01da6101585930fc75 (Merge: e2432c4 7b36210)
Author: Chris MacNaughton <chmacnaughton@gmail.com>, Mon Mar 21 16:40:54 2016 -0400
    Merge branch 'master' of github.com:openstack/charm-ceph into charm-ceph-mon
    Change-Id: I42cfe6f1e5887627981f8ce4beff164803cc3957

commit 7b36210bac5bef3bacae2614995e123ef926453f
Author: Chris Holcombe <xfactor973@gmail.com>, Fri Mar 18 15:37:06 2016 -0700
    Add ceph-osd to ceph
    This change adds ceph-osd back into ceph for amulet testing.
    Change-Id: Ice4aaf7739e8c839189313d3f6175a834cf64219

commit e87e0b7bd22fe5ccae2aafcf6bd30f145405e01b
Author: Ryan Beisner <ryan.beisner@canonical.com>, Wed Mar 16 17:33:48 2016 +0000
    Update amulet test to include a non-existent osd-devices value
    The osd-devices charm config option is a whitelist, and the charm needs
    to gracefully handle items in that whitelist which may not exist.
    Change-Id: I5f9c6c1e4519fd671d6d36b415c9c8f763495dad

commit ffce15d52333de4063d04b808cfbca5d890fb996 (Merge: fe8bf6e 9614896)
Author: Jenkins <jenkins@review.openstack.org>, Wed Mar 16 17:45:25 2016 +0000
    Merge "Revert "Make 'blocked' status when node have no storage device""

commit 961489609d85851bd63c6825339a296bdf74e320
Author: Chris Holcombe <xfactor973@gmail.com>, Wed Mar 16 16:55:02 2016 +0000
    Revert "Make 'blocked' status when node have no storage device"
    This reverts commit fc04dd0fff33639b812627d04645134dd7d4d3de.
    Change-Id: I9efbf623fc9aa6096725a15e53df426739ac16ff

commit fe8bf6e4a5cb466a5efc6403c215e7aece2c6b9c
Author: Billy Olsen <billy.olsen@gmail.com>, Tue Mar 15 20:08:20 2016 -0700
    Use tox in Makefile targets
    Modify the Makefile to point at the appropriate tox targets so that tox
    and Make output can be equivalent. This involves mapping the lint target
    to the pep8 target and the test target to the py27 target.
    Change-Id: I99761d2fdf120bacff58d0aa5c2e584382c2e72b

commit fc04dd0fff33639b812627d04645134dd7d4d3de
Author: Seyeong Kim <seyeong.kim@canonical.com>, Fri Mar 11 06:07:52 2016 +0000
    Make 'blocked' status when node have no storage device
    Currently there is a msg for no storage status on a ceph node, but it
    doesn't put the charm into the 'blocked' state. An is_storage_fine
    function has been created to check storage devices in ceph_hooks.py and
    is used by assess_status.
    Change-Id: I790fde0280060fa220ee83de2ad2319ac2c77230
    Closes-Bug: lp1424510

commit a7c5e85c408ab8446a18cc6761b1d0b292641ea7
Author: Ryan Beisner <ryan.beisner@canonical.com>, Fri Mar 4 14:36:38 2016 +0000
    Enable Xenial-Mitaka amulet test target.
    Change-Id: I0c386fc0c052cc1ac52c0a30f7a39fa914a61100

commit e80c5097c26ac4eb200a289daa272d5c7ac82539
Author: uoscibot <uosci-testing-bot@ubuntu.com>, Mon Feb 29 10:45:49 2016 +0000
    Adapt imports and metadata for github move

commit 391ed288fc763b69f0cd92459f236e7581a5f244 (Merge: 78250bd 6228ea2)
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Thu Feb 25 13:34:27 2016 -0500
    [hopem,r=] Support multiple l3 segments.
    Closes-Bug: 1523871

commit 6228ea2a8fa578c3c6b24b59f621e6e1026a7668 (Merge: 6159390 78250bd)
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Thu Feb 25 09:29:46 2016 -0500
    sync /next

commit 78250bd65c861adcb321f1c634def29fcfdaa8a9
Author: James Page <james.page@ubuntu.com>, Wed Feb 24 21:53:28 2016 +0000
    Add gitreview prior to migration to openstack

commit 61593905939359ba72768ccb8f1a450a571c1d24
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Wed Feb 24 15:56:20 2016 -0500
    only use fallback for get_public_addr() if networks not provided in config

commit 34841b0aea85b3d5693a5336dbf956a406414474 (Merge: 08d1cbc 092368d)
Author: James Page <james.page@ubuntu.com>, Wed Feb 24 14:22:20 2016 +0000
    Add actions to support configuration of erasure coded pools.

commit 092368d646d4e02b2d2ac08026b6cbf2c94a4042 (Merge: de98010 08d1cbc)
Author: Chris Holcombe <chris.holcombe@canonical.com>, Tue Feb 23 08:19:56 2016 -0800
    Merge upstream

commit 08d1cbcdc943493a556e0187d2b3e6fbe83b69e3 (Merge: 2d4ff89 414e519)
Author: James Page <james.page@ubuntu.com>, Tue Feb 23 09:49:50 2016 +0000
    Fix amulet tests for nova-compute changes.

commit 414e5195c939a99adcaf79e27eb057c07c7f4761
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Mon Feb 22 15:21:00 2016 -0500
    fix amulet

commit e99e991be21c6d98fc670bcafa30684c0ba4d5e0
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Mon Feb 22 12:56:00 2016 -0500
    fixup

commit de98010f6f8d81e63d47ac03d33aa40bd870c7ea
Author: Chris Holcombe <chris.holcombe@canonical.com>, Mon Feb 22 08:05:32 2016 -0800
    charmhelpers sync

commit 2d4ff89e4bba2e93e08a6dd00bc2367e90b708fe (Merge: f16e3fa f98627c)
Author: Liam Young <liam.young@canonical.com>, Mon Feb 22 09:26:38 2016 +0000
    [james-page, r=gnuoy] Add configuration option for toggling use of direct io for OSD journals

commit f3803cb60d55154e35ac2294170b27fb348141b3
Author: Chris Holcombe <chris.holcombe@canonical.com>, Fri Feb 19 08:11:18 2016 -0800
    Change /usr/bin/python2.7 to /usr/bin/python

commit 612ba454c4263d9bfc672fe168a55c2f01599d70 (Merge: c3d20a0 f16e3fa)
Author: Chris Holcombe <chris.holcombe@canonical.com>, Thu Feb 18 17:16:55 2016 -0800
    Merge upstream and resolve conflicts with actions and actions.yaml

commit c3d20a0eb67918d11585851a7b5df55ce0290392
Author: Chris Holcombe <chris.holcombe@canonical.com>, Thu Feb 18 17:10:56 2016 -0800
    Fix up the niggles and provide feedback to the action user as to why something failed

commit ea5cc48ccbb5d6515703bd5c93c13b2147972cd1
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Thu Feb 18 17:42:05 2016 +0000
    more

commit f58dd864eac130a6bc20b46c1495d7fa34a54894
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Thu Feb 18 17:09:52 2016 +0000
    restore sanity

commit 32631ccde309040b92ba76ecc12b16bad953f486
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Thu Feb 18 11:40:09 2016 +0000
    post-review fixes

commit 7ada8f0de65d397648d041fae20ed21b3f38bd15
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Thu Feb 18 11:36:46 2016 +0000
    post-review fixes

commit f16e3fac5240133c1c7dfd406caacd21b364532a (Merge: a0ffb8b 7709b7d)
Author: James Page <james.page@ubuntu.com>, Thu Feb 18 11:02:17 2016 +0000
    Add pause/resume cluster health actions
    Add actions to pause and resume cluster health monitoring within ceph for
    all osd devices. This will ensure that no rebalancing is done whilst
    maintenance actions are happening within the cluster.

commit a0ffb8bf97c9cf3c19d17090c96f2ea60c89da65 (Merge: 65439ba 531b40d)
Author: James Page <james.page@ubuntu.com>, Thu Feb 18 10:38:53 2016 +0000
    Wait for quorom and query the right unit remote_unit when not in radosgw context

commit 65439ba7dc3acf494c9a8d11e2cdd274d144b485 (Merge: 5e77170 afd390b)
Author: James Page <james.page@ubuntu.com>, Wed Feb 17 11:28:44 2016 +0000
    Update test target definitions; Wait for unit status.

commit 531b40d9b2d216b467cca59d7649ab5bb4577b3d
Author: Liam Young <liam.young@canonical.com>, Wed Feb 17 10:15:37 2016 +0000
    Wait for quorom and query the right unit remote_unit when not in radosgw context

commit 5e77170f378be92a3e2e8de3c06dad158b4a14ca
Author: James Page <james.page@ubuntu.com>, Tue Feb 16 06:59:17 2016 +0000
    Tidy tox targets

commit 732d8e11cd5058e680a5982bce77648952c8532f
Author: Chris Holcombe <chris.holcombe@canonical.com>, Fri Feb 12 14:17:34 2016 -0800
    Used a list as an integer. I meant to use the size of the list

commit afd390b3ed4212883a02ca971e5613246c3ae6a8
Author: Ryan Beisner <ryan.beisner@canonical.com>, Fri Feb 12 21:24:20 2016 +0000
    No need to not wait for nonexistent nrpe

commit 9721ce8006720d24b8e4133fbbb8a01d989a71c8
Author: Ryan Beisner <ryan.beisner@canonical.com>, Fri Feb 12 21:02:36 2016 +0000
    Disable Xenial test re: pending lp1537155

commit d12e2658f5b5e6c38b98ae986134f83df2e0a380
Author: Ryan Beisner <ryan.beisner@canonical.com>, Fri Feb 12 20:57:08 2016 +0000
    Update test target definitions; Wait for unit status.

commit 7709b7d5385757fc6d8fe48fa7646efcdb77564a
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Feb 12 08:26:13 2016 -0500
    rename actions

commit 2c945523486227dd1c58a1c1a76a779d9c131a71 (Merge: 5b5e6dc 27d5d4b)
Author: James Page <james.page@ubuntu.com>, Fri Feb 12 12:34:20 2016 +0000
    Resolve symlinks in get_devices().

commit 7edce1dd489a4718a150f7f38ffd366855e49828
Author: Edward Hope-Morley <edward.hope-morley@canonical.com>, Wed Feb 10 15:20:52 2016 +0000
    [hopem,r=] Support multiple l3 segments.
    Closes-Bug: 1523871

commit 27d5d4b8bb0fd61a3910dad1bdf46adc2b476649
Author: Bjorn Tillenius <bjorn@canonical.com>, Tue Feb 2 19:01:53 2016 +0200
    Lint.

commit 6980d3a3418ba512e65a79a62b140b238d54a17b
Author: Bjorn Tillenius <bjorn@canonical.com>, Tue Feb 2 17:34:19 2016 +0200
    Resolve symlinks in get_devices().

commit f98627c1c163d702ae1142a6153801073d57280c (Merge: 4f0dc6d 5b5e6dc)
Author: James Page <james.page@ubuntu.com>, Sat Jan 30 15:45:01 2016 +0100
    rebase

commit eaa365a180e8eda88e6ef9f1a6c975a0b780dee5
Author: Chris Holcombe <chris.holcombe@canonical.com>, Fri Jan 22 15:21:45 2016 -0800
    Clean up another lint error

commit 477cdc96fbe124509995a02c358c24c64451c9e4
Author: Chris Holcombe <chris.holcombe@canonical.com>, Fri Jan 22 15:04:27 2016 -0800
    Patching up the other unit tests to passing status

commit faa7b3ad95ebed02718ff58b3e3203b7d59be709
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 16:42:58 2016 -0500
    remove regex

commit 1e3b2f5dd409a02399735aa2aeb5e78d18ea2240
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 16:10:15 2016 -0500
    lint fix

commit 620209aeb47900430f039eb2e65bfe00db672e32
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 16:05:15 2016 -0500
    use search instead of match

commit 2f47939fa84c43c485042325a925d72797df6480
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 15:16:22 2016 -0500
    fix line length

commit f203a5bdfc12a2a99e3695840f16182e037f1df1
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 15:02:10 2016 -0500
    modify regex to not care about order

commit 706b272fc91d432921750b3af09689361f4b8bb9
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 14:16:46 2016 -0500
    try with sleeping

commit 66d6952a65ceb5c8858f262daf127f96ed03ea81 (Merge: e446a77 5b5e6dc)
Author: Chris Holcombe <chris.holcombe@canonical.com>, Fri Jan 22 10:46:50 2016 -0800
    Merge upstream and resolve conflicts

commit fc714c96f40bac9fb89108cd56962343472f63cf
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 11:10:34 2016 -0500
    fix variable name

commit 8cb53237c6588a00d86dcc0a564d18eb7cd751ae
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 10:47:26 2016 -0500
    update to use correct(?) commands

commit b762e9842ca335845fe3a442dfdde838e5246b3b
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 22 08:01:03 2016 -0500
    update tests.yaml

commit e446a7731cbe377f30c88bb99083745ba95caa4e
Author: Chris Holcombe <chris.holcombe@canonical.com>, Thu Jan 21 14:19:53 2016 -0800
    Clean up lint warnings. Also added a few more mock unit tests

commit 32ff93e8d0166b2346c422cbb9cd53bc4f805256
Author: Chris Holcombe <chris.holcombe@canonical.com>, Thu Jan 21 09:38:47 2016 -0800
    Adding a unit test file for ceph_ops

commit 4f0dc6d8b76b8545453293b2c69e2d6a164db10e
Author: James Page <james.page@ubuntu.com>, Mon Jan 18 16:39:49 2016 +0000
    Add configuration option for toggling use of direct io for OSD journals

commit 1977cdbde1d0fa7ad57baa07d97f477143d54787
Author: Chris Holcombe <chris.holcombe@canonical.com>, Mon Jan 18 08:07:35 2016 -0800
    Add actions to lint. Change actions.yaml to use enum and also change
    underscores to dashes. Log action_fail in addition to exiting -1. Merge
    v2 requests with v1 requests since this does not break backwards
    compatibility. Add unit tests. Modify tox.ini to include actions.

commit 3f0e16bcc483952e340fa89505011b7a115ff421
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 15 16:45:00 2016 -0500
    fix version

commit c665092be6f9d07f45a0b9baf2e0f128e4ecdc37
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Fri Jan 15 16:20:27 2016 -0500
    updating tests

commit 80de4d7256efbbc6c2ab7cdfcb1ab292668be607
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Thu Jan 14 13:19:10 2016 -0500
    update readme

commit 44365d58785e9ba63179d092b875c2029024aa8b
Author: Chris MacNaughton <chris.macnaughton@canonical.com>, Thu Jan 14 13:17:19 2016 -0500
    add pause/resume actions
    pause calls: `ceph osd set noout; ceph osd set nodown`
    resume calls: `ceph osd unset noout; ceph osd unset nodown`

commit bdd4e69e801e2178532e31216efe7e815b06f864
Author: Chris Holcombe <chris.holcombe@canonical.com>, Tue Dec 15 04:54:21 2015 -0800
    Missed a few typos

commit 0158586bde1a1f878c0a046a97510b8b90a95ce9
Author: Chris Holcombe <chris.holcombe@canonical.com>, Tue Dec 15 04:41:22 2015 -0800
    lint errors

commit 92ad78733279112bbba8e12d3fb19809ab9d0ff7
Author: Chris Holcombe <chris.holcombe@canonical.com>, Mon Dec 14 17:44:22 2015 -0800
    Actions are working and lightly tested. Need to create a more robust,
    automated test setup
    Change-Id: Ia18b19961dab66bb6c19ef7e9c421b2fec60fcc7
Parent: e2432c4fdc
Commit: b497f4de1e
.gitignore (4 lines changed, vendored)
@@ -1,8 +1,8 @@
bin
.idea
.coverage
.testrepository
.tox
*.sw[nop]
.idea
*.pyc
.gitreview (1 line changed)
@@ -1,4 +1,4 @@
[gerrit]
host=review.openstack.org
port=29418
project=openstack/charm-ceph-mon.git
README.md
@@ -9,15 +9,15 @@ juju
# Usage

The ceph charm has two pieces of mandatory configuration for which no defaults
are provided. You _must_ set these configuration options before deployment or
the charm will not work:

    fsid:
        uuid specific to a ceph cluster used to ensure that different
        clusters don't get mixed up - use `uuid` to generate one.

    monitor-secret:
        a ceph generated key used by the daemons that manage the cluster
        to control security. You can use the ceph-authtool command to
        generate one:

            ceph-authtool /dev/stdout --name=mon. --gen-key

@@ -30,7 +30,7 @@ At a minimum you must provide a juju config file during initial deployment
with the fsid and monitor-secret options (contents of ceph.yaml below):

    ceph:
        fsid: ecbb8960-0e21-11e2-b495-83a88f44db01
        monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg==

Boot things up by using:
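For illustration, both values can be generated on any machine with the
ceph-common package installed; a minimal Python sketch (the standard-library
`uuid` module stands in for the `uuid` CLI mentioned above):

    # Sketch: generate the two mandatory configuration values.
    # Assumes ceph-authtool is on PATH (it ships with ceph-common).
    import subprocess
    import uuid

    fsid = str(uuid.uuid4())  # cluster-unique identifier
    monitor_secret = subprocess.check_output(
        ['ceph-authtool', '/dev/stdout', '--name=mon.', '--gen-key'])
    print('fsid: %s' % fsid)
    print('monitor-secret: %s' % monitor_secret)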
actions.yaml (175 lines changed)
@@ -39,3 +39,178 @@ remove-cache-tier:
      as the hot pool
  required: [backer-pool, cache-pool]
  additionalProperties: false

create-pool:
  description: Creates a pool
  params:
    name:
      type: string
      description: The name of the pool
    profile-name:
      type: string
      description: The crush profile to use for this pool. The ruleset must exist first.
    pool-type:
      type: string
      default: "replicated"
      enum: [replicated, erasure]
      description: |
        The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the
        objects or erasure to get a kind of generalized RAID5 capability.
    replicas:
      type: integer
      default: 3
      description: |
        For the replicated pool this is the number of replicas to store of each object.
    erasure-profile-name:
      type: string
      default: default
      description: |
        The name of the erasure coding profile to use for this pool. Note this profile must exist
        before calling create-pool
  required: [name]
  additionalProperties: false

create-erasure-profile:
  description: Create a new erasure code profile to use on a pool.
  params:
    name:
      type: string
      description: The name of the profile
    failure-domain:
      type: string
      default: host
      enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row]
      description: |
        The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host.
    plugin:
      type: string
      default: "jerasure"
      enum: [jerasure, isa, lrc, shec]
      description: |
        The erasure plugin to use for this profile.
        See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details
    data-chunks:
      type: integer
      default: 3
      description: |
        The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance
        if K = 2 a 10KB object will be divided into K objects of 5KB each.
    coding-chunks:
      type: integer
      default: 2
      description: |
        The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions.
        If there are 2 coding chunks, it means 2 OSDs can be out without losing data.
    locality-chunks:
      type: integer
      description: |
        Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3
        two groups of three are created. Each set can be recovered without reading chunks from another set.
    durability-estimator:
      type: integer
      description: |
        The number of parity chunks each of which includes each data chunk in its calculation range. The number is used
        as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data.
  required: [name, data-chunks, coding-chunks]
  additionalProperties: false

get-erasure-profile:
  description: Display an erasure code profile.
  params:
    name:
      type: string
      description: The name of the profile
  required: [name]
  additionalProperties: false

delete-erasure-profile:
  description: Deletes an erasure code profile.
  params:
    name:
      type: string
      description: The name of the profile
  required: [name]
  additionalProperties: false

list-erasure-profiles:
  description: List the names of all erasure code profiles
  additionalProperties: false

list-pools:
  description: List your cluster's pools
  additionalProperties: false

set-pool-max-bytes:
  description: Set pool quotas for the maximum number of bytes.
  params:
    max:
      type: integer
      description: The maximum number of bytes to allow in the pool
    pool-name:
      type: string
      description: The name of the pool
  required: [pool-name, max]
  additionalProperties: false

delete-pool:
  description: Deletes the named pool
  params:
    pool-name:
      type: string
      description: The name of the pool
  required: [pool-name]
  additionalProperties: false

rename-pool:
  description: Rename a pool
  params:
    pool-name:
      type: string
      description: The name of the pool
    new-name:
      type: string
      description: The new name of the pool
  required: [pool-name, new-name]
  additionalProperties: false

pool-statistics:
  description: Show a pool's utilization statistics
  additionalProperties: false

snapshot-pool:
  description: Snapshot a pool
  params:
    pool-name:
      type: string
      description: The name of the pool
    snapshot-name:
      type: string
      description: The name of the snapshot
  required: [snapshot-name, pool-name]
  additionalProperties: false

remove-pool-snapshot:
  description: Remove a pool snapshot
  params:
    pool-name:
      type: string
      description: The name of the pool
    snapshot-name:
      type: string
      description: The name of the snapshot
  required: [snapshot-name, pool-name]
  additionalProperties: false

pool-set:
  description: Set a value for the pool
  params:
    pool-name:
      type: string
      description: The pool to set this variable on.
    key:
      type: string
      description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
    value:
      type: string
      description: The value to set
  required: [key, value, pool-name]
  additionalProperties: false

pool-get:
  description: Get a value for the pool
  params:
    pool-name:
      type: string
      description: The pool to get this variable from.
    key:
      type: string
      description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values
  required: [key, pool-name]
  additionalProperties: false
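As a usage sketch, any action defined above can be invoked against a deployed
unit. This assumes the Juju 1.x `action do` CLI and a hypothetical unit named
ceph-mon/0:

    # Sketch: drive the create-pool action from a script; parameters
    # follow the actions.yaml schema above.
    import subprocess

    subprocess.check_call([
        'juju', 'action', 'do', 'ceph-mon/0', 'create-pool',
        'name=test-pool', 'pool-type=replicated', 'replicas=3',
    ])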
actions/__init__.py (2 lines added)
@@ -1 +1,3 @@
__author__ = 'chris'
import sys
sys.path.append('hooks')
actions/ceph_ops.py (new executable file, 103 lines)
@@ -0,0 +1,103 @@
__author__ = 'chris'
from subprocess import CalledProcessError, check_output
import sys

sys.path.append('hooks')

import rados
from charmhelpers.core.hookenv import log, action_get, action_fail
from charmhelpers.contrib.storage.linux.ceph import pool_set, \
    set_pool_quota, snapshot_pool, remove_pool_snapshot


# Connect to Ceph via Librados and return a connection
def connect():
    try:
        cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
        cluster.connect()
        return cluster
    except (rados.IOError,
            rados.ObjectNotFound,
            rados.NoData,
            rados.NoSpace,
            rados.PermissionError) as rados_error:
        log("librados failed with error: {}".format(str(rados_error)))


def create_crush_rule():
    # Shell out
    pass


def list_pools():
    try:
        cluster = connect()
        pool_list = cluster.list_pools()
        cluster.shutdown()
        return pool_list
    except (rados.IOError,
            rados.ObjectNotFound,
            rados.NoData,
            rados.NoSpace,
            rados.PermissionError) as e:
        action_fail(e.message)


def pool_get():
    key = action_get("key")
    pool_name = action_get("pool-name")  # action params use dashes
    try:
        value = check_output(['ceph', 'osd', 'pool', 'get', pool_name, key])
        return value
    except CalledProcessError as e:
        action_fail(e.message)


def set_pool():
    key = action_get("key")
    value = action_get("value")
    pool_name = action_get("pool-name")  # action params use dashes
    pool_set(service='ceph', pool_name=pool_name, key=key, value=value)


def pool_stats():
    try:
        pool_name = action_get("pool-name")
        cluster = connect()
        ioctx = cluster.open_ioctx(pool_name)
        stats = ioctx.get_stats()
        ioctx.close()
        cluster.shutdown()
        return stats
    except (rados.Error,
            rados.IOError,
            rados.ObjectNotFound,
            rados.NoData,
            rados.NoSpace,
            rados.PermissionError) as e:
        action_fail(e.message)


def delete_pool_snapshot():
    pool_name = action_get("pool-name")
    snapshot_name = action_get("snapshot-name")
    remove_pool_snapshot(service='ceph',
                         pool_name=pool_name,
                         snapshot_name=snapshot_name)


# Note only one or the other can be set
def set_pool_max_bytes():
    pool_name = action_get("pool-name")
    max_bytes = action_get("max")
    set_pool_quota(service='ceph',
                   pool_name=pool_name,
                   max_bytes=max_bytes)


def snapshot_ceph_pool():
    pool_name = action_get("pool-name")
    snapshot_name = action_get("snapshot-name")
    snapshot_pool(service='ceph',
                  pool_name=pool_name,
                  snapshot_name=snapshot_name)
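A rough usage sketch for the librados helpers above, assuming it runs on a
monitor unit where /etc/ceph/ceph.conf and a client keyring are readable:

    # Sketch: exercise connect()/list_pools() interactively on a mon node.
    import sys
    sys.path.append('actions')

    import ceph_ops

    cluster = ceph_ops.connect()      # rados.Rados handle, or None on error
    if cluster is not None:
        print(cluster.get_fsid())     # cluster fsid via librados
        cluster.shutdown()
    print(ceph_ops.list_pools())      # e.g. ['rbd', 'test-pool']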
actions/create-erasure-profile (new executable file, 89 lines)
@@ -0,0 +1,89 @@
#!/usr/bin/python
from subprocess import CalledProcessError
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile
from charmhelpers.core.hookenv import action_get, log, action_fail


def make_erasure_profile():
    name = action_get("name")
    plugin = action_get("plugin")
    failure_domain = action_get("failure-domain")

    # jerasure requires k+m
    # isa requires k+m
    # local requires k+m+l
    # shec requires k+m+c

    if plugin == "jerasure":
        k = action_get("data-chunks")
        m = action_get("coding-chunks")
        try:
            create_erasure_profile(service='admin',
                                   erasure_plugin_name=plugin,
                                   profile_name=name,
                                   data_chunks=k,
                                   coding_chunks=m,
                                   failure_domain=failure_domain)
        except CalledProcessError as e:
            log(e)
            action_fail("Create erasure profile failed with "
                        "message: {}".format(e.message))
    elif plugin == "isa":
        k = action_get("data-chunks")
        m = action_get("coding-chunks")
        try:
            create_erasure_profile(service='admin',
                                   erasure_plugin_name=plugin,
                                   profile_name=name,
                                   data_chunks=k,
                                   coding_chunks=m,
                                   failure_domain=failure_domain)
        except CalledProcessError as e:
            log(e)
            action_fail("Create erasure profile failed with "
                        "message: {}".format(e.message))
    elif plugin == "local":
        k = action_get("data-chunks")
        m = action_get("coding-chunks")
        l = action_get("locality-chunks")
        try:
            create_erasure_profile(service='admin',
                                   erasure_plugin_name=plugin,
                                   profile_name=name,
                                   data_chunks=k,
                                   coding_chunks=m,
                                   locality=l,
                                   failure_domain=failure_domain)
        except CalledProcessError as e:
            log(e)
            action_fail("Create erasure profile failed with "
                        "message: {}".format(e.message))
    elif plugin == "shec":
        k = action_get("data-chunks")
        m = action_get("coding-chunks")
        c = action_get("durability-estimator")
        try:
            create_erasure_profile(service='admin',
                                   erasure_plugin_name=plugin,
                                   profile_name=name,
                                   data_chunks=k,
                                   coding_chunks=m,
                                   durability_estimator=c,
                                   failure_domain=failure_domain)
        except CalledProcessError as e:
            log(e)
            action_fail("Create erasure profile failed with "
                        "message: {}".format(e.message))
    else:
        # Unknown erasure plugin
        action_fail("Unknown erasure-plugin type of {}. "
                    "Only jerasure, isa, local or shec is "
                    "allowed".format(plugin))


if __name__ == '__main__':
    make_erasure_profile()
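The four plugin branches above differ only in one optional keyword argument.
A hypothetical table-driven consolidation (names match the charmhelpers call
used above; action_get is the import already present in this file):

    # Sketch: 'local' and 'shec' each add one plugin-specific kwarg,
    # the rest share data/coding chunks and the failure domain.
    PLUGIN_EXTRA = {
        'local': ('locality-chunks', 'locality'),
        'shec': ('durability-estimator', 'durability_estimator'),
    }

    def profile_kwargs(plugin):
        kwargs = {'service': 'admin',
                  'erasure_plugin_name': plugin,
                  'profile_name': action_get('name'),
                  'data_chunks': action_get('data-chunks'),
                  'coding_chunks': action_get('coding-chunks'),
                  'failure_domain': action_get('failure-domain')}
        if plugin in PLUGIN_EXTRA:
            action_param, kwarg = PLUGIN_EXTRA[plugin]
            kwargs[kwarg] = action_get(action_param)
        return kwargs  # then: create_erasure_profile(**profile_kwargs(plugin))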
actions/create-pool (new executable file, 38 lines)
@@ -0,0 +1,38 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool


def create_pool():
    pool_name = action_get("name")
    pool_type = action_get("pool-type")
    try:
        if pool_type == "replicated":
            replicas = action_get("replicas")
            replicated_pool = ReplicatedPool(name=pool_name,
                                             service='admin',
                                             replicas=replicas)
            replicated_pool.create()

        elif pool_type == "erasure":
            crush_profile_name = action_get("erasure-profile-name")
            erasure_pool = ErasurePool(name=pool_name,
                                       erasure_code_profile=crush_profile_name,
                                       service='admin')
            erasure_pool.create()
        else:
            log("Unknown pool type of {}. Only erasure or replicated is "
                "allowed".format(pool_type))
            action_fail("Unknown pool type of {}. Only erasure or replicated "
                        "is allowed".format(pool_type))
    except CalledProcessError as e:
        action_fail("Pool creation failed because of a failed process. "
                    "Ret Code: {} Message: {}".format(e.returncode, e.message))


if __name__ == '__main__':
    create_pool()
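For reference, the charmhelpers pool objects used above can also be driven
directly; a minimal sketch with an assumed pool name:

    # Sketch: direct use of the charmhelpers API behind this action.
    from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

    pool = ReplicatedPool(name='test-pool', service='admin', replicas=3)
    pool.create()  # shells out to 'ceph --id admin osd pool create ...'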
actions/delete-erasure-profile (new executable file, 24 lines)
@@ -0,0 +1,24 @@
#!/usr/bin/python
from subprocess import CalledProcessError

__author__ = 'chris'
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile
from charmhelpers.core.hookenv import action_get, log, action_fail


def delete_erasure_profile():
    name = action_get("name")

    try:
        remove_erasure_profile(service='admin', profile_name=name)
    except CalledProcessError as e:
        action_fail("Remove erasure profile failed with error: {}".format(
            e.message))


if __name__ == '__main__':
    delete_erasure_profile()
actions/delete-pool (new executable file, 28 lines)
@@ -0,0 +1,28 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')

import rados
from ceph_ops import connect
from charmhelpers.core.hookenv import action_get, log, action_fail


def remove_pool():
    try:
        pool_name = action_get("name")
        cluster = connect()
        log("Deleting pool: {}".format(pool_name))
        cluster.delete_pool(str(pool_name))  # Convert from unicode
        cluster.shutdown()
    except (rados.IOError,
            rados.ObjectNotFound,
            rados.NoData,
            rados.NoSpace,
            rados.PermissionError) as e:
        log(e)
        action_fail(e)


if __name__ == '__main__':
    remove_pool()
actions/get-erasure-profile (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/python
__author__ = 'chris'
import sys

sys.path.append('hooks')

from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile
from charmhelpers.core.hookenv import action_get, action_set


def make_erasure_profile():
    name = action_get("name")
    out = get_erasure_profile(service='admin', name=name)
    action_set({'message': out})


if __name__ == '__main__':
    make_erasure_profile()
actions/list-erasure-profiles (new executable file, 22 lines)
@@ -0,0 +1,22 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
from subprocess import check_output, CalledProcessError

sys.path.append('hooks')

from charmhelpers.core.hookenv import action_get, log, action_set, action_fail

if __name__ == '__main__':
    name = action_get("name")
    try:
        out = check_output(['ceph',
                            '--id', 'admin',
                            'osd',
                            'erasure-code-profile',
                            'ls']).decode('UTF-8')
        action_set({'message': out})
    except CalledProcessError as e:
        log(e)
        action_fail("Listing erasure profiles failed with error: {}".format(
            e.message))
actions/list-pools (new executable file, 17 lines)
@@ -0,0 +1,17 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
from subprocess import check_output, CalledProcessError

sys.path.append('hooks')

from charmhelpers.core.hookenv import log, action_set, action_fail

if __name__ == '__main__':
    try:
        out = check_output(['ceph', '--id', 'admin',
                            'osd', 'lspools']).decode('UTF-8')
        action_set({'message': out})
    except CalledProcessError as e:
        log(e)
        action_fail("List pools failed with error: {}".format(e.message))
actions/pool-get (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/usr/bin/python
__author__ = 'chris'
import sys
from subprocess import check_output, CalledProcessError

sys.path.append('hooks')

from charmhelpers.core.hookenv import log, action_set, action_get, action_fail

if __name__ == '__main__':
    name = action_get('pool-name')
    key = action_get('key')
    try:
        out = check_output(['ceph', '--id', 'admin',
                            'osd', 'pool', 'get', name, key]).decode('UTF-8')
        action_set({'message': out})
    except CalledProcessError as e:
        log(e)
        action_fail("Pool get failed with message: {}".format(e.message))
actions/pool-set (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/usr/bin/python
from subprocess import CalledProcessError
import sys

sys.path.append('hooks')

from charmhelpers.core.hookenv import action_get, log, action_fail
from ceph_broker import handle_set_pool_value

if __name__ == '__main__':
    name = action_get("pool-name")
    key = action_get("key")
    value = action_get("value")
    request = {'name': name,
               'key': key,
               'value': value}

    try:
        handle_set_pool_value(service='admin', request=request)
    except CalledProcessError as e:
        log(e.message)
        action_fail("Setting pool key: {} and value: {} failed with "
                    "message: {}".format(key, value, e.message))
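handle_set_pool_value (defined in hooks/ceph_broker.py below) validates the
key against POOL_KEYS and then issues the equivalent of this ceph CLI call;
a sketch with assumed pool/key values:

    # Sketch: the CLI equivalent of the broker call made by this action.
    from subprocess import check_call

    check_call(['ceph', '--id', 'admin',
                'osd', 'pool', 'set', 'test-pool', 'min_size', '2'])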
actions/pool-statistics (new executable file, 15 lines)
@@ -0,0 +1,15 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')
from subprocess import check_output, CalledProcessError
from charmhelpers.core.hookenv import log, action_set, action_fail

if __name__ == '__main__':
    try:
        out = check_output(['ceph', '--id', 'admin',
                            'df']).decode('UTF-8')
        action_set({'message': out})
    except CalledProcessError as e:
        log(e)
        action_fail("ceph df failed with message: {}".format(e.message))
actions/remove-pool-snapshot (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot

if __name__ == '__main__':
    name = action_get("pool-name")
    snapname = action_get("snapshot-name")
    try:
        remove_pool_snapshot(service='admin',
                             pool_name=name,
                             snapshot_name=snapname)
    except CalledProcessError as e:
        log(e)
        action_fail("Remove pool snapshot failed with message: {}".format(
            e.message))
actions/rename-pool (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import rename_pool

if __name__ == '__main__':
    name = action_get("pool-name")
    new_name = action_get("new-name")
    try:
        rename_pool(service='admin', old_name=name, new_name=new_name)
    except CalledProcessError as e:
        log(e)
        action_fail("Renaming pool failed with message: {}".format(e.message))
actions/set-pool-max-bytes (new executable file, 16 lines)
@@ -0,0 +1,16 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import set_pool_quota

if __name__ == '__main__':
    max_bytes = action_get("max")
    name = action_get("pool-name")
    try:
        set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes)
    except CalledProcessError as e:
        log(e)
        action_fail("Set pool quota failed with message: {}".format(e.message))
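set_pool_quota from charmhelpers wraps Ceph's pool quota command; roughly
equivalent to the following sketch (assumed pool name, 10 GiB cap):

    # Sketch: CLI equivalent of set_pool_quota(); max_bytes 0 clears a quota.
    from subprocess import check_call

    check_call(['ceph', '--id', 'admin', 'osd', 'pool', 'set-quota',
                'test-pool', 'max_bytes', str(10 * 1024 ** 3)])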
actions/snapshot-pool (new executable file, 18 lines)
@@ -0,0 +1,18 @@
#!/usr/bin/python
import sys

sys.path.append('hooks')
from subprocess import CalledProcessError
from charmhelpers.core.hookenv import action_get, log, action_fail
from charmhelpers.contrib.storage.linux.ceph import snapshot_pool

if __name__ == '__main__':
    name = action_get("pool-name")
    snapname = action_get("snapshot-name")
    try:
        snapshot_pool(service='admin',
                      pool_name=name,
                      snapshot_name=snapname)
    except CalledProcessError as e:
        log(e)
        action_fail("Snapshot pool failed with message: {}".format(e.message))
config.yaml (4 lines added)
@@ -121,3 +121,7 @@ options:
    description: |
      A comma-separated list of nagios servicegroups.
      If left empty, the nagios_context will be used as the servicegroup
  use-direct-io:
    default: True
    type: boolean
    description: Configure use of direct IO for OSD journals.
hooks/ceph_broker.py
@@ -1,24 +1,71 @@
#!/usr/bin/python
#
# Copyright 2015 Canonical Ltd.
#
import json

from charmhelpers.contrib.storage.linux.ceph import validator, \
    erasure_profile_exists, ErasurePool, set_pool_quota, \
    pool_set, snapshot_pool, remove_pool_snapshot, create_erasure_profile, \
    ReplicatedPool, rename_pool, Pool, get_osds, pool_exists, delete_pool

from charmhelpers.core.hookenv import (
    log,
    DEBUG,
    INFO,
    ERROR,
)

# This comes from http://docs.ceph.com/docs/master/rados/operations/pools/
# This should do a decent job of preventing people from passing in bad values.
# It will give a useful error message
POOL_KEYS = {
    # "Ceph Key Name": [Python type, [Valid Range]]
    "size": [int],
    "min_size": [int],
    "crash_replay_interval": [int],
    "pgp_num": [int],  # = or < pg_num
    "crush_ruleset": [int],
    "hashpspool": [bool],
    "nodelete": [bool],
    "nopgchange": [bool],
    "nosizechange": [bool],
    "write_fadvise_dontneed": [bool],
    "noscrub": [bool],
    "nodeep-scrub": [bool],
    "hit_set_type": [basestring, ["bloom", "explicit_hash",
                                  "explicit_object"]],
    "hit_set_count": [int, [1, 1]],
    "hit_set_period": [int],
    "hit_set_fpp": [float, [0.0, 1.0]],
    "cache_target_dirty_ratio": [float],
    "cache_target_dirty_high_ratio": [float],
    "cache_target_full_ratio": [float],
    "target_max_bytes": [int],
    "target_max_objects": [int],
    "cache_min_flush_age": [int],
    "cache_min_evict_age": [int],
    "fast_read": [bool],
}

CEPH_BUCKET_TYPES = [
    'osd',
    'host',
    'chassis',
    'rack',
    'row',
    'pdu',
    'pod',
    'room',
    'datacenter',
    'region',
    'root'
]


def decode_req_encode_rsp(f):
    """Decorator to decode incoming requests and encode responses."""

    def decode_inner(req):
        return json.dumps(f(json.loads(req)))

@@ -42,15 +89,14 @@ def process_requests(reqs):
                resp['request-id'] = request_id

            return resp

        except Exception as exc:
            log(str(exc), level=ERROR)
            msg = ("Unexpected error occurred while processing requests: %s" %
                   reqs)
            log(msg, level=ERROR)
            return {'exit-code': 1, 'stderr': msg}

    msg = ("Missing or invalid api version (%s)" % version)
    resp = {'exit-code': 1, 'stderr': msg}
    if request_id:
        resp['request-id'] = request_id

@@ -58,6 +104,156 @@ def process_requests(reqs):
    return resp


def handle_create_erasure_profile(request, service):
    # "local" | "shec" or it defaults to "jerasure"
    erasure_type = request.get('erasure-type')
    # "host" | "rack" or it defaults to "host"  # Any valid Ceph bucket
    failure_domain = request.get('failure-domain')
    name = request.get('name')
    k = request.get('k')
    m = request.get('m')
    l = request.get('l')

    if failure_domain not in CEPH_BUCKET_TYPES:
        msg = "failure-domain must be one of {}".format(CEPH_BUCKET_TYPES)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    create_erasure_profile(service=service, erasure_plugin_name=erasure_type,
                           profile_name=name, failure_domain=failure_domain,
                           data_chunks=k, coding_chunks=m, locality=l)


def handle_erasure_pool(request, service):
    pool_name = request.get('name')
    erasure_profile = request.get('erasure-profile')
    quota = request.get('max-bytes')

    if erasure_profile is None:
        erasure_profile = "default-canonical"

    # Check for missing params
    if pool_name is None:
        msg = "Missing parameter. name is required for the pool"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    # TODO: Default to 3/2 erasure coding. I believe this requires min 5 osds
    if not erasure_profile_exists(service=service, name=erasure_profile):
        # TODO: Fail and tell them to create the profile or default
        msg = "erasure-profile {} does not exist. Please create it with: " \
              "create-erasure-profile".format(erasure_profile)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    pool = ErasurePool(service=service, name=pool_name,
                       erasure_code_profile=erasure_profile)
    # Ok make the erasure pool
    if not pool_exists(service=service, name=pool_name):
        log("Creating pool '%s' (erasure_profile=%s)" % (pool,
                                                         erasure_profile),
            level=INFO)
        pool.create()

    # Set a quota if requested
    if quota is not None:
        set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)


def handle_replicated_pool(request, service):
    pool_name = request.get('name')
    replicas = request.get('replicas')
    quota = request.get('max-bytes')

    # Optional params
    pg_num = request.get('pg_num')
    if pg_num:
        # Cap pg_num to max allowed just in case.
        osds = get_osds(service)
        if osds:
            pg_num = min(pg_num, (len(osds) * 100 // replicas))

    # Check for missing params
    if pool_name is None or replicas is None:
        msg = "Missing parameter. name and replicas are required"
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    pool = ReplicatedPool(service=service,
                          name=pool_name,
                          replicas=replicas,
                          pg_num=pg_num)
    if not pool_exists(service=service, name=pool_name):
        log("Creating pool '%s' (replicas=%s)" % (pool, replicas),
            level=INFO)
        pool.create()
    else:
        log("Pool '%s' already exists - skipping create" % pool,
            level=DEBUG)

    # Set a quota if requested
    if quota is not None:
        set_pool_quota(service=service, pool_name=pool_name, max_bytes=quota)


def handle_create_cache_tier(request, service):
    # mode = "writeback" | "readonly"
    storage_pool = request.get('cold-pool')
    cache_pool = request.get('hot-pool')
    cache_mode = request.get('mode')

    if cache_mode is None:
        cache_mode = "writeback"

    # cache and storage pool must exist first
    if not pool_exists(service=service, name=storage_pool) or not pool_exists(
            service=service, name=cache_pool):
        msg = "cold-pool: {} and hot-pool: {} must exist. Please create " \
              "them first".format(storage_pool, cache_pool)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    p = Pool(service=service, name=storage_pool)
    p.add_cache_tier(cache_pool=cache_pool, mode=cache_mode)


def handle_remove_cache_tier(request, service):
    storage_pool = request.get('cold-pool')
    cache_pool = request.get('hot-pool')
    # cache and storage pool must exist first
    if not pool_exists(service=service, name=storage_pool) or not pool_exists(
            service=service, name=cache_pool):
        msg = "cold-pool: {} or hot-pool: {} doesn't exist. Not " \
              "deleting cache tier".format(storage_pool, cache_pool)
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    pool = Pool(name=storage_pool, service=service)
    pool.remove_cache_tier(cache_pool=cache_pool)


def handle_set_pool_value(request, service):
    # Set arbitrary pool values
    params = {'pool': request.get('name'),
              'key': request.get('key'),
              'value': request.get('value')}
    if params['key'] not in POOL_KEYS:
        msg = "Invalid key '%s'" % params['key']
        log(msg, level=ERROR)
        return {'exit-code': 1, 'stderr': msg}

    # Get the validation method
    validator_params = POOL_KEYS[params['key']]
    if len(validator_params) == 1:
        # Validate that what the user passed is actually legal per Ceph's rules
        validator(params['value'], validator_params[0])
    else:
        # Validate that what the user passed is actually legal per Ceph's rules
        validator(params['value'], validator_params[0], validator_params[1])
    # Set the value
    pool_set(service=service, pool_name=params['pool'], key=params['key'],
             value=params['value'])


def process_requests_v1(reqs):
    """Process v1 requests.

@@ -70,45 +266,45 @@ def process_requests_v1(reqs):
    log("Processing %s ceph broker requests" % (len(reqs)), level=INFO)
    for req in reqs:
        op = req.get('op')
        log("Processing op='%s'" % op, level=DEBUG)
        # Use admin client since we do not have other client key locations
        # setup to use them for these operations.
        svc = 'admin'
        if op == "create-pool":
            pool_type = req.get('pool-type')  # "replicated" | "erasure"

            # Default to replicated if pool_type isn't given
            if pool_type == 'erasure':
                handle_erasure_pool(request=req, service=svc)
            else:
                handle_replicated_pool(request=req, service=svc)
        elif op == "create-cache-tier":
            handle_create_cache_tier(request=req, service=svc)
        elif op == "remove-cache-tier":
            handle_remove_cache_tier(request=req, service=svc)
        elif op == "create-erasure-profile":
            handle_create_erasure_profile(request=req, service=svc)
        elif op == "delete-pool":
            pool = req.get('name')
            delete_pool(service=svc, name=pool)
        elif op == "rename-pool":
            old_name = req.get('name')
            new_name = req.get('new-name')
            rename_pool(service=svc, old_name=old_name, new_name=new_name)
        elif op == "snapshot-pool":
            pool = req.get('name')
            snapshot_name = req.get('snapshot-name')
            snapshot_pool(service=svc, pool_name=pool,
                          snapshot_name=snapshot_name)
        elif op == "remove-pool-snapshot":
            pool = req.get('name')
            snapshot_name = req.get('snapshot-name')
            remove_pool_snapshot(service=svc, pool_name=pool,
                                 snapshot_name=snapshot_name)
        elif op == "set-pool-value":
            handle_set_pool_value(request=req, service=svc)
        else:
            msg = "Unknown operation '%s'" % op
            log(msg, level=ERROR)
            return {'exit-code': 1, 'stderr': msg}
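An end-to-end sketch of the broker protocol implemented above, mirroring the
unit tests in unit_tests/test_ceph_broker.py below (the request-id value is
an arbitrary example):

    # Sketch: one v1 request round-trip through process_requests().
    import json
    import ceph_broker

    req = json.dumps({'api-version': 1,
                      'request-id': '1ef5aede',
                      'ops': [{'op': 'create-pool',
                               'name': 'foo',
                               'pool-type': 'replicated',
                               'replicas': 3}]})
    rsp = json.loads(ceph_broker.process_requests(req))
    assert rsp['exit-code'] == 0 and rsp['request-id'] == '1ef5aede'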
hooks/ceph_hooks.py (2 lines changed)
@@ -54,7 +54,7 @@ from charmhelpers.payload.execd import execd_preinstall
from charmhelpers.contrib.openstack.alternatives import install_alternative
from charmhelpers.contrib.network.ip import (
    get_ipv6_addr,
    format_ipv6_addr,
)
from charmhelpers.core.sysctl import create as create_sysctl
from charmhelpers.core.templating import render

@@ -294,6 +294,7 @@ def emit_cephconf():
        'ceph_public_network': public_network,
        'ceph_cluster_network': cluster_network,
        'loglevel': config('loglevel'),
        'dio': str(config('use-direct-io')).lower(),
    }

    if config('prefer-ipv6'):
templates/ceph.conf (1 line changed)
@@ -36,4 +36,3 @@ keyring = /var/lib/ceph/mon/$cluster-$id/keyring

[mds]
keyring = /var/lib/ceph/mds/$cluster-$id/keyring
tests/018-basic-trusty-liberty (0 lines changed, mode: Normal file → Executable file)
tests/019-basic-trusty-mitaka (0 lines changed, mode: Normal file → Executable file)
tests/020-basic-wily-liberty (0 lines changed, mode: Normal file → Executable file)
tests/021-basic-xenial-mitaka (0 lines changed, mode: Normal file → Executable file)
tests/basic_deployment.py
@@ -3,6 +3,7 @@
import amulet
import re
import time

from charmhelpers.contrib.openstack.amulet.deployment import (
    OpenStackAmuletDeployment
)

@@ -30,6 +31,8 @@ class CephBasicDeployment(OpenStackAmuletDeployment):

        u.log.info('Waiting on extended status checks...')
        exclude_services = ['mysql']

        # Wait for deployment ready msgs, except exclusions
        self._auto_wait_for_status(exclude_services=exclude_services)

        self._initialize_tests()

@@ -79,6 +82,9 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
                           'admin-token': 'ubuntutesting'}
        mysql_config = {'dataset-size': '50%'}
        cinder_config = {'block-device': 'None', 'glance-api-version': '2'}

        # Include a non-existent device as osd-devices is a whitelist,
        # and this will catch cases where proposals attempt to change that.
        ceph_config = {
            'monitor-count': '3',
            'auth-supported': 'none',

@@ -198,7 +204,6 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
            self.cinder_sentry: ['cinder-api',
                                 'cinder-scheduler',
                                 'cinder-volume'],
        }

        if self._get_openstack_release() < self.vivid_kilo:

@@ -212,6 +217,13 @@ class CephBasicDeployment(OpenStackAmuletDeployment):
        services[self.ceph1_sentry] = ceph_services
        services[self.ceph2_sentry] = ceph_services

        ceph_osd_services = [
            'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(0)),
            'ceph-osd id={}'.format(u.get_ceph_osd_id_cmd(1))
        ]

        services[self.ceph_osd_sentry] = ceph_osd_services

        ret = u.validate_services_by_name(services)
        if ret:
            amulet.raise_status(amulet.FAIL, msg=ret)
tests/tests.yaml (1 line added)
@@ -19,3 +19,4 @@ packages:
  - python-novaclient
  - python-pika
  - python-swiftclient
  - python-nose
@ -1,12 +1,12 @@
|
||||
import json
|
||||
import mock
|
||||
import unittest
|
||||
|
||||
import mock
|
||||
|
||||
import ceph_broker
|
||||
|
||||
|
||||
class CephBrokerTestCase(unittest.TestCase):
|
||||
|
||||
def setUp(self):
|
||||
super(CephBrokerTestCase, self).setUp()
|
||||
|
||||
@ -20,15 +20,15 @@ class CephBrokerTestCase(unittest.TestCase):
|
||||
def test_process_requests_missing_api_version(self, mock_log):
|
||||
req = json.dumps({'ops': []})
|
||||
rc = ceph_broker.process_requests(req)
|
||||
self.assertEqual(json.loads(rc), {'exit-code': 1,
|
||||
'stderr':
|
||||
('Missing or invalid api version '
|
||||
'(None)')})
|
||||
self.assertEqual(json.loads(rc), {
|
||||
'exit-code': 1,
|
||||
'stderr': 'Missing or invalid api version (None)'})
|
||||
|
||||
@mock.patch('ceph_broker.log')
|
||||
def test_process_requests_invalid_api_version(self, mock_log):
|
||||
req = json.dumps({'api-version': 2, 'ops': []})
|
||||
rc = ceph_broker.process_requests(req)
|
||||
print "Return: %s" % rc
|
||||
self.assertEqual(json.loads(rc),
|
||||
{'exit-code': 1,
|
||||
'stderr': 'Missing or invalid api version (2)'})
|
||||
@ -41,90 +41,88 @@ class CephBrokerTestCase(unittest.TestCase):
|
||||
{'exit-code': 1,
|
||||
'stderr': "Unknown operation 'invalid_op'"})
|
||||
|
||||
@mock.patch('ceph_broker.create_pool')
|
||||
@mock.patch('ceph_broker.pool_exists')
|
||||
@mock.patch('ceph_broker.log')
|
||||
def test_process_requests_create_pool(self, mock_log, mock_pool_exists,
|
||||
mock_create_pool):
|
||||
mock_pool_exists.return_value = False
|
||||
reqs = json.dumps({'api-version': 1,
|
||||
'ops': [{'op': 'create-pool', 'name':
|
||||
'foo', 'replicas': 3}]})
|
||||
rc = ceph_broker.process_requests(reqs)
|
||||
mock_pool_exists.assert_called_with(service='admin', name='foo')
|
||||
mock_create_pool.assert_called_with(service='admin', name='foo',
|
||||
replicas=3, pg_num=None)
|
||||
self.assertEqual(json.loads(rc), {'exit-code': 0})
|
||||
|
||||
@mock.patch('ceph_broker.get_osds')
|
||||
@mock.patch('ceph_broker.create_pool')
|
||||
@mock.patch('ceph_broker.ReplicatedPool')
|
||||
@mock.patch('ceph_broker.pool_exists')
|
||||
@mock.patch('ceph_broker.log')
|
||||
def test_process_requests_create_pool_w_pg_num(self, mock_log,
|
||||
mock_pool_exists,
|
||||
mock_create_pool,
|
||||
mock_replicated_pool,
|
||||
mock_get_osds):
|
||||
mock_get_osds.return_value = [0, 1, 2]
|
||||
mock_pool_exists.return_value = False
|
||||
reqs = json.dumps({'api-version': 1,
|
||||
'ops': [{'op': 'create-pool', 'name':
|
||||
'foo', 'replicas': 3,
|
||||
'pg_num': 100}]})
|
||||
'ops': [{
|
||||
'op': 'create-pool',
|
||||
'name': 'foo',
|
||||
'replicas': 3,
|
||||
'pg_num': 100}]})
|
||||
rc = ceph_broker.process_requests(reqs)
|
||||
mock_pool_exists.assert_called_with(service='admin', name='foo')
|
||||
mock_create_pool.assert_called_with(service='admin', name='foo',
|
||||
replicas=3, pg_num='100')
|
||||
mock_replicated_pool.assert_called_with(service='admin', name='foo',
|
||||
replicas=3, pg_num=100)
|
||||
self.assertEqual(json.loads(rc), {'exit-code': 0})
|
||||
|
||||
     @mock.patch('ceph_broker.get_osds')
-    @mock.patch('ceph_broker.create_pool')
+    @mock.patch('ceph_broker.ReplicatedPool')
     @mock.patch('ceph_broker.pool_exists')
     @mock.patch('ceph_broker.log')
     def test_process_requests_create_pool_w_pg_num_capped(self, mock_log,
                                                           mock_pool_exists,
-                                                          mock_create_pool,
+                                                          mock_replicated_pool,
                                                           mock_get_osds):
         mock_get_osds.return_value = [0, 1, 2]
         mock_pool_exists.return_value = False
         reqs = json.dumps({'api-version': 1,
-                           'ops': [{'op': 'create-pool', 'name':
-                                    'foo', 'replicas': 3,
-                                    'pg_num': 300}]})
+                           'ops': [{
+                               'op': 'create-pool',
+                               'name': 'foo',
+                               'replicas': 3,
+                               'pg_num': 300}]})
         rc = ceph_broker.process_requests(reqs)
-        mock_pool_exists.assert_called_with(service='admin', name='foo')
-        mock_create_pool.assert_called_with(service='admin', name='foo',
-                                            replicas=3, pg_num='100')
+        mock_pool_exists.assert_called_with(service='admin',
+                                            name='foo')
+        mock_replicated_pool.assert_called_with(service='admin', name='foo',
+                                                replicas=3, pg_num=100)
         self.assertEqual(json.loads(rc), {'exit-code': 0})

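Worked example of what the capped test asserts: with three OSDs and replica count 3, a request for 300 placement groups comes back as 100. One formula consistent with that result, assuming a per-OSD target of roughly 100 PGs (an inference from the assertion, not code lifted from the charm):

    def cap_pg_num(requested, num_osds, replicas, per_osd_target=100):
        # hedged reconstruction: clamp the request to what the cluster
        # can reasonably host given OSD count and replication factor
        max_pgs = num_osds * per_osd_target // replicas
        return min(requested, max_pgs)

    assert cap_pg_num(300, num_osds=3, replicas=3) == 100  # the capped case
    assert cap_pg_num(100, num_osds=3, replicas=3) == 100  # under the cap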
-    @mock.patch('ceph_broker.create_pool')
+    @mock.patch('ceph_broker.ReplicatedPool')
     @mock.patch('ceph_broker.pool_exists')
     @mock.patch('ceph_broker.log')
     def test_process_requests_create_pool_exists(self, mock_log,
                                                  mock_pool_exists,
-                                                 mock_create_pool):
+                                                 mock_replicated_pool):
         mock_pool_exists.return_value = True
         reqs = json.dumps({'api-version': 1,
-                           'ops': [{'op': 'create-pool', 'name': 'foo',
+                           'ops': [{'op': 'create-pool',
+                                    'name': 'foo',
                                     'replicas': 3}]})
         rc = ceph_broker.process_requests(reqs)
-        mock_pool_exists.assert_called_with(service='admin', name='foo')
-        self.assertFalse(mock_create_pool.called)
+        mock_pool_exists.assert_called_with(service='admin',
+                                            name='foo')
+        self.assertFalse(mock_replicated_pool.create.called)
         self.assertEqual(json.loads(rc), {'exit-code': 0})

-    @mock.patch('ceph_broker.create_pool')
+    @mock.patch('ceph_broker.ReplicatedPool')
     @mock.patch('ceph_broker.pool_exists')
     @mock.patch('ceph_broker.log')
-    def test_process_requests_create_pool_rid(self, mock_log, mock_pool_exists,
-                                              mock_create_pool):
+    def test_process_requests_create_pool_rid(self, mock_log,
+                                              mock_pool_exists,
+                                              mock_replicated_pool):
         mock_pool_exists.return_value = False
         reqs = json.dumps({'api-version': 1,
                            'request-id': '1ef5aede',
-                           'ops': [{'op': 'create-pool', 'name':
-                                    'foo', 'replicas': 3}]})
+                           'ops': [{
+                               'op': 'create-pool',
+                               'name': 'foo',
+                               'replicas': 3}]})
         rc = ceph_broker.process_requests(reqs)
         mock_pool_exists.assert_called_with(service='admin', name='foo')
-        mock_create_pool.assert_called_with(service='admin', name='foo',
-                                            replicas=3, pg_num=None)
+        mock_replicated_pool.assert_called_with(service='admin',
+                                                name='foo',
+                                                pg_num=None,
+                                                replicas=3)
         self.assertEqual(json.loads(rc)['exit-code'], 0)
         self.assertEqual(json.loads(rc)['request-id'], '1ef5aede')

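The rid test pins down the request-id round-trip: whatever id the client sends is echoed back in the response so asynchronous replies can be matched to their requests. A hedged standalone sketch of that behaviour, not the charm's implementation:

    import json

    def handle(reqs):
        # echo the caller's request-id, when present, alongside the result
        request = json.loads(reqs)
        resp = {'exit-code': 0}
        if 'request-id' in request:
            resp['request-id'] = request['request-id']
        return json.dumps(resp)

    out = json.loads(handle(json.dumps({'api-version': 1,
                                        'request-id': '1ef5aede',
                                        'ops': []})))
    assert out['request-id'] == '1ef5aede'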
unit_tests/test_ceph_ops.py (new file, 217 lines)
@@ -0,0 +1,217 @@
__author__ = 'chris'

import json
from hooks import ceph_broker

import mock
import unittest


class TestCephOps(unittest.TestCase):
    """
    @mock.patch('ceph_broker.log')
    def test_connect(self, mock_broker):
        self.fail()
    """

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.create_erasure_profile')
    def test_create_erasure_profile(self, mock_create_erasure, mock_log):
        req = json.dumps({'api-version': 1,
                          'ops': [{
                              'op': 'create-erasure-profile',
                              'name': 'foo',
                              'erasure-type': 'jerasure',
                              'failure-domain': 'rack',
                              'k': 3,
                              'm': 2,
                          }]})
        rc = ceph_broker.process_requests(req)
        mock_create_erasure.assert_called_with(service='admin',
                                               profile_name='foo',
                                               coding_chunks=2,
                                               data_chunks=3,
                                               locality=None,
                                               failure_domain='rack',
                                               erasure_plugin_name='jerasure')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

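The assertion above fixes the mapping from the wire keys to the helper's keyword names: k is the number of data chunks and m the number of coding (parity) chunks, so k=3, m=2 survives the loss of any two chunks. An illustrative restatement of that translation (keyword names taken from the assertion; the dict plumbing is an assumption):

    op = {'op': 'create-erasure-profile', 'name': 'foo',
          'erasure-type': 'jerasure', 'failure-domain': 'rack',
          'k': 3, 'm': 2}
    kwargs = {'service': 'admin',
              'profile_name': op['name'],
              'data_chunks': op['k'],           # k: data chunks
              'coding_chunks': op['m'],         # m: parity chunks
              'locality': op.get('l'),          # not set in this request
              'failure_domain': op['failure-domain'],
              'erasure_plugin_name': op['erasure-type']}
    assert kwargs['data_chunks'] == 3 and kwargs['coding_chunks'] == 2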
    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.pool_exists')
    @mock.patch('hooks.ceph_broker.ReplicatedPool.create')
    def test_process_requests_create_replicated_pool(self,
                                                     mock_replicated_pool,
                                                     mock_pool_exists,
                                                     mock_log):
        mock_pool_exists.return_value = False
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'create-pool',
                               'pool-type': 'replicated',
                               'name': 'foo',
                               'replicas': 3
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_pool_exists.assert_called_with(service='admin', name='foo')
        mock_replicated_pool.assert_called_with()
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.delete_pool')
    def test_process_requests_delete_pool(self,
                                          mock_delete_pool,
                                          mock_log):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'delete-pool',
                               'name': 'foo',
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_delete_pool.assert_called_with(service='admin', name='foo')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.pool_exists')
    @mock.patch('hooks.ceph_broker.ErasurePool.create')
    @mock.patch('hooks.ceph_broker.erasure_profile_exists')
    def test_process_requests_create_erasure_pool(self, mock_profile_exists,
                                                  mock_erasure_pool,
                                                  mock_pool_exists,
                                                  mock_log):
        mock_pool_exists.return_value = False
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'create-pool',
                               'pool-type': 'erasure',
                               'name': 'foo',
                               'erasure-profile': 'default'
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_profile_exists.assert_called_with(service='admin', name='default')
        mock_pool_exists.assert_called_with(service='admin', name='foo')
        mock_erasure_pool.assert_called_with()
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.pool_exists')
    @mock.patch('hooks.ceph_broker.Pool.add_cache_tier')
    def test_process_requests_create_cache_tier(self, mock_pool,
                                                mock_pool_exists, mock_log):
        mock_pool_exists.return_value = True
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'create-cache-tier',
                               'cold-pool': 'foo',
                               'hot-pool': 'foo-ssd',
                               'mode': 'writeback',
                               'erasure-profile': 'default'
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_pool_exists.assert_any_call(service='admin', name='foo')
        mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')

        mock_pool.assert_called_with(cache_pool='foo-ssd', mode='writeback')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.pool_exists')
    @mock.patch('hooks.ceph_broker.Pool.remove_cache_tier')
    def test_process_requests_remove_cache_tier(self, mock_pool,
                                                mock_pool_exists, mock_log):
        mock_pool_exists.return_value = True
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'remove-cache-tier',
                               'hot-pool': 'foo-ssd',
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_pool_exists.assert_any_call(service='admin', name='foo-ssd')

        mock_pool.assert_called_with(cache_pool='foo-ssd')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

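For orientation, a hedged sketch of the kind of Ceph CLI sequence a cache-tier attach typically reduces to; the charm's Pool.add_cache_tier may differ in detail, and this is not its implementation:

    import subprocess

    def create_cache_tier(cold_pool, hot_pool, mode='writeback'):
        # attach the hot pool to the cold pool, set its caching mode,
        # then route client traffic through the hot pool
        subprocess.check_call(['ceph', 'osd', 'tier', 'add',
                               cold_pool, hot_pool])
        subprocess.check_call(['ceph', 'osd', 'tier', 'cache-mode',
                               hot_pool, mode])
        subprocess.check_call(['ceph', 'osd', 'tier', 'set-overlay',
                               cold_pool, hot_pool])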
    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.snapshot_pool')
    def test_snapshot_pool(self, mock_snapshot_pool, mock_log):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'snapshot-pool',
                               'name': 'foo',
                               'snapshot-name': 'foo-snap1',
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_snapshot_pool.return_value = 1
        mock_snapshot_pool.assert_called_with(service='admin',
                                              pool_name='foo',
                                              snapshot_name='foo-snap1')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.rename_pool')
    def test_rename_pool(self, mock_rename_pool, mock_log):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'rename-pool',
                               'name': 'foo',
                               'new-name': 'foo2',
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_rename_pool.assert_called_with(service='admin',
                                            old_name='foo',
                                            new_name='foo2')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.remove_pool_snapshot')
    def test_remove_pool_snapshot(self, mock_snapshot_pool, mock_broker):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'remove-pool-snapshot',
                               'name': 'foo',
                               'snapshot-name': 'foo-snap1',
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_snapshot_pool.assert_called_with(service='admin',
                                              pool_name='foo',
                                              snapshot_name='foo-snap1')
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    @mock.patch('hooks.ceph_broker.pool_set')
    def test_set_pool_value(self, mock_set_pool, mock_broker):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'set-pool-value',
                               'name': 'foo',
                               'key': 'size',
                               'value': 3,
                           }]})
        rc = ceph_broker.process_requests(reqs)
        mock_set_pool.assert_called_with(service='admin',
                                         pool_name='foo',
                                         key='size',
                                         value=3)
        self.assertEqual(json.loads(rc), {'exit-code': 0})

    @mock.patch('ceph_broker.log')
    def test_set_invalid_pool_value(self, mock_broker):
        reqs = json.dumps({'api-version': 1,
                           'ops': [{
                               'op': 'set-pool-value',
                               'name': 'foo',
                               'key': 'size',
                               'value': 'abc',
                           }]})
        rc = ceph_broker.process_requests(reqs)
        # self.assertRaises(AssertionError)
        self.assertEqual(json.loads(rc)['exit-code'], 1)

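The invalid-value test implies the broker type-checks pool settings such as 'size' before touching the cluster: a non-integer value yields exit-code 1 without any pool_set call. A hedged sketch of that sort of gate; the validation table and function are assumptions for illustration, not the charm's code:

    POOL_KEYS = {'size': int}  # assumed validation table

    def validate_pool_value(key, value):
        # return None when the value is acceptable, else an error response
        expected = POOL_KEYS.get(key)
        if expected is None or not isinstance(value, expected):
            return {'exit-code': 1,
                    'stderr': 'Invalid value {} for key {}'.format(value,
                                                                   key)}
        return None

    assert validate_pool_value('size', 3) is None
    assert validate_pool_value('size', 'abc')['exit-code'] == 1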
    '''
    @mock.patch('ceph_broker.log')
    def test_set_pool_max_bytes(self, mock_broker):
        self.fail()
    '''


if __name__ == '__main__':
    unittest.main()
@@ -31,7 +31,6 @@ ENOUGH_PEERS_COMPLETE = {


class ServiceStatusTestCase(test_utils.CharmTestCase):

    def setUp(self):
        super(ServiceStatusTestCase, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get