Retire project
Leave README around for those that follow. Change-Id: I34f8092296c70cd1b51de7c48d7ee589f410d55d
This commit is contained in:
parent
25fcc99e47
commit
536c22a6a2
|
@ -1,7 +0,0 @@
|
||||||
[report]
|
|
||||||
# Regexes for lines to exclude from consideration
|
|
||||||
exclude_lines =
|
|
||||||
if __name__ == .__main__.:
|
|
||||||
include=
|
|
||||||
hooks/hooks.py
|
|
||||||
hooks/ceph*.py
|
|
17
.project
17
.project
|
@ -1,17 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8"?>
|
|
||||||
<projectDescription>
|
|
||||||
<name>ceph</name>
|
|
||||||
<comment></comment>
|
|
||||||
<projects>
|
|
||||||
</projects>
|
|
||||||
<buildSpec>
|
|
||||||
<buildCommand>
|
|
||||||
<name>org.python.pydev.PyDevBuilder</name>
|
|
||||||
<arguments>
|
|
||||||
</arguments>
|
|
||||||
</buildCommand>
|
|
||||||
</buildSpec>
|
|
||||||
<natures>
|
|
||||||
<nature>org.python.pydev.pythonNature</nature>
|
|
||||||
</natures>
|
|
||||||
</projectDescription>
|
|
|
@ -1,8 +0,0 @@
|
||||||
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
|
|
||||||
<?eclipse-pydev version="1.0"?><pydev_project>
|
|
||||||
<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
|
|
||||||
<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
|
|
||||||
<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
|
|
||||||
<path>/ceph/hooks</path>
|
|
||||||
</pydev_pathproperty>
|
|
||||||
</pydev_project>
|
|
|
@ -1,8 +0,0 @@
|
||||||
[DEFAULT]
|
|
||||||
test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \
|
|
||||||
OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \
|
|
||||||
OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \
|
|
||||||
${PYTHON:-python} -m subunit.run discover -t ./ ./unit_tests $LISTOPT $IDOPTION
|
|
||||||
|
|
||||||
test_id_option=--load-list $IDFILE
|
|
||||||
test_list_option=--list
|
|
202
LICENSE
202
LICENSE
|
@ -1,202 +0,0 @@
|
||||||
|
|
||||||
Apache License
|
|
||||||
Version 2.0, January 2004
|
|
||||||
http://www.apache.org/licenses/
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
|
||||||
|
|
||||||
1. Definitions.
|
|
||||||
|
|
||||||
"License" shall mean the terms and conditions for use, reproduction,
|
|
||||||
and distribution as defined by Sections 1 through 9 of this document.
|
|
||||||
|
|
||||||
"Licensor" shall mean the copyright owner or entity authorized by
|
|
||||||
the copyright owner that is granting the License.
|
|
||||||
|
|
||||||
"Legal Entity" shall mean the union of the acting entity and all
|
|
||||||
other entities that control, are controlled by, or are under common
|
|
||||||
control with that entity. For the purposes of this definition,
|
|
||||||
"control" means (i) the power, direct or indirect, to cause the
|
|
||||||
direction or management of such entity, whether by contract or
|
|
||||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
|
||||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
|
||||||
|
|
||||||
"You" (or "Your") shall mean an individual or Legal Entity
|
|
||||||
exercising permissions granted by this License.
|
|
||||||
|
|
||||||
"Source" form shall mean the preferred form for making modifications,
|
|
||||||
including but not limited to software source code, documentation
|
|
||||||
source, and configuration files.
|
|
||||||
|
|
||||||
"Object" form shall mean any form resulting from mechanical
|
|
||||||
transformation or translation of a Source form, including but
|
|
||||||
not limited to compiled object code, generated documentation,
|
|
||||||
and conversions to other media types.
|
|
||||||
|
|
||||||
"Work" shall mean the work of authorship, whether in Source or
|
|
||||||
Object form, made available under the License, as indicated by a
|
|
||||||
copyright notice that is included in or attached to the work
|
|
||||||
(an example is provided in the Appendix below).
|
|
||||||
|
|
||||||
"Derivative Works" shall mean any work, whether in Source or Object
|
|
||||||
form, that is based on (or derived from) the Work and for which the
|
|
||||||
editorial revisions, annotations, elaborations, or other modifications
|
|
||||||
represent, as a whole, an original work of authorship. For the purposes
|
|
||||||
of this License, Derivative Works shall not include works that remain
|
|
||||||
separable from, or merely link (or bind by name) to the interfaces of,
|
|
||||||
the Work and Derivative Works thereof.
|
|
||||||
|
|
||||||
"Contribution" shall mean any work of authorship, including
|
|
||||||
the original version of the Work and any modifications or additions
|
|
||||||
to that Work or Derivative Works thereof, that is intentionally
|
|
||||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
|
||||||
or by an individual or Legal Entity authorized to submit on behalf of
|
|
||||||
the copyright owner. For the purposes of this definition, "submitted"
|
|
||||||
means any form of electronic, verbal, or written communication sent
|
|
||||||
to the Licensor or its representatives, including but not limited to
|
|
||||||
communication on electronic mailing lists, source code control systems,
|
|
||||||
and issue tracking systems that are managed by, or on behalf of, the
|
|
||||||
Licensor for the purpose of discussing and improving the Work, but
|
|
||||||
excluding communication that is conspicuously marked or otherwise
|
|
||||||
designated in writing by the copyright owner as "Not a Contribution."
|
|
||||||
|
|
||||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
|
||||||
on behalf of whom a Contribution has been received by Licensor and
|
|
||||||
subsequently incorporated within the Work.
|
|
||||||
|
|
||||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
copyright license to reproduce, prepare Derivative Works of,
|
|
||||||
publicly display, publicly perform, sublicense, and distribute the
|
|
||||||
Work and such Derivative Works in Source or Object form.
|
|
||||||
|
|
||||||
3. Grant of Patent License. Subject to the terms and conditions of
|
|
||||||
this License, each Contributor hereby grants to You a perpetual,
|
|
||||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
|
||||||
(except as stated in this section) patent license to make, have made,
|
|
||||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
|
||||||
where such license applies only to those patent claims licensable
|
|
||||||
by such Contributor that are necessarily infringed by their
|
|
||||||
Contribution(s) alone or by combination of their Contribution(s)
|
|
||||||
with the Work to which such Contribution(s) was submitted. If You
|
|
||||||
institute patent litigation against any entity (including a
|
|
||||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
|
||||||
or a Contribution incorporated within the Work constitutes direct
|
|
||||||
or contributory patent infringement, then any patent licenses
|
|
||||||
granted to You under this License for that Work shall terminate
|
|
||||||
as of the date such litigation is filed.
|
|
||||||
|
|
||||||
4. Redistribution. You may reproduce and distribute copies of the
|
|
||||||
Work or Derivative Works thereof in any medium, with or without
|
|
||||||
modifications, and in Source or Object form, provided that You
|
|
||||||
meet the following conditions:
|
|
||||||
|
|
||||||
(a) You must give any other recipients of the Work or
|
|
||||||
Derivative Works a copy of this License; and
|
|
||||||
|
|
||||||
(b) You must cause any modified files to carry prominent notices
|
|
||||||
stating that You changed the files; and
|
|
||||||
|
|
||||||
(c) You must retain, in the Source form of any Derivative Works
|
|
||||||
that You distribute, all copyright, patent, trademark, and
|
|
||||||
attribution notices from the Source form of the Work,
|
|
||||||
excluding those notices that do not pertain to any part of
|
|
||||||
the Derivative Works; and
|
|
||||||
|
|
||||||
(d) If the Work includes a "NOTICE" text file as part of its
|
|
||||||
distribution, then any Derivative Works that You distribute must
|
|
||||||
include a readable copy of the attribution notices contained
|
|
||||||
within such NOTICE file, excluding those notices that do not
|
|
||||||
pertain to any part of the Derivative Works, in at least one
|
|
||||||
of the following places: within a NOTICE text file distributed
|
|
||||||
as part of the Derivative Works; within the Source form or
|
|
||||||
documentation, if provided along with the Derivative Works; or,
|
|
||||||
within a display generated by the Derivative Works, if and
|
|
||||||
wherever such third-party notices normally appear. The contents
|
|
||||||
of the NOTICE file are for informational purposes only and
|
|
||||||
do not modify the License. You may add Your own attribution
|
|
||||||
notices within Derivative Works that You distribute, alongside
|
|
||||||
or as an addendum to the NOTICE text from the Work, provided
|
|
||||||
that such additional attribution notices cannot be construed
|
|
||||||
as modifying the License.
|
|
||||||
|
|
||||||
You may add Your own copyright statement to Your modifications and
|
|
||||||
may provide additional or different license terms and conditions
|
|
||||||
for use, reproduction, or distribution of Your modifications, or
|
|
||||||
for any such Derivative Works as a whole, provided Your use,
|
|
||||||
reproduction, and distribution of the Work otherwise complies with
|
|
||||||
the conditions stated in this License.
|
|
||||||
|
|
||||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
|
||||||
any Contribution intentionally submitted for inclusion in the Work
|
|
||||||
by You to the Licensor shall be under the terms and conditions of
|
|
||||||
this License, without any additional terms or conditions.
|
|
||||||
Notwithstanding the above, nothing herein shall supersede or modify
|
|
||||||
the terms of any separate license agreement you may have executed
|
|
||||||
with Licensor regarding such Contributions.
|
|
||||||
|
|
||||||
6. Trademarks. This License does not grant permission to use the trade
|
|
||||||
names, trademarks, service marks, or product names of the Licensor,
|
|
||||||
except as required for reasonable and customary use in describing the
|
|
||||||
origin of the Work and reproducing the content of the NOTICE file.
|
|
||||||
|
|
||||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
|
||||||
agreed to in writing, Licensor provides the Work (and each
|
|
||||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
|
||||||
implied, including, without limitation, any warranties or conditions
|
|
||||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
|
||||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
|
||||||
appropriateness of using or redistributing the Work and assume any
|
|
||||||
risks associated with Your exercise of permissions under this License.
|
|
||||||
|
|
||||||
8. Limitation of Liability. In no event and under no legal theory,
|
|
||||||
whether in tort (including negligence), contract, or otherwise,
|
|
||||||
unless required by applicable law (such as deliberate and grossly
|
|
||||||
negligent acts) or agreed to in writing, shall any Contributor be
|
|
||||||
liable to You for damages, including any direct, indirect, special,
|
|
||||||
incidental, or consequential damages of any character arising as a
|
|
||||||
result of this License or out of the use or inability to use the
|
|
||||||
Work (including but not limited to damages for loss of goodwill,
|
|
||||||
work stoppage, computer failure or malfunction, or any and all
|
|
||||||
other commercial damages or losses), even if such Contributor
|
|
||||||
has been advised of the possibility of such damages.
|
|
||||||
|
|
||||||
9. Accepting Warranty or Additional Liability. While redistributing
|
|
||||||
the Work or Derivative Works thereof, You may choose to offer,
|
|
||||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
|
||||||
or other liability obligations and/or rights consistent with this
|
|
||||||
License. However, in accepting such obligations, You may act only
|
|
||||||
on Your own behalf and on Your sole responsibility, not on behalf
|
|
||||||
of any other Contributor, and only if You agree to indemnify,
|
|
||||||
defend, and hold each Contributor harmless for any liability
|
|
||||||
incurred by, or claims asserted against, such Contributor by reason
|
|
||||||
of your accepting any such warranty or additional liability.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
APPENDIX: How to apply the Apache License to your work.
|
|
||||||
|
|
||||||
To apply the Apache License to your work, attach the following
|
|
||||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
|
||||||
replaced with your own identifying information. (Don't include
|
|
||||||
the brackets!) The text should be enclosed in the appropriate
|
|
||||||
comment syntax for the file format. We also recommend that a
|
|
||||||
file or class name and description of purpose be included on the
|
|
||||||
same "printed page" as the copyright notice for easier
|
|
||||||
identification within third-party archives.
|
|
||||||
|
|
||||||
Copyright [yyyy] [name of copyright owner]
|
|
||||||
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
you may not use this file except in compliance with the License.
|
|
||||||
You may obtain a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
See the License for the specific language governing permissions and
|
|
||||||
limitations under the License.
|
|
35
Makefile
35
Makefile
|
@ -1,35 +0,0 @@
|
||||||
#!/usr/bin/make
|
|
||||||
PYTHON := /usr/bin/env python
|
|
||||||
|
|
||||||
lint:
|
|
||||||
@tox -e pep8
|
|
||||||
|
|
||||||
test:
|
|
||||||
@echo Starting unit tests...
|
|
||||||
@tox -e py27
|
|
||||||
|
|
||||||
functional_test:
|
|
||||||
@echo Starting Amulet tests...
|
|
||||||
@tox -e func27
|
|
||||||
|
|
||||||
bin/charm_helpers_sync.py:
|
|
||||||
@mkdir -p bin
|
|
||||||
@curl -o bin/charm_helpers_sync.py https://raw.githubusercontent.com/juju/charm-helpers/master/tools/charm_helpers_sync/charm_helpers_sync.py
|
|
||||||
|
|
||||||
|
|
||||||
bin/git_sync.py:
|
|
||||||
@mkdir -p bin
|
|
||||||
@wget -O bin/git_sync.py https://raw.githubusercontent.com/CanonicalLtd/git-sync/master/git_sync.py
|
|
||||||
|
|
||||||
ch-sync: bin/charm_helpers_sync.py
|
|
||||||
$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
|
|
||||||
$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml
|
|
||||||
|
|
||||||
ceph-sync: bin/git_sync.py
|
|
||||||
$(PYTHON) bin/git_sync.py -d lib -s https://github.com/openstack/charms.ceph.git
|
|
||||||
|
|
||||||
sync: ch-sync
|
|
||||||
|
|
||||||
publish: lint test
|
|
||||||
bzr push lp:charms/ceph
|
|
||||||
bzr push lp:charms/trusty/ceph
|
|
|
@ -0,0 +1,6 @@
|
||||||
|
This project is no longer maintained.
|
||||||
|
|
||||||
|
The contents of this repository are still available in the Git
|
||||||
|
source code management system. To see the contents of this
|
||||||
|
repository before it reached its end of life, please check out the
|
||||||
|
previous commit with "git checkout HEAD^1".
|
143
README.md
143
README.md
|
@ -1,143 +0,0 @@
|
||||||
# Overview
|
|
||||||
|
|
||||||
---
|
|
||||||
**NOTE**
|
|
||||||
|
|
||||||
This charm is deprecated and will not receive updates past February 2018.
|
|
||||||
|
|
||||||
Existing users should refer to [Appendix A](https://docs.openstack.org/project-deploy-guide/charm-deployment-guide/latest/app-ceph-migration.html).
|
|
||||||
of the Charm Deployment Guide for details of how to migration existing
|
|
||||||
deployments to the preferred ceph-mon and ceph-osd charms.
|
|
||||||
|
|
||||||
---
|
|
||||||
|
|
||||||
Ceph is a distributed storage and network file system designed to provide
|
|
||||||
excellent performance, reliability, and scalability.
|
|
||||||
|
|
||||||
This charm deploys a Ceph cluster.
|
|
||||||
|
|
||||||
# Usage
|
|
||||||
|
|
||||||
The ceph charm has two pieces of mandatory configuration for which no defaults
|
|
||||||
are provided. You _must_ set these configuration options before deployment or the charm will not work:
|
|
||||||
|
|
||||||
fsid:
|
|
||||||
uuid specific to a ceph cluster used to ensure that different
|
|
||||||
clusters don't get mixed up - use `uuid` to generate one.
|
|
||||||
|
|
||||||
monitor-secret:
|
|
||||||
a ceph generated key used by the daemons that manage to cluster
|
|
||||||
to control security. You can use the ceph-authtool command to
|
|
||||||
generate one:
|
|
||||||
|
|
||||||
ceph-authtool /dev/stdout --name=mon. --gen-key
|
|
||||||
|
|
||||||
These two pieces of configuration must NOT be changed post bootstrap; attempting
|
|
||||||
to do this will cause a reconfiguration error and new service units will not join
|
|
||||||
the existing ceph cluster.
|
|
||||||
|
|
||||||
The charm also supports the specification of storage devices to be used in the
|
|
||||||
ceph cluster.
|
|
||||||
|
|
||||||
osd-devices:
|
|
||||||
A list of devices that the charm will attempt to detect, initialise and
|
|
||||||
activate as ceph storage.
|
|
||||||
|
|
||||||
This can be a superset of the actual storage devices presented to each
|
|
||||||
service unit and can be changed post ceph bootstrap using `juju set`.
|
|
||||||
|
|
||||||
The full path of each device must be provided, e.g. /dev/vdb.
|
|
||||||
|
|
||||||
For Ceph >= 0.56.6 (Raring or the Grizzly Cloud Archive) use of
|
|
||||||
directories instead of devices is also supported.
|
|
||||||
|
|
||||||
At a minimum you must provide a juju config file during initial deployment
|
|
||||||
with the fsid and monitor-secret options (contents of cepy.yaml below):
|
|
||||||
|
|
||||||
ceph:
|
|
||||||
fsid: ecbb8960-0e21-11e2-b495-83a88f44db01
|
|
||||||
monitor-secret: AQD1P2xQiKglDhAA4NGUF5j38Mhq56qwz+45wg==
|
|
||||||
osd-devices: /dev/vdb /dev/vdc /dev/vdd /dev/vde
|
|
||||||
|
|
||||||
Specifying the osd-devices to use is also a good idea.
|
|
||||||
|
|
||||||
Boot things up by using:
|
|
||||||
|
|
||||||
juju deploy -n 3 --config ceph.yaml ceph
|
|
||||||
|
|
||||||
By default the ceph cluster will not bootstrap until 3 service units have been
|
|
||||||
deployed and started; this is to ensure that a quorum is achieved prior to adding
|
|
||||||
storage devices.
|
|
||||||
|
|
||||||
## Actions
|
|
||||||
|
|
||||||
This charm supports pausing and resuming ceph's health functions on a cluster, for example when doing maintenance on a machine. to pause or resume, call:
|
|
||||||
|
|
||||||
`juju action do --unit ceph/0 pause-health` or `juju action do --unit ceph/0 resume-health`
|
|
||||||
|
|
||||||
## Scale Out Usage
|
|
||||||
|
|
||||||
You can use the Ceph OSD and Ceph Radosgw charms:
|
|
||||||
|
|
||||||
- [Ceph OSD](https://jujucharms.com/ceph-osd)
|
|
||||||
- [Ceph Rados Gateway](https://jujucharms.com/ceph-radosgw)
|
|
||||||
|
|
||||||
## Network Space support
|
|
||||||
|
|
||||||
This charm supports the use of Juju Network Spaces, allowing the charm to be bound to network space configurations managed directly by Juju. This is only supported with Juju 2.0 and above.
|
|
||||||
|
|
||||||
Network traffic can be bound to specific network spaces using the public (front-side) and cluster (back-side) bindings:
|
|
||||||
|
|
||||||
juju deploy ceph --bind "public=data-space cluster=cluster-space"
|
|
||||||
|
|
||||||
alternatively these can also be provided as part of a Juju native bundle configuration:
|
|
||||||
|
|
||||||
ceph:
|
|
||||||
charm: cs:xenial/ceph
|
|
||||||
num_units: 1
|
|
||||||
bindings:
|
|
||||||
public: data-space
|
|
||||||
cluster: cluster-space
|
|
||||||
|
|
||||||
Please refer to the [Ceph Network Reference](http://docs.ceph.com/docs/master/rados/configuration/network-config-ref) for details on how using these options effects network traffic within a Ceph deployment.
|
|
||||||
|
|
||||||
**NOTE:** Spaces must be configured in the underlying provider prior to attempting to use them.
|
|
||||||
|
|
||||||
**NOTE**: Existing deployments using ceph-*-network configuration options will continue to function; these options are preferred over any network space binding provided if set.
|
|
||||||
|
|
||||||
# Contact Information
|
|
||||||
|
|
||||||
## Authors
|
|
||||||
|
|
||||||
- Paul Collins <paul.collins@canonical.com>,
|
|
||||||
- James Page <james.page@ubuntu.com>
|
|
||||||
|
|
||||||
Report bugs on [Launchpad](http://bugs.launchpad.net/charms/+source/ceph/+filebug)
|
|
||||||
|
|
||||||
## Ceph
|
|
||||||
|
|
||||||
- [Ceph website](http://ceph.com)
|
|
||||||
- [Ceph mailing lists](http://ceph.com/resources/mailing-list-irc/)
|
|
||||||
- [Ceph bug tracker](http://tracker.ceph.com/projects/ceph)
|
|
||||||
|
|
||||||
# Technical Footnotes
|
|
||||||
|
|
||||||
This charm uses the new-style Ceph deployment as reverse-engineered from the
|
|
||||||
Chef cookbook at https://github.com/ceph/ceph-cookbooks, although we selected
|
|
||||||
a different strategy to form the monitor cluster. Since we don't know the
|
|
||||||
names *or* addresses of the machines in advance, we use the _relation-joined_
|
|
||||||
hook to wait for all three nodes to come up, and then write their addresses
|
|
||||||
to ceph.conf in the "mon host" parameter. After we initialize the monitor
|
|
||||||
cluster a quorum forms quickly, and OSD bringup proceeds.
|
|
||||||
|
|
||||||
The osds use so-called "OSD hotplugging". **ceph-disk prepare** is used to
|
|
||||||
create the filesystems with a special GPT partition type. *udev* is set up
|
|
||||||
to mount such filesystems and start the osd daemons as their storage becomes
|
|
||||||
visible to the system (or after `udevadm trigger`).
|
|
||||||
|
|
||||||
The Chef cookbook mentioned above performs some extra steps to generate an OSD
|
|
||||||
bootstrapping key and propagate it to the other nodes in the cluster. Since
|
|
||||||
all OSDs run on nodes that also run mon, we don't need this and did not
|
|
||||||
implement it.
|
|
||||||
|
|
||||||
See [the documentation](http://ceph.com/docs/master/dev/mon-bootstrap/) for more information on Ceph monitor cluster deployment strategies and pitfalls.
|
|
6
TODO
6
TODO
|
@ -1,6 +0,0 @@
|
||||||
Ceph Charm
|
|
||||||
==========
|
|
||||||
|
|
||||||
* fix tunables (http://tracker.newdream.net/issues/2210)
|
|
||||||
* more than 192 PGs
|
|
||||||
* fixup data placement in crush to be host not osd driven
|
|
217
actions.yaml
217
actions.yaml
|
@ -1,217 +0,0 @@
|
||||||
pause-health:
|
|
||||||
description: Pause ceph health operations across the entire ceph cluster
|
|
||||||
resume-health:
|
|
||||||
description: Resume ceph health operations across the entire ceph cluster
|
|
||||||
pause:
|
|
||||||
description: |
|
|
||||||
CAUTION - Set the local osd units in the charm to 'out' but does not stop
|
|
||||||
the osds. Unless the osd cluster is set to noout (see below), this removes
|
|
||||||
them from the ceph cluster and forces ceph to migrate the PGs to other OSDs
|
|
||||||
in the cluster. See the following.
|
|
||||||
|
|
||||||
http://docs.ceph.com/docs/master/rados/operations/add-or-rm-osds/#removing-the-osd
|
|
||||||
"Do not let your cluster reach its full ratio when removing an OSD.
|
|
||||||
Removing OSDs could cause the cluster to reach or exceed its full ratio."
|
|
||||||
Also note that for small clusters you may encounter the corner case where
|
|
||||||
some PGs remain stuck in the active+remapped state. Refer to the above link
|
|
||||||
on how to resolve this.
|
|
||||||
|
|
||||||
pause-health unit can be used before pausing the ceph units to stop the
|
|
||||||
cluster rebalancing the data off this unit. pause-health sets 'noout' on
|
|
||||||
the cluster such that it will not try to rebalance the data accross the
|
|
||||||
remaining units.
|
|
||||||
|
|
||||||
It is up to the user of the charm to determine whether pause-health should
|
|
||||||
be used as it depends on whether the osd is being paused for maintenance or
|
|
||||||
to remove it from the cluster completely.
|
|
||||||
resume:
|
|
||||||
description: |
|
|
||||||
Set the local osd units in the charm to 'in'. Note that the pause option
|
|
||||||
does NOT stop the osd processes.
|
|
||||||
create-pool:
|
|
||||||
description: Creates a pool
|
|
||||||
params:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: The name of the pool
|
|
||||||
profile-name:
|
|
||||||
type: string
|
|
||||||
description: The crush profile to use for this pool. The ruleset must exist first.
|
|
||||||
pool-type:
|
|
||||||
type: string
|
|
||||||
default: "replicated"
|
|
||||||
enum: [replicated, erasure]
|
|
||||||
description: |
|
|
||||||
The pool type which may either be replicated to recover from lost OSDs by keeping multiple copies of the
|
|
||||||
objects or erasure to get a kind of generalized RAID5 capability.
|
|
||||||
replicas:
|
|
||||||
type: integer
|
|
||||||
default: 3
|
|
||||||
description: |
|
|
||||||
For the replicated pool this is the number of replicas to store of each object.
|
|
||||||
erasure-profile-name:
|
|
||||||
type: string
|
|
||||||
default: default
|
|
||||||
description: |
|
|
||||||
The name of the erasure coding profile to use for this pool. Note this profile must exist
|
|
||||||
before calling create-pool
|
|
||||||
required: [name]
|
|
||||||
additionalProperties: false
|
|
||||||
create-erasure-profile:
|
|
||||||
description: Create a new erasure code profile to use on a pool.
|
|
||||||
params:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: The name of the profile
|
|
||||||
failure-domain:
|
|
||||||
type: string
|
|
||||||
default: host
|
|
||||||
enum: [chassis, datacenter, host, osd, pdu, pod, rack, region, room, root, row]
|
|
||||||
description: |
|
|
||||||
The failure-domain=host will create a CRUSH ruleset that ensures no two chunks are stored in the same host.
|
|
||||||
plugin:
|
|
||||||
type: string
|
|
||||||
default: "jerasure"
|
|
||||||
enum: [jerasure, isa, lrc, shec]
|
|
||||||
description: |
|
|
||||||
The erasure plugin to use for this profile.
|
|
||||||
See http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/ for more details
|
|
||||||
data-chunks:
|
|
||||||
type: integer
|
|
||||||
default: 3
|
|
||||||
description: |
|
|
||||||
The number of data chunks, i.e. the number of chunks in which the original object is divided. For instance
|
|
||||||
if K = 2 a 10KB object will be divided into K objects of 5KB each.
|
|
||||||
coding-chunks:
|
|
||||||
type: integer
|
|
||||||
default: 2
|
|
||||||
description: |
|
|
||||||
The number of coding chunks, i.e. the number of additional chunks computed by the encoding functions.
|
|
||||||
If there are 2 coding chunks, it means 2 OSDs can be out without losing data.
|
|
||||||
locality-chunks:
|
|
||||||
type: integer
|
|
||||||
description: |
|
|
||||||
Group the coding and data chunks into sets of size locality. For instance, for k=4 and m=2, when locality=3
|
|
||||||
two groups of three are created. Each set can be recovered without reading chunks from another set.
|
|
||||||
durability-estimator:
|
|
||||||
type: integer
|
|
||||||
description: |
|
|
||||||
The number of parity chunks each of which includes each data chunk in its calculation range. The number is used
|
|
||||||
as a durability estimator. For instance, if c=2, 2 OSDs can be down without losing data.
|
|
||||||
required: [name, data-chunks, coding-chunks]
|
|
||||||
additionalProperties: false
|
|
||||||
get-erasure-profile:
|
|
||||||
description: Display an erasure code profile.
|
|
||||||
params:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: The name of the profile
|
|
||||||
required: [name]
|
|
||||||
additionalProperties: false
|
|
||||||
delete-erasure-profile:
|
|
||||||
description: Deletes an erasure code profile.
|
|
||||||
params:
|
|
||||||
name:
|
|
||||||
type: string
|
|
||||||
description: The name of the profile
|
|
||||||
required: [name]
|
|
||||||
additionalProperties: false
|
|
||||||
list-erasure-profiles:
|
|
||||||
description: List the names of all erasure code profiles
|
|
||||||
additionalProperties: false
|
|
||||||
list-pools:
|
|
||||||
description: List your cluster’s pools
|
|
||||||
additionalProperties: false
|
|
||||||
set-pool-max-bytes:
|
|
||||||
description: Set pool quotas for the maximum number of bytes.
|
|
||||||
params:
|
|
||||||
max:
|
|
||||||
type: integer
|
|
||||||
description: The name of the pool
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the pool
|
|
||||||
required: [pool-name, max]
|
|
||||||
additionalProperties: false
|
|
||||||
delete-pool:
|
|
||||||
description: Deletes the named pool
|
|
||||||
params:
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the pool
|
|
||||||
required: [pool-name]
|
|
||||||
additionalProperties: false
|
|
||||||
rename-pool:
|
|
||||||
description: Rename a pool
|
|
||||||
params:
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the pool
|
|
||||||
new-name:
|
|
||||||
type: string
|
|
||||||
description: The new name of the pool
|
|
||||||
required: [pool-name, new-name]
|
|
||||||
additionalProperties: false
|
|
||||||
pool-statistics:
|
|
||||||
description: Show a pool’s utilization statistics
|
|
||||||
additionalProperties: false
|
|
||||||
snapshot-pool:
|
|
||||||
description: Snapshot a pool
|
|
||||||
params:
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the pool
|
|
||||||
snapshot-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the snapshot
|
|
||||||
required: [snapshot-name, pool-name]
|
|
||||||
additionalProperties: false
|
|
||||||
remove-pool-snapshot:
|
|
||||||
description: Remove a pool snapshot
|
|
||||||
params:
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the pool
|
|
||||||
snapshot-name:
|
|
||||||
type: string
|
|
||||||
description: The name of the snapshot
|
|
||||||
required: [snapshot-name, pool-name]
|
|
||||||
additionalProperties: false
|
|
||||||
pool-set:
|
|
||||||
description: Set a value for the pool
|
|
||||||
params:
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The pool to set this variable on.
|
|
||||||
key:
|
|
||||||
type: string
|
|
||||||
description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
|
|
||||||
value:
|
|
||||||
type: string
|
|
||||||
description: The value to set
|
|
||||||
required: [key, value, pool-name]
|
|
||||||
additionalProperties: false
|
|
||||||
pool-get:
|
|
||||||
description: Get a value for the pool
|
|
||||||
params:
|
|
||||||
pool-name:
|
|
||||||
type: string
|
|
||||||
description: The pool to get this variable from.
|
|
||||||
key:
|
|
||||||
type: string
|
|
||||||
description: Any valid Ceph key from http://docs.ceph.com/docs/master/rados/operations/pools/#get-pool-values
|
|
||||||
required: [key, pool-name]
|
|
||||||
additionalProperties: false
|
|
||||||
list-disks:
|
|
||||||
description: List the unmounted disk on the specified unit
|
|
||||||
add-disk:
|
|
||||||
description: Add disk(s) to Ceph
|
|
||||||
params:
|
|
||||||
osd-devices:
|
|
||||||
type: string
|
|
||||||
description: The devices to format and set up as osd volumes.
|
|
||||||
bucket:
|
|
||||||
type: string
|
|
||||||
description: The name of the bucket in Ceph to add these devices into
|
|
||||||
required:
|
|
||||||
- osd-devices
|
|
|
@ -1,16 +0,0 @@
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import sys
|
|
||||||
sys.path.append('hooks')
|
|
|
@ -1 +0,0 @@
|
||||||
add_disk.py
|
|
|
@ -1,78 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
#
|
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import psutil
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('lib')
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
action_get,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import (
|
|
||||||
CephBrokerRq,
|
|
||||||
send_request_if_needed,
|
|
||||||
)
|
|
||||||
|
|
||||||
import ceph.utils as ceph
|
|
||||||
|
|
||||||
from ceph_hooks import (
|
|
||||||
get_journal_devices,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def add_device(request, device_path, bucket=None):
    """Initialize a block device as a Ceph OSD and queue a bucket move.

    :param request: CephBrokerRq to append any broker ops to.
    :param device_path: absolute path of the block device to osdize.
    :param bucket: optional CRUSH bucket name to move the new OSD into.
    :returns: the (possibly extended) broker request.
    """
    # Bug fix: the original referenced the global loop variable 'dev',
    # which only worked by accident when called from __main__; use the
    # parameter instead.
    ceph.osdize(device_path, config('osd-format'),
                get_journal_devices(), config('osd-reformat'),
                config('ignore-device-errors'),
                config('osd-encrypt'),
                config('bluestore'))
    # Make it fast!
    if config('autotune'):
        ceph.tune_dev(device_path)
    # Materialize the matches so indexing and truthiness behave the same
    # on Python 3 (filter() there returns a lazy, always-truthy object).
    mounts = [disk for disk in psutil.disk_partitions()
              if device_path in disk.device]
    if mounts:
        osd = mounts[0]
        # Mountpoints look like .../ceph-<id>; the trailing token is the id.
        osd_id = osd.mountpoint.split('/')[-1].split('-')[-1]
        request.ops.append({
            'op': 'move-osd-to-bucket',
            'osd': "osd.{}".format(osd_id),
            'bucket': bucket})
    return request
|
|
||||||
|
|
||||||
|
|
||||||
def get_devices():
    """Return the absolute device paths named in the 'osd-devices' param.

    The action parameter is a space separated list; relative and empty
    entries are silently ignored.

    :returns: list of absolute device paths (possibly empty).
    """
    # Guard against the parameter being unset (action_get returning None),
    # which previously raised AttributeError on .split().
    raw = action_get('osd-devices') or ''
    return [path.strip() for path in raw.split(' ')
            if os.path.isabs(path.strip())]
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
    # Build a single broker request covering every device named in the
    # action parameters, then send it to the monitors once (and only if
    # an equivalent request is not already outstanding).
    request = CephBrokerRq()
    for dev in get_devices():
        request = add_device(request=request,
                             device_path=dev,
                             bucket=action_get("bucket"))
    send_request_if_needed(request, relation='mon')
|
|
|
@ -1,116 +0,0 @@
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from subprocess import CalledProcessError, check_output
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
import rados
|
|
||||||
from charmhelpers.core.hookenv import log, action_get, action_fail
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import pool_set, \
|
|
||||||
set_pool_quota, snapshot_pool, remove_pool_snapshot
|
|
||||||
|
|
||||||
|
|
||||||
# Connect to Ceph via Librados and return a connection
def connect():
    """Open a librados connection using the local /etc/ceph/ceph.conf.

    NOTE(review): on any librados failure this only logs the error and
    implicitly returns None — callers must cope with a None handle.
    """
    try:
        cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
        cluster.connect()
        return cluster
    except (rados.IOError,
            rados.ObjectNotFound,
            rados.NoData,
            rados.NoSpace,
            rados.PermissionError) as rados_error:
        log("librados failed with error: {}".format(str(rados_error)))
|
|
||||||
|
|
||||||
|
|
||||||
def create_crush_rule():
    """Placeholder for creating a CRUSH rule; not implemented yet."""
    # Shell out
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def list_pools():
    """Return the names of every pool in the cluster.

    Fails the action (and returns None) on any librados error.
    """
    rados_failures = (rados.IOError,
                      rados.ObjectNotFound,
                      rados.NoData,
                      rados.NoSpace,
                      rados.PermissionError)
    try:
        cluster = connect()
        pools = cluster.list_pools()
        cluster.shutdown()
        return pools
    except rados_failures as err:
        action_fail(err.message)
|
|
||||||
|
|
||||||
|
|
||||||
def pool_get():
    """Run 'ceph osd pool get' for the action's pool/key and return output.

    Fails the action on a non-zero exit from the ceph CLI.
    """
    key = action_get("key")
    # Consistency fix: the action parameter is declared as 'pool-name' in
    # actions.yaml (hyphenated, like every other script here), not
    # 'pool_name'.
    pool_name = action_get("pool-name")
    try:
        value = check_output(['ceph', 'osd', 'pool', 'get', pool_name, key])
        return value
    except CalledProcessError as e:
        action_fail(e.message)
|
|
||||||
|
|
||||||
|
|
||||||
def set_pool():
    """Set a pool configuration key from the action's parameters."""
    key = action_get("key")
    value = action_get("value")
    # Consistency fix: actions.yaml declares the parameter as 'pool-name'
    # (hyphenated); the previous 'pool_name' key does not match it.
    pool_name = action_get("pool-name")
    pool_set(service='ceph', pool_name=pool_name, key=key, value=value)
|
|
||||||
|
|
||||||
|
|
||||||
def pool_stats():
    """Return usage statistics for the pool named by 'pool-name'.

    Fails the action (and returns None) on any librados error.
    """
    rados_failures = (rados.Error,
                      rados.IOError,
                      rados.ObjectNotFound,
                      rados.NoData,
                      rados.NoSpace,
                      rados.PermissionError)
    try:
        target_pool = action_get("pool-name")
        cluster = connect()
        ioctx = cluster.open_ioctx(target_pool)
        stats = ioctx.get_stats()
        ioctx.close()
        cluster.shutdown()
        return stats
    except rados_failures as err:
        action_fail(err.message)
|
|
||||||
|
|
||||||
|
|
||||||
def delete_pool_snapshot():
    """Remove the named snapshot from the named pool (service 'ceph')."""
    remove_pool_snapshot(service='ceph',
                         pool_name=action_get("pool-name"),
                         snapshot_name=action_get("snapshot-name"))
|
|
||||||
|
|
||||||
|
|
||||||
# Note only one or the other can be set
def set_pool_max_bytes():
    """Apply a byte quota ('max') to the pool named by 'pool-name'."""
    target_pool = action_get("pool-name")
    quota_bytes = action_get("max")
    set_pool_quota(service='ceph',
                   pool_name=target_pool,
                   max_bytes=quota_bytes)
|
|
||||||
|
|
||||||
|
|
||||||
def snapshot_ceph_pool():
    """Create a snapshot of the named pool (service 'ceph')."""
    snapshot_pool(service='ceph',
                  pool_name=action_get("pool-name"),
                  snapshot_name=action_get("snapshot-name"))
|
|
|
@ -1,89 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import create_erasure_profile
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
|
|
||||||
|
|
||||||
def make_erasure_profile():
    """Create a Ceph erasure coding profile from the action parameters.

    Supported plugins and their chunk parameters:
      jerasure: k+m    isa: k+m    local: k+m+l    shec: k+m+c

    Fails the action on an unknown plugin or a failed ceph call. The four
    previous copy-pasted try/except branches are collapsed into a single
    keyword-argument build.
    """
    name = action_get("name")
    plugin = action_get("plugin")
    failure_domain = action_get("failure-domain")

    if plugin in ("jerasure", "isa", "local", "shec"):
        # Arguments common to every supported plugin.
        kwargs = {
            'service': 'admin',
            'erasure_plugin_name': plugin,
            'profile_name': name,
            'data_chunks': action_get("data-chunks"),
            'coding_chunks': action_get("coding-chunks"),
            'failure_domain': failure_domain,
        }
        # Plugin specific extras.
        if plugin == "local":
            kwargs['locality'] = action_get("locality-chunks")
        elif plugin == "shec":
            kwargs['durability_estimator'] = action_get(
                "durability-estimator")
        try:
            create_erasure_profile(**kwargs)
        except CalledProcessError as e:
            log(e)
            action_fail("Create erasure profile failed with "
                        "message: {}".format(e.message))
    else:
        # Unknown erasure plugin
        action_fail("Unknown erasure-plugin type of {}. "
                    "Only jerasure, isa, local or shec is "
                    "allowed".format(plugin))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
make_erasure_profile()
|
|
|
@ -1,38 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import ErasurePool, ReplicatedPool
|
|
||||||
|
|
||||||
|
|
||||||
def create_pool():
    """Create a replicated or erasure coded pool named by the action.

    Unknown pool types and failed ceph calls fail the action.
    """
    pool_name = action_get("name")
    pool_type = action_get("pool-type")
    try:
        if pool_type == "replicated":
            ReplicatedPool(name=pool_name,
                           service='admin',
                           replicas=action_get("replicas")).create()
        elif pool_type == "erasure":
            profile = action_get("erasure-profile-name")
            ErasurePool(name=pool_name,
                        erasure_code_profile=profile,
                        service='admin').create()
        else:
            log("Unknown pool type of {}. Only erasure or replicated is "
                "allowed".format(pool_type))
            action_fail("Unknown pool type of {}. Only erasure or replicated "
                        "is allowed".format(pool_type))
    except CalledProcessError as e:
        action_fail("Pool creation failed because of a failed process. "
                    "Ret Code: {} Message: {}".format(e.returncode, e.message))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
create_pool()
|
|
|
@ -1,24 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
|
|
||||||
__author__ = 'chris'
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import remove_erasure_profile
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
|
|
||||||
|
|
||||||
def delete_erasure_profile():
    """Remove the erasure profile named by the action's 'name' param."""
    profile = action_get("name")

    try:
        remove_erasure_profile(service='admin', profile_name=profile)
    except CalledProcessError as err:
        action_fail("Remove erasure profile failed with error: {}".format(
            err.message))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
delete_erasure_profile()
|
|
|
@ -1,28 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
import rados
|
|
||||||
from ceph_ops import connect
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
|
|
||||||
|
|
||||||
def remove_pool():
    """Delete the pool named by the action's 'name' parameter.

    NOTE(review): connect() may return None on librados failure, in which
    case the resulting AttributeError is outside the handled exceptions —
    confirm whether that is acceptable for this action.
    """
    rados_failures = (rados.IOError,
                      rados.ObjectNotFound,
                      rados.NoData,
                      rados.NoSpace,
                      rados.PermissionError)
    try:
        pool_name = action_get("name")
        cluster = connect()
        log("Deleting pool: {}".format(pool_name))
        cluster.delete_pool(str(pool_name))  # Convert from unicode
        cluster.shutdown()
    except rados_failures as err:
        log(err)
        action_fail(err)
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
remove_pool()
|
|
|
@ -1,18 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
__author__ = 'chris'
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import get_erasure_profile
|
|
||||||
from charmhelpers.core.hookenv import action_get, action_set
|
|
||||||
|
|
||||||
|
|
||||||
def make_erasure_profile():
    """Fetch the named erasure profile and publish it as action output."""
    profile = get_erasure_profile(service='admin', name=action_get("name"))
    action_set({'message': profile})
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
make_erasure_profile()
|
|
|
@ -1 +0,0 @@
|
||||||
list_disks.py
|
|
|
@ -1,22 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
__author__ = 'chris'
|
|
||||||
import sys
|
|
||||||
from subprocess import check_output, CalledProcessError
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_set, action_fail
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # NOTE(review): 'name' is read but never used below — the command
    # lists all profiles regardless; confirm whether filtering by name
    # was intended.
    name = action_get("name")
    try:
        # List every erasure-code profile known to the cluster and publish
        # the raw CLI output as the action result.
        out = check_output(['ceph',
                            '--id', 'admin',
                            'osd',
                            'erasure-code-profile',
                            'ls']).decode('UTF-8')
        action_set({'message': out})
    except CalledProcessError as e:
        log(e)
        action_fail("Listing erasure profiles failed with error: {}".format(
            e.message))
|
|
|
@ -1,17 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
__author__ = 'chris'
|
|
||||||
import sys
|
|
||||||
from subprocess import check_output, CalledProcessError
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import log, action_set, action_fail
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
out = check_output(['ceph', '--id', 'admin',
|
|
||||||
'osd', 'lspools']).decode('UTF-8')
|
|
||||||
action_set({'message': out})
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log(e)
|
|
||||||
action_fail("List pools failed with error: {}".format(e.message))
|
|
|
@ -1,50 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
#
|
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
List unmounted devices.
|
|
||||||
|
|
||||||
This script will get all block devices known by udev and check if they
|
|
||||||
are mounted so that we can give unmounted devices to the administrator.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import pyudev
|
|
||||||
import sys
|
|
||||||
|
|
||||||
|
|
||||||
sys.path.append('hooks/')
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.utils import is_device_mounted
|
|
||||||
from charmhelpers.core.hookenv import log, action_set
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
disks = []
|
|
||||||
context = pyudev.Context()
|
|
||||||
for device in context.list_devices(DEVTYPE='disk'):
|
|
||||||
if device['SUBSYSTEM'] == 'block':
|
|
||||||
matched = False
|
|
||||||
for block_type in [u'dm', u'loop', u'ram', u'nbd']:
|
|
||||||
if block_type in device.device_node:
|
|
||||||
matched = True
|
|
||||||
if matched:
|
|
||||||
continue
|
|
||||||
disks.append(device.device_node)
|
|
||||||
log("Found disks: {}".format(disks))
|
|
||||||
unmounted_disks = [disk for disk in disks if not is_device_mounted(disk)]
|
|
||||||
|
|
||||||
action_set({
|
|
||||||
'disks': unmounted_disks})
|
|
|
@ -1 +0,0 @@
|
||||||
pause_resume.py
|
|
|
@ -1,6 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -eux
|
|
||||||
|
|
||||||
ceph osd set nodown
|
|
||||||
ceph osd set noout
|
|
|
@ -1,96 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
#
|
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
#
|
|
||||||
# pause/resume actions file.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
from subprocess import check_call
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
sys.path.append('lib')
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
action_fail,
|
|
||||||
)
|
|
||||||
|
|
||||||
from ceph.utils import get_local_osd_ids
|
|
||||||
from ceph_hooks import assess_status
|
|
||||||
|
|
||||||
from utils import (
|
|
||||||
set_unit_paused,
|
|
||||||
clear_unit_paused,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def pause(args):
    """Pause the ceph-osd units on the local machine only.

    Every OSD hosted on this unit is marked 'out' via the ceph CLI; the
    paused state is recorded and status reassessed only once all commands
    have succeeded.

    @raises CalledProcessError if the ceph commands fails.
    @raises OSError if it can't get the local osd ids.
    """
    for osd_id in get_local_osd_ids():
        check_call(['ceph',
                    '--id', 'osd-upgrade',
                    'osd', 'out', str(osd_id)])
    set_unit_paused()
    assess_status()
|
|
||||||
|
|
||||||
|
|
||||||
def resume(args):
    """Resume the ceph-osd units on this local machine only.

    Every OSD hosted on this unit is marked 'in' again, the paused flag is
    cleared and the workload status reassessed.

    @raises subprocess.CalledProcessError should the osd units fails to resume.
    @raises OSError if the unit can't get the local osd ids
    """
    for osd_id in get_local_osd_ids():
        check_call(['ceph',
                    '--id', 'osd-upgrade',
                    'osd', 'in', str(osd_id)])
    clear_unit_paused()
    assess_status()
|
|
||||||
|
|
||||||
|
|
||||||
# A dictionary of all the defined actions to callables (which take
|
|
||||||
# parsed arguments).
|
|
||||||
ACTIONS = {"pause": pause, "resume": resume}
|
|
||||||
|
|
||||||
|
|
||||||
def main(args):
    """Dispatch to the action named by the invoking symlink.

    :param args: sys.argv; the basename of args[0] selects the action
        from ACTIONS (the action files are symlinks to this script).
    :returns: an error string when the action is undefined (also passed
        to sys.exit by the caller), otherwise None.
    """
    action_name = os.path.basename(args[0])
    try:
        action = ACTIONS[action_name]
    except KeyError:
        s = "Action {} undefined".format(action_name)
        action_fail(s)
        return s
    else:
        try:
            action(args)
        except Exception as e:
            # Any failure inside the action is reported to Juju rather
            # than crashing the hook.
            action_fail("Action {} failed: {}".format(action_name, str(e)))
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == "__main__":
|
|
||||||
sys.exit(main(sys.argv))
|
|
|
@ -1,19 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
__author__ = 'chris'
|
|
||||||
import sys
|
|
||||||
from subprocess import check_output, CalledProcessError
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import log, action_set, action_get, action_fail
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
name = action_get('pool-name')
|
|
||||||
key = action_get('key')
|
|
||||||
try:
|
|
||||||
out = check_output(['ceph', '--id', 'admin',
|
|
||||||
'osd', 'pool', 'get', name, key]).decode('UTF-8')
|
|
||||||
action_set({'message': out})
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log(e)
|
|
||||||
action_fail("Pool get failed with message: {}".format(e.message))
|
|
|
@ -1,23 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
from ceph_broker import handle_set_pool_value
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Build a broker-style request dict and apply it directly through the
    # local ceph_broker handler (no relation round-trip required).
    name = action_get("pool-name")
    key = action_get("key")
    value = action_get("value")
    request = {'name': name,
               'key': key,
               'value': value}

    try:
        handle_set_pool_value(service='admin', request=request)
    except CalledProcessError as e:
        log(e.message)
        action_fail("Setting pool key: {} and value: {} failed with "
                    "message: {}".format(key, value, e.message))
|
|
|
@ -1,15 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
from subprocess import check_output, CalledProcessError
|
|
||||||
from charmhelpers.core.hookenv import log, action_set, action_fail
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Report cluster-wide and per-pool usage ('ceph df') as the action
    # output message.
    try:
        out = check_output(['ceph', '--id', 'admin',
                            'df']).decode('UTF-8')
        action_set({'message': out})
    except CalledProcessError as e:
        log(e)
        action_fail("ceph df failed with message: {}".format(e.message))
|
|
|
@ -1,19 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import remove_pool_snapshot
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
name = action_get("pool-name")
|
|
||||||
snapname = action_get("snapshot-name")
|
|
||||||
try:
|
|
||||||
remove_pool_snapshot(service='admin',
|
|
||||||
pool_name=name,
|
|
||||||
snapshot_name=snapname)
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log(e)
|
|
||||||
action_fail("Remove pool snapshot failed with message: {}".format(
|
|
||||||
e.message))
|
|
|
@ -1,16 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import rename_pool
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
name = action_get("pool-name")
|
|
||||||
new_name = action_get("new-name")
|
|
||||||
try:
|
|
||||||
rename_pool(service='admin', old_name=name, new_name=new_name)
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log(e)
|
|
||||||
action_fail("Renaming pool failed with message: {}".format(e.message))
|
|
|
@ -1 +0,0 @@
|
||||||
pause_resume.py
|
|
|
@ -1,6 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
|
|
||||||
set -eux
|
|
||||||
|
|
||||||
ceph osd unset nodown
|
|
||||||
ceph osd unset noout
|
|
|
@ -1,16 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import set_pool_quota
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # 'max' is the quota in bytes to apply to the named pool.
    max_bytes = action_get("max")
    name = action_get("pool-name")
    try:
        set_pool_quota(service='admin', pool_name=name, max_bytes=max_bytes)
    except CalledProcessError as e:
        log(e)
        action_fail("Set pool quota failed with message: {}".format(e.message))
|
|
|
@ -1,18 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
import sys
|
|
||||||
|
|
||||||
sys.path.append('hooks')
|
|
||||||
from subprocess import CalledProcessError
|
|
||||||
from charmhelpers.core.hookenv import action_get, log, action_fail
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import snapshot_pool
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
name = action_get("pool-name")
|
|
||||||
snapname = action_get("snapshot-name")
|
|
||||||
try:
|
|
||||||
snapshot_pool(service='admin',
|
|
||||||
pool_name=name,
|
|
||||||
snapshot_name=snapname)
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log(e)
|
|
||||||
action_fail("Snapshot pool failed with message: {}".format(e.message))
|
|
|
@ -1,18 +0,0 @@
|
||||||
repo: https://github.com/juju/charm-helpers
|
|
||||||
destination: hooks/charmhelpers
|
|
||||||
include:
|
|
||||||
- core
|
|
||||||
- osplatform
|
|
||||||
- cli
|
|
||||||
- fetch
|
|
||||||
- contrib.python.packages
|
|
||||||
- contrib.storage.linux
|
|
||||||
- payload.execd
|
|
||||||
- contrib.openstack.alternatives
|
|
||||||
- contrib.network.ip
|
|
||||||
- contrib.openstack:
|
|
||||||
- alternatives
|
|
||||||
- exceptions
|
|
||||||
- utils
|
|
||||||
- contrib.charmsupport
|
|
||||||
- contrib.hardening|inc=*
|
|
|
@ -1,7 +0,0 @@
|
||||||
repo: https://github.com/juju/charm-helpers
|
|
||||||
destination: tests/charmhelpers
|
|
||||||
include:
|
|
||||||
- contrib.amulet
|
|
||||||
- contrib.openstack.amulet
|
|
||||||
- core
|
|
||||||
- osplatform
|
|
256
config.yaml
256
config.yaml
|
@ -1,256 +0,0 @@
|
||||||
options:
|
|
||||||
loglevel:
|
|
||||||
default: 1
|
|
||||||
type: int
|
|
||||||
description: Mon and OSD debug level. Max is 20.
|
|
||||||
fsid:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
fsid of the ceph cluster. To generate a suitable value use `uuid`
|
|
||||||
.
|
|
||||||
This configuration element is mandatory and the service will fail on
|
|
||||||
install if it is not provided.
|
|
||||||
config-flags:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
User provided Ceph configuration. Supports a string representation of
|
|
||||||
a python dictionary where each top-level key represents a section in
|
|
||||||
the ceph.conf template. You may only use sections supported in the
|
|
||||||
template.
|
|
||||||
.
|
|
||||||
WARNING: this is not the recommended way to configure the underlying
|
|
||||||
services that this charm installs and is used at the user's own risk.
|
|
||||||
This option is mainly provided as a stop-gap for users that either
|
|
||||||
want to test the effect of modifying some config or who have found
|
|
||||||
a critical bug in the way the charm has configured their services
|
|
||||||
and need it fixed immediately. We ask that whenever this is used,
|
|
||||||
that the user consider opening a bug on this charm at
|
|
||||||
http://bugs.launchpad.net/charms providing an explanation of why the
|
|
||||||
config was needed so that we may consider it for inclusion as a
|
|
||||||
natively supported config in the the charm.
|
|
||||||
auth-supported:
|
|
||||||
type: string
|
|
||||||
default: cephx
|
|
||||||
description: |
|
|
||||||
Which authentication flavour to use.
|
|
||||||
.
|
|
||||||
Valid options are "cephx" and "none". If "none" is specified,
|
|
||||||
keys will still be created and deployed so that it can be
|
|
||||||
enabled later.
|
|
||||||
monitor-secret:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
This value will become the mon. key. To generate a suitable value use:
|
|
||||||
.
|
|
||||||
ceph-authtool /dev/stdout --name=mon. --gen-key
|
|
||||||
.
|
|
||||||
This configuration element is mandatory and the service will fail on
|
|
||||||
install if it is not provided.
|
|
||||||
monitor-count:
|
|
||||||
type: int
|
|
||||||
default: 3
|
|
||||||
description: |
|
|
||||||
How many nodes to wait for before trying to create the monitor cluster
|
|
||||||
this number needs to be odd, and more than three is a waste except for
|
|
||||||
very large clusters.
|
|
||||||
osd-devices:
|
|
||||||
type: string
|
|
||||||
default: /dev/vdb
|
|
||||||
description: |
|
|
||||||
The devices to format and set up as osd volumes.
|
|
||||||
.
|
|
||||||
These devices are the range of devices that will be checked for and
|
|
||||||
used across all service units, in addition to any volumes attached
|
|
||||||
via the --storage flag during deployment.
|
|
||||||
.
|
|
||||||
For ceph >= 0.56.6 these can also be directories instead of devices - the
|
|
||||||
charm assumes anything not starting with /dev is a directory instead.
|
|
||||||
osd-journal:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
The device to use as a shared journal drive for all OSD's. By default
|
|
||||||
no journal device will be used.
|
|
||||||
.
|
|
||||||
Only supported with ceph >= 0.48.3.
|
|
||||||
osd-journal-size:
|
|
||||||
type: int
|
|
||||||
default: 1024
|
|
||||||
description: |
|
|
||||||
Ceph osd journal size. The journal size should be at least twice the
|
|
||||||
product of the expected drive speed multiplied by filestore max sync
|
|
||||||
interval. However, the most common practice is to partition the journal
|
|
||||||
drive (often an SSD), and mount it such that Ceph uses the entire
|
|
||||||
partition for the journal.
|
|
||||||
.
|
|
||||||
Only supported with ceph >= 0.48.3.
|
|
||||||
osd-format:
|
|
||||||
type: string
|
|
||||||
default: xfs
|
|
||||||
description: |
|
|
||||||
Format of filesystem to use for OSD devices; supported formats include:
|
|
||||||
.
|
|
||||||
xfs (Default >= 0.48.3)
|
|
||||||
ext4 (Only option < 0.48.3)
|
|
||||||
btrfs (experimental and not recommended)
|
|
||||||
.
|
|
||||||
Only supported with ceph >= 0.48.3.
|
|
||||||
bluestore:
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
description: |
|
|
||||||
Use experimental bluestore storage format for OSD devices; only supported
|
|
||||||
in Ceph Jewel (10.2.0) or later.
|
|
||||||
.
|
|
||||||
Note that despite bluestore being the default for Ceph Luminous, if this
|
|
||||||
option is False, OSDs will still use filestore.
|
|
||||||
osd-reformat:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
By default, the charm will not re-format a device that already looks
|
|
||||||
as if it might be an OSD device. This is a safeguard to try to
|
|
||||||
prevent data loss.
|
|
||||||
.
|
|
||||||
Specifying this option (any value) forces a reformat of any OSD devices
|
|
||||||
found which are not already mounted.
|
|
||||||
ignore-device-errors:
|
|
||||||
type: boolean
|
|
||||||
default: False
|
|
||||||
description: |
|
|
||||||
By default, the charm will raise errors if a whitelisted device is found,
|
|
||||||
but for some reason the charm is unable to initialize the device for use
|
|
||||||
by Ceph.
|
|
||||||
.
|
|
||||||
Setting this option to 'True' will result in the charm classifying such
|
|
||||||
problems as warnings only and will not result in a hook error.
|
|
||||||
ephemeral-unmount:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
Cloud instances provider ephermeral storage which is normally mounted
|
|
||||||
on /mnt.
|
|
||||||
.
|
|
||||||
Providing this option will force an unmount of the ephemeral device
|
|
||||||
so that it can be used as a OSD storage device. This is useful for
|
|
||||||
testing purposes (cloud deployment is not a typical use case).
|
|
||||||
source:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
Optional configuration to support use of additional sources such as:
|
|
||||||
|
|
||||||
- ppa:myteam/ppa
|
|
||||||
- cloud:trusty-proposed/kilo
|
|
||||||
- http://my.archive.com/ubuntu main
|
|
||||||
|
|
||||||
The last option should be used in conjunction with the key configuration
|
|
||||||
option.
|
|
||||||
|
|
||||||
Note that a minimum ceph version of 0.48.2 is required for use with this
|
|
||||||
charm which is NOT provided by the packages in the main Ubuntu archive
|
|
||||||
for precise but is provided in the Ubuntu cloud archive.
|
|
||||||
key:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
Key ID to import to the apt keyring to support use with arbitary source
|
|
||||||
configuration from outside of Launchpad archives or PPA's.
|
|
||||||
use-syslog:
|
|
||||||
type: boolean
|
|
||||||
default: False
|
|
||||||
description: |
|
|
||||||
If set to True, supporting services will log to syslog.
|
|
||||||
ceph-public-network:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
The IP address and netmask of the public (front-side) network (e.g.,
|
|
||||||
192.168.0.0/24)
|
|
||||||
.
|
|
||||||
If multiple networks are to be used, a space-delimited list of a.b.c.d/x
|
|
||||||
can be provided.
|
|
||||||
ceph-cluster-network:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
The IP address and netmask of the cluster (back-side) network (e.g.,
|
|
||||||
192.168.0.0/24)
|
|
||||||
.
|
|
||||||
If multiple networks are to be used, a space-delimited list of a.b.c.d/x
|
|
||||||
can be provided.
|
|
||||||
prefer-ipv6:
|
|
||||||
type: boolean
|
|
||||||
default: False
|
|
||||||
description: |
|
|
||||||
If True enables IPv6 support. The charm will expect network interfaces
|
|
||||||
to be configured with an IPv6 address. If set to False (default) IPv4
|
|
||||||
is expected.
|
|
||||||
|
|
||||||
NOTE: these charms do not currently support IPv6 privacy extension. In
|
|
||||||
order for this charm to function correctly, the privacy extension must be
|
|
||||||
disabled and a non-temporary address must be configured/available on
|
|
||||||
your network interface.
|
|
||||||
sysctl:
|
|
||||||
type: string
|
|
||||||
default: '{ kernel.pid_max : 2097152, vm.max_map_count : 524288,
|
|
||||||
kernel.threads-max: 2097152 }'
|
|
||||||
description: |
|
|
||||||
YAML-formatted associative array of sysctl key/value pairs to be set
|
|
||||||
persistently. By default we set pid_max, max_map_count and
|
|
||||||
threads-max to a high value to avoid problems with large numbers (>20)
|
|
||||||
of OSDs recovering. very large clusters should set those values even
|
|
||||||
higher (e.g. max for kernel.pid_max is 4194303).
|
|
||||||
customize-failure-domain:
|
|
||||||
type: boolean
|
|
||||||
default: false
|
|
||||||
description: |
|
|
||||||
Setting this to true will tell Ceph to replicate across Juju's
|
|
||||||
Availability Zone instead of specifically by host.
|
|
||||||
availability_zone:
|
|
||||||
type: string
|
|
||||||
default:
|
|
||||||
description: |
|
|
||||||
Custom availablility zone to provide to Ceph for the OSD placement
|
|
||||||
nagios_context:
|
|
||||||
type: string
|
|
||||||
default: "juju"
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Used by the nrpe-external-master subordinate charm.
|
|
||||||
A string that will be prepended to instance name to set the host name
|
|
||||||
in nagios. So for instance the hostname would be something like:
|
|
||||||
juju-myservice-0
|
|
||||||
If you're running multiple environments with the same services in them
|
|
||||||
this allows you to differentiate between them.
|
|
||||||
nagios_servicegroups:
|
|
||||||
default: ""
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
A comma-separated list of nagios servicegroups.
|
|
||||||
If left empty, the nagios_context will be used as the servicegroup
|
|
||||||
use-direct-io:
|
|
||||||
default: True
|
|
||||||
type: boolean
|
|
||||||
description: Configure use of direct IO for OSD journals.
|
|
||||||
harden:
|
|
||||||
default:
|
|
||||||
type: string
|
|
||||||
description: |
|
|
||||||
Apply system hardening. Supports a space-delimited list of modules
|
|
||||||
to run. Supported modules currently include os, ssh, apache and mysql.
|
|
||||||
default-rbd-features:
|
|
||||||
default:
|
|
||||||
type: int
|
|
||||||
description: |
|
|
||||||
Restrict the rbd features used to the specified level. If set, this will
|
|
||||||
inform clients that they should set the config value `rbd default
|
|
||||||
features`, for example:
|
|
||||||
|
|
||||||
rbd default features = 1
|
|
||||||
|
|
||||||
This needs to be set to 1 when deploying a cloud with the nova-lxd
|
|
||||||
hypervisor.
|
|
16
copyright
16
copyright
|
@ -1,16 +0,0 @@
|
||||||
Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0
|
|
||||||
|
|
||||||
Files: *
|
|
||||||
Copyright: 2012, Canonical Ltd.
|
|
||||||
License: Apache-2.0
|
|
||||||
Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
not use this file except in compliance with the License. You may obtain
|
|
||||||
a copy of the License at
|
|
||||||
|
|
||||||
http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
|
|
||||||
Unless required by applicable law or agreed to in writing, software
|
|
||||||
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
License for the specific language governing permissions and limitations
|
|
||||||
under the License.
|
|
|
@ -1,44 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
# Copyright (C) 2014 Canonical
|
|
||||||
# All Rights Reserved
|
|
||||||
# Author: Jacek Nykis <jacek.nykis@canonical.com>
|
|
||||||
|
|
||||||
import re
|
|
||||||
import argparse
|
|
||||||
import subprocess
|
|
||||||
import nagios_plugin
|
|
||||||
|
|
||||||
|
|
||||||
def check_ceph_status(args):
|
|
||||||
if args.status_file:
|
|
||||||
nagios_plugin.check_file_freshness(args.status_file, 3600)
|
|
||||||
with open(args.status_file, "r") as f:
|
|
||||||
lines = f.readlines()
|
|
||||||
status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
|
|
||||||
else:
|
|
||||||
lines = subprocess.check_output(["ceph", "status"]).split('\n')
|
|
||||||
status_data = dict(l.strip().split(' ', 1) for l in lines if len(l) > 1)
|
|
||||||
|
|
||||||
if ('health' not in status_data
|
|
||||||
or 'monmap' not in status_data
|
|
||||||
or 'osdmap'not in status_data):
|
|
||||||
raise nagios_plugin.UnknownError('UNKNOWN: status data is incomplete')
|
|
||||||
|
|
||||||
if status_data['health'] != 'HEALTH_OK':
|
|
||||||
msg = 'CRITICAL: ceph health status: "{}"'.format(status_data['health'])
|
|
||||||
raise nagios_plugin.CriticalError(msg)
|
|
||||||
osds = re.search("^.*: (\d+) osds: (\d+) up, (\d+) in", status_data['osdmap'])
|
|
||||||
if osds.group(1) > osds.group(2): # not all OSDs are "up"
|
|
||||||
msg = 'CRITICAL: Some OSDs are not up. Total: {}, up: {}'.format(
|
|
||||||
osds.group(1), osds.group(2))
|
|
||||||
raise nagios_plugin.CriticalError(msg)
|
|
||||||
print "All OK"
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
parser = argparse.ArgumentParser(description='Check ceph status')
|
|
||||||
parser.add_argument('-f', '--file', dest='status_file',
|
|
||||||
default=False, help='Optional file with "ceph status" output')
|
|
||||||
args = parser.parse_args()
|
|
||||||
nagios_plugin.try_check(check_ceph_status, args)
|
|
|
@ -1,18 +0,0 @@
|
||||||
#!/bin/bash
|
|
||||||
# Copyright (C) 2014 Canonical
|
|
||||||
# All Rights Reserved
|
|
||||||
# Author: Jacek Nykis <jacek.nykis@canonical.com>
|
|
||||||
|
|
||||||
LOCK=/var/lock/ceph-status.lock
|
|
||||||
lockfile-create -r2 --lock-name $LOCK > /dev/null 2>&1
|
|
||||||
if [ $? -ne 0 ]; then
|
|
||||||
exit 1
|
|
||||||
fi
|
|
||||||
trap "rm -f $LOCK > /dev/null 2>&1" exit
|
|
||||||
|
|
||||||
DATA_DIR="/var/lib/nagios"
|
|
||||||
if [ ! -d $DATA_DIR ]; then
|
|
||||||
mkdir -p $DATA_DIR
|
|
||||||
fi
|
|
||||||
|
|
||||||
ceph status >${DATA_DIR}/cat-ceph-status.txt
|
|
|
@ -1,5 +0,0 @@
|
||||||
# Overrides file for contrib.hardening. See README.hardening in
|
|
||||||
# contrib.hardening for info on how to use this file.
|
|
||||||
ssh:
|
|
||||||
server:
|
|
||||||
use_pam: 'yes' # juju requires this
|
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
|
@ -1 +0,0 @@
|
||||||
ceph_hooks.py
|
|
|
@ -1 +0,0 @@
|
||||||
ceph_hooks.py
|
|
|
@ -1 +0,0 @@
|
||||||
ceph_hooks.py
|
|
|
@ -1 +0,0 @@
|
||||||
ceph_hooks.py
|
|
|
@ -1,697 +0,0 @@
|
||||||
#!/usr/bin/python
|
|
||||||
#
|
|
||||||
# Copyright 2016 Canonical Ltd
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import socket
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
sys.path.append('lib')
|
|
||||||
import ceph.utils as ceph
|
|
||||||
from ceph.broker import (
|
|
||||||
process_requests
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
ERROR,
|
|
||||||
config,
|
|
||||||
relation_ids,
|
|
||||||
related_units,
|
|
||||||
is_relation_made,
|
|
||||||
relation_get,
|
|
||||||
relation_set,
|
|
||||||
remote_unit,
|
|
||||||
Hooks, UnregisteredHookError,
|
|
||||||
service_name,
|
|
||||||
relations_of_type,
|
|
||||||
status_set,
|
|
||||||
storage_get,
|
|
||||||
storage_list,
|
|
||||||
local_unit,
|
|
||||||
application_version_set,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
service_restart,
|
|
||||||
service_pause,
|
|
||||||
umount,
|
|
||||||
mkdir,
|
|
||||||
write_file,
|
|
||||||
rsync,
|
|
||||||
cmp_pkgrevno,
|
|
||||||
add_to_updatedb_prunepath,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_update,
|
|
||||||
filter_installed_packages,
|
|
||||||
add_source,
|
|
||||||
get_upstream_version,
|
|
||||||
)
|
|
||||||
from charmhelpers.payload.execd import execd_preinstall
|
|
||||||
from charmhelpers.contrib.openstack.alternatives import (
|
|
||||||
install_alternative,
|
|
||||||
remove_alternative,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.network.ip import (
|
|
||||||
get_ipv6_addr,
|
|
||||||
format_ipv6_addr,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.sysctl import create as create_sysctl
|
|
||||||
from charmhelpers.core.templating import render
|
|
||||||
from charmhelpers.contrib.storage.linux.ceph import (
|
|
||||||
CephConfContext,
|
|
||||||
)
|
|
||||||
from utils import (
|
|
||||||
get_networks,
|
|
||||||
get_public_addr,
|
|
||||||
assert_charm_supports_ipv6,
|
|
||||||
is_unit_paused_set,
|
|
||||||
get_cluster_addr,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.charmsupport import nrpe
|
|
||||||
from charmhelpers.contrib.hardening.harden import harden
|
|
||||||
|
|
||||||
hooks = Hooks()
|
|
||||||
|
|
||||||
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
|
|
||||||
SCRIPTS_DIR = '/usr/local/bin'
|
|
||||||
STATUS_FILE = '/var/lib/nagios/cat-ceph-status.txt'
|
|
||||||
STATUS_CRONFILE = '/etc/cron.d/cat-ceph-health'
|
|
||||||
STORAGE_MOUNT_PATH = '/var/lib/ceph'
|
|
||||||
|
|
||||||
|
|
||||||
def check_for_upgrade():
|
|
||||||
if not ceph.is_bootstrapped():
|
|
||||||
log("Ceph is not bootstrapped, skipping upgrade checks.")
|
|
||||||
return
|
|
||||||
|
|
||||||
c = hookenv.config()
|
|
||||||
old_version = ceph.resolve_ceph_version(c.previous('source') or
|
|
||||||
'distro')
|
|
||||||
log('old_version: {}'.format(old_version))
|
|
||||||
# Strip all whitespace
|
|
||||||
new_version = ceph.resolve_ceph_version(hookenv.config('source'))
|
|
||||||
log('new_version: {}'.format(new_version))
|
|
||||||
|
|
||||||
if old_version in ceph.UPGRADE_PATHS:
|
|
||||||
if new_version == ceph.UPGRADE_PATHS[old_version]:
|
|
||||||
log("{} to {} is a valid upgrade path. Proceeding.".format(
|
|
||||||
old_version, new_version))
|
|
||||||
ceph.roll_monitor_cluster(new_version=new_version,
|
|
||||||
upgrade_key='admin')
|
|
||||||
# Wait for all monitors to finish.
|
|
||||||
status_set("maintenance", "Waiting on mons to finish upgrading")
|
|
||||||
ceph.wait_for_all_monitors_to_upgrade(new_version=new_version,
|
|
||||||
upgrade_key='admin')
|
|
||||||
ceph.roll_osd_cluster(new_version=new_version,
|
|
||||||
upgrade_key='admin')
|
|
||||||
else:
|
|
||||||
# Log a helpful error message
|
|
||||||
log("Invalid upgrade path from {} to {}. "
|
|
||||||
"Valid paths are: {}".format(
|
|
||||||
old_version,
|
|
||||||
new_version,
|
|
||||||
ceph.pretty_print_upgrade_paths()
|
|
||||||
))
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('install.real')
|
|
||||||
@harden()
|
|
||||||
def install():
|
|
||||||
execd_preinstall()
|
|
||||||
add_source(config('source'), config('key'))
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install(packages=ceph.determine_packages(), fatal=True)
|
|
||||||
|
|
||||||
|
|
||||||
def az_info():
|
|
||||||
az_info = ""
|
|
||||||
config_az = config("availability_zone")
|
|
||||||
juju_az_info = os.environ.get('JUJU_AVAILABILITY_ZONE')
|
|
||||||
if juju_az_info:
|
|
||||||
az_info = "{} rack={}".format(az_info, juju_az_info)
|
|
||||||
if config_az:
|
|
||||||
az_info = "{} row={}".format(az_info, config_az)
|
|
||||||
if az_info != "":
|
|
||||||
log("AZ Info: " + az_info)
|
|
||||||
return az_info
|
|
||||||
|
|
||||||
|
|
||||||
def use_short_objects():
|
|
||||||
'''
|
|
||||||
Determine whether OSD's should be configured with
|
|
||||||
limited object name lengths.
|
|
||||||
|
|
||||||
@return: boolean indicating whether OSD's should be limited
|
|
||||||
'''
|
|
||||||
if cmp_pkgrevno('ceph', "10.2.0") >= 0:
|
|
||||||
if config('osd-format') in ('ext4'):
|
|
||||||
return True
|
|
||||||
for device in config('osd-devices'):
|
|
||||||
if not device.startswith('/dev'):
|
|
||||||
# TODO: determine format of directory based
|
|
||||||
# OSD location
|
|
||||||
return True
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def get_ceph_context():
|
|
||||||
networks = get_networks('ceph-public-network')
|
|
||||||
public_network = ', '.join(networks)
|
|
||||||
|
|
||||||
networks = get_networks('ceph-cluster-network')
|
|
||||||
cluster_network = ', '.join(networks)
|
|
||||||
|
|
||||||
cephcontext = {
|
|
||||||
'auth_supported': config('auth-supported'),
|
|
||||||
'mon_hosts': ' '.join(get_mon_hosts()),
|
|
||||||
'fsid': config('fsid'),
|
|
||||||
'old_auth': cmp_pkgrevno('ceph', "0.51") < 0,
|
|
||||||
'osd_journal_size': config('osd-journal-size'),
|
|
||||||
'use_syslog': str(config('use-syslog')).lower(),
|
|
||||||
'ceph_public_network': public_network,
|
|
||||||
'ceph_cluster_network': cluster_network,
|
|
||||||
'loglevel': config('loglevel'),
|
|
||||||
'dio': str(config('use-direct-io')).lower(),
|
|
||||||
'short_object_len': use_short_objects(),
|
|
||||||
'bluestore': config('bluestore'),
|
|
||||||
'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
|
|
||||||
}
|
|
||||||
|
|
||||||
if config('prefer-ipv6'):
|
|
||||||
dynamic_ipv6_address = get_ipv6_addr()[0]
|
|
||||||
if not public_network:
|
|
||||||
cephcontext['public_addr'] = dynamic_ipv6_address
|
|
||||||
if not cluster_network:
|
|
||||||
cephcontext['cluster_addr'] = dynamic_ipv6_address
|
|
||||||
else:
|
|
||||||
cephcontext['public_addr'] = get_public_addr()
|
|
||||||
cephcontext['cluster_addr'] = get_cluster_addr()
|
|
||||||
|
|
||||||
if config('customize-failure-domain'):
|
|
||||||
az = az_info()
|
|
||||||
if az:
|
|
||||||
cephcontext['crush_location'] = "root=default {} host={}" \
|
|
||||||
.format(az, socket.gethostname())
|
|
||||||
else:
|
|
||||||
log(
|
|
||||||
"Your Juju environment doesn't"
|
|
||||||
"have support for Availability Zones"
|
|
||||||
)
|
|
||||||
|
|
||||||
if config('default-rbd-features'):
|
|
||||||
cephcontext['rbd_features'] = config('default-rbd-features')
|
|
||||||
|
|
||||||
# NOTE(dosaboy): these sections must correspond to what is supported in the
|
|
||||||
# config template.
|
|
||||||
sections = ['global', 'mds', 'osd', 'mon']
|
|
||||||
cephcontext.update(CephConfContext(permitted_sections=sections)())
|
|
||||||
return cephcontext
|
|
||||||
|
|
||||||
|
|
||||||
def ceph_conf_path():
|
|
||||||
return "/var/lib/charm/{}/ceph.conf".format(service_name())
|
|
||||||
|
|
||||||
|
|
||||||
def emit_cephconf():
|
|
||||||
# Install ceph.conf as an alternative to support
|
|
||||||
# co-existence with other charms that write this file
|
|
||||||
charm_ceph_conf = ceph_conf_path()
|
|
||||||
mkdir(os.path.dirname(charm_ceph_conf), owner=ceph.ceph_user(),
|
|
||||||
group=ceph.ceph_user())
|
|
||||||
render('ceph.conf', charm_ceph_conf, get_ceph_context(), perms=0o644)
|
|
||||||
install_alternative('ceph.conf', '/etc/ceph/ceph.conf',
|
|
||||||
charm_ceph_conf, 100)
|
|
||||||
|
|
||||||
|
|
||||||
JOURNAL_ZAPPED = '/var/lib/ceph/journal_zapped'
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('config-changed')
|
|
||||||
@harden()
|
|
||||||
def config_changed():
|
|
||||||
if config('prefer-ipv6'):
|
|
||||||
assert_charm_supports_ipv6()
|
|
||||||
|
|
||||||
# Check if an upgrade was requested
|
|
||||||
check_for_upgrade()
|
|
||||||
|
|
||||||
log('Monitor hosts are ' + repr(get_mon_hosts()))
|
|
||||||
|
|
||||||
# Pre-flight checks
|
|
||||||
if not config('fsid'):
|
|
||||||
log('No fsid supplied, cannot proceed.', level=ERROR)
|
|
||||||
sys.exit(1)
|
|
||||||
if not config('monitor-secret'):
|
|
||||||
log('No monitor-secret supplied, cannot proceed.', level=ERROR)
|
|
||||||
sys.exit(1)
|
|
||||||
if config('osd-format') not in ceph.DISK_FORMATS:
|
|
||||||
log('Invalid OSD disk format configuration specified', level=ERROR)
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
sysctl_dict = config('sysctl')
|
|
||||||
if sysctl_dict:
|
|
||||||
create_sysctl(sysctl_dict, '/etc/sysctl.d/50-ceph-charm.conf')
|
|
||||||
|
|
||||||
emit_cephconf()
|
|
||||||
|
|
||||||
e_mountpoint = config('ephemeral-unmount')
|
|
||||||
if e_mountpoint and ceph.filesystem_mounted(e_mountpoint):
|
|
||||||
umount(e_mountpoint)
|
|
||||||
|
|
||||||
osd_journal = get_osd_journal()
|
|
||||||
if (osd_journal and not os.path.exists(JOURNAL_ZAPPED) and
|
|
||||||
os.path.exists(osd_journal)):
|
|
||||||
ceph.zap_disk(osd_journal)
|
|
||||||
with open(JOURNAL_ZAPPED, 'w') as zapped:
|
|
||||||
zapped.write('DONE')
|
|
||||||
|
|
||||||
# Support use of single node ceph
|
|
||||||
if not ceph.is_bootstrapped() and int(config('monitor-count')) == 1:
|
|
||||||
status_set('maintenance', 'Bootstrapping single Ceph MON')
|
|
||||||
ceph.bootstrap_monitor_cluster(config('monitor-secret'))
|
|
||||||
ceph.wait_for_bootstrap()
|
|
||||||
if cmp_pkgrevno('ceph', '12.0.0') >= 0:
|
|
||||||
status_set('maintenance', 'Bootstrapping single Ceph MGR')
|
|
||||||
ceph.bootstrap_manager()
|
|
||||||
|
|
||||||
storage_changed()
|
|
||||||
|
|
||||||
if relations_of_type('nrpe-external-master'):
|
|
||||||
update_nrpe_config()
|
|
||||||
add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('osd-devices-storage-attached', 'osd-devices-storage-detaching')
|
|
||||||
def storage_changed():
|
|
||||||
if ceph.is_bootstrapped():
|
|
||||||
for dev in get_devices():
|
|
||||||
ceph.osdize(dev, config('osd-format'), get_osd_journal(),
|
|
||||||
reformat_osd(), config('ignore-device-errors'),
|
|
||||||
bluestore=config('bluestore'))
|
|
||||||
ceph.start_osds(get_devices())
|
|
||||||
|
|
||||||
|
|
||||||
def get_osd_journal():
|
|
||||||
'''
|
|
||||||
Returns the block device path to use for the OSD journal, if any.
|
|
||||||
|
|
||||||
If there is an osd-journal storage instance attached, it will be
|
|
||||||
used as the journal. Otherwise, the osd-journal configuration will
|
|
||||||
be returned.
|
|
||||||
'''
|
|
||||||
storage_ids = storage_list('osd-journal')
|
|
||||||
if storage_ids:
|
|
||||||
# There can be at most one osd-journal storage instance.
|
|
||||||
return storage_get('location', storage_ids[0])
|
|
||||||
return config('osd-journal')
|
|
||||||
|
|
||||||
|
|
||||||
def get_mon_hosts():
|
|
||||||
hosts = []
|
|
||||||
addr = get_public_addr()
|
|
||||||
hosts.append('{}:6789'.format(format_ipv6_addr(addr) or addr))
|
|
||||||
|
|
||||||
for relid in relation_ids('mon'):
|
|
||||||
for unit in related_units(relid):
|
|
||||||
addr = relation_get('ceph-public-address', unit, relid)
|
|
||||||
if addr is not None:
|
|
||||||
hosts.append('{}:6789'.format(
|
|
||||||
format_ipv6_addr(addr) or addr))
|
|
||||||
|
|
||||||
hosts.sort()
|
|
||||||
return hosts
|
|
||||||
|
|
||||||
|
|
||||||
def get_peer_units():
|
|
||||||
"""
|
|
||||||
Returns a dictionary of unit names from the mon peer relation with
|
|
||||||
a flag indicating whether the unit has presented its address
|
|
||||||
"""
|
|
||||||
units = {}
|
|
||||||
units[local_unit()] = True
|
|
||||||
for relid in relation_ids('mon'):
|
|
||||||
for unit in related_units(relid):
|
|
||||||
addr = relation_get('ceph-public-address', unit, relid)
|
|
||||||
units[unit] = addr is not None
|
|
||||||
return units
|
|
||||||
|
|
||||||
|
|
||||||
def reformat_osd():
|
|
||||||
if config('osd-reformat'):
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def get_devices():
|
|
||||||
devices = []
|
|
||||||
|
|
||||||
if config('osd-devices'):
|
|
||||||
for path in config('osd-devices').split(' '):
|
|
||||||
path = path.strip()
|
|
||||||
# Make sure its a device which is specified using an
|
|
||||||
# absolute path so that the current working directory
|
|
||||||
# or any relative path under this directory is not used
|
|
||||||
if os.path.isabs(path):
|
|
||||||
devices.append(os.path.realpath(path))
|
|
||||||
|
|
||||||
# List storage instances for the 'osd-devices'
|
|
||||||
# store declared for this charm too, and add
|
|
||||||
# their block device paths to the list.
|
|
||||||
storage_ids = storage_list('osd-devices')
|
|
||||||
devices.extend((storage_get('location', s) for s in storage_ids))
|
|
||||||
return devices
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('mon-relation-joined')
|
|
||||||
def mon_relation_joined():
|
|
||||||
public_addr = get_public_addr()
|
|
||||||
for relid in relation_ids('mon'):
|
|
||||||
relation_set(relation_id=relid,
|
|
||||||
relation_settings={'ceph-public-address': public_addr})
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('mon-relation-departed',
|
|
||||||
'mon-relation-changed')
|
|
||||||
def mon_relation():
|
|
||||||
emit_cephconf()
|
|
||||||
|
|
||||||
moncount = int(config('monitor-count'))
|
|
||||||
if len(get_mon_hosts()) >= moncount:
|
|
||||||
status_set('maintenance', 'Bootstrapping MON cluster')
|
|
||||||
ceph.bootstrap_monitor_cluster(config('monitor-secret'))
|
|
||||||
ceph.wait_for_bootstrap()
|
|
||||||
if cmp_pkgrevno('ceph', '12.0.0') >= 0:
|
|
||||||
status_set('maintenance', 'Bootstrapping Ceph MGR')
|
|
||||||
ceph.bootstrap_manager()
|
|
||||||
for dev in get_devices():
|
|
||||||
ceph.osdize(dev, config('osd-format'), get_osd_journal(),
|
|
||||||
reformat_osd(), config('ignore-device-errors'),
|
|
||||||
bluestore=config('bluestore'))
|
|
||||||
ceph.start_osds(get_devices())
|
|
||||||
ceph.wait_for_quorum()
|
|
||||||
notify_osds()
|
|
||||||
notify_radosgws()
|
|
||||||
notify_client()
|
|
||||||
else:
|
|
||||||
log('Not enough mons ({}), punting.'
|
|
||||||
.format(len(get_mon_hosts())))
|
|
||||||
|
|
||||||
|
|
||||||
def notify_osds():
|
|
||||||
for relid in relation_ids('osd'):
|
|
||||||
osd_relation(relid)
|
|
||||||
|
|
||||||
|
|
||||||
def notify_radosgws():
|
|
||||||
for relid in relation_ids('radosgw'):
|
|
||||||
for unit in related_units(relid):
|
|
||||||
radosgw_relation(relid=relid, unit=unit)
|
|
||||||
|
|
||||||
|
|
||||||
def notify_client():
|
|
||||||
for relid in relation_ids('client'):
|
|
||||||
client_relation_joined(relid)
|
|
||||||
for unit in related_units(relid):
|
|
||||||
client_relation_changed(relid, unit)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('osd-relation-changed')
|
|
||||||
@hooks.hook('osd-relation-joined')
|
|
||||||
def osd_relation(relid=None):
|
|
||||||
if ceph.is_quorum():
|
|
||||||
log('mon cluster in quorum - providing fsid & keys')
|
|
||||||
public_addr = get_public_addr()
|
|
||||||
data = {
|
|
||||||
'fsid': config('fsid'),
|
|
||||||
'osd_bootstrap_key': ceph.get_osd_bootstrap_key(),
|
|
||||||
'auth': config('auth-supported'),
|
|
||||||
'ceph-public-address': public_addr,
|
|
||||||
'osd_upgrade_key': ceph.get_named_key('osd-upgrade',
|
|
||||||
caps=ceph.osd_upgrade_caps),
|
|
||||||
}
|
|
||||||
|
|
||||||
unit = remote_unit()
|
|
||||||
settings = relation_get(rid=relid, unit=unit)
|
|
||||||
"""Process broker request(s)."""
|
|
||||||
if 'broker_req' in settings:
|
|
||||||
if ceph.is_leader():
|
|
||||||
rsp = process_requests(settings['broker_req'])
|
|
||||||
unit_id = unit.replace('/', '-')
|
|
||||||
unit_response_key = 'broker-rsp-' + unit_id
|
|
||||||
data[unit_response_key] = rsp
|
|
||||||
else:
|
|
||||||
log("Not leader - ignoring broker request", level=DEBUG)
|
|
||||||
|
|
||||||
relation_set(relation_id=relid,
|
|
||||||
relation_settings=data)
|
|
||||||
else:
|
|
||||||
log('mon cluster not in quorum - deferring fsid provision')
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('radosgw-relation-changed')
|
|
||||||
@hooks.hook('radosgw-relation-joined')
|
|
||||||
def radosgw_relation(relid=None, unit=None):
|
|
||||||
# Install radosgw for admin tools
|
|
||||||
apt_install(packages=filter_installed_packages(['radosgw']))
|
|
||||||
if not unit:
|
|
||||||
unit = remote_unit()
|
|
||||||
|
|
||||||
if ceph.is_quorum():
|
|
||||||
log('mon cluster in quorum - providing radosgw with keys')
|
|
||||||
public_addr = get_public_addr()
|
|
||||||
data = {
|
|
||||||
'fsid': config('fsid'),
|
|
||||||
'radosgw_key': ceph.get_radosgw_key(),
|
|
||||||
'auth': config('auth-supported'),
|
|
||||||
'ceph-public-address': public_addr,
|
|
||||||
}
|
|
||||||
|
|
||||||
settings = relation_get(rid=relid, unit=unit)
|
|
||||||
"""Process broker request(s)."""
|
|
||||||
if 'broker_req' in settings:
|
|
||||||
if ceph.is_leader():
|
|
||||||
rsp = process_requests(settings['broker_req'])
|
|
||||||
unit_id = unit.replace('/', '-')
|
|
||||||
unit_response_key = 'broker-rsp-' + unit_id
|
|
||||||
data[unit_response_key] = rsp
|
|
||||||
else:
|
|
||||||
log("Not leader - ignoring broker request", level=DEBUG)
|
|
||||||
|
|
||||||
relation_set(relation_id=relid, relation_settings=data)
|
|
||||||
else:
|
|
||||||
log('mon cluster not in quorum - deferring key provision')
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('client-relation-joined')
|
|
||||||
def client_relation_joined(relid=None):
|
|
||||||
if ceph.is_quorum():
|
|
||||||
log('mon cluster in quorum - providing client with keys')
|
|
||||||
service_name = None
|
|
||||||
if relid is None:
|
|
||||||
units = [remote_unit()]
|
|
||||||
service_name = units[0].split('/')[0]
|
|
||||||
else:
|
|
||||||
units = related_units(relid)
|
|
||||||
if len(units) > 0:
|
|
||||||
service_name = units[0].split('/')[0]
|
|
||||||
|
|
||||||
if service_name is not None:
|
|
||||||
public_addr = get_public_addr()
|
|
||||||
data = {'key': ceph.get_named_key(service_name),
|
|
||||||
'auth': config('auth-supported'),
|
|
||||||
'ceph-public-address': public_addr}
|
|
||||||
if config('default-rbd-features'):
|
|
||||||
data['rbd-features'] = config('default-rbd-features')
|
|
||||||
relation_set(relation_id=relid,
|
|
||||||
relation_settings=data)
|
|
||||||
else:
|
|
||||||
log('mon cluster not in quorum - deferring key provision')
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('client-relation-changed')
|
|
||||||
def client_relation_changed(relid=None, unit=None):
|
|
||||||
"""Process broker requests from ceph client relations."""
|
|
||||||
if ceph.is_quorum():
|
|
||||||
if not unit:
|
|
||||||
unit = remote_unit()
|
|
||||||
settings = relation_get(rid=relid, unit=unit)
|
|
||||||
if 'broker_req' in settings:
|
|
||||||
if not ceph.is_leader():
|
|
||||||
log("Not leader - ignoring broker request", level=DEBUG)
|
|
||||||
else:
|
|
||||||
rsp = process_requests(settings['broker_req'])
|
|
||||||
unit_id = remote_unit().replace('/', '-')
|
|
||||||
unit_response_key = 'broker-rsp-' + unit_id
|
|
||||||
# broker_rsp is being left for backward compatibility,
|
|
||||||
# unit_response_key superscedes it
|
|
||||||
data = {
|
|
||||||
'broker_rsp': rsp,
|
|
||||||
unit_response_key: rsp,
|
|
||||||
}
|
|
||||||
relation_set(relation_id=relid,
|
|
||||||
relation_settings=data)
|
|
||||||
else:
|
|
||||||
log('mon cluster not in quorum', level=DEBUG)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('bootstrap-source-relation-joined')
|
|
||||||
def bootstrap_source_joined(relid=None):
|
|
||||||
"""Provide required information to bootstrap ceph-mon cluster"""
|
|
||||||
if ceph.is_quorum():
|
|
||||||
source = {
|
|
||||||
'fsid': config('fsid'),
|
|
||||||
'monitor-secret': config('monitor-secret'),
|
|
||||||
'ceph-public-address': get_public_addr(),
|
|
||||||
}
|
|
||||||
relation_set(relation_id=relid,
|
|
||||||
relation_settings=source)
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('upgrade-charm.real')
|
|
||||||
@harden()
|
|
||||||
def upgrade_charm():
|
|
||||||
emit_cephconf()
|
|
||||||
apt_install(packages=filter_installed_packages(ceph.determine_packages()),
|
|
||||||
fatal=True)
|
|
||||||
ceph.update_monfs()
|
|
||||||
mon_relation_joined()
|
|
||||||
if is_relation_made("nrpe-external-master"):
|
|
||||||
update_nrpe_config()
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('start')
|
|
||||||
def start():
|
|
||||||
# In case we're being redeployed to the same machines, try
|
|
||||||
# to make sure everything is running as soon as possible.
|
|
||||||
if ceph.systemd():
|
|
||||||
service_restart('ceph-mon')
|
|
||||||
else:
|
|
||||||
service_restart('ceph-mon-all')
|
|
||||||
if cmp_pkgrevno('ceph', '12.0.0') >= 0:
|
|
||||||
service_restart('ceph-mgr@{}'.format(socket.gethostname()))
|
|
||||||
if ceph.is_bootstrapped():
|
|
||||||
ceph.start_osds(get_devices())
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('nrpe-external-master-relation-joined')
|
|
||||||
@hooks.hook('nrpe-external-master-relation-changed')
|
|
||||||
def update_nrpe_config():
|
|
||||||
# python-dbus is used by check_upstart_job
|
|
||||||
# lockfile-create is used by collect_ceph_status
|
|
||||||
apt_install(['python-dbus', 'lockfile-progs'])
|
|
||||||
log('Refreshing nagios checks')
|
|
||||||
if os.path.isdir(NAGIOS_PLUGINS):
|
|
||||||
rsync(os.path.join(os.getenv('CHARM_DIR'), 'files', 'nagios',
|
|
||||||
'check_ceph_status.py'),
|
|
||||||
os.path.join(NAGIOS_PLUGINS, 'check_ceph_status.py'))
|
|
||||||
|
|
||||||
script = os.path.join(SCRIPTS_DIR, 'collect_ceph_status.sh')
|
|
||||||
rsync(os.path.join(os.getenv('CHARM_DIR'), 'files',
|
|
||||||
'nagios', 'collect_ceph_status.sh'),
|
|
||||||
script)
|
|
||||||
cronjob = "{} root {}\n".format('*/5 * * * *', script)
|
|
||||||
write_file(STATUS_CRONFILE, cronjob)
|
|
||||||
|
|
||||||
# Find out if nrpe set nagios_hostname
|
|
||||||
hostname = nrpe.get_nagios_hostname()
|
|
||||||
current_unit = nrpe.get_nagios_unit_name()
|
|
||||||
nrpe_setup = nrpe.NRPE(hostname=hostname)
|
|
||||||
nrpe_setup.add_check(
|
|
||||||
shortname="ceph",
|
|
||||||
description='Check Ceph health {%s}' % current_unit,
|
|
||||||
check_cmd='check_ceph_status.py -f {}'.format(STATUS_FILE)
|
|
||||||
)
|
|
||||||
nrpe_setup.write()
|
|
||||||
|
|
||||||
|
|
||||||
VERSION_PACKAGE = 'ceph-common'
|
|
||||||
|
|
||||||
|
|
||||||
def assess_status():
|
|
||||||
"""Assess status of current unit"""
|
|
||||||
application_version_set(get_upstream_version(VERSION_PACKAGE))
|
|
||||||
# check to see if the unit is paused.
|
|
||||||
if is_unit_paused_set():
|
|
||||||
status_set('maintenance',
|
|
||||||
"Paused. Use 'resume' action to resume normal service.")
|
|
||||||
return
|
|
||||||
moncount = int(config('monitor-count'))
|
|
||||||
units = get_peer_units()
|
|
||||||
# not enough peers and mon_count > 1
|
|
||||||
if len(units.keys()) < moncount:
|
|
||||||
status_set('blocked', 'Insufficient peer units to bootstrap'
|
|
||||||
' cluster (require {})'.format(moncount))
|
|
||||||
return
|
|
||||||
|
|
||||||
# mon_count > 1, peers, but no ceph-public-address
|
|
||||||
ready = sum(1 for unit_ready in units.itervalues() if unit_ready)
|
|
||||||
if ready < moncount:
|
|
||||||
status_set('waiting', 'Peer units detected, waiting for addresses')
|
|
||||||
return
|
|
||||||
|
|
||||||
# active - bootstrapped + quorum status check
|
|
||||||
if ceph.is_bootstrapped() and ceph.is_quorum():
|
|
||||||
status_set('active', 'Unit is ready and clustered')
|
|
||||||
else:
|
|
||||||
# Unit should be running and clustered, but no quorum
|
|
||||||
# TODO: should this be blocked or waiting?
|
|
||||||
status_set('blocked', 'Unit not clustered (no quorum)')
|
|
||||||
# If there's a pending lock for this unit,
|
|
||||||
# can i get the lock?
|
|
||||||
# reboot the ceph-mon process
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('update-status')
|
|
||||||
@harden()
|
|
||||||
def update_status():
|
|
||||||
log('Updating status.')
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.hook('stop')
|
|
||||||
def stop():
|
|
||||||
# NOTE(jamespage)
|
|
||||||
# Ensure monitor is removed from monmap prior to shutdown
|
|
||||||
# otherwise we end up with odd quorum loss issues during
|
|
||||||
# migration.
|
|
||||||
# NOTE(jamespage): remove is compat with >= firefly
|
|
||||||
cmd = ['ceph', 'mon', 'remove', socket.gethostname()]
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
# NOTE(jamespage)
|
|
||||||
# Pause MON and MGR processes running on this unit, leaving
|
|
||||||
# any OSD processes running, supporting the migration to
|
|
||||||
# using the ceph-mon charm.
|
|
||||||
service_pause('ceph-mon')
|
|
||||||
if cmp_pkgrevno('ceph', '12.0.0') >= 0:
|
|
||||||
service_pause('ceph-mgr@{}'.format(socket.gethostname()))
|
|
||||||
# NOTE(jamespage)
|
|
||||||
# Remove the ceph.conf provided by this charm so
|
|
||||||
# that the ceph.conf from other deployed applications
|
|
||||||
# can take priority post removal.
|
|
||||||
remove_alternative('ceph.conf', ceph_conf_path())
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
hooks.execute(sys.argv)
|
|
||||||
except UnregisteredHookError as e:
|
|
||||||
log('Unknown hook {} - skipping.'.format(e))
|
|
||||||
assess_status()
|
|
|
@ -1,97 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
|
|
||||||
# only standard libraries.
|
|
||||||
from __future__ import print_function
|
|
||||||
from __future__ import absolute_import
|
|
||||||
|
|
||||||
import functools
|
|
||||||
import inspect
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
try:
|
|
||||||
import six # flake8: noqa
|
|
||||||
except ImportError:
|
|
||||||
if sys.version_info.major == 2:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
|
|
||||||
else:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
|
|
||||||
import six # flake8: noqa
|
|
||||||
|
|
||||||
try:
|
|
||||||
import yaml # flake8: noqa
|
|
||||||
except ImportError:
|
|
||||||
if sys.version_info.major == 2:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
|
|
||||||
else:
|
|
||||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
|
|
||||||
import yaml # flake8: noqa
|
|
||||||
|
|
||||||
|
|
||||||
# Holds a list of mapping of mangled function names that have been deprecated
|
|
||||||
# using the @deprecate decorator below. This is so that the warning is only
|
|
||||||
# printed once for each usage of the function.
|
|
||||||
__deprecated_functions = {}
|
|
||||||
|
|
||||||
|
|
||||||
def deprecate(warning, date=None, log=None):
|
|
||||||
"""Add a deprecation warning the first time the function is used.
|
|
||||||
The date, which is a string in semi-ISO8660 format indicate the year-month
|
|
||||||
that the function is officially going to be removed.
|
|
||||||
|
|
||||||
usage:
|
|
||||||
|
|
||||||
@deprecate('use core/fetch/add_source() instead', '2017-04')
|
|
||||||
def contributed_add_source_thing(...):
|
|
||||||
...
|
|
||||||
|
|
||||||
And it then prints to the log ONCE that the function is deprecated.
|
|
||||||
The reason for passing the logging function (log) is so that hookenv.log
|
|
||||||
can be used for a charm if needed.
|
|
||||||
|
|
||||||
:param warning: String to indicat where it has moved ot.
|
|
||||||
:param date: optional sting, in YYYY-MM format to indicate when the
|
|
||||||
function will definitely (probably) be removed.
|
|
||||||
:param log: The log function to call to log. If not, logs to stdout
|
|
||||||
"""
|
|
||||||
def wrap(f):
|
|
||||||
|
|
||||||
@functools.wraps(f)
|
|
||||||
def wrapped_f(*args, **kwargs):
|
|
||||||
try:
|
|
||||||
module = inspect.getmodule(f)
|
|
||||||
file = inspect.getsourcefile(f)
|
|
||||||
lines = inspect.getsourcelines(f)
|
|
||||||
f_name = "{}-{}-{}..{}-{}".format(
|
|
||||||
module.__name__, file, lines[0], lines[-1], f.__name__)
|
|
||||||
except (IOError, TypeError):
|
|
||||||
# assume it was local, so just use the name of the function
|
|
||||||
f_name = f.__name__
|
|
||||||
if f_name not in __deprecated_functions:
|
|
||||||
__deprecated_functions[f_name] = True
|
|
||||||
s = "DEPRECATION WARNING: Function {} is being removed".format(
|
|
||||||
f.__name__)
|
|
||||||
if date:
|
|
||||||
s = "{} on/around {}".format(s, date)
|
|
||||||
if warning:
|
|
||||||
s = "{} : {}".format(s, warning)
|
|
||||||
if log:
|
|
||||||
log(s)
|
|
||||||
else:
|
|
||||||
print(s)
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
return wrapped_f
|
|
||||||
return wrap
|
|
|
@ -1,189 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
import argparse
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from six.moves import zip
|
|
||||||
|
|
||||||
import charmhelpers.core.unitdata
|
|
||||||
|
|
||||||
|
|
||||||
class OutputFormatter(object):
|
|
||||||
def __init__(self, outfile=sys.stdout):
|
|
||||||
self.formats = (
|
|
||||||
"raw",
|
|
||||||
"json",
|
|
||||||
"py",
|
|
||||||
"yaml",
|
|
||||||
"csv",
|
|
||||||
"tab",
|
|
||||||
)
|
|
||||||
self.outfile = outfile
|
|
||||||
|
|
||||||
def add_arguments(self, argument_parser):
|
|
||||||
formatgroup = argument_parser.add_mutually_exclusive_group()
|
|
||||||
choices = self.supported_formats
|
|
||||||
formatgroup.add_argument("--format", metavar='FMT',
|
|
||||||
help="Select output format for returned data, "
|
|
||||||
"where FMT is one of: {}".format(choices),
|
|
||||||
choices=choices, default='raw')
|
|
||||||
for fmt in self.formats:
|
|
||||||
fmtfunc = getattr(self, fmt)
|
|
||||||
formatgroup.add_argument("-{}".format(fmt[0]),
|
|
||||||
"--{}".format(fmt), action='store_const',
|
|
||||||
const=fmt, dest='format',
|
|
||||||
help=fmtfunc.__doc__)
|
|
||||||
|
|
||||||
@property
|
|
||||||
def supported_formats(self):
|
|
||||||
return self.formats
|
|
||||||
|
|
||||||
def raw(self, output):
|
|
||||||
"""Output data as raw string (default)"""
|
|
||||||
if isinstance(output, (list, tuple)):
|
|
||||||
output = '\n'.join(map(str, output))
|
|
||||||
self.outfile.write(str(output))
|
|
||||||
|
|
||||||
def py(self, output):
|
|
||||||
"""Output data as a nicely-formatted python data structure"""
|
|
||||||
import pprint
|
|
||||||
pprint.pprint(output, stream=self.outfile)
|
|
||||||
|
|
||||||
def json(self, output):
|
|
||||||
"""Output data in JSON format"""
|
|
||||||
import json
|
|
||||||
json.dump(output, self.outfile)
|
|
||||||
|
|
||||||
def yaml(self, output):
|
|
||||||
"""Output data in YAML format"""
|
|
||||||
import yaml
|
|
||||||
yaml.safe_dump(output, self.outfile)
|
|
||||||
|
|
||||||
def csv(self, output):
|
|
||||||
"""Output data as excel-compatible CSV"""
|
|
||||||
import csv
|
|
||||||
csvwriter = csv.writer(self.outfile)
|
|
||||||
csvwriter.writerows(output)
|
|
||||||
|
|
||||||
def tab(self, output):
|
|
||||||
"""Output data in excel-compatible tab-delimited format"""
|
|
||||||
import csv
|
|
||||||
csvwriter = csv.writer(self.outfile, dialect=csv.excel_tab)
|
|
||||||
csvwriter.writerows(output)
|
|
||||||
|
|
||||||
def format_output(self, output, fmt='raw'):
|
|
||||||
fmtfunc = getattr(self, fmt)
|
|
||||||
fmtfunc(output)
|
|
||||||
|
|
||||||
|
|
||||||
class CommandLine(object):
|
|
||||||
argument_parser = None
|
|
||||||
subparsers = None
|
|
||||||
formatter = None
|
|
||||||
exit_code = 0
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
if not self.argument_parser:
|
|
||||||
self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
|
|
||||||
if not self.formatter:
|
|
||||||
self.formatter = OutputFormatter()
|
|
||||||
self.formatter.add_arguments(self.argument_parser)
|
|
||||||
if not self.subparsers:
|
|
||||||
self.subparsers = self.argument_parser.add_subparsers(help='Commands')
|
|
||||||
|
|
||||||
def subcommand(self, command_name=None):
|
|
||||||
"""
|
|
||||||
Decorate a function as a subcommand. Use its arguments as the
|
|
||||||
command-line arguments"""
|
|
||||||
def wrapper(decorated):
|
|
||||||
cmd_name = command_name or decorated.__name__
|
|
||||||
subparser = self.subparsers.add_parser(cmd_name,
|
|
||||||
description=decorated.__doc__)
|
|
||||||
for args, kwargs in describe_arguments(decorated):
|
|
||||||
subparser.add_argument(*args, **kwargs)
|
|
||||||
subparser.set_defaults(func=decorated)
|
|
||||||
return decorated
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
def test_command(self, decorated):
|
|
||||||
"""
|
|
||||||
Subcommand is a boolean test function, so bool return values should be
|
|
||||||
converted to a 0/1 exit code.
|
|
||||||
"""
|
|
||||||
decorated._cli_test_command = True
|
|
||||||
return decorated
|
|
||||||
|
|
||||||
def no_output(self, decorated):
|
|
||||||
"""
|
|
||||||
Subcommand is not expected to return a value, so don't print a spurious None.
|
|
||||||
"""
|
|
||||||
decorated._cli_no_output = True
|
|
||||||
return decorated
|
|
||||||
|
|
||||||
def subcommand_builder(self, command_name, description=None):
|
|
||||||
"""
|
|
||||||
Decorate a function that builds a subcommand. Builders should accept a
|
|
||||||
single argument (the subparser instance) and return the function to be
|
|
||||||
run as the command."""
|
|
||||||
def wrapper(decorated):
|
|
||||||
subparser = self.subparsers.add_parser(command_name)
|
|
||||||
func = decorated(subparser)
|
|
||||||
subparser.set_defaults(func=func)
|
|
||||||
subparser.description = description or func.__doc__
|
|
||||||
return wrapper
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
"Run cli, processing arguments and executing subcommands."
|
|
||||||
arguments = self.argument_parser.parse_args()
|
|
||||||
argspec = inspect.getargspec(arguments.func)
|
|
||||||
vargs = []
|
|
||||||
for arg in argspec.args:
|
|
||||||
vargs.append(getattr(arguments, arg))
|
|
||||||
if argspec.varargs:
|
|
||||||
vargs.extend(getattr(arguments, argspec.varargs))
|
|
||||||
output = arguments.func(*vargs)
|
|
||||||
if getattr(arguments.func, '_cli_test_command', False):
|
|
||||||
self.exit_code = 0 if output else 1
|
|
||||||
output = ''
|
|
||||||
if getattr(arguments.func, '_cli_no_output', False):
|
|
||||||
output = ''
|
|
||||||
self.formatter.format_output(output, arguments.format)
|
|
||||||
if charmhelpers.core.unitdata._KV:
|
|
||||||
charmhelpers.core.unitdata._KV.flush()
|
|
||||||
|
|
||||||
|
|
||||||
cmdline = CommandLine()
|
|
||||||
|
|
||||||
|
|
||||||
def describe_arguments(func):
|
|
||||||
"""
|
|
||||||
Analyze a function's signature and return a data structure suitable for
|
|
||||||
passing in as arguments to an argparse parser's add_argument() method."""
|
|
||||||
|
|
||||||
argspec = inspect.getargspec(func)
|
|
||||||
# we should probably raise an exception somewhere if func includes **kwargs
|
|
||||||
if argspec.defaults:
|
|
||||||
positional_args = argspec.args[:-len(argspec.defaults)]
|
|
||||||
keyword_names = argspec.args[-len(argspec.defaults):]
|
|
||||||
for arg, default in zip(keyword_names, argspec.defaults):
|
|
||||||
yield ('--{}'.format(arg),), {'default': default}
|
|
||||||
else:
|
|
||||||
positional_args = argspec.args
|
|
||||||
|
|
||||||
for arg in positional_args:
|
|
||||||
yield (arg,), {}
|
|
||||||
if argspec.varargs:
|
|
||||||
yield (argspec.varargs,), {'nargs': '*'}
|
|
|
@ -1,34 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.contrib.benchmark import Benchmark
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand(command_name='benchmark-start')
|
|
||||||
def start():
|
|
||||||
Benchmark.start()
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand(command_name='benchmark-finish')
|
|
||||||
def finish():
|
|
||||||
Benchmark.finish()
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
|
|
||||||
def service(subparser):
|
|
||||||
subparser.add_argument("value", help="The composite score.")
|
|
||||||
subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
|
|
||||||
subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
|
|
||||||
return Benchmark.set_composite_score
|
|
|
@ -1,30 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
This module loads sub-modules into the python runtime so they can be
|
|
||||||
discovered via the inspect module. In order to prevent flake8 from (rightfully)
|
|
||||||
telling us these are unused modules, throw a ' # noqa' at the end of each import
|
|
||||||
so that the warning is suppressed.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from . import CommandLine # noqa
|
|
||||||
|
|
||||||
"""
|
|
||||||
Import the sub-modules which have decorated subcommands to register with chlp.
|
|
||||||
"""
|
|
||||||
from . import host # noqa
|
|
||||||
from . import benchmark # noqa
|
|
||||||
from . import unitdata # noqa
|
|
||||||
from . import hookenv # noqa
|
|
|
@ -1,21 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
|
|
||||||
cmdline.subcommand('service-name')(hookenv.service_name)
|
|
||||||
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
|
|
|
@ -1,29 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import host
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand()
|
|
||||||
def mounts():
|
|
||||||
"List mounts"
|
|
||||||
return host.mounts()
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('service', description="Control system services")
|
|
||||||
def service(subparser):
|
|
||||||
subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
|
|
||||||
subparser.add_argument("service_name", help="Name of the service to control")
|
|
||||||
return host.service
|
|
|
@ -1,37 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import unitdata
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
|
|
||||||
def unitdata_cmd(subparser):
|
|
||||||
nested = subparser.add_subparsers()
|
|
||||||
get_cmd = nested.add_parser('get', help='Retrieve data')
|
|
||||||
get_cmd.add_argument('key', help='Key to retrieve the value of')
|
|
||||||
get_cmd.set_defaults(action='get', value=None)
|
|
||||||
set_cmd = nested.add_parser('set', help='Store data')
|
|
||||||
set_cmd.add_argument('key', help='Key to set')
|
|
||||||
set_cmd.add_argument('value', help='Value to store')
|
|
||||||
set_cmd.set_defaults(action='set')
|
|
||||||
|
|
||||||
def _unitdata_cmd(action, key, value):
|
|
||||||
if action == 'get':
|
|
||||||
return unitdata.kv().get(key)
|
|
||||||
elif action == 'set':
|
|
||||||
unitdata.kv().set(key, value)
|
|
||||||
unitdata.kv().flush()
|
|
||||||
return ''
|
|
||||||
return _unitdata_cmd
|
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
|
@ -1,445 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
"""Compatibility with the nrpe-external-master charm"""
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import pwd
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
import shutil
|
|
||||||
import re
|
|
||||||
import shlex
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
hook_name,
|
|
||||||
local_unit,
|
|
||||||
log,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relations_of_type,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.host import service
|
|
||||||
from charmhelpers.core import host
|
|
||||||
|
|
||||||
# This module adds compatibility with the nrpe-external-master and plain nrpe
|
|
||||||
# subordinate charms. To use it in your charm:
|
|
||||||
#
|
|
||||||
# 1. Update metadata.yaml
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# nrpe-external-master:
|
|
||||||
# interface: nrpe-external-master
|
|
||||||
# scope: container
|
|
||||||
#
|
|
||||||
# and/or
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# local-monitors:
|
|
||||||
# interface: local-monitors
|
|
||||||
# scope: container
|
|
||||||
|
|
||||||
#
|
|
||||||
# 2. Add the following to config.yaml
|
|
||||||
#
|
|
||||||
# nagios_context:
|
|
||||||
# default: "juju"
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# Used by the nrpe subordinate charms.
|
|
||||||
# A string that will be prepended to instance name to set the host name
|
|
||||||
# in nagios. So for instance the hostname would be something like:
|
|
||||||
# juju-myservice-0
|
|
||||||
# If you're running multiple environments with the same services in them
|
|
||||||
# this allows you to differentiate between them.
|
|
||||||
# nagios_servicegroups:
|
|
||||||
# default: ""
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# A comma-separated list of nagios servicegroups.
|
|
||||||
# If left empty, the nagios_context will be used as the servicegroup
|
|
||||||
#
|
|
||||||
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
|
|
||||||
#
|
|
||||||
# 4. Update your hooks.py with something like this:
|
|
||||||
#
|
|
||||||
# from charmsupport.nrpe import NRPE
|
|
||||||
# (...)
|
|
||||||
# def update_nrpe_config():
|
|
||||||
# nrpe_compat = NRPE()
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# shortname = "myservice",
|
|
||||||
# description = "Check MyService",
|
|
||||||
# check_cmd = "check_http -w 2 -c 10 http://localhost"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# "myservice_other",
|
|
||||||
# "Check for widget failures",
|
|
||||||
# check_cmd = "/srv/myapp/scripts/widget_check"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.write()
|
|
||||||
#
|
|
||||||
# def config_changed():
|
|
||||||
# (...)
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def nrpe_external_master_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def local_monitors_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# 4.a If your charm is a subordinate charm set primary=False
|
|
||||||
#
|
|
||||||
# from charmsupport.nrpe import NRPE
|
|
||||||
# (...)
|
|
||||||
# def update_nrpe_config():
|
|
||||||
# nrpe_compat = NRPE(primary=False)
|
|
||||||
#
|
|
||||||
# 5. ln -s hooks.py nrpe-external-master-relation-changed
|
|
||||||
# ln -s hooks.py local-monitors-relation-changed
|
|
||||||
|
|
||||||
|
|
||||||
class CheckException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class Check(object):
|
|
||||||
shortname_re = '[A-Za-z0-9-_.]+$'
|
|
||||||
service_template = ("""
|
|
||||||
#---------------------------------------------------
|
|
||||||
# This file is Juju managed
|
|
||||||
#---------------------------------------------------
|
|
||||||
define service {{
|
|
||||||
use active-service
|
|
||||||
host_name {nagios_hostname}
|
|
||||||
service_description {nagios_hostname}[{shortname}] """
|
|
||||||
"""{description}
|
|
||||||
check_command check_nrpe!{command}
|
|
||||||
servicegroups {nagios_servicegroup}
|
|
||||||
}}
|
|
||||||
""")
|
|
||||||
|
|
||||||
def __init__(self, shortname, description, check_cmd):
|
|
||||||
super(Check, self).__init__()
|
|
||||||
# XXX: could be better to calculate this from the service name
|
|
||||||
if not re.match(self.shortname_re, shortname):
|
|
||||||
raise CheckException("shortname must match {}".format(
|
|
||||||
Check.shortname_re))
|
|
||||||
self.shortname = shortname
|
|
||||||
self.command = "check_{}".format(shortname)
|
|
||||||
# Note: a set of invalid characters is defined by the
|
|
||||||
# Nagios server config
|
|
||||||
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
|
|
||||||
self.description = description
|
|
||||||
self.check_cmd = self._locate_cmd(check_cmd)
|
|
||||||
|
|
||||||
def _get_check_filename(self):
|
|
||||||
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
|
|
||||||
|
|
||||||
def _get_service_filename(self, hostname):
|
|
||||||
return os.path.join(NRPE.nagios_exportdir,
|
|
||||||
'service__{}_{}.cfg'.format(hostname, self.command))
|
|
||||||
|
|
||||||
def _locate_cmd(self, check_cmd):
|
|
||||||
search_path = (
|
|
||||||
'/usr/lib/nagios/plugins',
|
|
||||||
'/usr/local/lib/nagios/plugins',
|
|
||||||
)
|
|
||||||
parts = shlex.split(check_cmd)
|
|
||||||
for path in search_path:
|
|
||||||
if os.path.exists(os.path.join(path, parts[0])):
|
|
||||||
command = os.path.join(path, parts[0])
|
|
||||||
if len(parts) > 1:
|
|
||||||
command += " " + " ".join(parts[1:])
|
|
||||||
return command
|
|
||||||
log('Check command not found: {}'.format(parts[0]))
|
|
||||||
return ''
|
|
||||||
|
|
||||||
def _remove_service_files(self):
|
|
||||||
if not os.path.exists(NRPE.nagios_exportdir):
|
|
||||||
return
|
|
||||||
for f in os.listdir(NRPE.nagios_exportdir):
|
|
||||||
if f.endswith('_{}.cfg'.format(self.command)):
|
|
||||||
os.remove(os.path.join(NRPE.nagios_exportdir, f))
|
|
||||||
|
|
||||||
def remove(self, hostname):
|
|
||||||
nrpe_check_file = self._get_check_filename()
|
|
||||||
if os.path.exists(nrpe_check_file):
|
|
||||||
os.remove(nrpe_check_file)
|
|
||||||
self._remove_service_files()
|
|
||||||
|
|
||||||
def write(self, nagios_context, hostname, nagios_servicegroups):
|
|
||||||
nrpe_check_file = self._get_check_filename()
|
|
||||||
with open(nrpe_check_file, 'w') as nrpe_check_config:
|
|
||||||
nrpe_check_config.write("# check {}\n".format(self.shortname))
|
|
||||||
if nagios_servicegroups:
|
|
||||||
nrpe_check_config.write(
|
|
||||||
"# The following header was added automatically by juju\n")
|
|
||||||
nrpe_check_config.write(
|
|
||||||
"# Modifying it will affect nagios monitoring and alerting\n")
|
|
||||||
nrpe_check_config.write(
|
|
||||||
"# servicegroups: {}\n".format(nagios_servicegroups))
|
|
||||||
nrpe_check_config.write("command[{}]={}\n".format(
|
|
||||||
self.command, self.check_cmd))
|
|
||||||
|
|
||||||
if not os.path.exists(NRPE.nagios_exportdir):
|
|
||||||
log('Not writing service config as {} is not accessible'.format(
|
|
||||||
NRPE.nagios_exportdir))
|
|
||||||
else:
|
|
||||||
self.write_service_config(nagios_context, hostname,
|
|
||||||
nagios_servicegroups)
|
|
||||||
|
|
||||||
def write_service_config(self, nagios_context, hostname,
|
|
||||||
nagios_servicegroups):
|
|
||||||
self._remove_service_files()
|
|
||||||
|
|
||||||
templ_vars = {
|
|
||||||
'nagios_hostname': hostname,
|
|
||||||
'nagios_servicegroup': nagios_servicegroups,
|
|
||||||
'description': self.description,
|
|
||||||
'shortname': self.shortname,
|
|
||||||
'command': self.command,
|
|
||||||
}
|
|
||||||
nrpe_service_text = Check.service_template.format(**templ_vars)
|
|
||||||
nrpe_service_file = self._get_service_filename(hostname)
|
|
||||||
with open(nrpe_service_file, 'w') as nrpe_service_config:
|
|
||||||
nrpe_service_config.write(str(nrpe_service_text))
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
subprocess.call(self.check_cmd)
|
|
||||||
|
|
||||||
|
|
||||||
class NRPE(object):
|
|
||||||
nagios_logdir = '/var/log/nagios'
|
|
||||||
nagios_exportdir = '/var/lib/nagios/export'
|
|
||||||
nrpe_confdir = '/etc/nagios/nrpe.d'
|
|
||||||
homedir = '/var/lib/nagios' # home dir provided by nagios-nrpe-server
|
|
||||||
|
|
||||||
def __init__(self, hostname=None, primary=True):
|
|
||||||
super(NRPE, self).__init__()
|
|
||||||
self.config = config()
|
|
||||||
self.primary = primary
|
|
||||||
self.nagios_context = self.config['nagios_context']
|
|
||||||
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
|
|
||||||
self.nagios_servicegroups = self.config['nagios_servicegroups']
|
|
||||||
else:
|
|
||||||
self.nagios_servicegroups = self.nagios_context
|
|
||||||
self.unit_name = local_unit().replace('/', '-')
|
|
||||||
if hostname:
|
|
||||||
self.hostname = hostname
|
|
||||||
else:
|
|
||||||
nagios_hostname = get_nagios_hostname()
|
|
||||||
if nagios_hostname:
|
|
||||||
self.hostname = nagios_hostname
|
|
||||||
else:
|
|
||||||
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
|
|
||||||
self.checks = []
|
|
||||||
# Iff in an nrpe-external-master relation hook, set primary status
|
|
||||||
relation = relation_ids('nrpe-external-master')
|
|
||||||
if relation:
|
|
||||||
log("Setting charm primary status {}".format(primary))
|
|
||||||
for rid in relation_ids('nrpe-external-master'):
|
|
||||||
relation_set(relation_id=rid, relation_settings={'primary': self.primary})
|
|
||||||
|
|
||||||
def add_check(self, *args, **kwargs):
|
|
||||||
self.checks.append(Check(*args, **kwargs))
|
|
||||||
|
|
||||||
def remove_check(self, *args, **kwargs):
|
|
||||||
if kwargs.get('shortname') is None:
|
|
||||||
raise ValueError('shortname of check must be specified')
|
|
||||||
|
|
||||||
# Use sensible defaults if they're not specified - these are not
|
|
||||||
# actually used during removal, but they're required for constructing
|
|
||||||
# the Check object; check_disk is chosen because it's part of the
|
|
||||||
# nagios-plugins-basic package.
|
|
||||||
if kwargs.get('check_cmd') is None:
|
|
||||||
kwargs['check_cmd'] = 'check_disk'
|
|
||||||
if kwargs.get('description') is None:
|
|
||||||
kwargs['description'] = ''
|
|
||||||
|
|
||||||
check = Check(*args, **kwargs)
|
|
||||||
check.remove(self.hostname)
|
|
||||||
|
|
||||||
def write(self):
|
|
||||||
try:
|
|
||||||
nagios_uid = pwd.getpwnam('nagios').pw_uid
|
|
||||||
nagios_gid = grp.getgrnam('nagios').gr_gid
|
|
||||||
except Exception:
|
|
||||||
log("Nagios user not set up, nrpe checks not updated")
|
|
||||||
return
|
|
||||||
|
|
||||||
if not os.path.exists(NRPE.nagios_logdir):
|
|
||||||
os.mkdir(NRPE.nagios_logdir)
|
|
||||||
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
|
|
||||||
|
|
||||||
nrpe_monitors = {}
|
|
||||||
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
|
|
||||||
for nrpecheck in self.checks:
|
|
||||||
nrpecheck.write(self.nagios_context, self.hostname,
|
|
||||||
self.nagios_servicegroups)
|
|
||||||
nrpe_monitors[nrpecheck.shortname] = {
|
|
||||||
"command": nrpecheck.command,
|
|
||||||
}
|
|
||||||
|
|
||||||
# update-status hooks are configured to firing every 5 minutes by
|
|
||||||
# default. When nagios-nrpe-server is restarted, the nagios server
|
|
||||||
# reports checks failing causing unneccessary alerts. Let's not restart
|
|
||||||
# on update-status hooks.
|
|
||||||
if not hook_name() == 'update-status':
|
|
||||||
service('restart', 'nagios-nrpe-server')
|
|
||||||
|
|
||||||
monitor_ids = relation_ids("local-monitors") + \
|
|
||||||
relation_ids("nrpe-external-master")
|
|
||||||
for rid in monitor_ids:
|
|
||||||
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Query relation with nrpe subordinate, return the nagios_host_context
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
for rel in relations_of_type(relation_name):
|
|
||||||
if 'nagios_host_context' in rel:
|
|
||||||
return rel['nagios_host_context']
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostname(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Query relation with nrpe subordinate, return the nagios_hostname
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
for rel in relations_of_type(relation_name):
|
|
||||||
if 'nagios_hostname' in rel:
|
|
||||||
return rel['nagios_hostname']
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_unit_name(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Return the nagios unit name prepended with host_context if needed
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
host_context = get_nagios_hostcontext(relation_name)
|
|
||||||
if host_context:
|
|
||||||
unit = "%s:%s" % (host_context, local_unit())
|
|
||||||
else:
|
|
||||||
unit = local_unit()
|
|
||||||
return unit
|
|
||||||
|
|
||||||
|
|
||||||
def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
|
|
||||||
"""
|
|
||||||
Add checks for each service in list
|
|
||||||
|
|
||||||
:param NRPE nrpe: NRPE object to add check to
|
|
||||||
:param list services: List of services to check
|
|
||||||
:param str unit_name: Unit name to use in check description
|
|
||||||
:param bool immediate_check: For sysv init, run the service check immediately
|
|
||||||
"""
|
|
||||||
for svc in services:
|
|
||||||
# Don't add a check for these services from neutron-gateway
|
|
||||||
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:
|
|
||||||
next
|
|
||||||
|
|
||||||
upstart_init = '/etc/init/%s.conf' % svc
|
|
||||||
sysv_init = '/etc/init.d/%s' % svc
|
|
||||||
|
|
||||||
if host.init_is_systemd():
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='process check {%s}' % unit_name,
|
|
||||||
check_cmd='check_systemd.py %s' % svc
|
|
||||||
)
|
|
||||||
elif os.path.exists(upstart_init):
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='process check {%s}' % unit_name,
|
|
||||||
check_cmd='check_upstart_job %s' % svc
|
|
||||||
)
|
|
||||||
elif os.path.exists(sysv_init):
|
|
||||||
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
|
|
||||||
checkpath = '%s/service-check-%s.txt' % (nrpe.homedir, svc)
|
|
||||||
croncmd = (
|
|
||||||
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
|
|
||||||
'-e -s /etc/init.d/%s status' % svc
|
|
||||||
)
|
|
||||||
cron_file = '*/5 * * * * root %s > %s\n' % (croncmd, checkpath)
|
|
||||||
f = open(cronpath, 'w')
|
|
||||||
f.write(cron_file)
|
|
||||||
f.close()
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='service check {%s}' % unit_name,
|
|
||||||
check_cmd='check_status_file.py -f %s' % checkpath,
|
|
||||||
)
|
|
||||||
# if /var/lib/nagios doesn't exist open(checkpath, 'w') will fail
|
|
||||||
# (LP: #1670223).
|
|
||||||
if immediate_check and os.path.isdir(nrpe.homedir):
|
|
||||||
f = open(checkpath, 'w')
|
|
||||||
subprocess.call(
|
|
||||||
croncmd.split(),
|
|
||||||
stdout=f,
|
|
||||||
stderr=subprocess.STDOUT
|
|
||||||
)
|
|
||||||
f.close()
|
|
||||||
os.chmod(checkpath, 0o644)
|
|
||||||
|
|
||||||
|
|
||||||
def copy_nrpe_checks():
|
|
||||||
"""
|
|
||||||
Copy the nrpe checks into place
|
|
||||||
|
|
||||||
"""
|
|
||||||
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
|
|
||||||
nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
|
|
||||||
'charmhelpers', 'contrib', 'openstack',
|
|
||||||
'files')
|
|
||||||
|
|
||||||
if not os.path.exists(NAGIOS_PLUGINS):
|
|
||||||
os.makedirs(NAGIOS_PLUGINS)
|
|
||||||
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
|
|
||||||
if os.path.isfile(fname):
|
|
||||||
shutil.copy2(fname,
|
|
||||||
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
|
|
||||||
|
|
||||||
|
|
||||||
def add_haproxy_checks(nrpe, unit_name):
|
|
||||||
"""
|
|
||||||
Add checks for each service in list
|
|
||||||
|
|
||||||
:param NRPE nrpe: NRPE object to add check to
|
|
||||||
:param str unit_name: Unit name to use in check description
|
|
||||||
"""
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname='haproxy_servers',
|
|
||||||
description='Check HAProxy {%s}' % unit_name,
|
|
||||||
check_cmd='check_haproxy.sh')
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname='haproxy_queue',
|
|
||||||
description='Check HAProxy queue depth {%s}' % unit_name,
|
|
||||||
check_cmd='check_haproxy_queue_depth.sh')
|
|
|
@ -1,173 +0,0 @@
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
'''
|
|
||||||
Functions for managing volumes in juju units. One volume is supported per unit.
|
|
||||||
Subordinates may have their own storage, provided it is on its own partition.
|
|
||||||
|
|
||||||
Configuration stanzas::
|
|
||||||
|
|
||||||
volume-ephemeral:
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
description: >
|
|
||||||
If false, a volume is mounted as sepecified in "volume-map"
|
|
||||||
If true, ephemeral storage will be used, meaning that log data
|
|
||||||
will only exist as long as the machine. YOU HAVE BEEN WARNED.
|
|
||||||
volume-map:
|
|
||||||
type: string
|
|
||||||
default: {}
|
|
||||||
description: >
|
|
||||||
YAML map of units to device names, e.g:
|
|
||||||
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
|
|
||||||
Service units will raise a configure-error if volume-ephemeral
|
|
||||||
is 'true' and no volume-map value is set. Use 'juju set' to set a
|
|
||||||
value and 'juju resolved' to complete configuration.
|
|
||||||
|
|
||||||
Usage::
|
|
||||||
|
|
||||||
from charmsupport.volumes import configure_volume, VolumeConfigurationError
|
|
||||||
from charmsupport.hookenv import log, ERROR
|
|
||||||
def post_mount_hook():
|
|
||||||
stop_service('myservice')
|
|
||||||
def post_mount_hook():
|
|
||||||
start_service('myservice')
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
configure_volume(before_change=pre_mount_hook,
|
|
||||||
after_change=post_mount_hook)
|
|
||||||
except VolumeConfigurationError:
|
|
||||||
log('Storage could not be configured', ERROR)
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
# XXX: Known limitations
|
|
||||||
# - fstab is neither consulted nor updated
|
|
||||||
|
|
||||||
import os
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core import host
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
|
|
||||||
MOUNT_BASE = '/srv/juju/volumes'
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeConfigurationError(Exception):
|
|
||||||
'''Volume configuration data is missing or invalid'''
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_config():
|
|
||||||
'''Gather and sanity-check volume configuration data'''
|
|
||||||
volume_config = {}
|
|
||||||
config = hookenv.config()
|
|
||||||
|
|
||||||
errors = False
|
|
||||||
|
|
||||||
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
|
|
||||||
volume_config['ephemeral'] = True
|
|
||||||
else:
|
|
||||||
volume_config['ephemeral'] = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
|
|
||||||
except yaml.YAMLError as e:
|
|
||||||
hookenv.log("Error parsing YAML volume-map: {}".format(e),
|
|
||||||
hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
if volume_map is None:
|
|
||||||
# probably an empty string
|
|
||||||
volume_map = {}
|
|
||||||
elif not isinstance(volume_map, dict):
|
|
||||||
hookenv.log("Volume-map should be a dictionary, not {}".format(
|
|
||||||
type(volume_map)))
|
|
||||||
errors = True
|
|
||||||
|
|
||||||
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
|
|
||||||
if volume_config['device'] and volume_config['ephemeral']:
|
|
||||||
# asked for ephemeral storage but also defined a volume ID
|
|
||||||
hookenv.log('A volume is defined for this unit, but ephemeral '
|
|
||||||
'storage was requested', hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
elif not volume_config['device'] and not volume_config['ephemeral']:
|
|
||||||
# asked for permanent storage but did not define volume ID
|
|
||||||
hookenv.log('Ephemeral storage was requested, but there is no volume '
|
|
||||||
'defined for this unit.', hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
|
|
||||||
unit_mount_name = hookenv.local_unit().replace('/', '-')
|
|
||||||
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
|
|
||||||
|
|
||||||
if errors:
|
|
||||||
return None
|
|
||||||
return volume_config
|
|
||||||
|
|
||||||
|
|
||||||
def mount_volume(config):
|
|
||||||
if os.path.exists(config['mountpoint']):
|
|
||||||
if not os.path.isdir(config['mountpoint']):
|
|
||||||
hookenv.log('Not a directory: {}'.format(config['mountpoint']))
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
else:
|
|
||||||
host.mkdir(config['mountpoint'])
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
unmount_volume(config)
|
|
||||||
if not host.mount(config['device'], config['mountpoint'], persist=True):
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def unmount_volume(config):
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
if not host.umount(config['mountpoint'], persist=True):
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def managed_mounts():
|
|
||||||
'''List of all mounted managed volumes'''
|
|
||||||
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
|
|
||||||
|
|
||||||
|
|
||||||
def configure_volume(before_change=lambda: None, after_change=lambda: None):
|
|
||||||
'''Set up storage (or don't) according to the charm's volume configuration.
|
|
||||||
Returns the mount point or "ephemeral". before_change and after_change
|
|
||||||
are optional functions to be called if the volume configuration changes.
|
|
||||||
'''
|
|
||||||
|
|
||||||
config = get_config()
|
|
||||||
if not config:
|
|
||||||
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
if config['ephemeral']:
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
before_change()
|
|
||||||
unmount_volume(config)
|
|
||||||
after_change()
|
|
||||||
return 'ephemeral'
|
|
||||||
else:
|
|
||||||
# persistent storage
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
mounts = dict(managed_mounts())
|
|
||||||
if mounts.get(config['mountpoint']) != config['device']:
|
|
||||||
before_change()
|
|
||||||
unmount_volume(config)
|
|
||||||
mount_volume(config)
|
|
||||||
after_change()
|
|
||||||
else:
|
|
||||||
before_change()
|
|
||||||
mount_volume(config)
|
|
||||||
after_change()
|
|
||||||
return config['mountpoint']
|
|
|
@ -1,38 +0,0 @@
|
||||||
# Juju charm-helpers hardening library
|
|
||||||
|
|
||||||
## Description
|
|
||||||
|
|
||||||
This library provides multiple implementations of system and application
|
|
||||||
hardening that conform to the standards of http://hardening.io/.
|
|
||||||
|
|
||||||
Current implementations include:
|
|
||||||
|
|
||||||
* OS
|
|
||||||
* SSH
|
|
||||||
* MySQL
|
|
||||||
* Apache
|
|
||||||
|
|
||||||
## Requirements
|
|
||||||
|
|
||||||
* Juju Charms
|
|
||||||
|
|
||||||
## Usage
|
|
||||||
|
|
||||||
1. Synchronise this library into your charm and add the harden() decorator
|
|
||||||
(from contrib.hardening.harden) to any functions or methods you want to use
|
|
||||||
to trigger hardening of your application/system.
|
|
||||||
|
|
||||||
2. Add a config option called 'harden' to your charm config.yaml and set it to
|
|
||||||
a space-delimited list of hardening modules you want to run e.g. "os ssh"
|
|
||||||
|
|
||||||
3. Override any config defaults (contrib.hardening.defaults) by adding a file
|
|
||||||
called hardening.yaml to your charm root containing the name(s) of the
|
|
||||||
modules whose settings you want override at root level and then any settings
|
|
||||||
with overrides e.g.
|
|
||||||
|
|
||||||
os:
|
|
||||||
general:
|
|
||||||
desktop_enable: True
|
|
||||||
|
|
||||||
4. Now just run your charm as usual and hardening will be applied each time the
|
|
||||||
hook runs.
|
|
|
@ -1,13 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
|
@ -1,17 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from os import path
|
|
||||||
|
|
||||||
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
|
|
|
@ -1,29 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.apache.checks import config
|
|
||||||
|
|
||||||
|
|
||||||
def run_apache_checks():
|
|
||||||
log("Starting Apache hardening checks.", level=DEBUG)
|
|
||||||
checks = config.get_audits()
|
|
||||||
for check in checks:
|
|
||||||
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
|
|
||||||
check.ensure_compliance()
|
|
||||||
|
|
||||||
log("Apache hardening checks complete.", level=DEBUG)
|
|
|
@ -1,101 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
DirectoryPermissionAudit,
|
|
||||||
NoReadWriteForOther,
|
|
||||||
TemplatedFile,
|
|
||||||
DeletedFile
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.apache import DisabledModuleAudit
|
|
||||||
from charmhelpers.contrib.hardening.apache import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get Apache hardening config audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
if subprocess.call(['which', 'apache2'], stdout=subprocess.PIPE) != 0:
|
|
||||||
log("Apache server does not appear to be installed on this node - "
|
|
||||||
"skipping apache hardening", level=INFO)
|
|
||||||
return []
|
|
||||||
|
|
||||||
context = ApacheConfContext()
|
|
||||||
settings = utils.get_settings('apache')
|
|
||||||
audits = [
|
|
||||||
FilePermissionAudit(paths=os.path.join(
|
|
||||||
settings['common']['apache_dir'], 'apache2.conf'),
|
|
||||||
user='root', group='root', mode=0o0640),
|
|
||||||
|
|
||||||
TemplatedFile(os.path.join(settings['common']['apache_dir'],
|
|
||||||
'mods-available/alias.conf'),
|
|
||||||
context,
|
|
||||||
TEMPLATES_DIR,
|
|
||||||
mode=0o0640,
|
|
||||||
user='root',
|
|
||||||
service_actions=[{'service': 'apache2',
|
|
||||||
'actions': ['restart']}]),
|
|
||||||
|
|
||||||
TemplatedFile(os.path.join(settings['common']['apache_dir'],
|
|
||||||
'conf-enabled/99-hardening.conf'),
|
|
||||||
context,
|
|
||||||
TEMPLATES_DIR,
|
|
||||||
mode=0o0640,
|
|
||||||
user='root',
|
|
||||||
service_actions=[{'service': 'apache2',
|
|
||||||
'actions': ['restart']}]),
|
|
||||||
|
|
||||||
DirectoryPermissionAudit(settings['common']['apache_dir'],
|
|
||||||
user='root',
|
|
||||||
group='root',
|
|
||||||
mode=0o0750),
|
|
||||||
|
|
||||||
DisabledModuleAudit(settings['hardening']['modules_to_disable']),
|
|
||||||
|
|
||||||
NoReadWriteForOther(settings['common']['apache_dir']),
|
|
||||||
|
|
||||||
DeletedFile(['/var/www/html/index.html'])
|
|
||||||
]
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class ApacheConfContext(object):
|
|
||||||
"""Defines the set of key/value pairs to set in a apache config file.
|
|
||||||
|
|
||||||
This context, when called, will return a dictionary containing the
|
|
||||||
key/value pairs of setting to specify in the
|
|
||||||
/etc/apache/conf-enabled/hardening.conf file.
|
|
||||||
"""
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('apache')
|
|
||||||
ctxt = settings['hardening']
|
|
||||||
|
|
||||||
out = subprocess.check_output(['apache2', '-v'])
|
|
||||||
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
|
|
||||||
out).group(1)
|
|
||||||
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
|
|
||||||
return ctxt
|
|
|
@ -1,32 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
|
|
||||||
<Location / >
|
|
||||||
<LimitExcept {{ allowed_http_methods }} >
|
|
||||||
# http://httpd.apache.org/docs/2.4/upgrading.html
|
|
||||||
{% if apache_version > '2.2' -%}
|
|
||||||
Require all granted
|
|
||||||
{% else -%}
|
|
||||||
Order Allow,Deny
|
|
||||||
Deny from all
|
|
||||||
{% endif %}
|
|
||||||
</LimitExcept>
|
|
||||||
</Location>
|
|
||||||
|
|
||||||
<Directory />
|
|
||||||
Options -Indexes -FollowSymLinks
|
|
||||||
AllowOverride None
|
|
||||||
</Directory>
|
|
||||||
|
|
||||||
<Directory /var/www/>
|
|
||||||
Options -Indexes -FollowSymLinks
|
|
||||||
AllowOverride None
|
|
||||||
</Directory>
|
|
||||||
|
|
||||||
TraceEnable {{ traceenable }}
|
|
||||||
ServerTokens {{ servertokens }}
|
|
||||||
|
|
||||||
SSLHonorCipherOrder {{ honor_cipher_order }}
|
|
||||||
SSLCipherSuite {{ cipher_suite }}
|
|
|
@ -1,31 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
<IfModule alias_module>
|
|
||||||
#
|
|
||||||
# Aliases: Add here as many aliases as you need (with no limit). The format is
|
|
||||||
# Alias fakename realname
|
|
||||||
#
|
|
||||||
# Note that if you include a trailing / on fakename then the server will
|
|
||||||
# require it to be present in the URL. So "/icons" isn't aliased in this
|
|
||||||
# example, only "/icons/". If the fakename is slash-terminated, then the
|
|
||||||
# realname must also be slash terminated, and if the fakename omits the
|
|
||||||
# trailing slash, the realname must also omit it.
|
|
||||||
#
|
|
||||||
# We include the /icons/ alias for FancyIndexed directory listings. If
|
|
||||||
# you do not use FancyIndexing, you may comment this out.
|
|
||||||
#
|
|
||||||
Alias /icons/ "{{ apache_icondir }}/"
|
|
||||||
|
|
||||||
<Directory "{{ apache_icondir }}">
|
|
||||||
Options -Indexes -MultiViews -FollowSymLinks
|
|
||||||
AllowOverride None
|
|
||||||
{% if apache_version == '2.4' -%}
|
|
||||||
Require all granted
|
|
||||||
{% else -%}
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
{% endif %}
|
|
||||||
</Directory>
|
|
||||||
</IfModule>
|
|
|
@ -1,54 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
|
|
||||||
class BaseAudit(object):  # noqa
    """Base class for hardening checks.

    A hardening check first determines whether the system complies with its
    criteria; subclasses override :meth:`ensure_compliance` to perform the
    check (and any remediation).
    """
    def __init__(self, *args, **kwargs):
        # 'unless' may be a plain truthy value or a zero-argument callable;
        # when it evaluates true the audit's action is suppressed.
        self.unless = kwargs.get('unless', None)
        super(BaseAudit, self).__init__()

    def ensure_compliance(self):
        """Check whether the current hardening criteria are met.

        Implementations should raise if the check is performed and the
        system is found to be out of compliance. The base class is a no-op.
        """
        pass

    def _take_action(self):
        """Return True when the audit's action should be performed.

        The decision is driven by the 'unless' override: absent, the action
        always runs; a callable is invoked with no arguments and vetoes the
        action when it returns a truthy value; any other value vetoes the
        action directly by its truthiness.
        """
        # No override supplied - always act.
        if self.unless is None:
            return True

        # Callable override: evaluate it now.
        if callable(self.unless):
            return not self.unless()

        return not self.unless
|
|
|
@ -1,98 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from six import string_types
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits import BaseAudit
|
|
||||||
|
|
||||||
|
|
||||||
class DisabledModuleAudit(BaseAudit):
    """Audits Apache2 modules.

    Determines if the apache2 modules are enabled. If the modules are enabled
    then they are removed in the ensure_compliance.
    """
    def __init__(self, modules):
        # Normalise the input to a list: None -> empty, a bare string -> a
        # single-element list, anything else is taken as-is.
        if modules is None:
            self.modules = []
        else:
            self.modules = [modules] if isinstance(modules,
                                                   string_types) else modules

    def ensure_compliance(self):
        """Ensures that the modules are not loaded."""
        if not self.modules:
            return

        try:
            enabled = self._get_loaded_modules()
            to_disable = []
            for module in self.modules:
                if module in enabled:
                    log("Module '%s' is enabled but should not be." %
                        (module), level=INFO)
                    to_disable.append(module)

            if not to_disable:
                return

            # Disable everything first, then restart apache once.
            for module in to_disable:
                self._disable_module(module)
            self._restart_apache()
        except subprocess.CalledProcessError as e:
            log('Error occurred auditing apache module compliance. '
                'This may have been already reported. '
                'Output is: %s' % e.output, level=ERROR)

    @staticmethod
    def _get_loaded_modules():
        """Returns the modules which are enabled in Apache."""
        # 'apache2ctl -M' prints a header followed by lines of the form
        # ' <name>_module (static|shared)'; the regex keeps only <name> and
        # naturally skips the header.
        output = subprocess.check_output(['apache2ctl', '-M'])
        found = []
        for line in output.splitlines():
            match = re.search(r'^ (\S*)_module (\S*)', line)
            if match:
                found.append(match.group(1))
        return found

    @staticmethod
    def _disable_module(module):
        """Disables the specified module in Apache."""
        try:
            subprocess.check_call(['a2dismod', module])
        except subprocess.CalledProcessError as e:
            # Note: catch error here to allow the attempt of disabling
            # multiple modules in one go rather than failing after the
            # first module fails.
            log('Error occurred disabling module %s. '
                'Output is: %s' % (module, e.output), level=ERROR)

    @staticmethod
    def _restart_apache():
        """Restarts the apache process"""
        subprocess.check_output(['service', 'apache2', 'restart'])
|
|
|
@ -1,103 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from __future__ import absolute_import # required for external apt import
|
|
||||||
from apt import apt_pkg
|
|
||||||
from six import string_types
|
|
||||||
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_cache,
|
|
||||||
apt_purge
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits import BaseAudit
|
|
||||||
|
|
||||||
|
|
||||||
class AptConfig(BaseAudit):
    """Audits apt configuration values against expected settings.

    Each entry in ``config`` is a dict with keys 'key' (the apt option
    name), 'expected' (the value it should have) and optionally 'default'
    (value assumed when apt does not define the option).
    """

    def __init__(self, config, **kwargs):
        """
        :param config: list of dicts describing the apt settings to verify.
        """
        # BUG FIX: the parent initialiser was never invoked, so attributes
        # BaseAudit.__init__ sets (notably ``self.unless``) were missing and
        # ``_take_action()`` would raise AttributeError. Chain up first.
        super(AptConfig, self).__init__(**kwargs)
        self.config = config

    def verify_config(self):
        """Log a warning for every setting whose live value differs from the
        expected one. Empty/undefined values are not reported.
        """
        apt_pkg.init()
        for cfg in self.config:
            value = apt_pkg.config.get(cfg['key'], cfg.get('default', ''))
            if value and value != cfg['expected']:
                log("APT config '%s' has unexpected value '%s' "
                    "(expected='%s')" %
                    (cfg['key'], value, cfg['expected']), level=WARNING)

    def ensure_compliance(self):
        """This audit only reports; it does not modify apt configuration."""
        self.verify_config()
|
|
||||||
|
|
||||||
|
|
||||||
class RestrictedPackages(BaseAudit):
    """Class used to audit restricted packages on the system."""

    def __init__(self, pkgs, **kwargs):
        # Normalise to a list: a bare string or any non-iterable becomes a
        # single-element list.
        super(RestrictedPackages, self).__init__(**kwargs)
        if isinstance(pkgs, string_types) or not hasattr(pkgs, '__iter__'):
            self.pkgs = [pkgs]
        else:
            self.pkgs = pkgs

    def ensure_compliance(self):
        """Purge any of the restricted packages found installed.

        Packages not present in the apt cache are ignored; virtual packages
        are resolved to the concrete packages that provide them.
        """
        cache = apt_cache()

        for p in self.pkgs:
            if p not in cache:
                continue

            pkg = cache[p]
            if not self.is_virtual_package(pkg):
                if not pkg.current_ver:
                    # Known to apt but not installed - nothing to purge.
                    log("Package '%s' is not installed." % pkg.name,
                        level=DEBUG)
                    continue
                else:
                    log("Restricted package '%s' is installed" % pkg.name,
                        level=WARNING)
                    self.delete_package(cache, pkg)
            else:
                # Virtual package: purge whatever provides it.
                log("Checking restricted virtual package '%s' provides" %
                    pkg.name, level=DEBUG)
                self.delete_package(cache, pkg)

    def delete_package(self, cache, pkg):
        """Deletes the package from the system.

        Deletes the package form the system, properly handling virtual
        packages.

        :param cache: the apt cache
        :param pkg: the package to remove
        """
        if self.is_virtual_package(pkg):
            log("Package '%s' appears to be virtual - purging provides" %
                pkg.name, level=DEBUG)
            # Recurse into each provider of the virtual package.
            # NOTE(review): assumes provides_list entries are tuples whose
            # third element carries the providing version (python-apt
            # layout) - confirm against the python-apt API in use.
            for _p in pkg.provides_list:
                self.delete_package(cache, _p[2].parent_pkg)
        elif not pkg.current_ver:
            log("Package '%s' not installed" % pkg.name, level=DEBUG)
            return
        else:
            log("Purging package '%s'" % pkg.name, level=DEBUG)
            apt_purge(pkg.name)

    def is_virtual_package(self, pkg):
        # A package that only provides others (has no installable versions
        # of its own) is considered virtual.
        return pkg.has_provides and not pkg.has_versions
|
|
|
@ -1,550 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import pwd
|
|
||||||
import re
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
CalledProcessError,
|
|
||||||
check_output,
|
|
||||||
check_call,
|
|
||||||
)
|
|
||||||
from traceback import format_exc
|
|
||||||
from six import string_types
|
|
||||||
from stat import (
|
|
||||||
S_ISGID,
|
|
||||||
S_ISUID
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
from charmhelpers.core import unitdata
|
|
||||||
from charmhelpers.core.host import file_hash
|
|
||||||
from charmhelpers.contrib.hardening.audits import BaseAudit
|
|
||||||
from charmhelpers.contrib.hardening.templating import (
|
|
||||||
get_template_path,
|
|
||||||
render_and_write,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
class BaseFileAudit(BaseAudit):
    """Base class for file audits.

    Provides api stubs for compliance check flow that must be used by any
    class that implements this one.
    """

    def __init__(self, paths, always_comply=False, *args, **kwargs):
        """
        :param paths: string path or list of paths of the files the
                      compliance criteria apply to.
        :param always_comply: if true compliance criteria is always applied
                              else compliance is skipped for non-existent
                              paths.
        """
        super(BaseFileAudit, self).__init__(*args, **kwargs)
        self.always_comply = always_comply
        # Normalise to a list: a bare string or non-iterable becomes a
        # single-element list.
        is_single = (isinstance(paths, string_types) or
                     not hasattr(paths, '__iter__'))
        self.paths = [paths] if is_single else paths

    def ensure_compliance(self):
        """Ensure every registered path satisfies the registered criteria."""
        for target in self.paths:
            if os.path.exists(target):
                if self.is_compliant(target):
                    continue
                log('File %s is not in compliance.' % target, level=INFO)
            elif not self.always_comply:
                # Missing path and no forced compliance - nothing to do.
                log("Non-existent path '%s' - skipping compliance check"
                    % (target), level=INFO)
                continue

            if self._take_action():
                log("Applying compliance criteria to '%s'" % (target),
                    level=INFO)
                self.comply(target)

    def is_compliant(self, path):
        """Audits the path to see if it is compliance.

        :param path: the path to the file that should be checked.
        """
        raise NotImplementedError

    def comply(self, path):
        """Enforces the compliance of a path.

        :param path: the path to the file that should be enforced.
        """
        raise NotImplementedError

    @classmethod
    def _get_stat(cls, path):
        """Returns the Posix st_stat information for the specified file path.

        :param path: the path to get the st_stat information for.
        :returns: an st_stat object for the path.
        """
        return os.stat(path)
|
|
||||||
|
|
||||||
|
|
||||||
class FilePermissionAudit(BaseFileAudit):
    """Implements an audit for file permissions and ownership for a user.

    This class implements functionality that ensures that a specific
    user/group will own the file(s) specified and that the permissions
    specified are applied properly to the file.
    """
    def __init__(self, paths, user, group=None, mode=0o600, **kwargs):
        """
        :param paths: path or list of paths to audit.
        :param user: name of the user that must own the path(s).
        :param group: name of the group that must own the path(s); when None
                      the user's primary group is used instead.
        :param mode: required permission bits (the low 12 bits: suid, sgid,
                     sticky and rwx for user/group/other).
        """
        self.user = user
        self.group = group
        self.mode = mode
        # BUG FIX: user/group/mode used to be forwarded positionally to
        # BaseFileAudit.__init__, where 'user' landed in the 'always_comply'
        # parameter. Any truthy user name therefore forced compliance
        # actions even for non-existent paths. These three values are
        # consumed by this class, so only paths and kwargs are passed up.
        super(FilePermissionAudit, self).__init__(paths, **kwargs)

    @property
    def user(self):
        # Resolved pwd entry for the configured user (None if unknown).
        return self._user

    @user.setter
    def user(self, name):
        try:
            user = pwd.getpwnam(name)
        except KeyError:
            log('Unknown user %s' % name, level=ERROR)
            user = None
        self._user = user

    @property
    def group(self):
        # Resolved grp entry for the configured group.
        return self._group

    @group.setter
    def group(self, name):
        try:
            group = None
            if name:
                group = grp.getgrnam(name)
            else:
                # Fall back to the user's primary group.
                # NOTE(review): assumes the user resolved successfully; if it
                # did not, self.user is None and this raises AttributeError,
                # which is not caught here - confirm intended behaviour.
                group = grp.getgrgid(self.user.pw_gid)
        except KeyError:
            log('Unknown group %s' % name, level=ERROR)
        self._group = group

    def is_compliant(self, path):
        """Checks if the path is in compliance.

        Used to determine if the path specified meets the necessary
        requirements to be in compliance with the check itself.

        :param path: the file path to check
        :returns: True if the path is compliant, False otherwise.
        """
        stat = self._get_stat(path)
        user = self.user
        group = self.group

        compliant = True
        if stat.st_uid != user.pw_uid or stat.st_gid != group.gr_gid:
            log('File %s is not owned by %s:%s.' % (path, user.pw_name,
                                                    group.gr_name),
                level=INFO)
            compliant = False

        # POSIX refers to the st_mode bits as corresponding to both the
        # file type and file permission bits, where the least significant 12
        # bits (o7777) are the suid (11), sgid (10), sticky bits (9), and the
        # file permission bits (8-0)
        perms = stat.st_mode & 0o7777
        if perms != self.mode:
            log('File %s has incorrect permissions, currently set to %s' %
                (path, oct(perms)), level=INFO)
            compliant = False

        return compliant

    def comply(self, path):
        """Issues a chown and chmod to the file paths specified."""
        utils.ensure_permissions(path, self.user.pw_name, self.group.gr_name,
                                 self.mode)
|
|
||||||
|
|
||||||
|
|
||||||
class DirectoryPermissionAudit(FilePermissionAudit):
    """Performs a permission check for the specified directory path."""

    def __init__(self, paths, user, group=None, mode=0o600,
                 recursive=True, **kwargs):
        """
        :param paths: directory path or list of directory paths to audit.
        :param user: required owner of the directories.
        :param group: required group (user's primary group when None).
        :param mode: required permission bits.
        :param recursive: when True, check the whole tree below each path.
        """
        super(DirectoryPermissionAudit, self).__init__(paths, user, group,
                                                       mode, **kwargs)
        self.recursive = recursive

    def is_compliant(self, path):
        """Checks if the directory is compliant.

        Used to determine if the path specified and all of its children
        directories are in compliance with the check itself.

        :param path: the directory path to check
        :returns: True if the directory tree is compliant, otherwise False.
        :raises ValueError: if path is not a directory.
        """
        if not os.path.isdir(path):
            log('Path specified %s is not a directory.' % path, level=ERROR)
            raise ValueError("%s is not a directory." % path)

        if not self.recursive:
            return super(DirectoryPermissionAudit, self).is_compliant(path)

        compliant = True
        for root, dirs, _ in os.walk(path):
            # NOTE(review): only leaf directories (those without
            # subdirectories) are checked, mirroring the original behaviour;
            # confirm whether intermediate directories should be verified
            # as well.
            if len(dirs) > 0:
                continue

            if not super(DirectoryPermissionAudit, self).is_compliant(root):
                compliant = False
                continue

        return compliant

    def comply(self, path):
        """Apply ownership and permissions throughout the directory tree.

        BUG FIX: the previous implementation only complied directories that
        had subdirectories (len(dirs) > 0), so the leaf directories flagged
        by is_compliant() were never actually corrected. Every directory
        produced by the walk (including the root itself) is now fixed up.
        """
        for root, _, _ in os.walk(path):
            super(DirectoryPermissionAudit, self).comply(root)
|
|
||||||
|
|
||||||
|
|
||||||
class ReadOnly(BaseFileAudit):
    """Audits that files and folders are read only."""
    def __init__(self, paths, *args, **kwargs):
        super(ReadOnly, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True when no regular file under ``path`` is writable by
        group or other; False on any such file or on a failed check.
        """
        try:
            writable = check_output(['find', path, '-perm', '-go+w',
                                     '-type', 'f']).strip()
        except CalledProcessError as e:
            log('Error occurred checking finding writable files for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc(e)), level=ERROR)
            return False

        # find(1) lists every file whose permission set allows too broad
        # write access, so an empty result means the tree is compliant.
        return not writable

    def comply(self, path):
        """Recursively strip group/other write bits under ``path``."""
        try:
            check_output(['chmod', 'go-w', '-R', path])
        except CalledProcessError as e:
            log('Error occurred removing writeable permissions for %s. '
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc(e)), level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class NoReadWriteForOther(BaseFileAudit):
    """Ensures that the files found under the base path are not readable or
    writable by anyone other than the owner or the group.
    """
    def __init__(self, paths):
        super(NoReadWriteForOther, self).__init__(paths)

    def is_compliant(self, path):
        """Return True when no regular file under ``path`` grants read or
        write access to 'other'; False otherwise or on a failed check.

        :param path: directory (or file) to scan with find(1).
        """
        try:
            cmd = ['find', path, '-perm', '-o+r', '-type', 'f', '-o',
                   '-perm', '-o+w', '-type', 'f']
            output = check_output(cmd).strip()

            # The find above here will find any files which have read or
            # write permissions for other, meaning there is too broad of access
            # to read/write the file. As such, the path is compliant if there's
            # no output.
            if output:
                return False

            return True
        except CalledProcessError as e:
            log('Error occurred while finding files which are readable or '
                'writable to the world in %s. '
                'Command output is: %s.' % (path, e.output), level=ERROR)
            # BUG FIX: previously fell through and implicitly returned None;
            # return an explicit False so a failed audit is treated as
            # non-compliant, matching ReadOnly.is_compliant().
            return False

    def comply(self, path):
        """Recursively remove read/write permission for 'other'."""
        try:
            check_output(['chmod', '-R', 'o-rw', path])
        except CalledProcessError as e:
            # BUG FIX: log() was previously called without a level; report
            # at ERROR to match the failure handling of the other audits.
            log('Error occurred attempting to change modes of files under '
                'path %s. Output of command is: %s' % (path, e.output),
                level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class NoSUIDSGIDAudit(BaseFileAudit):
    """Audits that specified files do not have SUID/SGID bits set."""
    def __init__(self, paths, *args, **kwargs):
        super(NoSUIDSGIDAudit, self).__init__(paths=paths, *args, **kwargs)

    def is_compliant(self, path):
        """Return True when neither the setuid nor setgid bit is set."""
        mode_bits = self._get_stat(path).st_mode
        return not (mode_bits & (S_ISGID | S_ISUID))

    def comply(self, path):
        """Clear the suid/sgid bits on ``path`` via chmod -s."""
        try:
            log('Removing suid/sgid from %s.' % path, level=DEBUG)
            check_output(['chmod', '-s', path])
        except CalledProcessError as e:
            log('Error occurred removing suid/sgid from %s.'
                'Error information is: command %s failed with returncode '
                '%d and output %s.\n%s' % (path, e.cmd, e.returncode, e.output,
                                           format_exc(e)), level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class TemplatedFile(BaseFileAudit):
    """The TemplatedFileAudit audits the contents of a templated file.

    This audit renders a file from a template, sets the appropriate file
    permissions, then generates a hashsum with which to check the content
    changed.
    """
    def __init__(self, path, context, template_dir, mode, user='root',
                 group='root', service_actions=None, **kwargs):
        # context: callable returning the dict used to render the template.
        self.context = context
        self.user = user
        self.group = group
        self.mode = mode
        self.template_dir = template_dir
        # service_actions: optional list of {'service': name,
        # 'actions': [action, ...]} run after a re-render.
        self.service_actions = service_actions
        # always_comply=True: the rendered file must exist, so comply() is
        # invoked even when the target path is missing.
        super(TemplatedFile, self).__init__(paths=path, always_comply=True,
                                            **kwargs)

    def is_compliant(self, path):
        """Determines if the templated file is compliant.

        A templated file is only compliant if it has not changed (as
        determined by its sha256 hashsum) AND its file permissions are set
        appropriately.

        :param path: the path to check compliance.
        """
        same_templates = self.templates_match(path)
        same_content = self.contents_match(path)
        same_permissions = self.permissions_match(path)

        if same_content and same_permissions and same_templates:
            return True

        return False

    def run_service_actions(self):
        """Run any actions on services requested."""
        if not self.service_actions:
            return

        for svc_action in self.service_actions:
            name = svc_action['service']
            actions = svc_action['actions']
            log("Running service '%s' actions '%s'" % (name, actions),
                level=DEBUG)
            for action in actions:
                cmd = ['service', name, action]
                try:
                    check_call(cmd)
                except CalledProcessError as exc:
                    # Best-effort: a failed action is logged, not raised, so
                    # remaining actions/services still run.
                    log("Service name='%s' action='%s' failed - %s" %
                        (name, action, exc), level=WARNING)

    def comply(self, path):
        """Ensures the contents and the permissions of the file.

        :param path: the path to correct
        """
        dirname = os.path.dirname(path)
        if not os.path.exists(dirname):
            os.makedirs(dirname)

        # Render, fix ownership/permissions, kick services, then record the
        # new checksum so the next audit can detect drift.
        self.pre_write()
        render_and_write(self.template_dir, path, self.context())
        utils.ensure_permissions(path, self.user, self.group, self.mode)
        self.run_service_actions()
        self.save_checksum(path)
        self.post_write()

    def pre_write(self):
        """Invoked prior to writing the template."""
        pass

    def post_write(self):
        """Invoked after writing the template."""
        pass

    def templates_match(self, path):
        """Determines if the template files are the same.

        The template file equality is determined by the hashsum of the
        template files themselves. If there is no hashsum, then the content
        cannot be sure to be the same so treat it as if they changed.
        Otherwise, return whether or not the hashsums are the same.

        :param path: the path to check
        :returns: boolean
        """
        template_path = get_template_path(self.template_dir, path)
        key = 'hardening:template:%s' % template_path
        template_checksum = file_hash(template_path)
        kv = unitdata.kv()
        stored_tmplt_checksum = kv.get(key)
        if not stored_tmplt_checksum:
            # First sighting: persist the checksum for future comparisons.
            kv.set(key, template_checksum)
            kv.flush()
            log('Saved template checksum for %s.' % template_path,
                level=DEBUG)
            # Since we don't have a template checksum, then assume it doesn't
            # match and return that the template is different.
            return False
        elif stored_tmplt_checksum != template_checksum:
            # Template changed on disk: record the new checksum and report
            # a mismatch so the file is re-rendered.
            kv.set(key, template_checksum)
            kv.flush()
            log('Updated template checksum for %s.' % template_path,
                level=DEBUG)
            return False

        # Here the template hasn't changed based upon the calculated
        # checksum of the template and what was previously stored.
        return True

    def contents_match(self, path):
        """Determines if the file content is the same.

        This is determined by comparing hashsum of the file contents and
        the saved hashsum. If there is no hashsum, then the content cannot
        be sure to be the same so treat them as if they are not the same.
        Otherwise, return True if the hashsums are the same, False if they
        are not the same.

        :param path: the file to check.
        """
        checksum = file_hash(path)

        kv = unitdata.kv()
        stored_checksum = kv.get('hardening:%s' % path)
        if not stored_checksum:
            # If the checksum hasn't been generated, return False to ensure
            # the file is written and the checksum stored.
            log('Checksum for %s has not been calculated.' % path, level=DEBUG)
            return False
        elif stored_checksum != checksum:
            log('Checksum mismatch for %s.' % path, level=DEBUG)
            return False

        return True

    def permissions_match(self, path):
        """Determines if the file owner and permissions match.

        :param path: the path to check.
        """
        audit = FilePermissionAudit(path, self.user, self.group, self.mode)
        return audit.is_compliant(path)

    def save_checksum(self, path):
        """Calculates and saves the checksum for the path specified.

        :param path: the path of the file to save the checksum.
        """
        checksum = file_hash(path)
        kv = unitdata.kv()
        kv.set('hardening:%s' % path, checksum)
        kv.flush()
|
|
||||||
|
|
||||||
|
|
||||||
class DeletedFile(BaseFileAudit):
    """Audit to ensure that a file is deleted."""

    def __init__(self, paths):
        super(DeletedFile, self).__init__(paths)

    def is_compliant(self, path):
        # Compliant only when nothing exists at the path.
        exists = os.path.exists(path)
        return not exists

    def comply(self, path):
        # Remove the offending file outright.
        os.remove(path)
|
|
||||||
|
|
||||||
|
|
||||||
class FileContentAudit(BaseFileAudit):
|
|
||||||
"""Audit the contents of a file."""
|
|
||||||
def __init__(self, paths, cases, **kwargs):
|
|
||||||
# Cases we expect to pass
|
|
||||||
self.pass_cases = cases.get('pass', [])
|
|
||||||
# Cases we expect to fail
|
|
||||||
self.fail_cases = cases.get('fail', [])
|
|
||||||
super(FileContentAudit, self).__init__(paths, **kwargs)
|
|
||||||
|
|
||||||
def is_compliant(self, path):
|
|
||||||
"""
|
|
||||||
Given a set of content matching cases i.e. tuple(regex, bool) where
|
|
||||||
bool value denotes whether or not regex is expected to match, check that
|
|
||||||
all cases match as expected with the contents of the file. Cases can be
|
|
||||||
expected to pass of fail.
|
|
||||||
|
|
||||||
:param path: Path of file to check.
|
|
||||||
:returns: Boolean value representing whether or not all cases are
|
|
||||||
found to be compliant.
|
|
||||||
"""
|
|
||||||
log("Auditing contents of file '%s'" % (path), level=DEBUG)
|
|
||||||
with open(path, 'r') as fd:
|
|
||||||
contents = fd.read()
|
|
||||||
|
|
||||||
matches = 0
|
|
||||||
for pattern in self.pass_cases:
|
|
||||||
key = re.compile(pattern, flags=re.MULTILINE)
|
|
||||||
results = re.search(key, contents)
|
|
||||||
if results:
|
|
||||||
matches += 1
|
|
||||||
else:
|
|
||||||
log("Pattern '%s' was expected to pass but instead it failed"
|
|
||||||
% (pattern), level=WARNING)
|
|
||||||
|
|
||||||
for pattern in self.fail_cases:
|
|
||||||
key = re.compile(pattern, flags=re.MULTILINE)
|
|
||||||
results = re.search(key, contents)
|
|
||||||
if not results:
|
|
||||||
matches += 1
|
|
||||||
else:
|
|
||||||
log("Pattern '%s' was expected to fail but instead it passed"
|
|
||||||
% (pattern), level=WARNING)
|
|
||||||
|
|
||||||
total = len(self.pass_cases) + len(self.fail_cases)
|
|
||||||
log("Checked %s cases and %s passed" % (total, matches), level=DEBUG)
|
|
||||||
return matches == total
|
|
||||||
|
|
||||||
def comply(self, *args, **kwargs):
|
|
||||||
"""NOOP since we just issue warnings. This is to avoid the
|
|
||||||
NotImplememtedError.
|
|
||||||
"""
|
|
||||||
log("Not applying any compliance criteria, only checks.", level=INFO)
|
|
|
@ -1,16 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'apache' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'apache' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
common:
|
|
||||||
apache_dir: '/etc/apache2'
|
|
||||||
|
|
||||||
hardening:
|
|
||||||
traceenable: 'off'
|
|
||||||
allowed_http_methods: "GET POST"
|
|
||||||
modules_to_disable: [ cgi, cgid ]
|
|
||||||
servertokens: 'Prod'
|
|
||||||
honor_cipher_order: 'on'
|
|
||||||
cipher_suite: 'ALL:+MEDIUM:+HIGH:!LOW:!MD5:!RC4:!eNULL:!aNULL:!3DES'
|
|
|
@ -1,12 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from it's associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
common:
|
|
||||||
apache_dir:
|
|
||||||
traceenable:
|
|
||||||
|
|
||||||
hardening:
|
|
||||||
allowed_http_methods:
|
|
||||||
modules_to_disable:
|
|
||||||
servertokens:
|
|
||||||
honor_cipher_order:
|
|
||||||
cipher_suite:
|
|
|
@ -1,38 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'mysql' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'mysql' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
hardening:
|
|
||||||
mysql-conf: /etc/mysql/my.cnf
|
|
||||||
hardening-conf: /etc/mysql/conf.d/hardening.cnf
|
|
||||||
|
|
||||||
security:
|
|
||||||
# @see http://www.symantec.com/connect/articles/securing-mysql-step-step
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_chroot
|
|
||||||
chroot: None
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_safe-user-create
|
|
||||||
safe-user-create: 1
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-auth
|
|
||||||
secure-auth: 1
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_symbolic-links
|
|
||||||
skip-symbolic-links: 1
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_skip-show-database
|
|
||||||
skip-show-database: True
|
|
||||||
|
|
||||||
# @see http://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_local_infile
|
|
||||||
local-infile: 0
|
|
||||||
|
|
||||||
# @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_allow-suspicious-udfs
|
|
||||||
allow-suspicious-udfs: 0
|
|
||||||
|
|
||||||
# @see https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_automatic_sp_privileges
|
|
||||||
automatic-sp-privileges: 0
|
|
||||||
|
|
||||||
# @see https://dev.mysql.com/doc/refman/5.7/en/server-options.html#option_mysqld_secure-file-priv
|
|
||||||
secure-file-priv: /tmp
|
|
|
@ -1,15 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from it's associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
hardening:
|
|
||||||
mysql-conf:
|
|
||||||
hardening-conf:
|
|
||||||
security:
|
|
||||||
chroot:
|
|
||||||
safe-user-create:
|
|
||||||
secure-auth:
|
|
||||||
skip-symbolic-links:
|
|
||||||
skip-show-database:
|
|
||||||
local-infile:
|
|
||||||
allow-suspicious-udfs:
|
|
||||||
automatic-sp-privileges:
|
|
||||||
secure-file-priv:
|
|
|
@ -1,68 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'os' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'os' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
general:
|
|
||||||
desktop_enable: False # (type:boolean)
|
|
||||||
|
|
||||||
environment:
|
|
||||||
extra_user_paths: []
|
|
||||||
umask: 027
|
|
||||||
root_path: /
|
|
||||||
|
|
||||||
auth:
|
|
||||||
pw_max_age: 60
|
|
||||||
# discourage password cycling
|
|
||||||
pw_min_age: 7
|
|
||||||
retries: 5
|
|
||||||
lockout_time: 600
|
|
||||||
timeout: 60
|
|
||||||
allow_homeless: False # (type:boolean)
|
|
||||||
pam_passwdqc_enable: True # (type:boolean)
|
|
||||||
pam_passwdqc_options: 'min=disabled,disabled,16,12,8'
|
|
||||||
root_ttys:
|
|
||||||
console
|
|
||||||
tty1
|
|
||||||
tty2
|
|
||||||
tty3
|
|
||||||
tty4
|
|
||||||
tty5
|
|
||||||
tty6
|
|
||||||
uid_min: 1000
|
|
||||||
gid_min: 1000
|
|
||||||
sys_uid_min: 100
|
|
||||||
sys_uid_max: 999
|
|
||||||
sys_gid_min: 100
|
|
||||||
sys_gid_max: 999
|
|
||||||
chfn_restrict:
|
|
||||||
|
|
||||||
security:
|
|
||||||
users_allow: []
|
|
||||||
suid_sgid_enforce: True # (type:boolean)
|
|
||||||
# user-defined blacklist and whitelist
|
|
||||||
suid_sgid_blacklist: []
|
|
||||||
suid_sgid_whitelist: []
|
|
||||||
# if this is True, remove any suid/sgid bits from files that were not in the whitelist
|
|
||||||
suid_sgid_dry_run_on_unknown: False # (type:boolean)
|
|
||||||
suid_sgid_remove_from_unknown: False # (type:boolean)
|
|
||||||
# remove packages with known issues
|
|
||||||
packages_clean: True # (type:boolean)
|
|
||||||
packages_list:
|
|
||||||
xinetd
|
|
||||||
inetd
|
|
||||||
ypserv
|
|
||||||
telnet-server
|
|
||||||
rsh-server
|
|
||||||
rsync
|
|
||||||
kernel_enable_module_loading: True # (type:boolean)
|
|
||||||
kernel_enable_core_dump: False # (type:boolean)
|
|
||||||
ssh_tmout: 300
|
|
||||||
|
|
||||||
sysctl:
|
|
||||||
kernel_secure_sysrq: 244 # 4 + 16 + 32 + 64 + 128
|
|
||||||
kernel_enable_sysrq: False # (type:boolean)
|
|
||||||
forwarding: False # (type:boolean)
|
|
||||||
ipv6_enable: False # (type:boolean)
|
|
||||||
arp_restricted: True # (type:boolean)
|
|
|
@ -1,43 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from it's associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
general:
|
|
||||||
desktop_enable:
|
|
||||||
environment:
|
|
||||||
extra_user_paths:
|
|
||||||
umask:
|
|
||||||
root_path:
|
|
||||||
auth:
|
|
||||||
pw_max_age:
|
|
||||||
pw_min_age:
|
|
||||||
retries:
|
|
||||||
lockout_time:
|
|
||||||
timeout:
|
|
||||||
allow_homeless:
|
|
||||||
pam_passwdqc_enable:
|
|
||||||
pam_passwdqc_options:
|
|
||||||
root_ttys:
|
|
||||||
uid_min:
|
|
||||||
gid_min:
|
|
||||||
sys_uid_min:
|
|
||||||
sys_uid_max:
|
|
||||||
sys_gid_min:
|
|
||||||
sys_gid_max:
|
|
||||||
chfn_restrict:
|
|
||||||
security:
|
|
||||||
users_allow:
|
|
||||||
suid_sgid_enforce:
|
|
||||||
suid_sgid_blacklist:
|
|
||||||
suid_sgid_whitelist:
|
|
||||||
suid_sgid_dry_run_on_unknown:
|
|
||||||
suid_sgid_remove_from_unknown:
|
|
||||||
packages_clean:
|
|
||||||
packages_list:
|
|
||||||
kernel_enable_module_loading:
|
|
||||||
kernel_enable_core_dump:
|
|
||||||
ssh_tmout:
|
|
||||||
sysctl:
|
|
||||||
kernel_secure_sysrq:
|
|
||||||
kernel_enable_sysrq:
|
|
||||||
forwarding:
|
|
||||||
ipv6_enable:
|
|
||||||
arp_restricted:
|
|
|
@ -1,49 +0,0 @@
|
||||||
# NOTE: this file contains the default configuration for the 'ssh' hardening
|
|
||||||
# code. If you want to override any settings you must add them to a file
|
|
||||||
# called hardening.yaml in the root directory of your charm using the
|
|
||||||
# name 'ssh' as the root key followed by any of the following with new
|
|
||||||
# values.
|
|
||||||
|
|
||||||
common:
|
|
||||||
service_name: 'ssh'
|
|
||||||
network_ipv6_enable: False # (type:boolean)
|
|
||||||
ports: [22]
|
|
||||||
remote_hosts: []
|
|
||||||
|
|
||||||
client:
|
|
||||||
package: 'openssh-client'
|
|
||||||
cbc_required: False # (type:boolean)
|
|
||||||
weak_hmac: False # (type:boolean)
|
|
||||||
weak_kex: False # (type:boolean)
|
|
||||||
roaming: False
|
|
||||||
password_authentication: 'no'
|
|
||||||
|
|
||||||
server:
|
|
||||||
host_key_files: ['/etc/ssh/ssh_host_rsa_key', '/etc/ssh/ssh_host_dsa_key',
|
|
||||||
'/etc/ssh/ssh_host_ecdsa_key']
|
|
||||||
cbc_required: False # (type:boolean)
|
|
||||||
weak_hmac: False # (type:boolean)
|
|
||||||
weak_kex: False # (type:boolean)
|
|
||||||
allow_root_with_key: False # (type:boolean)
|
|
||||||
allow_tcp_forwarding: 'no'
|
|
||||||
allow_agent_forwarding: 'no'
|
|
||||||
allow_x11_forwarding: 'no'
|
|
||||||
use_privilege_separation: 'sandbox'
|
|
||||||
listen_to: ['0.0.0.0']
|
|
||||||
use_pam: 'no'
|
|
||||||
package: 'openssh-server'
|
|
||||||
password_authentication: 'no'
|
|
||||||
alive_interval: '600'
|
|
||||||
alive_count: '3'
|
|
||||||
sftp_enable: False # (type:boolean)
|
|
||||||
sftp_group: 'sftponly'
|
|
||||||
sftp_chroot: '/home/%u'
|
|
||||||
deny_users: []
|
|
||||||
allow_users: []
|
|
||||||
deny_groups: []
|
|
||||||
allow_groups: []
|
|
||||||
print_motd: 'no'
|
|
||||||
print_last_log: 'no'
|
|
||||||
use_dns: 'no'
|
|
||||||
max_auth_tries: 2
|
|
||||||
max_sessions: 10
|
|
|
@ -1,42 +0,0 @@
|
||||||
# NOTE: this schema must contain all valid keys from it's associated defaults
|
|
||||||
# file. It is used to validate user-provided overrides.
|
|
||||||
common:
|
|
||||||
service_name:
|
|
||||||
network_ipv6_enable:
|
|
||||||
ports:
|
|
||||||
remote_hosts:
|
|
||||||
client:
|
|
||||||
package:
|
|
||||||
cbc_required:
|
|
||||||
weak_hmac:
|
|
||||||
weak_kex:
|
|
||||||
roaming:
|
|
||||||
password_authentication:
|
|
||||||
server:
|
|
||||||
host_key_files:
|
|
||||||
cbc_required:
|
|
||||||
weak_hmac:
|
|
||||||
weak_kex:
|
|
||||||
allow_root_with_key:
|
|
||||||
allow_tcp_forwarding:
|
|
||||||
allow_agent_forwarding:
|
|
||||||
allow_x11_forwarding:
|
|
||||||
use_privilege_separation:
|
|
||||||
listen_to:
|
|
||||||
use_pam:
|
|
||||||
package:
|
|
||||||
password_authentication:
|
|
||||||
alive_interval:
|
|
||||||
alive_count:
|
|
||||||
sftp_enable:
|
|
||||||
sftp_group:
|
|
||||||
sftp_chroot:
|
|
||||||
deny_users:
|
|
||||||
allow_users:
|
|
||||||
deny_groups:
|
|
||||||
allow_groups:
|
|
||||||
print_motd:
|
|
||||||
print_last_log:
|
|
||||||
use_dns:
|
|
||||||
max_auth_tries:
|
|
||||||
max_sessions:
|
|
|
@ -1,82 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from collections import OrderedDict
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host.checks import run_os_checks
|
|
||||||
from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
|
|
||||||
from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
|
|
||||||
from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
|
|
||||||
|
|
||||||
|
|
||||||
def harden(overrides=None):
|
|
||||||
"""Hardening decorator.
|
|
||||||
|
|
||||||
This is the main entry point for running the hardening stack. In order to
|
|
||||||
run modules of the stack you must add this decorator to charm hook(s) and
|
|
||||||
ensure that your charm config.yaml contains the 'harden' option set to
|
|
||||||
one or more of the supported modules. Setting these will cause the
|
|
||||||
corresponding hardening code to be run when the hook fires.
|
|
||||||
|
|
||||||
This decorator can and should be applied to more than one hook or function
|
|
||||||
such that hardening modules are called multiple times. This is because
|
|
||||||
subsequent calls will perform auditing checks that will report any changes
|
|
||||||
to resources hardened by the first run (and possibly perform compliance
|
|
||||||
actions as a result of any detected infractions).
|
|
||||||
|
|
||||||
:param overrides: Optional list of stack modules used to override those
|
|
||||||
provided with 'harden' config.
|
|
||||||
:returns: Returns value returned by decorated function once executed.
|
|
||||||
"""
|
|
||||||
def _harden_inner1(f):
|
|
||||||
log("Hardening function '%s'" % (f.__name__), level=DEBUG)
|
|
||||||
|
|
||||||
def _harden_inner2(*args, **kwargs):
|
|
||||||
RUN_CATALOG = OrderedDict([('os', run_os_checks),
|
|
||||||
('ssh', run_ssh_checks),
|
|
||||||
('mysql', run_mysql_checks),
|
|
||||||
('apache', run_apache_checks)])
|
|
||||||
|
|
||||||
enabled = overrides or (config("harden") or "").split()
|
|
||||||
if enabled:
|
|
||||||
modules_to_run = []
|
|
||||||
# modules will always be performed in the following order
|
|
||||||
for module, func in six.iteritems(RUN_CATALOG):
|
|
||||||
if module in enabled:
|
|
||||||
enabled.remove(module)
|
|
||||||
modules_to_run.append(func)
|
|
||||||
|
|
||||||
if enabled:
|
|
||||||
log("Unknown hardening modules '%s' - ignoring" %
|
|
||||||
(', '.join(enabled)), level=WARNING)
|
|
||||||
|
|
||||||
for hardener in modules_to_run:
|
|
||||||
log("Executing hardening module '%s'" %
|
|
||||||
(hardener.__name__), level=DEBUG)
|
|
||||||
hardener()
|
|
||||||
else:
|
|
||||||
log("No hardening applied to '%s'" % (f.__name__), level=DEBUG)
|
|
||||||
|
|
||||||
return f(*args, **kwargs)
|
|
||||||
return _harden_inner2
|
|
||||||
|
|
||||||
return _harden_inner1
|
|
|
@ -1,17 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from os import path
|
|
||||||
|
|
||||||
TEMPLATES_DIR = path.join(path.dirname(__file__), 'templates')
|
|
|
@ -1,48 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host.checks import (
|
|
||||||
apt,
|
|
||||||
limits,
|
|
||||||
login,
|
|
||||||
minimize_access,
|
|
||||||
pam,
|
|
||||||
profile,
|
|
||||||
securetty,
|
|
||||||
suid_sgid,
|
|
||||||
sysctl
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def run_os_checks():
|
|
||||||
log("Starting OS hardening checks.", level=DEBUG)
|
|
||||||
checks = apt.get_audits()
|
|
||||||
checks.extend(limits.get_audits())
|
|
||||||
checks.extend(login.get_audits())
|
|
||||||
checks.extend(minimize_access.get_audits())
|
|
||||||
checks.extend(pam.get_audits())
|
|
||||||
checks.extend(profile.get_audits())
|
|
||||||
checks.extend(securetty.get_audits())
|
|
||||||
checks.extend(suid_sgid.get_audits())
|
|
||||||
checks.extend(sysctl.get_audits())
|
|
||||||
|
|
||||||
for check in checks:
|
|
||||||
log("Running '%s' check" % (check.__class__.__name__), level=DEBUG)
|
|
||||||
check.ensure_compliance()
|
|
||||||
|
|
||||||
log("OS hardening checks complete.", level=DEBUG)
|
|
|
@ -1,37 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.utils import get_settings
|
|
||||||
from charmhelpers.contrib.hardening.audits.apt import (
|
|
||||||
AptConfig,
|
|
||||||
RestrictedPackages,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening apt audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = [AptConfig([{'key': 'APT::Get::AllowUnauthenticated',
|
|
||||||
'expected': 'false'}])]
|
|
||||||
|
|
||||||
settings = get_settings('os')
|
|
||||||
clean_packages = settings['security']['packages_clean']
|
|
||||||
if clean_packages:
|
|
||||||
security_packages = settings['security']['packages_list']
|
|
||||||
if security_packages:
|
|
||||||
audits.append(RestrictedPackages(security_packages))
|
|
||||||
|
|
||||||
return audits
|
|
|
@ -1,53 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
DirectoryPermissionAudit,
|
|
||||||
TemplatedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening security limits audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Ensure that the /etc/security/limits.d directory is only writable
|
|
||||||
# by the root user, but others can execute and read.
|
|
||||||
audits.append(DirectoryPermissionAudit('/etc/security/limits.d',
|
|
||||||
user='root', group='root',
|
|
||||||
mode=0o755))
|
|
||||||
|
|
||||||
# If core dumps are not enabled, then don't allow core dumps to be
|
|
||||||
# created as they may contain sensitive information.
|
|
||||||
if not settings['security']['kernel_enable_core_dump']:
|
|
||||||
audits.append(TemplatedFile('/etc/security/limits.d/10.hardcore.conf',
|
|
||||||
SecurityLimitsContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
user='root', group='root', mode=0o0440))
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class SecurityLimitsContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'disable_core_dump':
|
|
||||||
not settings['security']['kernel_enable_core_dump']}
|
|
||||||
return ctxt
|
|
|
@ -1,65 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from six import string_types
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening login.defs audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = [TemplatedFile('/etc/login.defs', LoginContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
user='root', group='root', mode=0o0444)]
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class LoginContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Octal numbers in yaml end up being turned into decimal,
|
|
||||||
# so check if the umask is entered as a string (e.g. '027')
|
|
||||||
# or as an octal umask as we know it (e.g. 002). If its not
|
|
||||||
# a string assume it to be octal and turn it into an octal
|
|
||||||
# string.
|
|
||||||
umask = settings['environment']['umask']
|
|
||||||
if not isinstance(umask, string_types):
|
|
||||||
umask = '%s' % oct(umask)
|
|
||||||
|
|
||||||
ctxt = {
|
|
||||||
'additional_user_paths':
|
|
||||||
settings['environment']['extra_user_paths'],
|
|
||||||
'umask': umask,
|
|
||||||
'pwd_max_age': settings['auth']['pw_max_age'],
|
|
||||||
'pwd_min_age': settings['auth']['pw_min_age'],
|
|
||||||
'uid_min': settings['auth']['uid_min'],
|
|
||||||
'sys_uid_min': settings['auth']['sys_uid_min'],
|
|
||||||
'sys_uid_max': settings['auth']['sys_uid_max'],
|
|
||||||
'gid_min': settings['auth']['gid_min'],
|
|
||||||
'sys_gid_min': settings['auth']['sys_gid_min'],
|
|
||||||
'sys_gid_max': settings['auth']['sys_gid_max'],
|
|
||||||
'login_retries': settings['auth']['retries'],
|
|
||||||
'login_timeout': settings['auth']['timeout'],
|
|
||||||
'chfn_restrict': settings['auth']['chfn_restrict'],
|
|
||||||
'allow_login_without_home': settings['auth']['allow_homeless']
|
|
||||||
}
|
|
||||||
|
|
||||||
return ctxt
|
|
|
@ -1,50 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
ReadOnly,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening access audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Remove write permissions from $PATH folders for all regular users.
|
|
||||||
# This prevents changing system-wide commands from normal users.
|
|
||||||
path_folders = {'/usr/local/sbin',
|
|
||||||
'/usr/local/bin',
|
|
||||||
'/usr/sbin',
|
|
||||||
'/usr/bin',
|
|
||||||
'/bin'}
|
|
||||||
extra_user_paths = settings['environment']['extra_user_paths']
|
|
||||||
path_folders.update(extra_user_paths)
|
|
||||||
audits.append(ReadOnly(path_folders))
|
|
||||||
|
|
||||||
# Only allow the root user to have access to the shadow file.
|
|
||||||
audits.append(FilePermissionAudit('/etc/shadow', 'root', 'root', 0o0600))
|
|
||||||
|
|
||||||
if 'change_user' not in settings['security']['users_allow']:
|
|
||||||
# su should only be accessible to user and group root, unless it is
|
|
||||||
# expressly defined to allow users to change to root via the
|
|
||||||
# security_users_allow config option.
|
|
||||||
audits.append(FilePermissionAudit('/bin/su', 'root', 'root', 0o750))
|
|
||||||
|
|
||||||
return audits
|
|
|
@ -1,132 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
check_output,
|
|
||||||
CalledProcessError,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_purge,
|
|
||||||
apt_update,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
TemplatedFile,
|
|
||||||
DeletedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening PAM authentication audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
if settings['auth']['pam_passwdqc_enable']:
|
|
||||||
audits.append(PasswdqcPAM('/etc/passwdqc.conf'))
|
|
||||||
|
|
||||||
if settings['auth']['retries']:
|
|
||||||
audits.append(Tally2PAM('/usr/share/pam-configs/tally2'))
|
|
||||||
else:
|
|
||||||
audits.append(DeletedFile('/usr/share/pam-configs/tally2'))
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class PasswdqcPAMContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
ctxt = {}
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
ctxt['auth_pam_passwdqc_options'] = \
|
|
||||||
settings['auth']['pam_passwdqc_options']
|
|
||||||
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class PasswdqcPAM(TemplatedFile):
|
|
||||||
"""The PAM Audit verifies the linux PAM settings."""
|
|
||||||
def __init__(self, path):
|
|
||||||
super(PasswdqcPAM, self).__init__(path=path,
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
context=PasswdqcPAMContext(),
|
|
||||||
user='root',
|
|
||||||
group='root',
|
|
||||||
mode=0o0640)
|
|
||||||
|
|
||||||
def pre_write(self):
|
|
||||||
# Always remove?
|
|
||||||
for pkg in ['libpam-ccreds', 'libpam-cracklib']:
|
|
||||||
log("Purging package '%s'" % pkg, level=DEBUG),
|
|
||||||
apt_purge(pkg)
|
|
||||||
|
|
||||||
apt_update(fatal=True)
|
|
||||||
for pkg in ['libpam-passwdqc']:
|
|
||||||
log("Installing package '%s'" % pkg, level=DEBUG),
|
|
||||||
apt_install(pkg)
|
|
||||||
|
|
||||||
def post_write(self):
|
|
||||||
"""Updates the PAM configuration after the file has been written"""
|
|
||||||
try:
|
|
||||||
check_output(['pam-auth-update', '--package'])
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log('Error calling pam-auth-update: %s' % e, level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
class Tally2PAMContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
ctxt = {}
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
ctxt['auth_lockout_time'] = settings['auth']['lockout_time']
|
|
||||||
ctxt['auth_retries'] = settings['auth']['retries']
|
|
||||||
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class Tally2PAM(TemplatedFile):
|
|
||||||
"""The PAM Audit verifies the linux PAM settings."""
|
|
||||||
def __init__(self, path):
|
|
||||||
super(Tally2PAM, self).__init__(path=path,
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
context=Tally2PAMContext(),
|
|
||||||
user='root',
|
|
||||||
group='root',
|
|
||||||
mode=0o0640)
|
|
||||||
|
|
||||||
def pre_write(self):
|
|
||||||
# Always remove?
|
|
||||||
apt_purge('libpam-ccreds')
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install('libpam-modules')
|
|
||||||
|
|
||||||
def post_write(self):
|
|
||||||
"""Updates the PAM configuration after the file has been written"""
|
|
||||||
try:
|
|
||||||
check_output(['pam-auth-update', '--package'])
|
|
||||||
except CalledProcessError as e:
|
|
||||||
log('Error calling pam-auth-update: %s' % e, level=ERROR)
|
|
|
@ -1,49 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening profile audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
# If core dumps are not enabled, then don't allow core dumps to be
|
|
||||||
# created as they may contain sensitive information.
|
|
||||||
if not settings['security']['kernel_enable_core_dump']:
|
|
||||||
audits.append(TemplatedFile('/etc/profile.d/pinerolo_profile.sh',
|
|
||||||
ProfileContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
mode=0o0755, user='root', group='root'))
|
|
||||||
if settings['security']['ssh_tmout']:
|
|
||||||
audits.append(TemplatedFile('/etc/profile.d/99-hardening.sh',
|
|
||||||
ProfileContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
mode=0o0644, user='root', group='root'))
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class ProfileContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'ssh_tmout':
|
|
||||||
settings['security']['ssh_tmout']}
|
|
||||||
return ctxt
|
|
|
@ -1,37 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import TemplatedFile
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening Secure TTY audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
audits.append(TemplatedFile('/etc/securetty', SecureTTYContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
mode=0o0400, user='root', group='root'))
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class SecureTTYContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'ttys': settings['auth']['root_ttys']}
|
|
||||||
return ctxt
|
|
|
@ -1,129 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import NoSUIDSGIDAudit
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
|
|
||||||
|
|
||||||
BLACKLIST = ['/usr/bin/rcp', '/usr/bin/rlogin', '/usr/bin/rsh',
|
|
||||||
'/usr/libexec/openssh/ssh-keysign',
|
|
||||||
'/usr/lib/openssh/ssh-keysign',
|
|
||||||
'/sbin/netreport',
|
|
||||||
'/usr/sbin/usernetctl',
|
|
||||||
'/usr/sbin/userisdnctl',
|
|
||||||
'/usr/sbin/pppd',
|
|
||||||
'/usr/bin/lockfile',
|
|
||||||
'/usr/bin/mail-lock',
|
|
||||||
'/usr/bin/mail-unlock',
|
|
||||||
'/usr/bin/mail-touchlock',
|
|
||||||
'/usr/bin/dotlockfile',
|
|
||||||
'/usr/bin/arping',
|
|
||||||
'/usr/sbin/uuidd',
|
|
||||||
'/usr/bin/mtr',
|
|
||||||
'/usr/lib/evolution/camel-lock-helper-1.2',
|
|
||||||
'/usr/lib/pt_chown',
|
|
||||||
'/usr/lib/eject/dmcrypt-get-device',
|
|
||||||
'/usr/lib/mc/cons.saver']
|
|
||||||
|
|
||||||
WHITELIST = ['/bin/mount', '/bin/ping', '/bin/su', '/bin/umount',
|
|
||||||
'/sbin/pam_timestamp_check', '/sbin/unix_chkpwd', '/usr/bin/at',
|
|
||||||
'/usr/bin/gpasswd', '/usr/bin/locate', '/usr/bin/newgrp',
|
|
||||||
'/usr/bin/passwd', '/usr/bin/ssh-agent',
|
|
||||||
'/usr/libexec/utempter/utempter', '/usr/sbin/lockdev',
|
|
||||||
'/usr/sbin/sendmail.sendmail', '/usr/bin/expiry',
|
|
||||||
'/bin/ping6', '/usr/bin/traceroute6.iputils',
|
|
||||||
'/sbin/mount.nfs', '/sbin/umount.nfs',
|
|
||||||
'/sbin/mount.nfs4', '/sbin/umount.nfs4',
|
|
||||||
'/usr/bin/crontab',
|
|
||||||
'/usr/bin/wall', '/usr/bin/write',
|
|
||||||
'/usr/bin/screen',
|
|
||||||
'/usr/bin/mlocate',
|
|
||||||
'/usr/bin/chage', '/usr/bin/chfn', '/usr/bin/chsh',
|
|
||||||
'/bin/fusermount',
|
|
||||||
'/usr/bin/pkexec',
|
|
||||||
'/usr/bin/sudo', '/usr/bin/sudoedit',
|
|
||||||
'/usr/sbin/postdrop', '/usr/sbin/postqueue',
|
|
||||||
'/usr/sbin/suexec',
|
|
||||||
'/usr/lib/squid/ncsa_auth', '/usr/lib/squid/pam_auth',
|
|
||||||
'/usr/kerberos/bin/ksu',
|
|
||||||
'/usr/sbin/ccreds_validate',
|
|
||||||
'/usr/bin/Xorg',
|
|
||||||
'/usr/bin/X',
|
|
||||||
'/usr/lib/dbus-1.0/dbus-daemon-launch-helper',
|
|
||||||
'/usr/lib/vte/gnome-pty-helper',
|
|
||||||
'/usr/lib/libvte9/gnome-pty-helper',
|
|
||||||
'/usr/lib/libvte-2.90-9/gnome-pty-helper']
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening suid/sgid audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
checks = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
if not settings['security']['suid_sgid_enforce']:
|
|
||||||
log("Skipping suid/sgid hardening", level=INFO)
|
|
||||||
return checks
|
|
||||||
|
|
||||||
# Build the blacklist and whitelist of files for suid/sgid checks.
|
|
||||||
# There are a total of 4 lists:
|
|
||||||
# 1. the system blacklist
|
|
||||||
# 2. the system whitelist
|
|
||||||
# 3. the user blacklist
|
|
||||||
# 4. the user whitelist
|
|
||||||
#
|
|
||||||
# The blacklist is the set of paths which should NOT have the suid/sgid bit
|
|
||||||
# set and the whitelist is the set of paths which MAY have the suid/sgid
|
|
||||||
# bit setl. The user whitelist/blacklist effectively override the system
|
|
||||||
# whitelist/blacklist.
|
|
||||||
u_b = settings['security']['suid_sgid_blacklist']
|
|
||||||
u_w = settings['security']['suid_sgid_whitelist']
|
|
||||||
|
|
||||||
blacklist = set(BLACKLIST) - set(u_w + u_b)
|
|
||||||
whitelist = set(WHITELIST) - set(u_b + u_w)
|
|
||||||
|
|
||||||
checks.append(NoSUIDSGIDAudit(blacklist))
|
|
||||||
|
|
||||||
dry_run = settings['security']['suid_sgid_dry_run_on_unknown']
|
|
||||||
|
|
||||||
if settings['security']['suid_sgid_remove_from_unknown'] or dry_run:
|
|
||||||
# If the policy is a dry_run (e.g. complain only) or remove unknown
|
|
||||||
# suid/sgid bits then find all of the paths which have the suid/sgid
|
|
||||||
# bit set and then remove the whitelisted paths.
|
|
||||||
root_path = settings['environment']['root_path']
|
|
||||||
unknown_paths = find_paths_with_suid_sgid(root_path) - set(whitelist)
|
|
||||||
checks.append(NoSUIDSGIDAudit(unknown_paths, unless=dry_run))
|
|
||||||
|
|
||||||
return checks
|
|
||||||
|
|
||||||
|
|
||||||
def find_paths_with_suid_sgid(root_path):
|
|
||||||
"""Finds all paths/files which have an suid/sgid bit enabled.
|
|
||||||
|
|
||||||
Starting with the root_path, this will recursively find all paths which
|
|
||||||
have an suid or sgid bit set.
|
|
||||||
"""
|
|
||||||
cmd = ['find', root_path, '-perm', '-4000', '-o', '-perm', '-2000',
|
|
||||||
'-type', 'f', '!', '-path', '/proc/*', '-print']
|
|
||||||
|
|
||||||
p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
|
||||||
out, _ = p.communicate()
|
|
||||||
return set(out.split('\n'))
|
|
|
@ -1,209 +0,0 @@
|
||||||
# Copyright 2016 Canonical Limited.
|
|
||||||
#
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
||||||
# you may not use this file except in compliance with the License.
|
|
||||||
# You may obtain a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
||||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
||||||
# See the License for the specific language governing permissions and
|
|
||||||
# limitations under the License.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import platform
|
|
||||||
import re
|
|
||||||
import six
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening import utils
|
|
||||||
from charmhelpers.contrib.hardening.audits.file import (
|
|
||||||
FilePermissionAudit,
|
|
||||||
TemplatedFile,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hardening.host import TEMPLATES_DIR
|
|
||||||
|
|
||||||
|
|
||||||
SYSCTL_DEFAULTS = """net.ipv4.ip_forward=%(net_ipv4_ip_forward)s
|
|
||||||
net.ipv6.conf.all.forwarding=%(net_ipv6_conf_all_forwarding)s
|
|
||||||
net.ipv4.conf.all.rp_filter=1
|
|
||||||
net.ipv4.conf.default.rp_filter=1
|
|
||||||
net.ipv4.icmp_echo_ignore_broadcasts=1
|
|
||||||
net.ipv4.icmp_ignore_bogus_error_responses=1
|
|
||||||
net.ipv4.icmp_ratelimit=100
|
|
||||||
net.ipv4.icmp_ratemask=88089
|
|
||||||
net.ipv6.conf.all.disable_ipv6=%(net_ipv6_conf_all_disable_ipv6)s
|
|
||||||
net.ipv4.tcp_timestamps=%(net_ipv4_tcp_timestamps)s
|
|
||||||
net.ipv4.conf.all.arp_ignore=%(net_ipv4_conf_all_arp_ignore)s
|
|
||||||
net.ipv4.conf.all.arp_announce=%(net_ipv4_conf_all_arp_announce)s
|
|
||||||
net.ipv4.tcp_rfc1337=1
|
|
||||||
net.ipv4.tcp_syncookies=1
|
|
||||||
net.ipv4.conf.all.shared_media=1
|
|
||||||
net.ipv4.conf.default.shared_media=1
|
|
||||||
net.ipv4.conf.all.accept_source_route=0
|
|
||||||
net.ipv4.conf.default.accept_source_route=0
|
|
||||||
net.ipv4.conf.all.accept_redirects=0
|
|
||||||
net.ipv4.conf.default.accept_redirects=0
|
|
||||||
net.ipv6.conf.all.accept_redirects=0
|
|
||||||
net.ipv6.conf.default.accept_redirects=0
|
|
||||||
net.ipv4.conf.all.secure_redirects=0
|
|
||||||
net.ipv4.conf.default.secure_redirects=0
|
|
||||||
net.ipv4.conf.all.send_redirects=0
|
|
||||||
net.ipv4.conf.default.send_redirects=0
|
|
||||||
net.ipv4.conf.all.log_martians=0
|
|
||||||
net.ipv6.conf.default.router_solicitations=0
|
|
||||||
net.ipv6.conf.default.accept_ra_rtr_pref=0
|
|
||||||
net.ipv6.conf.default.accept_ra_pinfo=0
|
|
||||||
net.ipv6.conf.default.accept_ra_defrtr=0
|
|
||||||
net.ipv6.conf.default.autoconf=0
|
|
||||||
net.ipv6.conf.default.dad_transmits=0
|
|
||||||
net.ipv6.conf.default.max_addresses=1
|
|
||||||
net.ipv6.conf.all.accept_ra=0
|
|
||||||
net.ipv6.conf.default.accept_ra=0
|
|
||||||
kernel.modules_disabled=%(kernel_modules_disabled)s
|
|
||||||
kernel.sysrq=%(kernel_sysrq)s
|
|
||||||
fs.suid_dumpable=%(fs_suid_dumpable)s
|
|
||||||
kernel.randomize_va_space=2
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def get_audits():
|
|
||||||
"""Get OS hardening sysctl audits.
|
|
||||||
|
|
||||||
:returns: dictionary of audits
|
|
||||||
"""
|
|
||||||
audits = []
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
|
|
||||||
# Apply the sysctl settings which are configured to be applied.
|
|
||||||
audits.append(SysctlConf())
|
|
||||||
# Make sure that only root has access to the sysctl.conf file, and
|
|
||||||
# that it is read-only.
|
|
||||||
audits.append(FilePermissionAudit('/etc/sysctl.conf',
|
|
||||||
user='root',
|
|
||||||
group='root', mode=0o0440))
|
|
||||||
# If module loading is not enabled, then ensure that the modules
|
|
||||||
# file has the appropriate permissions and rebuild the initramfs
|
|
||||||
if not settings['security']['kernel_enable_module_loading']:
|
|
||||||
audits.append(ModulesTemplate())
|
|
||||||
|
|
||||||
return audits
|
|
||||||
|
|
||||||
|
|
||||||
class ModulesContext(object):
|
|
||||||
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
with open('/proc/cpuinfo', 'r') as fd:
|
|
||||||
cpuinfo = fd.readlines()
|
|
||||||
|
|
||||||
for line in cpuinfo:
|
|
||||||
match = re.search(r"^vendor_id\s+:\s+(.+)", line)
|
|
||||||
if match:
|
|
||||||
vendor = match.group(1)
|
|
||||||
|
|
||||||
if vendor == "GenuineIntel":
|
|
||||||
vendor = "intel"
|
|
||||||
elif vendor == "AuthenticAMD":
|
|
||||||
vendor = "amd"
|
|
||||||
|
|
||||||
ctxt = {'arch': platform.processor(),
|
|
||||||
'cpuVendor': vendor,
|
|
||||||
'desktop_enable': settings['general']['desktop_enable']}
|
|
||||||
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
|
|
||||||
class ModulesTemplate(object):
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super(ModulesTemplate, self).__init__('/etc/initramfs-tools/modules',
|
|
||||||
ModulesContext(),
|
|
||||||
templates_dir=TEMPLATES_DIR,
|
|
||||||
user='root', group='root',
|
|
||||||
mode=0o0440)
|
|
||||||
|
|
||||||
def post_write(self):
|
|
||||||
subprocess.check_call(['update-initramfs', '-u'])
|
|
||||||
|
|
||||||
|
|
||||||
class SysCtlHardeningContext(object):
|
|
||||||
def __call__(self):
|
|
||||||
settings = utils.get_settings('os')
|
|
||||||
ctxt = {'sysctl': {}}
|
|
||||||
|
|
||||||
log("Applying sysctl settings", level=INFO)
|
|
||||||
extras = {'net_ipv4_ip_forward': 0,
|
|
||||||
'net_ipv6_conf_all_forwarding': 0,
|
|
||||||
'net_ipv6_conf_all_disable_ipv6': 1,
|
|
||||||
'net_ipv4_tcp_timestamps': 0,
|
|
||||||
'net_ipv4_conf_all_arp_ignore': 0,
|
|
||||||
'net_ipv4_conf_all_arp_announce': 0,
|
|
||||||
'kernel_sysrq': 0,
|
|
||||||
'fs_suid_dumpable': 0,
|
|
||||||
'kernel_modules_disabled': 1}
|
|
||||||
|
|
||||||
if settings['sysctl']['ipv6_enable']:
|
|
||||||
extras['net_ipv6_conf_all_disable_ipv6'] = 0
|
|
||||||
|
|
||||||
if settings['sysctl']['forwarding']:
|
|
||||||
extras['net_ipv4_ip_forward'] = 1
|
|
||||||
extras['net_ipv6_conf_all_forwarding'] = 1
|
|
||||||
|
|
||||||
if settings['sysctl']['arp_restricted']:
|
|
||||||
extras['net_ipv4_conf_all_arp_ignore'] = 1
|
|
||||||
extras['net_ipv4_conf_all_arp_announce'] = 2
|
|
||||||
|
|
||||||
if settings['security']['kernel_enable_module_loading']:
|
|
||||||
extras['kernel_modules_disabled'] = 0
|
|
||||||
|
|
||||||
if settings['sysctl']['kernel_enable_sysrq']:
|
|
||||||
sysrq_val = settings['sysctl']['kernel_secure_sysrq']
|
|
||||||
extras['kernel_sysrq'] = sysrq_val
|
|
||||||
|
|
||||||
if settings['security']['kernel_enable_core_dump']:
|
|
||||||
extras['fs_suid_dumpable'] = 1
|
|
||||||
|
|
||||||
settings.update(extras)
|
|
||||||
for d in (SYSCTL_DEFAULTS % settings).split():
|
|
||||||
d = d.strip().partition('=')
|
|
||||||
key = d[0].strip()
|
|
||||||
path = os.path.join('/proc/sys', key.replace('.', '/'))
|
|
||||||
if not os.path.exists(path):
|
|
||||||
log("Skipping '%s' since '%s' does not exist" % (key, path),
|
|
||||||
level=WARNING)
|
|
||||||
continue
|
|
||||||
|
|
||||||
ctxt['sysctl'][key] = d[2] or None
|
|
||||||
|
|
||||||
# Translate for python3
|
|
||||||
return {'sysctl_settings':
|
|
||||||
[(k, v) for k, v in six.iteritems(ctxt['sysctl'])]}
|
|
||||||
|
|
||||||
|
|
||||||
class SysctlConf(TemplatedFile):
|
|
||||||
"""An audit check for sysctl settings."""
|
|
||||||
def __init__(self):
|
|
||||||
self.conffile = '/etc/sysctl.d/99-juju-hardening.conf'
|
|
||||||
super(SysctlConf, self).__init__(self.conffile,
|
|
||||||
SysCtlHardeningContext(),
|
|
||||||
template_dir=TEMPLATES_DIR,
|
|
||||||
user='root', group='root',
|
|
||||||
mode=0o0440)
|
|
||||||
|
|
||||||
def post_write(self):
|
|
||||||
try:
|
|
||||||
subprocess.check_call(['sysctl', '-p', self.conffile])
|
|
||||||
except subprocess.CalledProcessError as e:
|
|
||||||
# NOTE: on some systems if sysctl cannot apply all settings it
|
|
||||||
# will return non-zero as well.
|
|
||||||
log("sysctl command returned an error (maybe some "
|
|
||||||
"keys could not be set) - %s" % (e),
|
|
||||||
level=WARNING)
|
|
|
@ -1,8 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
{% if disable_core_dump -%}
|
|
||||||
# Prevent core dumps for all users. These are usually only needed by developers and may contain sensitive information.
|
|
||||||
* hard core 0
|
|
||||||
{% endif %}
|
|
|
@ -1,5 +0,0 @@
|
||||||
TMOUT={{ tmout }}
|
|
||||||
readonly TMOUT
|
|
||||||
export TMOUT
|
|
||||||
|
|
||||||
readonly HISTFILE
|
|
|
@ -1,7 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
{% for key, value in sysctl_settings -%}
|
|
||||||
{{ key }}={{ value }}
|
|
||||||
{% endfor -%}
|
|
|
@ -1,349 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
#
|
|
||||||
# /etc/login.defs - Configuration control definitions for the login package.
|
|
||||||
#
|
|
||||||
# Three items must be defined: MAIL_DIR, ENV_SUPATH, and ENV_PATH.
|
|
||||||
# If unspecified, some arbitrary (and possibly incorrect) value will
|
|
||||||
# be assumed. All other items are optional - if not specified then
|
|
||||||
# the described action or option will be inhibited.
|
|
||||||
#
|
|
||||||
# Comment lines (lines beginning with "#") and blank lines are ignored.
|
|
||||||
#
|
|
||||||
# Modified for Linux. --marekm
|
|
||||||
|
|
||||||
# REQUIRED for useradd/userdel/usermod
|
|
||||||
# Directory where mailboxes reside, _or_ name of file, relative to the
|
|
||||||
# home directory. If you _do_ define MAIL_DIR and MAIL_FILE,
|
|
||||||
# MAIL_DIR takes precedence.
|
|
||||||
#
|
|
||||||
# Essentially:
|
|
||||||
# - MAIL_DIR defines the location of users mail spool files
|
|
||||||
# (for mbox use) by appending the username to MAIL_DIR as defined
|
|
||||||
# below.
|
|
||||||
# - MAIL_FILE defines the location of the users mail spool files as the
|
|
||||||
# fully-qualified filename obtained by prepending the user home
|
|
||||||
# directory before $MAIL_FILE
|
|
||||||
#
|
|
||||||
# NOTE: This is no more used for setting up users MAIL environment variable
|
|
||||||
# which is, starting from shadow 4.0.12-1 in Debian, entirely the
|
|
||||||
# job of the pam_mail PAM modules
|
|
||||||
# See default PAM configuration files provided for
|
|
||||||
# login, su, etc.
|
|
||||||
#
|
|
||||||
# This is a temporary situation: setting these variables will soon
|
|
||||||
# move to /etc/default/useradd and the variables will then be
|
|
||||||
# no more supported
|
|
||||||
MAIL_DIR /var/mail
|
|
||||||
#MAIL_FILE .mail
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable logging and display of /var/log/faillog login failure info.
|
|
||||||
# This option conflicts with the pam_tally PAM module.
|
|
||||||
#
|
|
||||||
FAILLOG_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable display of unknown usernames when login failures are recorded.
|
|
||||||
#
|
|
||||||
# WARNING: Unknown usernames may become world readable.
|
|
||||||
# See #290803 and #298773 for details about how this could become a security
|
|
||||||
# concern
|
|
||||||
LOG_UNKFAIL_ENAB no
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable logging of successful logins
|
|
||||||
#
|
|
||||||
LOG_OK_LOGINS yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable "syslog" logging of su activity - in addition to sulog file logging.
|
|
||||||
# SYSLOG_SG_ENAB does the same for newgrp and sg.
|
|
||||||
#
|
|
||||||
SYSLOG_SU_ENAB yes
|
|
||||||
SYSLOG_SG_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, all su activity is logged to this file.
|
|
||||||
#
|
|
||||||
#SULOG_FILE /var/log/sulog
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, file which maps tty line to TERM environment parameter.
|
|
||||||
# Each line of the file is in a format something like "vt100 tty01".
|
|
||||||
#
|
|
||||||
#TTYTYPE_FILE /etc/ttytype
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, login failures will be logged here in a utmp format
|
|
||||||
# last, when invoked as lastb, will read /var/log/btmp, so...
|
|
||||||
#
|
|
||||||
FTMP_FILE /var/log/btmp
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, the command name to display when running "su -". For
|
|
||||||
# example, if this is defined as "su" then a "ps" will display the
|
|
||||||
# command is "-su". If not defined, then "ps" would display the
|
|
||||||
# name of the shell actually being run, e.g. something like "-sh".
|
|
||||||
#
|
|
||||||
SU_NAME su
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, file which inhibits all the usual chatter during the login
|
|
||||||
# sequence. If a full pathname, then hushed mode will be enabled if the
|
|
||||||
# user's name or shell are found in the file. If not a full pathname, then
|
|
||||||
# hushed mode will be enabled if the file exists in the user's home directory.
|
|
||||||
#
|
|
||||||
HUSHLOGIN_FILE .hushlogin
|
|
||||||
#HUSHLOGIN_FILE /etc/hushlogins
|
|
||||||
|
|
||||||
#
|
|
||||||
# *REQUIRED* The default PATH settings, for superuser and normal users.
|
|
||||||
#
|
|
||||||
# (they are minimal, add the rest in the shell startup files)
|
|
||||||
ENV_SUPATH PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin
|
|
||||||
ENV_PATH PATH=/usr/local/bin:/usr/bin:/bin{% if additional_user_paths %}{{ additional_user_paths }}{% endif %}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Terminal permissions
|
|
||||||
#
|
|
||||||
# TTYGROUP Login tty will be assigned this group ownership.
|
|
||||||
# TTYPERM Login tty will be set to this permission.
|
|
||||||
#
|
|
||||||
# If you have a "write" program which is "setgid" to a special group
|
|
||||||
# which owns the terminals, define TTYGROUP to the group number and
|
|
||||||
# TTYPERM to 0620. Otherwise leave TTYGROUP commented out and assign
|
|
||||||
# TTYPERM to either 622 or 600.
|
|
||||||
#
|
|
||||||
# In Debian /usr/bin/bsd-write or similar programs are setgid tty
|
|
||||||
# However, the default and recommended value for TTYPERM is still 0600
|
|
||||||
# to not allow anyone to write to anyone else console or terminal
|
|
||||||
|
|
||||||
# Users can still allow other people to write them by issuing
|
|
||||||
# the "mesg y" command.
|
|
||||||
|
|
||||||
TTYGROUP tty
|
|
||||||
TTYPERM 0600
|
|
||||||
|
|
||||||
#
|
|
||||||
# Login configuration initializations:
|
|
||||||
#
|
|
||||||
# ERASECHAR Terminal ERASE character ('\010' = backspace).
|
|
||||||
# KILLCHAR Terminal KILL character ('\025' = CTRL/U).
|
|
||||||
# UMASK Default "umask" value.
|
|
||||||
#
|
|
||||||
# The ERASECHAR and KILLCHAR are used only on System V machines.
|
|
||||||
#
|
|
||||||
# UMASK is the default umask value for pam_umask and is used by
|
|
||||||
# useradd and newusers to set the mode of the new home directories.
|
|
||||||
# 022 is the "historical" value in Debian for UMASK
|
|
||||||
# 027, or even 077, could be considered better for privacy
|
|
||||||
# There is no One True Answer here : each sysadmin must make up his/her
|
|
||||||
# mind.
|
|
||||||
#
|
|
||||||
# If USERGROUPS_ENAB is set to "yes", that will modify this UMASK default value
|
|
||||||
# for private user groups, i. e. the uid is the same as gid, and username is
|
|
||||||
# the same as the primary group name: for these, the user permissions will be
|
|
||||||
# used as group permissions, e. g. 022 will become 002.
|
|
||||||
#
|
|
||||||
# Prefix these values with "0" to get octal, "0x" to get hexadecimal.
|
|
||||||
#
|
|
||||||
ERASECHAR 0177
|
|
||||||
KILLCHAR 025
|
|
||||||
UMASK {{ umask }}
|
|
||||||
|
|
||||||
# Enable setting of the umask group bits to be the same as owner bits (examples: `022` -> `002`, `077` -> `007`) for non-root users, if the uid is the same as gid, and username is the same as the primary group name.
|
|
||||||
# If set to yes, userdel will remove the user´s group if it contains no more members, and useradd will create by default a group with the name of the user.
|
|
||||||
USERGROUPS_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Password aging controls:
|
|
||||||
#
|
|
||||||
# PASS_MAX_DAYS Maximum number of days a password may be used.
|
|
||||||
# PASS_MIN_DAYS Minimum number of days allowed between password changes.
|
|
||||||
# PASS_WARN_AGE Number of days warning given before a password expires.
|
|
||||||
#
|
|
||||||
PASS_MAX_DAYS {{ pwd_max_age }}
|
|
||||||
PASS_MIN_DAYS {{ pwd_min_age }}
|
|
||||||
PASS_WARN_AGE 7
|
|
||||||
|
|
||||||
#
|
|
||||||
# Min/max values for automatic uid selection in useradd
|
|
||||||
#
|
|
||||||
UID_MIN {{ uid_min }}
|
|
||||||
UID_MAX 60000
|
|
||||||
# System accounts
|
|
||||||
SYS_UID_MIN {{ sys_uid_min }}
|
|
||||||
SYS_UID_MAX {{ sys_uid_max }}
|
|
||||||
|
|
||||||
# Min/max values for automatic gid selection in groupadd
|
|
||||||
GID_MIN {{ gid_min }}
|
|
||||||
GID_MAX 60000
|
|
||||||
# System accounts
|
|
||||||
SYS_GID_MIN {{ sys_gid_min }}
|
|
||||||
SYS_GID_MAX {{ sys_gid_max }}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Max number of login retries if password is bad. This will most likely be
|
|
||||||
# overriden by PAM, since the default pam_unix module has it's own built
|
|
||||||
# in of 3 retries. However, this is a safe fallback in case you are using
|
|
||||||
# an authentication module that does not enforce PAM_MAXTRIES.
|
|
||||||
#
|
|
||||||
LOGIN_RETRIES {{ login_retries }}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Max time in seconds for login
|
|
||||||
#
|
|
||||||
LOGIN_TIMEOUT {{ login_timeout }}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Which fields may be changed by regular users using chfn - use
|
|
||||||
# any combination of letters "frwh" (full name, room number, work
|
|
||||||
# phone, home phone). If not defined, no changes are allowed.
|
|
||||||
# For backward compatibility, "yes" = "rwh" and "no" = "frwh".
|
|
||||||
#
|
|
||||||
{% if chfn_restrict %}
|
|
||||||
CHFN_RESTRICT {{ chfn_restrict }}
|
|
||||||
{% endif %}
|
|
||||||
|
|
||||||
#
|
|
||||||
# Should login be allowed if we can't cd to the home directory?
|
|
||||||
# Default in no.
|
|
||||||
#
|
|
||||||
DEFAULT_HOME {% if allow_login_without_home %} yes {% else %} no {% endif %}
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, this command is run when removing a user.
|
|
||||||
# It should remove any at/cron/print jobs etc. owned by
|
|
||||||
# the user to be removed (passed as the first argument).
|
|
||||||
#
|
|
||||||
#USERDEL_CMD /usr/sbin/userdel_local
|
|
||||||
|
|
||||||
#
|
|
||||||
# Enable setting of the umask group bits to be the same as owner bits
|
|
||||||
# (examples: 022 -> 002, 077 -> 007) for non-root users, if the uid is
|
|
||||||
# the same as gid, and username is the same as the primary group name.
|
|
||||||
#
|
|
||||||
# If set to yes, userdel will remove the user´s group if it contains no
|
|
||||||
# more members, and useradd will create by default a group with the name
|
|
||||||
# of the user.
|
|
||||||
#
|
|
||||||
USERGROUPS_ENAB yes
|
|
||||||
|
|
||||||
#
|
|
||||||
# Instead of the real user shell, the program specified by this parameter
|
|
||||||
# will be launched, although its visible name (argv[0]) will be the shell's.
|
|
||||||
# The program may do whatever it wants (logging, additional authentification,
|
|
||||||
# banner, ...) before running the actual shell.
|
|
||||||
#
|
|
||||||
# FAKE_SHELL /bin/fakeshell
|
|
||||||
|
|
||||||
#
|
|
||||||
# If defined, either full pathname of a file containing device names or
|
|
||||||
# a ":" delimited list of device names. Root logins will be allowed only
|
|
||||||
# upon these devices.
|
|
||||||
#
|
|
||||||
# This variable is used by login and su.
|
|
||||||
#
|
|
||||||
#CONSOLE /etc/consoles
|
|
||||||
#CONSOLE console:tty01:tty02:tty03:tty04
|
|
||||||
|
|
||||||
#
|
|
||||||
# List of groups to add to the user's supplementary group set
|
|
||||||
# when logging in on the console (as determined by the CONSOLE
|
|
||||||
# setting). Default is none.
|
|
||||||
#
|
|
||||||
# Use with caution - it is possible for users to gain permanent
|
|
||||||
# access to these groups, even when not logged in on the console.
|
|
||||||
# How to do it is left as an exercise for the reader...
|
|
||||||
#
|
|
||||||
# This variable is used by login and su.
|
|
||||||
#
|
|
||||||
#CONSOLE_GROUPS floppy:audio:cdrom
|
|
||||||
|
|
||||||
#
|
|
||||||
# If set to "yes", new passwords will be encrypted using the MD5-based
|
|
||||||
# algorithm compatible with the one used by recent releases of FreeBSD.
|
|
||||||
# It supports passwords of unlimited length and longer salt strings.
|
|
||||||
# Set to "no" if you need to copy encrypted passwords to other systems
|
|
||||||
# which don't understand the new algorithm. Default is "no".
|
|
||||||
#
|
|
||||||
# This variable is deprecated. You should use ENCRYPT_METHOD.
|
|
||||||
#
|
|
||||||
MD5_CRYPT_ENAB no
|
|
||||||
|
|
||||||
#
|
|
||||||
# If set to MD5 , MD5-based algorithm will be used for encrypting password
|
|
||||||
# If set to SHA256, SHA256-based algorithm will be used for encrypting password
|
|
||||||
# If set to SHA512, SHA512-based algorithm will be used for encrypting password
|
|
||||||
# If set to DES, DES-based algorithm will be used for encrypting password (default)
|
|
||||||
# Overrides the MD5_CRYPT_ENAB option
|
|
||||||
#
|
|
||||||
# Note: It is recommended to use a value consistent with
|
|
||||||
# the PAM modules configuration.
|
|
||||||
#
|
|
||||||
ENCRYPT_METHOD SHA512
|
|
||||||
|
|
||||||
#
|
|
||||||
# Only used if ENCRYPT_METHOD is set to SHA256 or SHA512.
|
|
||||||
#
|
|
||||||
# Define the number of SHA rounds.
|
|
||||||
# With a lot of rounds, it is more difficult to brute-force the password.
|
|
||||||
# But note also that more CPU resources will be needed to authenticate
|
|
||||||
# users.
|
|
||||||
#
|
|
||||||
# If not specified, the libc will choose the default number of rounds (5000).
|
|
||||||
# The values must be inside the 1000-999999999 range.
|
|
||||||
# If only one of the MIN or MAX values is set, then this value will be used.
|
|
||||||
# If MIN > MAX, the highest value will be used.
|
|
||||||
#
|
|
||||||
# SHA_CRYPT_MIN_ROUNDS 5000
|
|
||||||
# SHA_CRYPT_MAX_ROUNDS 5000
|
|
||||||
|
|
||||||
################# OBSOLETED BY PAM ##############
|
|
||||||
# #
|
|
||||||
# These options are now handled by PAM. Please #
|
|
||||||
# edit the appropriate file in /etc/pam.d/ to #
|
|
||||||
# enable the equivalents of them.
|
|
||||||
#
|
|
||||||
###############
|
|
||||||
|
|
||||||
#MOTD_FILE
|
|
||||||
#DIALUPS_CHECK_ENAB
|
|
||||||
#LASTLOG_ENAB
|
|
||||||
#MAIL_CHECK_ENAB
|
|
||||||
#OBSCURE_CHECKS_ENAB
|
|
||||||
#PORTTIME_CHECKS_ENAB
|
|
||||||
#SU_WHEEL_ONLY
|
|
||||||
#CRACKLIB_DICTPATH
|
|
||||||
#PASS_CHANGE_TRIES
|
|
||||||
#PASS_ALWAYS_WARN
|
|
||||||
#ENVIRON_FILE
|
|
||||||
#NOLOGINS_FILE
|
|
||||||
#ISSUE_FILE
|
|
||||||
#PASS_MIN_LEN
|
|
||||||
#PASS_MAX_LEN
|
|
||||||
#ULIMIT
|
|
||||||
#ENV_HZ
|
|
||||||
#CHFN_AUTH
|
|
||||||
#CHSH_AUTH
|
|
||||||
#FAIL_DELAY
|
|
||||||
|
|
||||||
################# OBSOLETED #######################
|
|
||||||
# #
|
|
||||||
# These options are no more handled by shadow. #
|
|
||||||
# #
|
|
||||||
# Shadow utilities will display a warning if they #
|
|
||||||
# still appear. #
|
|
||||||
# #
|
|
||||||
###################################################
|
|
||||||
|
|
||||||
# CLOSE_SESSIONS
|
|
||||||
# LOGIN_STRING
|
|
||||||
# NO_PASSWORD_CONSOLE
|
|
||||||
# QMAIL_DIR
|
|
||||||
|
|
||||||
|
|
||||||
|
|
|
@ -1,117 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# /etc/modules: kernel modules to load at boot time.
|
|
||||||
#
|
|
||||||
# This file contains the names of kernel modules that should be loaded
|
|
||||||
# at boot time, one per line. Lines beginning with "#" are ignored.
|
|
||||||
# Parameters can be specified after the module name.
|
|
||||||
|
|
||||||
# Arch
|
|
||||||
# ----
|
|
||||||
#
|
|
||||||
# Modules for certain builds; contains support modules and some CPU-specific optimizations.
|
|
||||||
|
|
||||||
{% if arch == "x86_64" -%}
|
|
||||||
# Optimize for x86_64 cryptographic features
|
|
||||||
twofish-x86_64-3way
|
|
||||||
twofish-x86_64
|
|
||||||
aes-x86_64
|
|
||||||
salsa20-x86_64
|
|
||||||
blowfish-x86_64
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if cpuVendor == "intel" -%}
|
|
||||||
# Intel-specific optimizations
|
|
||||||
ghash-clmulni-intel
|
|
||||||
aesni-intel
|
|
||||||
kvm-intel
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
{% if cpuVendor == "amd" -%}
|
|
||||||
# AMD-specific optimizations
|
|
||||||
kvm-amd
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
kvm
|
|
||||||
|
|
||||||
|
|
||||||
# Crypto
|
|
||||||
# ------
|
|
||||||
|
|
||||||
# Some core modules which comprise strong cryptography.
|
|
||||||
blowfish_common
|
|
||||||
blowfish_generic
|
|
||||||
ctr
|
|
||||||
cts
|
|
||||||
lrw
|
|
||||||
lzo
|
|
||||||
rmd160
|
|
||||||
rmd256
|
|
||||||
rmd320
|
|
||||||
serpent
|
|
||||||
sha512_generic
|
|
||||||
twofish_common
|
|
||||||
twofish_generic
|
|
||||||
xts
|
|
||||||
zlib
|
|
||||||
|
|
||||||
|
|
||||||
# Drivers
|
|
||||||
# -------
|
|
||||||
|
|
||||||
# Basics
|
|
||||||
lp
|
|
||||||
rtc
|
|
||||||
loop
|
|
||||||
|
|
||||||
# Filesystems
|
|
||||||
ext2
|
|
||||||
btrfs
|
|
||||||
|
|
||||||
{% if desktop_enable -%}
|
|
||||||
# Desktop
|
|
||||||
psmouse
|
|
||||||
snd
|
|
||||||
snd_ac97_codec
|
|
||||||
snd_intel8x0
|
|
||||||
snd_page_alloc
|
|
||||||
snd_pcm
|
|
||||||
snd_timer
|
|
||||||
soundcore
|
|
||||||
usbhid
|
|
||||||
{% endif -%}
|
|
||||||
|
|
||||||
# Lib
|
|
||||||
# ---
|
|
||||||
xz
|
|
||||||
|
|
||||||
|
|
||||||
# Net
|
|
||||||
# ---
|
|
||||||
|
|
||||||
# All packets needed for netfilter rules (ie iptables, ebtables).
|
|
||||||
ip_tables
|
|
||||||
x_tables
|
|
||||||
iptable_filter
|
|
||||||
iptable_nat
|
|
||||||
|
|
||||||
# Targets
|
|
||||||
ipt_LOG
|
|
||||||
ipt_REJECT
|
|
||||||
|
|
||||||
# Modules
|
|
||||||
xt_connlimit
|
|
||||||
xt_tcpudp
|
|
||||||
xt_recent
|
|
||||||
xt_limit
|
|
||||||
xt_conntrack
|
|
||||||
nf_conntrack
|
|
||||||
nf_conntrack_ipv4
|
|
||||||
nf_defrag_ipv4
|
|
||||||
xt_state
|
|
||||||
nf_nat
|
|
||||||
|
|
||||||
# Addons
|
|
||||||
xt_pknock
|
|
|
@ -1,11 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
Name: passwdqc password strength enforcement
|
|
||||||
Default: yes
|
|
||||||
Priority: 1024
|
|
||||||
Conflicts: cracklib
|
|
||||||
Password-Type: Primary
|
|
||||||
Password:
|
|
||||||
requisite pam_passwdqc.so {{ auth_pam_passwdqc_options }}
|
|
|
@ -1,8 +0,0 @@
|
||||||
###############################################################################
|
|
||||||
# WARNING: This configuration file is maintained by Juju. Local changes may
|
|
||||||
# be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
# Disable core dumps via soft limits for all users. Compliance to this setting
|
|
||||||
# is voluntary and can be modified by users up to a hard limit. This setting is
|
|
||||||
# a sane default.
|
|
||||||
ulimit -S -c 0 > /dev/null 2>&1
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue