Remove the charm subdir which includes the bundled charm-helpers
As part of the layered approach, charm-helpers is brought in via the charm build process.
This commit is contained in:
parent
80f8be29e1
commit
4bbf9e0de8
@ -1,3 +0,0 @@
|
|||||||
This is a Bazaar control directory.
|
|
||||||
Do not change any files in this directory.
|
|
||||||
See http://bazaar.canonical.com/ for more information about Bazaar.
|
|
@ -1 +0,0 @@
|
|||||||
Bazaar-NG meta directory, format 1
|
|
@ -1 +0,0 @@
|
|||||||
parent_location = bzr+ssh://bazaar.launchpad.net/+branch/charm-helpers/
|
|
@ -1 +0,0 @@
|
|||||||
Bazaar Branch Format 7 (needs bzr 1.6)
|
|
@ -1 +0,0 @@
|
|||||||
504 stuart.bishop@canonical.com-20151210052353-xu19weti1yiknmjw
|
|
@ -1 +0,0 @@
|
|||||||
d16:0.3+121-0ubuntu148:clint@ubuntu.com-20120119205311-farsam12qwqfpeht16:0.3+151-0ubuntu157:package-import@ubuntu.com-20120723135208-55bdkt9jj6kthm4715:0.3+85-0ubuntu157:package-import@ubuntu.com-20110602121239-s187dh7hw4nsxci815:0.3+91-0ubuntu157:package-import@ubuntu.com-20111213094623-0jnrct1rra5t3i8c15:0.3+92-0ubuntu157:package-import@ubuntu.com-20111213125509-byllhab72ng4v8ud5:15.0763:christopher.glass@canonical.com-20150807090030-91abn11wg2m3rkkm5:15.1056:liam.young@canonical.com-20151021153122-r37il9ns908m9jlo16:upstream-0.3+11948:clint@ubuntu.com-20120116061231-c556j27mlme85pd316:upstream-0.3+12148:clint@ubuntu.com-20120119203619-8v109cyzgz26y7j216:upstream-0.3+13048:clint@ubuntu.com-20120423215821-7q8t24irg9oqxh2h16:upstream-0.3+13648:clint@ubuntu.com-20120315055631-fgk068rg71jcok4y23:upstream-0.3+136+bzr15148:clint@ubuntu.com-20120723172418-u4wp28bguqfy179e16:upstream-0.3+15157:package-import@ubuntu.com-20120723135208-fl89sbp2yji1eq6m16:upstream-0.3+15648:clint@ubuntu.com-20120828191429-p8blvq5urztls6pg15:upstream-0.3+8557:package-import@ubuntu.com-20110602121239-5531tjj4wq3b6lua15:upstream-0.3+9157:package-import@ubuntu.com-20111213094623-e7977kuf8j0ptlpy15:upstream-0.3+9257:package-import@ubuntu.com-20111213125509-9bbijfcobhtg28bv15:upstream-0.3+9848:clint@ubuntu.com-20111222083809-wnde1hp8bs1comil22:upstream-0.3+98+bzr11948:clint@ubuntu.com-20120116061231-c556j27mlme85pd321:upstream-0.3+bzr128-152:graham@canonical.com-20120306180338-njpkulb3k71hyn7i30:upstream-0.3+bzr128-1~precise152:graham@canonical.com-20120308173903-63bfm28mf4ny7cc86:v0.2.047:marco@ceppi.net-20140919172041-ibzt6ihw1u0x9rdq6:v0.2.147:marco@ceppi.net-20140919225737-k79k62rc61cst3c86:v0.2.264:tim.van.steenburgh@canonical.com-20140923163551-95mo9lu3qjexe2y66:v0.6.064:tim.van.steenburgh@canonical.com-20151202165701-ye2hvfgo5vgd27za6:v1.0.047:marco@ceppi.net-20130917104100-8tuzy22d3aug9gcd6:v1.0.147:marco@ceppi.net-20131009172145-2sqdmmh56a50vwot6:v1.1.047:marco@ceppi.net-20131101023136-r
wc4xad5ul0zw20g6:v1.1.147:marco@ceppi.net-20131104182609-hxjri0q0e3tanfrw6:v1.1.247:marco@ceppi.net-20131104210314-4fvutpctef3rnb6u6:v1.2.147:marco@ceppi.net-20131210210314-b993t2h57yrb8ppy6:v1.2.247:marco@ceppi.net-20131210213152-4njgjqioozrtyw4s6:v1.2.347:marco@ceppi.net-20131211171830-u4lrxnp0j8pwycs36:v1.2.447:marco@ceppi.net-20131212172808-mcdjw4uc0dwtr26m6:v1.2.547:marco@ceppi.net-20131214233842-677o4u4g6gy1s0o46:v1.2.647:marco@ceppi.net-20140110201401-vtm5y017f0q7jmeq6:v1.2.747:marco@ceppi.net-20140112183856-hh3k2bsak2zmxvpp6:v1.2.847:marco@ceppi.net-20140115203234-5cmfvs37yt330ixd6:v1.2.947:marco@ceppi.net-20140124172015-tcwt43x00lkg3dld6:v1.3.047:marco@ceppi.net-20140618195927-ru2f071n0rdkuxgc6:v1.3.147:marco@ceppi.net-20140620183431-ebv6f0xyknpfarnp6:v1.3.247:marco@ceppi.net-20140623144459-0gacbpgknc08xrnve
|
|
@ -1 +0,0 @@
|
|||||||
BZR conflict list format 1
|
|
Binary file not shown.
@ -1 +0,0 @@
|
|||||||
Bazaar Working Tree Format 6 (bzr 1.14)
|
|
@ -1 +0,0 @@
|
|||||||
Bazaar repository format 2a (needs bzr 1.16 or later)
|
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,16 +0,0 @@
|
|||||||
*.pyc
|
|
||||||
__pycache__/
|
|
||||||
dist/
|
|
||||||
build/
|
|
||||||
MANIFEST
|
|
||||||
charmhelpers/version.py
|
|
||||||
.coverage
|
|
||||||
.env/
|
|
||||||
coverage.xml
|
|
||||||
docs/_build
|
|
||||||
.idea
|
|
||||||
.project
|
|
||||||
.pydevproject
|
|
||||||
.settings
|
|
||||||
.venv
|
|
||||||
.venv3
|
|
@ -1,28 +0,0 @@
|
|||||||
Hacking on Docs
|
|
||||||
---------------
|
|
||||||
|
|
||||||
To build the html documentation::
|
|
||||||
|
|
||||||
make docs
|
|
||||||
|
|
||||||
To browse the html documentation locally::
|
|
||||||
|
|
||||||
make docs
|
|
||||||
cd docs/_build/html
|
|
||||||
python -m SimpleHTTPServer 8765
|
|
||||||
# point web browser to http://localhost:8765
|
|
||||||
|
|
||||||
To build and upload package and doc updates to PyPI::
|
|
||||||
|
|
||||||
make release
|
|
||||||
# note: if the package version already exists on PyPI
|
|
||||||
# this command will upload doc updates only
|
|
||||||
|
|
||||||
|
|
||||||
PyPI Package and Docs
|
|
||||||
---------------------
|
|
||||||
|
|
||||||
The published package and docs currently live at:
|
|
||||||
|
|
||||||
https://pypi.python.org/pypi/charmhelpers
|
|
||||||
http://pythonhosted.org/charmhelpers/
|
|
@ -1,675 +0,0 @@
|
|||||||
GNU GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 29 June 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
Preamble
|
|
||||||
|
|
||||||
The GNU General Public License is a free, copyleft license for
|
|
||||||
software and other kinds of works.
|
|
||||||
|
|
||||||
The licenses for most software and other practical works are designed
|
|
||||||
to take away your freedom to share and change the works. By contrast,
|
|
||||||
the GNU General Public License is intended to guarantee your freedom to
|
|
||||||
share and change all versions of a program--to make sure it remains free
|
|
||||||
software for all its users. We, the Free Software Foundation, use the
|
|
||||||
GNU General Public License for most of our software; it applies also to
|
|
||||||
any other work released this way by its authors. You can apply it to
|
|
||||||
your programs, too.
|
|
||||||
|
|
||||||
When we speak of free software, we are referring to freedom, not
|
|
||||||
price. Our General Public Licenses are designed to make sure that you
|
|
||||||
have the freedom to distribute copies of free software (and charge for
|
|
||||||
them if you wish), that you receive source code or can get it if you
|
|
||||||
want it, that you can change the software or use pieces of it in new
|
|
||||||
free programs, and that you know you can do these things.
|
|
||||||
|
|
||||||
To protect your rights, we need to prevent others from denying you
|
|
||||||
these rights or asking you to surrender the rights. Therefore, you have
|
|
||||||
certain responsibilities if you distribute copies of the software, or if
|
|
||||||
you modify it: responsibilities to respect the freedom of others.
|
|
||||||
|
|
||||||
For example, if you distribute copies of such a program, whether
|
|
||||||
gratis or for a fee, you must pass on to the recipients the same
|
|
||||||
freedoms that you received. You must make sure that they, too, receive
|
|
||||||
or can get the source code. And you must show them these terms so they
|
|
||||||
know their rights.
|
|
||||||
|
|
||||||
Developers that use the GNU GPL protect your rights with two steps:
|
|
||||||
(1) assert copyright on the software, and (2) offer you this License
|
|
||||||
giving you legal permission to copy, distribute and/or modify it.
|
|
||||||
|
|
||||||
For the developers' and authors' protection, the GPL clearly explains
|
|
||||||
that there is no warranty for this free software. For both users' and
|
|
||||||
authors' sake, the GPL requires that modified versions be marked as
|
|
||||||
changed, so that their problems will not be attributed erroneously to
|
|
||||||
authors of previous versions.
|
|
||||||
|
|
||||||
Some devices are designed to deny users access to install or run
|
|
||||||
modified versions of the software inside them, although the manufacturer
|
|
||||||
can do so. This is fundamentally incompatible with the aim of
|
|
||||||
protecting users' freedom to change the software. The systematic
|
|
||||||
pattern of such abuse occurs in the area of products for individuals to
|
|
||||||
use, which is precisely where it is most unacceptable. Therefore, we
|
|
||||||
have designed this version of the GPL to prohibit the practice for those
|
|
||||||
products. If such problems arise substantially in other domains, we
|
|
||||||
stand ready to extend this provision to those domains in future versions
|
|
||||||
of the GPL, as needed to protect the freedom of users.
|
|
||||||
|
|
||||||
Finally, every program is threatened constantly by software patents.
|
|
||||||
States should not allow patents to restrict development and use of
|
|
||||||
software on general-purpose computers, but in those that do, we wish to
|
|
||||||
avoid the special danger that patents applied to a free program could
|
|
||||||
make it effectively proprietary. To prevent this, the GPL assures that
|
|
||||||
patents cannot be used to render the program non-free.
|
|
||||||
|
|
||||||
The precise terms and conditions for copying, distribution and
|
|
||||||
modification follow.
|
|
||||||
|
|
||||||
TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
0. Definitions.
|
|
||||||
|
|
||||||
"This License" refers to version 3 of the GNU General Public License.
|
|
||||||
|
|
||||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
|
||||||
works, such as semiconductor masks.
|
|
||||||
|
|
||||||
"The Program" refers to any copyrightable work licensed under this
|
|
||||||
License. Each licensee is addressed as "you". "Licensees" and
|
|
||||||
"recipients" may be individuals or organizations.
|
|
||||||
|
|
||||||
To "modify" a work means to copy from or adapt all or part of the work
|
|
||||||
in a fashion requiring copyright permission, other than the making of an
|
|
||||||
exact copy. The resulting work is called a "modified version" of the
|
|
||||||
earlier work or a work "based on" the earlier work.
|
|
||||||
|
|
||||||
A "covered work" means either the unmodified Program or a work based
|
|
||||||
on the Program.
|
|
||||||
|
|
||||||
To "propagate" a work means to do anything with it that, without
|
|
||||||
permission, would make you directly or secondarily liable for
|
|
||||||
infringement under applicable copyright law, except executing it on a
|
|
||||||
computer or modifying a private copy. Propagation includes copying,
|
|
||||||
distribution (with or without modification), making available to the
|
|
||||||
public, and in some countries other activities as well.
|
|
||||||
|
|
||||||
To "convey" a work means any kind of propagation that enables other
|
|
||||||
parties to make or receive copies. Mere interaction with a user through
|
|
||||||
a computer network, with no transfer of a copy, is not conveying.
|
|
||||||
|
|
||||||
An interactive user interface displays "Appropriate Legal Notices"
|
|
||||||
to the extent that it includes a convenient and prominently visible
|
|
||||||
feature that (1) displays an appropriate copyright notice, and (2)
|
|
||||||
tells the user that there is no warranty for the work (except to the
|
|
||||||
extent that warranties are provided), that licensees may convey the
|
|
||||||
work under this License, and how to view a copy of this License. If
|
|
||||||
the interface presents a list of user commands or options, such as a
|
|
||||||
menu, a prominent item in the list meets this criterion.
|
|
||||||
|
|
||||||
1. Source Code.
|
|
||||||
|
|
||||||
The "source code" for a work means the preferred form of the work
|
|
||||||
for making modifications to it. "Object code" means any non-source
|
|
||||||
form of a work.
|
|
||||||
|
|
||||||
A "Standard Interface" means an interface that either is an official
|
|
||||||
standard defined by a recognized standards body, or, in the case of
|
|
||||||
interfaces specified for a particular programming language, one that
|
|
||||||
is widely used among developers working in that language.
|
|
||||||
|
|
||||||
The "System Libraries" of an executable work include anything, other
|
|
||||||
than the work as a whole, that (a) is included in the normal form of
|
|
||||||
packaging a Major Component, but which is not part of that Major
|
|
||||||
Component, and (b) serves only to enable use of the work with that
|
|
||||||
Major Component, or to implement a Standard Interface for which an
|
|
||||||
implementation is available to the public in source code form. A
|
|
||||||
"Major Component", in this context, means a major essential component
|
|
||||||
(kernel, window system, and so on) of the specific operating system
|
|
||||||
(if any) on which the executable work runs, or a compiler used to
|
|
||||||
produce the work, or an object code interpreter used to run it.
|
|
||||||
|
|
||||||
The "Corresponding Source" for a work in object code form means all
|
|
||||||
the source code needed to generate, install, and (for an executable
|
|
||||||
work) run the object code and to modify the work, including scripts to
|
|
||||||
control those activities. However, it does not include the work's
|
|
||||||
System Libraries, or general-purpose tools or generally available free
|
|
||||||
programs which are used unmodified in performing those activities but
|
|
||||||
which are not part of the work. For example, Corresponding Source
|
|
||||||
includes interface definition files associated with source files for
|
|
||||||
the work, and the source code for shared libraries and dynamically
|
|
||||||
linked subprograms that the work is specifically designed to require,
|
|
||||||
such as by intimate data communication or control flow between those
|
|
||||||
subprograms and other parts of the work.
|
|
||||||
|
|
||||||
The Corresponding Source need not include anything that users
|
|
||||||
can regenerate automatically from other parts of the Corresponding
|
|
||||||
Source.
|
|
||||||
|
|
||||||
The Corresponding Source for a work in source code form is that
|
|
||||||
same work.
|
|
||||||
|
|
||||||
2. Basic Permissions.
|
|
||||||
|
|
||||||
All rights granted under this License are granted for the term of
|
|
||||||
copyright on the Program, and are irrevocable provided the stated
|
|
||||||
conditions are met. This License explicitly affirms your unlimited
|
|
||||||
permission to run the unmodified Program. The output from running a
|
|
||||||
covered work is covered by this License only if the output, given its
|
|
||||||
content, constitutes a covered work. This License acknowledges your
|
|
||||||
rights of fair use or other equivalent, as provided by copyright law.
|
|
||||||
|
|
||||||
You may make, run and propagate covered works that you do not
|
|
||||||
convey, without conditions so long as your license otherwise remains
|
|
||||||
in force. You may convey covered works to others for the sole purpose
|
|
||||||
of having them make modifications exclusively for you, or provide you
|
|
||||||
with facilities for running those works, provided that you comply with
|
|
||||||
the terms of this License in conveying all material for which you do
|
|
||||||
not control copyright. Those thus making or running the covered works
|
|
||||||
for you must do so exclusively on your behalf, under your direction
|
|
||||||
and control, on terms that prohibit them from making any copies of
|
|
||||||
your copyrighted material outside their relationship with you.
|
|
||||||
|
|
||||||
Conveying under any other circumstances is permitted solely under
|
|
||||||
the conditions stated below. Sublicensing is not allowed; section 10
|
|
||||||
makes it unnecessary.
|
|
||||||
|
|
||||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
|
||||||
|
|
||||||
No covered work shall be deemed part of an effective technological
|
|
||||||
measure under any applicable law fulfilling obligations under article
|
|
||||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
|
||||||
similar laws prohibiting or restricting circumvention of such
|
|
||||||
measures.
|
|
||||||
|
|
||||||
When you convey a covered work, you waive any legal power to forbid
|
|
||||||
circumvention of technological measures to the extent such circumvention
|
|
||||||
is effected by exercising rights under this License with respect to
|
|
||||||
the covered work, and you disclaim any intention to limit operation or
|
|
||||||
modification of the work as a means of enforcing, against the work's
|
|
||||||
users, your or third parties' legal rights to forbid circumvention of
|
|
||||||
technological measures.
|
|
||||||
|
|
||||||
4. Conveying Verbatim Copies.
|
|
||||||
|
|
||||||
You may convey verbatim copies of the Program's source code as you
|
|
||||||
receive it, in any medium, provided that you conspicuously and
|
|
||||||
appropriately publish on each copy an appropriate copyright notice;
|
|
||||||
keep intact all notices stating that this License and any
|
|
||||||
non-permissive terms added in accord with section 7 apply to the code;
|
|
||||||
keep intact all notices of the absence of any warranty; and give all
|
|
||||||
recipients a copy of this License along with the Program.
|
|
||||||
|
|
||||||
You may charge any price or no price for each copy that you convey,
|
|
||||||
and you may offer support or warranty protection for a fee.
|
|
||||||
|
|
||||||
5. Conveying Modified Source Versions.
|
|
||||||
|
|
||||||
You may convey a work based on the Program, or the modifications to
|
|
||||||
produce it from the Program, in the form of source code under the
|
|
||||||
terms of section 4, provided that you also meet all of these conditions:
|
|
||||||
|
|
||||||
a) The work must carry prominent notices stating that you modified
|
|
||||||
it, and giving a relevant date.
|
|
||||||
|
|
||||||
b) The work must carry prominent notices stating that it is
|
|
||||||
released under this License and any conditions added under section
|
|
||||||
7. This requirement modifies the requirement in section 4 to
|
|
||||||
"keep intact all notices".
|
|
||||||
|
|
||||||
c) You must license the entire work, as a whole, under this
|
|
||||||
License to anyone who comes into possession of a copy. This
|
|
||||||
License will therefore apply, along with any applicable section 7
|
|
||||||
additional terms, to the whole of the work, and all its parts,
|
|
||||||
regardless of how they are packaged. This License gives no
|
|
||||||
permission to license the work in any other way, but it does not
|
|
||||||
invalidate such permission if you have separately received it.
|
|
||||||
|
|
||||||
d) If the work has interactive user interfaces, each must display
|
|
||||||
Appropriate Legal Notices; however, if the Program has interactive
|
|
||||||
interfaces that do not display Appropriate Legal Notices, your
|
|
||||||
work need not make them do so.
|
|
||||||
|
|
||||||
A compilation of a covered work with other separate and independent
|
|
||||||
works, which are not by their nature extensions of the covered work,
|
|
||||||
and which are not combined with it such as to form a larger program,
|
|
||||||
in or on a volume of a storage or distribution medium, is called an
|
|
||||||
"aggregate" if the compilation and its resulting copyright are not
|
|
||||||
used to limit the access or legal rights of the compilation's users
|
|
||||||
beyond what the individual works permit. Inclusion of a covered work
|
|
||||||
in an aggregate does not cause this License to apply to the other
|
|
||||||
parts of the aggregate.
|
|
||||||
|
|
||||||
6. Conveying Non-Source Forms.
|
|
||||||
|
|
||||||
You may convey a covered work in object code form under the terms
|
|
||||||
of sections 4 and 5, provided that you also convey the
|
|
||||||
machine-readable Corresponding Source under the terms of this License,
|
|
||||||
in one of these ways:
|
|
||||||
|
|
||||||
a) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by the
|
|
||||||
Corresponding Source fixed on a durable physical medium
|
|
||||||
customarily used for software interchange.
|
|
||||||
|
|
||||||
b) Convey the object code in, or embodied in, a physical product
|
|
||||||
(including a physical distribution medium), accompanied by a
|
|
||||||
written offer, valid for at least three years and valid for as
|
|
||||||
long as you offer spare parts or customer support for that product
|
|
||||||
model, to give anyone who possesses the object code either (1) a
|
|
||||||
copy of the Corresponding Source for all the software in the
|
|
||||||
product that is covered by this License, on a durable physical
|
|
||||||
medium customarily used for software interchange, for a price no
|
|
||||||
more than your reasonable cost of physically performing this
|
|
||||||
conveying of source, or (2) access to copy the
|
|
||||||
Corresponding Source from a network server at no charge.
|
|
||||||
|
|
||||||
c) Convey individual copies of the object code with a copy of the
|
|
||||||
written offer to provide the Corresponding Source. This
|
|
||||||
alternative is allowed only occasionally and noncommercially, and
|
|
||||||
only if you received the object code with such an offer, in accord
|
|
||||||
with subsection 6b.
|
|
||||||
|
|
||||||
d) Convey the object code by offering access from a designated
|
|
||||||
place (gratis or for a charge), and offer equivalent access to the
|
|
||||||
Corresponding Source in the same way through the same place at no
|
|
||||||
further charge. You need not require recipients to copy the
|
|
||||||
Corresponding Source along with the object code. If the place to
|
|
||||||
copy the object code is a network server, the Corresponding Source
|
|
||||||
may be on a different server (operated by you or a third party)
|
|
||||||
that supports equivalent copying facilities, provided you maintain
|
|
||||||
clear directions next to the object code saying where to find the
|
|
||||||
Corresponding Source. Regardless of what server hosts the
|
|
||||||
Corresponding Source, you remain obligated to ensure that it is
|
|
||||||
available for as long as needed to satisfy these requirements.
|
|
||||||
|
|
||||||
e) Convey the object code using peer-to-peer transmission, provided
|
|
||||||
you inform other peers where the object code and Corresponding
|
|
||||||
Source of the work are being offered to the general public at no
|
|
||||||
charge under subsection 6d.
|
|
||||||
|
|
||||||
A separable portion of the object code, whose source code is excluded
|
|
||||||
from the Corresponding Source as a System Library, need not be
|
|
||||||
included in conveying the object code work.
|
|
||||||
|
|
||||||
A "User Product" is either (1) a "consumer product", which means any
|
|
||||||
tangible personal property which is normally used for personal, family,
|
|
||||||
or household purposes, or (2) anything designed or sold for incorporation
|
|
||||||
into a dwelling. In determining whether a product is a consumer product,
|
|
||||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
|
||||||
product received by a particular user, "normally used" refers to a
|
|
||||||
typical or common use of that class of product, regardless of the status
|
|
||||||
of the particular user or of the way in which the particular user
|
|
||||||
actually uses, or expects or is expected to use, the product. A product
|
|
||||||
is a consumer product regardless of whether the product has substantial
|
|
||||||
commercial, industrial or non-consumer uses, unless such uses represent
|
|
||||||
the only significant mode of use of the product.
|
|
||||||
|
|
||||||
"Installation Information" for a User Product means any methods,
|
|
||||||
procedures, authorization keys, or other information required to install
|
|
||||||
and execute modified versions of a covered work in that User Product from
|
|
||||||
a modified version of its Corresponding Source. The information must
|
|
||||||
suffice to ensure that the continued functioning of the modified object
|
|
||||||
code is in no case prevented or interfered with solely because
|
|
||||||
modification has been made.
|
|
||||||
|
|
||||||
If you convey an object code work under this section in, or with, or
|
|
||||||
specifically for use in, a User Product, and the conveying occurs as
|
|
||||||
part of a transaction in which the right of possession and use of the
|
|
||||||
User Product is transferred to the recipient in perpetuity or for a
|
|
||||||
fixed term (regardless of how the transaction is characterized), the
|
|
||||||
Corresponding Source conveyed under this section must be accompanied
|
|
||||||
by the Installation Information. But this requirement does not apply
|
|
||||||
if neither you nor any third party retains the ability to install
|
|
||||||
modified object code on the User Product (for example, the work has
|
|
||||||
been installed in ROM).
|
|
||||||
|
|
||||||
The requirement to provide Installation Information does not include a
|
|
||||||
requirement to continue to provide support service, warranty, or updates
|
|
||||||
for a work that has been modified or installed by the recipient, or for
|
|
||||||
the User Product in which it has been modified or installed. Access to a
|
|
||||||
network may be denied when the modification itself materially and
|
|
||||||
adversely affects the operation of the network or violates the rules and
|
|
||||||
protocols for communication across the network.
|
|
||||||
|
|
||||||
Corresponding Source conveyed, and Installation Information provided,
|
|
||||||
in accord with this section must be in a format that is publicly
|
|
||||||
documented (and with an implementation available to the public in
|
|
||||||
source code form), and must require no special password or key for
|
|
||||||
unpacking, reading or copying.
|
|
||||||
|
|
||||||
7. Additional Terms.
|
|
||||||
|
|
||||||
"Additional permissions" are terms that supplement the terms of this
|
|
||||||
License by making exceptions from one or more of its conditions.
|
|
||||||
Additional permissions that are applicable to the entire Program shall
|
|
||||||
be treated as though they were included in this License, to the extent
|
|
||||||
that they are valid under applicable law. If additional permissions
|
|
||||||
apply only to part of the Program, that part may be used separately
|
|
||||||
under those permissions, but the entire Program remains governed by
|
|
||||||
this License without regard to the additional permissions.
|
|
||||||
|
|
||||||
When you convey a copy of a covered work, you may at your option
|
|
||||||
remove any additional permissions from that copy, or from any part of
|
|
||||||
it. (Additional permissions may be written to require their own
|
|
||||||
removal in certain cases when you modify the work.) You may place
|
|
||||||
additional permissions on material, added by you to a covered work,
|
|
||||||
for which you have or can give appropriate copyright permission.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, for material you
|
|
||||||
add to a covered work, you may (if authorized by the copyright holders of
|
|
||||||
that material) supplement the terms of this License with terms:
|
|
||||||
|
|
||||||
a) Disclaiming warranty or limiting liability differently from the
|
|
||||||
terms of sections 15 and 16 of this License; or
|
|
||||||
|
|
||||||
b) Requiring preservation of specified reasonable legal notices or
|
|
||||||
author attributions in that material or in the Appropriate Legal
|
|
||||||
Notices displayed by works containing it; or
|
|
||||||
|
|
||||||
c) Prohibiting misrepresentation of the origin of that material, or
|
|
||||||
requiring that modified versions of such material be marked in
|
|
||||||
reasonable ways as different from the original version; or
|
|
||||||
|
|
||||||
d) Limiting the use for publicity purposes of names of licensors or
|
|
||||||
authors of the material; or
|
|
||||||
|
|
||||||
e) Declining to grant rights under trademark law for use of some
|
|
||||||
trade names, trademarks, or service marks; or
|
|
||||||
|
|
||||||
f) Requiring indemnification of licensors and authors of that
|
|
||||||
material by anyone who conveys the material (or modified versions of
|
|
||||||
it) with contractual assumptions of liability to the recipient, for
|
|
||||||
any liability that these contractual assumptions directly impose on
|
|
||||||
those licensors and authors.
|
|
||||||
|
|
||||||
All other non-permissive additional terms are considered "further
|
|
||||||
restrictions" within the meaning of section 10. If the Program as you
|
|
||||||
received it, or any part of it, contains a notice stating that it is
|
|
||||||
governed by this License along with a term that is a further
|
|
||||||
restriction, you may remove that term. If a license document contains
|
|
||||||
a further restriction but permits relicensing or conveying under this
|
|
||||||
License, you may add to a covered work material governed by the terms
|
|
||||||
of that license document, provided that the further restriction does
|
|
||||||
not survive such relicensing or conveying.
|
|
||||||
|
|
||||||
If you add terms to a covered work in accord with this section, you
|
|
||||||
must place, in the relevant source files, a statement of the
|
|
||||||
additional terms that apply to those files, or a notice indicating
|
|
||||||
where to find the applicable terms.
|
|
||||||
|
|
||||||
Additional terms, permissive or non-permissive, may be stated in the
|
|
||||||
form of a separately written license, or stated as exceptions;
|
|
||||||
the above requirements apply either way.
|
|
||||||
|
|
||||||
8. Termination.
|
|
||||||
|
|
||||||
You may not propagate or modify a covered work except as expressly
|
|
||||||
provided under this License. Any attempt otherwise to propagate or
|
|
||||||
modify it is void, and will automatically terminate your rights under
|
|
||||||
this License (including any patent licenses granted under the third
|
|
||||||
paragraph of section 11).
|
|
||||||
|
|
||||||
However, if you cease all violation of this License, then your
|
|
||||||
license from a particular copyright holder is reinstated (a)
|
|
||||||
provisionally, unless and until the copyright holder explicitly and
|
|
||||||
finally terminates your license, and (b) permanently, if the copyright
|
|
||||||
holder fails to notify you of the violation by some reasonable means
|
|
||||||
prior to 60 days after the cessation.
|
|
||||||
|
|
||||||
Moreover, your license from a particular copyright holder is
|
|
||||||
reinstated permanently if the copyright holder notifies you of the
|
|
||||||
violation by some reasonable means, this is the first time you have
|
|
||||||
received notice of violation of this License (for any work) from that
|
|
||||||
copyright holder, and you cure the violation prior to 30 days after
|
|
||||||
your receipt of the notice.
|
|
||||||
|
|
||||||
Termination of your rights under this section does not terminate the
|
|
||||||
licenses of parties who have received copies or rights from you under
|
|
||||||
this License. If your rights have been terminated and not permanently
|
|
||||||
reinstated, you do not qualify to receive new licenses for the same
|
|
||||||
material under section 10.
|
|
||||||
|
|
||||||
9. Acceptance Not Required for Having Copies.
|
|
||||||
|
|
||||||
You are not required to accept this License in order to receive or
|
|
||||||
run a copy of the Program. Ancillary propagation of a covered work
|
|
||||||
occurring solely as a consequence of using peer-to-peer transmission
|
|
||||||
to receive a copy likewise does not require acceptance. However,
|
|
||||||
nothing other than this License grants you permission to propagate or
|
|
||||||
modify any covered work. These actions infringe copyright if you do
|
|
||||||
not accept this License. Therefore, by modifying or propagating a
|
|
||||||
covered work, you indicate your acceptance of this License to do so.
|
|
||||||
|
|
||||||
10. Automatic Licensing of Downstream Recipients.
|
|
||||||
|
|
||||||
Each time you convey a covered work, the recipient automatically
|
|
||||||
receives a license from the original licensors, to run, modify and
|
|
||||||
propagate that work, subject to this License. You are not responsible
|
|
||||||
for enforcing compliance by third parties with this License.
|
|
||||||
|
|
||||||
An "entity transaction" is a transaction transferring control of an
|
|
||||||
organization, or substantially all assets of one, or subdividing an
|
|
||||||
organization, or merging organizations. If propagation of a covered
|
|
||||||
work results from an entity transaction, each party to that
|
|
||||||
transaction who receives a copy of the work also receives whatever
|
|
||||||
licenses to the work the party's predecessor in interest had or could
|
|
||||||
give under the previous paragraph, plus a right to possession of the
|
|
||||||
Corresponding Source of the work from the predecessor in interest, if
|
|
||||||
the predecessor has it or can get it with reasonable efforts.
|
|
||||||
|
|
||||||
You may not impose any further restrictions on the exercise of the
|
|
||||||
rights granted or affirmed under this License. For example, you may
|
|
||||||
not impose a license fee, royalty, or other charge for exercise of
|
|
||||||
rights granted under this License, and you may not initiate litigation
|
|
||||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
|
||||||
any patent claim is infringed by making, using, selling, offering for
|
|
||||||
sale, or importing the Program or any portion of it.
|
|
||||||
|
|
||||||
11. Patents.
|
|
||||||
|
|
||||||
A "contributor" is a copyright holder who authorizes use under this
|
|
||||||
License of the Program or a work on which the Program is based. The
|
|
||||||
work thus licensed is called the contributor's "contributor version".
|
|
||||||
|
|
||||||
A contributor's "essential patent claims" are all patent claims
|
|
||||||
owned or controlled by the contributor, whether already acquired or
|
|
||||||
hereafter acquired, that would be infringed by some manner, permitted
|
|
||||||
by this License, of making, using, or selling its contributor version,
|
|
||||||
but do not include claims that would be infringed only as a
|
|
||||||
consequence of further modification of the contributor version. For
|
|
||||||
purposes of this definition, "control" includes the right to grant
|
|
||||||
patent sublicenses in a manner consistent with the requirements of
|
|
||||||
this License.
|
|
||||||
|
|
||||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
|
||||||
patent license under the contributor's essential patent claims, to
|
|
||||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
|
||||||
propagate the contents of its contributor version.
|
|
||||||
|
|
||||||
In the following three paragraphs, a "patent license" is any express
|
|
||||||
agreement or commitment, however denominated, not to enforce a patent
|
|
||||||
(such as an express permission to practice a patent or covenant not to
|
|
||||||
sue for patent infringement). To "grant" such a patent license to a
|
|
||||||
party means to make such an agreement or commitment not to enforce a
|
|
||||||
patent against the party.
|
|
||||||
|
|
||||||
If you convey a covered work, knowingly relying on a patent license,
|
|
||||||
and the Corresponding Source of the work is not available for anyone
|
|
||||||
to copy, free of charge and under the terms of this License, through a
|
|
||||||
publicly available network server or other readily accessible means,
|
|
||||||
then you must either (1) cause the Corresponding Source to be so
|
|
||||||
available, or (2) arrange to deprive yourself of the benefit of the
|
|
||||||
patent license for this particular work, or (3) arrange, in a manner
|
|
||||||
consistent with the requirements of this License, to extend the patent
|
|
||||||
license to downstream recipients. "Knowingly relying" means you have
|
|
||||||
actual knowledge that, but for the patent license, your conveying the
|
|
||||||
covered work in a country, or your recipient's use of the covered work
|
|
||||||
in a country, would infringe one or more identifiable patents in that
|
|
||||||
country that you have reason to believe are valid.
|
|
||||||
|
|
||||||
If, pursuant to or in connection with a single transaction or
|
|
||||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
|
||||||
covered work, and grant a patent license to some of the parties
|
|
||||||
receiving the covered work authorizing them to use, propagate, modify
|
|
||||||
or convey a specific copy of the covered work, then the patent license
|
|
||||||
you grant is automatically extended to all recipients of the covered
|
|
||||||
work and works based on it.
|
|
||||||
|
|
||||||
A patent license is "discriminatory" if it does not include within
|
|
||||||
the scope of its coverage, prohibits the exercise of, or is
|
|
||||||
conditioned on the non-exercise of one or more of the rights that are
|
|
||||||
specifically granted under this License. You may not convey a covered
|
|
||||||
work if you are a party to an arrangement with a third party that is
|
|
||||||
in the business of distributing software, under which you make payment
|
|
||||||
to the third party based on the extent of your activity of conveying
|
|
||||||
the work, and under which the third party grants, to any of the
|
|
||||||
parties who would receive the covered work from you, a discriminatory
|
|
||||||
patent license (a) in connection with copies of the covered work
|
|
||||||
conveyed by you (or copies made from those copies), or (b) primarily
|
|
||||||
for and in connection with specific products or compilations that
|
|
||||||
contain the covered work, unless you entered into that arrangement,
|
|
||||||
or that patent license was granted, prior to 28 March 2007.
|
|
||||||
|
|
||||||
Nothing in this License shall be construed as excluding or limiting
|
|
||||||
any implied license or other defenses to infringement that may
|
|
||||||
otherwise be available to you under applicable patent law.
|
|
||||||
|
|
||||||
12. No Surrender of Others' Freedom.
|
|
||||||
|
|
||||||
If conditions are imposed on you (whether by court order, agreement or
|
|
||||||
otherwise) that contradict the conditions of this License, they do not
|
|
||||||
excuse you from the conditions of this License. If you cannot convey a
|
|
||||||
covered work so as to satisfy simultaneously your obligations under this
|
|
||||||
License and any other pertinent obligations, then as a consequence you may
|
|
||||||
not convey it at all. For example, if you agree to terms that obligate you
|
|
||||||
to collect a royalty for further conveying from those to whom you convey
|
|
||||||
the Program, the only way you could satisfy both those terms and this
|
|
||||||
License would be to refrain entirely from conveying the Program.
|
|
||||||
|
|
||||||
13. Use with the GNU Affero General Public License.
|
|
||||||
|
|
||||||
Notwithstanding any other provision of this License, you have
|
|
||||||
permission to link or combine any covered work with a work licensed
|
|
||||||
under version 3 of the GNU Affero General Public License into a single
|
|
||||||
combined work, and to convey the resulting work. The terms of this
|
|
||||||
License will continue to apply to the part which is the covered work,
|
|
||||||
but the special requirements of the GNU Affero General Public License,
|
|
||||||
section 13, concerning interaction through a network will apply to the
|
|
||||||
combination as such.
|
|
||||||
|
|
||||||
14. Revised Versions of this License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions of
|
|
||||||
the GNU General Public License from time to time. Such new versions will
|
|
||||||
be similar in spirit to the present version, but may differ in detail to
|
|
||||||
address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Program specifies that a certain numbered version of the GNU General
|
|
||||||
Public License "or any later version" applies to it, you have the
|
|
||||||
option of following the terms and conditions either of that numbered
|
|
||||||
version or of any later version published by the Free Software
|
|
||||||
Foundation. If the Program does not specify a version number of the
|
|
||||||
GNU General Public License, you may choose any version ever published
|
|
||||||
by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Program specifies that a proxy can decide which future
|
|
||||||
versions of the GNU General Public License can be used, that proxy's
|
|
||||||
public statement of acceptance of a version permanently authorizes you
|
|
||||||
to choose that version for the Program.
|
|
||||||
|
|
||||||
Later license versions may give you additional or different
|
|
||||||
permissions. However, no additional obligations are imposed on any
|
|
||||||
author or copyright holder as a result of your choosing to follow a
|
|
||||||
later version.
|
|
||||||
|
|
||||||
15. Disclaimer of Warranty.
|
|
||||||
|
|
||||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
|
||||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
|
||||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
|
||||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
|
||||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
|
||||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
|
||||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
|
||||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
|
||||||
|
|
||||||
16. Limitation of Liability.
|
|
||||||
|
|
||||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
|
||||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
|
||||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
|
||||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
|
||||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
|
||||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
|
||||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
|
||||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
|
||||||
SUCH DAMAGES.
|
|
||||||
|
|
||||||
17. Interpretation of Sections 15 and 16.
|
|
||||||
|
|
||||||
If the disclaimer of warranty and limitation of liability provided
|
|
||||||
above cannot be given local legal effect according to their terms,
|
|
||||||
reviewing courts shall apply local law that most closely approximates
|
|
||||||
an absolute waiver of all civil liability in connection with the
|
|
||||||
Program, unless a warranty or assumption of liability accompanies a
|
|
||||||
copy of the Program in return for a fee.
|
|
||||||
|
|
||||||
END OF TERMS AND CONDITIONS
|
|
||||||
|
|
||||||
How to Apply These Terms to Your New Programs
|
|
||||||
|
|
||||||
If you develop a new program, and you want it to be of the greatest
|
|
||||||
possible use to the public, the best way to achieve this is to make it
|
|
||||||
free software which everyone can redistribute and change under these terms.
|
|
||||||
|
|
||||||
To do so, attach the following notices to the program. It is safest
|
|
||||||
to attach them to the start of each source file to most effectively
|
|
||||||
state the exclusion of warranty; and each file should have at least
|
|
||||||
the "copyright" line and a pointer to where the full notice is found.
|
|
||||||
|
|
||||||
<one line to give the program's name and a brief idea of what it does.>
|
|
||||||
Copyright (C) <year> <name of author>
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU General Public License as published by
|
|
||||||
the Free Software Foundation, either version 3 of the License, or
|
|
||||||
(at your option) any later version.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
Also add information on how to contact you by electronic and paper mail.
|
|
||||||
|
|
||||||
If the program does terminal interaction, make it output a short
|
|
||||||
notice like this when it starts in an interactive mode:
|
|
||||||
|
|
||||||
<program> Copyright (C) <year> <name of author>
|
|
||||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
|
||||||
This is free software, and you are welcome to redistribute it
|
|
||||||
under certain conditions; type `show c' for details.
|
|
||||||
|
|
||||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
|
||||||
parts of the General Public License. Of course, your program's commands
|
|
||||||
might be different; for a GUI interface, you would use an "about box".
|
|
||||||
|
|
||||||
You should also get your employer (if you work as a programmer) or school,
|
|
||||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
|
||||||
For more information on this, and how to apply and follow the GNU GPL, see
|
|
||||||
<http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
The GNU General Public License does not permit incorporating your program
|
|
||||||
into proprietary programs. If your program is a subroutine library, you
|
|
||||||
may consider it more useful to permit linking proprietary applications with
|
|
||||||
the library. If this is what you want to do, use the GNU Lesser General
|
|
||||||
Public License instead of this License. But first, please read
|
|
||||||
<http://www.gnu.org/philosophy/why-not-lgpl.html>.
|
|
||||||
|
|
@ -1,166 +0,0 @@
|
|||||||
GNU LESSER GENERAL PUBLIC LICENSE
|
|
||||||
Version 3, 29 June 2007
|
|
||||||
|
|
||||||
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
|
|
||||||
Everyone is permitted to copy and distribute verbatim copies
|
|
||||||
of this license document, but changing it is not allowed.
|
|
||||||
|
|
||||||
|
|
||||||
This version of the GNU Lesser General Public License incorporates
|
|
||||||
the terms and conditions of version 3 of the GNU General Public
|
|
||||||
License, supplemented by the additional permissions listed below.
|
|
||||||
|
|
||||||
0. Additional Definitions.
|
|
||||||
|
|
||||||
As used herein, "this License" refers to version 3 of the GNU Lesser
|
|
||||||
General Public License, and the "GNU GPL" refers to version 3 of the GNU
|
|
||||||
General Public License.
|
|
||||||
|
|
||||||
"The Library" refers to a covered work governed by this License,
|
|
||||||
other than an Application or a Combined Work as defined below.
|
|
||||||
|
|
||||||
An "Application" is any work that makes use of an interface provided
|
|
||||||
by the Library, but which is not otherwise based on the Library.
|
|
||||||
Defining a subclass of a class defined by the Library is deemed a mode
|
|
||||||
of using an interface provided by the Library.
|
|
||||||
|
|
||||||
A "Combined Work" is a work produced by combining or linking an
|
|
||||||
Application with the Library. The particular version of the Library
|
|
||||||
with which the Combined Work was made is also called the "Linked
|
|
||||||
Version".
|
|
||||||
|
|
||||||
The "Minimal Corresponding Source" for a Combined Work means the
|
|
||||||
Corresponding Source for the Combined Work, excluding any source code
|
|
||||||
for portions of the Combined Work that, considered in isolation, are
|
|
||||||
based on the Application, and not on the Linked Version.
|
|
||||||
|
|
||||||
The "Corresponding Application Code" for a Combined Work means the
|
|
||||||
object code and/or source code for the Application, including any data
|
|
||||||
and utility programs needed for reproducing the Combined Work from the
|
|
||||||
Application, but excluding the System Libraries of the Combined Work.
|
|
||||||
|
|
||||||
1. Exception to Section 3 of the GNU GPL.
|
|
||||||
|
|
||||||
You may convey a covered work under sections 3 and 4 of this License
|
|
||||||
without being bound by section 3 of the GNU GPL.
|
|
||||||
|
|
||||||
2. Conveying Modified Versions.
|
|
||||||
|
|
||||||
If you modify a copy of the Library, and, in your modifications, a
|
|
||||||
facility refers to a function or data to be supplied by an Application
|
|
||||||
that uses the facility (other than as an argument passed when the
|
|
||||||
facility is invoked), then you may convey a copy of the modified
|
|
||||||
version:
|
|
||||||
|
|
||||||
a) under this License, provided that you make a good faith effort to
|
|
||||||
ensure that, in the event an Application does not supply the
|
|
||||||
function or data, the facility still operates, and performs
|
|
||||||
whatever part of its purpose remains meaningful, or
|
|
||||||
|
|
||||||
b) under the GNU GPL, with none of the additional permissions of
|
|
||||||
this License applicable to that copy.
|
|
||||||
|
|
||||||
3. Object Code Incorporating Material from Library Header Files.
|
|
||||||
|
|
||||||
The object code form of an Application may incorporate material from
|
|
||||||
a header file that is part of the Library. You may convey such object
|
|
||||||
code under terms of your choice, provided that, if the incorporated
|
|
||||||
material is not limited to numerical parameters, data structure
|
|
||||||
layouts and accessors, or small macros, inline functions and templates
|
|
||||||
(ten or fewer lines in length), you do both of the following:
|
|
||||||
|
|
||||||
a) Give prominent notice with each copy of the object code that the
|
|
||||||
Library is used in it and that the Library and its use are
|
|
||||||
covered by this License.
|
|
||||||
|
|
||||||
b) Accompany the object code with a copy of the GNU GPL and this license
|
|
||||||
document.
|
|
||||||
|
|
||||||
4. Combined Works.
|
|
||||||
|
|
||||||
You may convey a Combined Work under terms of your choice that,
|
|
||||||
taken together, effectively do not restrict modification of the
|
|
||||||
portions of the Library contained in the Combined Work and reverse
|
|
||||||
engineering for debugging such modifications, if you also do each of
|
|
||||||
the following:
|
|
||||||
|
|
||||||
a) Give prominent notice with each copy of the Combined Work that
|
|
||||||
the Library is used in it and that the Library and its use are
|
|
||||||
covered by this License.
|
|
||||||
|
|
||||||
b) Accompany the Combined Work with a copy of the GNU GPL and this license
|
|
||||||
document.
|
|
||||||
|
|
||||||
c) For a Combined Work that displays copyright notices during
|
|
||||||
execution, include the copyright notice for the Library among
|
|
||||||
these notices, as well as a reference directing the user to the
|
|
||||||
copies of the GNU GPL and this license document.
|
|
||||||
|
|
||||||
d) Do one of the following:
|
|
||||||
|
|
||||||
0) Convey the Minimal Corresponding Source under the terms of this
|
|
||||||
License, and the Corresponding Application Code in a form
|
|
||||||
suitable for, and under terms that permit, the user to
|
|
||||||
recombine or relink the Application with a modified version of
|
|
||||||
the Linked Version to produce a modified Combined Work, in the
|
|
||||||
manner specified by section 6 of the GNU GPL for conveying
|
|
||||||
Corresponding Source.
|
|
||||||
|
|
||||||
1) Use a suitable shared library mechanism for linking with the
|
|
||||||
Library. A suitable mechanism is one that (a) uses at run time
|
|
||||||
a copy of the Library already present on the user's computer
|
|
||||||
system, and (b) will operate properly with a modified version
|
|
||||||
of the Library that is interface-compatible with the Linked
|
|
||||||
Version.
|
|
||||||
|
|
||||||
e) Provide Installation Information, but only if you would otherwise
|
|
||||||
be required to provide such information under section 6 of the
|
|
||||||
GNU GPL, and only to the extent that such information is
|
|
||||||
necessary to install and execute a modified version of the
|
|
||||||
Combined Work produced by recombining or relinking the
|
|
||||||
Application with a modified version of the Linked Version. (If
|
|
||||||
you use option 4d0, the Installation Information must accompany
|
|
||||||
the Minimal Corresponding Source and Corresponding Application
|
|
||||||
Code. If you use option 4d1, you must provide the Installation
|
|
||||||
Information in the manner specified by section 6 of the GNU GPL
|
|
||||||
for conveying Corresponding Source.)
|
|
||||||
|
|
||||||
5. Combined Libraries.
|
|
||||||
|
|
||||||
You may place library facilities that are a work based on the
|
|
||||||
Library side by side in a single library together with other library
|
|
||||||
facilities that are not Applications and are not covered by this
|
|
||||||
License, and convey such a combined library under terms of your
|
|
||||||
choice, if you do both of the following:
|
|
||||||
|
|
||||||
a) Accompany the combined library with a copy of the same work based
|
|
||||||
on the Library, uncombined with any other library facilities,
|
|
||||||
conveyed under the terms of this License.
|
|
||||||
|
|
||||||
b) Give prominent notice with the combined library that part of it
|
|
||||||
is a work based on the Library, and explaining where to find the
|
|
||||||
accompanying uncombined form of the same work.
|
|
||||||
|
|
||||||
6. Revised Versions of the GNU Lesser General Public License.
|
|
||||||
|
|
||||||
The Free Software Foundation may publish revised and/or new versions
|
|
||||||
of the GNU Lesser General Public License from time to time. Such new
|
|
||||||
versions will be similar in spirit to the present version, but may
|
|
||||||
differ in detail to address new problems or concerns.
|
|
||||||
|
|
||||||
Each version is given a distinguishing version number. If the
|
|
||||||
Library as you received it specifies that a certain numbered version
|
|
||||||
of the GNU Lesser General Public License "or any later version"
|
|
||||||
applies to it, you have the option of following the terms and
|
|
||||||
conditions either of that published version or of any later version
|
|
||||||
published by the Free Software Foundation. If the Library as you
|
|
||||||
received it does not specify a version number of the GNU Lesser
|
|
||||||
General Public License, you may choose any version of the GNU Lesser
|
|
||||||
General Public License ever published by the Free Software Foundation.
|
|
||||||
|
|
||||||
If the Library as you received it specifies that a proxy can decide
|
|
||||||
whether future versions of the GNU Lesser General Public License shall
|
|
||||||
apply, that proxy's public statement of acceptance of any version is
|
|
||||||
permanent authorization for you to choose that version for the
|
|
||||||
Library.
|
|
||||||
|
|
@ -1,6 +0,0 @@
|
|||||||
include *.txt
|
|
||||||
include Makefile
|
|
||||||
include VERSION
|
|
||||||
include MANIFEST.in
|
|
||||||
include scripts/*
|
|
||||||
recursive-include debian *
|
|
@ -1,88 +0,0 @@
|
|||||||
PROJECT=charmhelpers
|
|
||||||
PYTHON := /usr/bin/env python
|
|
||||||
SUITE=unstable
|
|
||||||
TESTS=tests/
|
|
||||||
|
|
||||||
all:
|
|
||||||
@echo "make source - Create source package"
|
|
||||||
@echo "make sdeb - Create debian source package"
|
|
||||||
@echo "make deb - Create debian package"
|
|
||||||
@echo "make clean"
|
|
||||||
@echo "make userinstall - Install locally"
|
|
||||||
@echo "make docs - Build html documentation"
|
|
||||||
@echo "make release - Build and upload package and docs to PyPI"
|
|
||||||
@echo "make test"
|
|
||||||
|
|
||||||
sdeb: source
|
|
||||||
scripts/build source
|
|
||||||
|
|
||||||
deb: source
|
|
||||||
scripts/build
|
|
||||||
|
|
||||||
source: setup.py
|
|
||||||
scripts/update-revno
|
|
||||||
python setup.py sdist
|
|
||||||
|
|
||||||
clean:
|
|
||||||
-python setup.py clean
|
|
||||||
rm -rf build/ MANIFEST
|
|
||||||
find . -name '*.pyc' -delete
|
|
||||||
rm -rf dist/*
|
|
||||||
rm -rf .venv
|
|
||||||
rm -rf .venv3
|
|
||||||
(which dh_clean && dh_clean) || true
|
|
||||||
|
|
||||||
userinstall:
|
|
||||||
scripts/update-revno
|
|
||||||
python setup.py install --user
|
|
||||||
|
|
||||||
.venv:
|
|
||||||
sudo apt-get install -y gcc python-dev python-virtualenv python-apt
|
|
||||||
virtualenv .venv --system-site-packages
|
|
||||||
.venv/bin/pip install -U pip
|
|
||||||
.venv/bin/pip install -U distribute
|
|
||||||
.venv/bin/pip install -I -r test_requirements.txt
|
|
||||||
.venv/bin/pip install bzr
|
|
||||||
|
|
||||||
.venv3:
|
|
||||||
sudo apt-get install -y gcc python3-dev python-virtualenv python3-apt
|
|
||||||
virtualenv .venv3 --python=python3 --system-site-packages
|
|
||||||
.venv3/bin/pip install -U pip
|
|
||||||
.venv3/bin/pip install -U distribute
|
|
||||||
.venv3/bin/pip install -I -r test_requirements.txt
|
|
||||||
|
|
||||||
# Note we don't even attempt to run tests if lint isn't passing.
|
|
||||||
test: lint test2 test3
|
|
||||||
@echo OK
|
|
||||||
|
|
||||||
test2:
|
|
||||||
@echo Starting Py2 tests...
|
|
||||||
.venv/bin/nosetests -s --nologcapture tests/
|
|
||||||
|
|
||||||
test3:
|
|
||||||
@echo Starting Py3 tests...
|
|
||||||
.venv3/bin/nosetests -s --nologcapture tests/
|
|
||||||
|
|
||||||
ftest: lint
|
|
||||||
@echo Starting fast tests...
|
|
||||||
.venv/bin/nosetests --attr '!slow' --nologcapture tests/
|
|
||||||
.venv3/bin/nosetests --attr '!slow' --nologcapture tests/
|
|
||||||
|
|
||||||
lint: .venv .venv3
|
|
||||||
@echo Checking for Python syntax...
|
|
||||||
@.venv/bin/flake8 --ignore=E501,E402 $(PROJECT) $(TESTS) tools/ \
|
|
||||||
&& echo Py2 OK
|
|
||||||
@.venv3/bin/flake8 --ignore=E501,E402 $(PROJECT) $(TESTS) tools/ \
|
|
||||||
&& echo Py3 OK
|
|
||||||
|
|
||||||
docs:
|
|
||||||
- [ -z "`dpkg -l | grep python-sphinx`" ] && sudo apt-get install python-sphinx -y
|
|
||||||
- [ -z "`dpkg -l | grep python-pip`" ] && sudo apt-get install python-pip -y
|
|
||||||
- [ -z "`pip list | grep -i sphinx-pypi-upload`" ] && sudo pip install sphinx-pypi-upload
|
|
||||||
cd docs && make html && cd -
|
|
||||||
.PHONY: docs
|
|
||||||
|
|
||||||
release: docs
|
|
||||||
$(PYTHON) setup.py sdist upload upload_sphinx
|
|
||||||
|
|
||||||
build: test lint docs
|
|
@ -1,5 +0,0 @@
|
|||||||
Required Packages for Running Tests
|
|
||||||
-----------------------------------
|
|
||||||
sudo apt-get install python-flake8 python-shelltoolbox python-tempita \
|
|
||||||
python-nose python-mock python-testtools python-jinja2 python-coverage \
|
|
||||||
python-git python-netifaces python-netaddr python-pip zip
|
|
@ -1,24 +0,0 @@
|
|||||||
============
|
|
||||||
CharmHelpers
|
|
||||||
============
|
|
||||||
|
|
||||||
CharmHelpers provides an opinionated set of tools for building Juju
|
|
||||||
charms that work together. In addition to basic tasks like interact-
|
|
||||||
ing with the charm environment and the machine it runs on, it also
|
|
||||||
helps keep you build hooks and establish relations effortlessly.
|
|
||||||
|
|
||||||
License
|
|
||||||
========
|
|
||||||
|
|
||||||
This program is free software: you can redistribute it and/or modify
|
|
||||||
it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
published by the Free Software Foundation.
|
|
||||||
|
|
||||||
This program is distributed in the hope that it will be useful,
|
|
||||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
GNU Lesser General Public License for more details.
|
|
||||||
|
|
||||||
You should have received a copy of the GNU General Public License
|
|
||||||
and GNU Lesser General Public License
|
|
||||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1 +0,0 @@
|
|||||||
0.6.0
|
|
@ -1,4 +0,0 @@
|
|||||||
This directory contains executables for accessing charmhelpers functionality
|
|
||||||
|
|
||||||
|
|
||||||
Please see charmhelpers.cli for the recommended way to add scripts.
|
|
@ -1,8 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
from charmhelpers.cli import cmdline
|
|
||||||
from charmhelpers.cli.commands import *
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
cmdline.run()
|
|
@ -1,31 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
from charmhelpers.contrib.charmsupport import execd
|
|
||||||
|
|
||||||
|
|
||||||
def run_execd(args):
|
|
||||||
execd.execd_run(args.module, args.dir, die_on_error=True)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_args():
|
|
||||||
parser = argparse.ArgumentParser(description='Perform common charm tasks')
|
|
||||||
subparsers = parser.add_subparsers(help='Commands')
|
|
||||||
|
|
||||||
execd_parser = subparsers.add_parser('execd',
|
|
||||||
help='Execute a directory of commands')
|
|
||||||
execd_parser.add_argument('--module', default='charm-pre-install',
|
|
||||||
help='module to run (default: charm-pre-install)')
|
|
||||||
execd_parser.add_argument('--dir',
|
|
||||||
help="Override the exec.d directory path")
|
|
||||||
execd_parser.set_defaults(func=run_execd)
|
|
||||||
|
|
||||||
return parser.parse_args()
|
|
||||||
|
|
||||||
|
|
||||||
def main():
|
|
||||||
arguments = parse_args()
|
|
||||||
arguments.func(arguments)
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
exit(main())
|
|
@ -1,11 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
'''
|
|
||||||
Directly call a salt command in the modules, does not require a running salt
|
|
||||||
minion to run.
|
|
||||||
'''
|
|
||||||
|
|
||||||
from salt.scripts import salt_call
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
salt_call()
|
|
@ -1,38 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Bootstrap charm-helpers, installing its dependencies if necessary using
|
|
||||||
# only standard libraries.
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
|
|
||||||
# Ensure the ``six`` compatibility library is importable, installing the
# distro package matching the running interpreter on demand.
try:
    import six  # flake8: noqa
except ImportError:
    pkg = 'python-six' if sys.version_info.major == 2 else 'python3-six'
    subprocess.check_call(['apt-get', 'install', '-y', pkg])
    import six  # flake8: noqa

# Likewise ensure PyYAML is importable.
try:
    import yaml  # flake8: noqa
except ImportError:
    pkg = 'python-yaml' if sys.version_info.major == 2 else 'python3-yaml'
    subprocess.check_call(['apt-get', 'install', '-y', pkg])
    import yaml  # flake8: noqa
|
|
@ -1,57 +0,0 @@
|
|||||||
==========
|
|
||||||
Commandant
|
|
||||||
==========
|
|
||||||
|
|
||||||
-----------------------------------------------------
|
|
||||||
Automatic command-line interfaces to Python functions
|
|
||||||
-----------------------------------------------------
|
|
||||||
|
|
||||||
One of the benefits of ``libvirt`` is the uniformity of the interface: the C API (as well as the bindings in other languages) is a set of functions that accept parameters that are nearly identical to the command-line arguments. If you run ``virsh``, you get an interactive command prompt that supports all of the same commands that your shell scripts use as ``virsh`` subcommands.
|
|
||||||
|
|
||||||
Command execution and stdio manipulation is the greatest common factor across all development systems in the POSIX environment. By exposing your functions as commands that manipulate streams of text, you can make life easier for all the Ruby and Erlang and Go programmers in your life.
|
|
||||||
|
|
||||||
Goals
|
|
||||||
=====
|
|
||||||
|
|
||||||
* Single decorator to expose a function as a command.
|
|
||||||
* now two decorators - one "automatic" and one that allows authors to manipulate the arguments for fine-grained control. (MW)
|
|
||||||
* Automatic analysis of function signature through ``inspect.getargspec()``
|
|
||||||
* Command argument parser built automatically with ``argparse``
|
|
||||||
* Interactive interpreter loop object made with ``Cmd``
|
|
||||||
* Options to output structured return value data via ``pprint``, ``yaml`` or ``json`` dumps.
|
|
||||||
|
|
||||||
Other Important Features that need writing
|
|
||||||
------------------------------------------
|
|
||||||
|
|
||||||
* Help and Usage documentation can be automatically generated, but it will be important to let users override this behaviour
|
|
||||||
* The decorator should allow specifying further parameters to the parser's add_argument() calls, to specify types or to make arguments behave as boolean flags, etc.
|
|
||||||
- Filename arguments are important, as good practice is for functions to accept file objects as parameters.
|
|
||||||
- choices arguments help to limit bad input before the function is called
|
|
||||||
* Some automatic behaviour could make for better defaults, once the user can override them.
|
|
||||||
- We could automatically detect arguments that default to False or True, and automatically support --no-foo for foo=True.
|
|
||||||
- We could automatically support hyphens as alternates for underscores
|
|
||||||
- Arguments defaulting to sequence types could support the ``append`` action.
|
|
||||||
|
|
||||||
|
|
||||||
-----------------------------------------------------
|
|
||||||
Implementing subcommands
|
|
||||||
-----------------------------------------------------
|
|
||||||
|
|
||||||
(WIP)
|
|
||||||
|
|
||||||
So as to avoid dependencies on the cli module, subcommands should be defined separately from their implementations. The recommendation would be to place definitions into separate modules near the implementations which they expose.
|
|
||||||
|
|
||||||
Some examples::
|
|
||||||
|
|
||||||
from charmhelpers.cli import CommandLine
|
|
||||||
from charmhelpers.payload import execd
|
|
||||||
from charmhelpers.foo import bar
|
|
||||||
|
|
||||||
cli = CommandLine()
|
|
||||||
|
|
||||||
cli.subcommand(execd.execd_run)
|
|
||||||
|
|
||||||
@cli.subcommand_builder("bar", help="Bar baz qux")
|
|
||||||
def barcmd_builder(subparser):
|
|
||||||
subparser.add_argument('argument1', help="yackety")
|
|
||||||
return bar
|
|
@ -1,191 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
import argparse
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from six.moves import zip
|
|
||||||
|
|
||||||
import charmhelpers.core.unitdata
|
|
||||||
|
|
||||||
|
|
||||||
class OutputFormatter(object):
    """Render a subcommand's return value in one of several text formats.

    Also wires the format-selection options (--format plus one short/long
    flag per format) into an argparse parser via add_arguments().
    """

    def __init__(self, outfile=sys.stdout):
        # Order matters: the first letter of each name becomes the short
        # option flag (-r, -j, -p, -y, -c, -t).
        self.formats = (
            "raw",
            "json",
            "py",
            "yaml",
            "csv",
            "tab",
        )
        self.outfile = outfile

    def add_arguments(self, argument_parser):
        """Register mutually-exclusive format-selection options on the parser."""
        group = argument_parser.add_mutually_exclusive_group()
        choices = self.supported_formats
        group.add_argument("--format", metavar='FMT',
                           help="Select output format for returned data, "
                                "where FMT is one of: {}".format(choices),
                           choices=choices, default='raw')
        for name in self.formats:
            # Each formatter method's docstring doubles as its option help.
            handler = getattr(self, name)
            group.add_argument("-{}".format(name[0]),
                               "--{}".format(name), action='store_const',
                               const=name, dest='format',
                               help=handler.__doc__)

    @property
    def supported_formats(self):
        """Tuple of format names this formatter understands."""
        return self.formats

    def raw(self, output):
        """Output data as raw string (default)"""
        if isinstance(output, (list, tuple)):
            output = '\n'.join(map(str, output))
        self.outfile.write(str(output))

    def py(self, output):
        """Output data as a nicely-formatted python data structure"""
        import pprint
        pprint.pprint(output, stream=self.outfile)

    def json(self, output):
        """Output data in JSON format"""
        import json
        json.dump(output, self.outfile)

    def yaml(self, output):
        """Output data in YAML format"""
        import yaml
        yaml.safe_dump(output, self.outfile)

    def csv(self, output):
        """Output data as excel-compatible CSV"""
        import csv
        csv.writer(self.outfile).writerows(output)

    def tab(self, output):
        """Output data in excel-compatible tab-delimited format"""
        import csv
        csv.writer(self.outfile, dialect=csv.excel_tab).writerows(output)

    def format_output(self, output, fmt='raw'):
        """Dispatch *output* to the formatter method named by *fmt*."""
        getattr(self, fmt)(output)
|
|
||||||
|
|
||||||
|
|
||||||
class CommandLine(object):
    """Build an argparse-based CLI whose subcommands are plain functions.

    Class attributes hold the defaults; __init__ lazily creates the parser,
    formatter and subparser collection the first time they are needed.
    """
    argument_parser = None  # Top-level argparse.ArgumentParser.
    subparsers = None       # Subparser collection for registered commands.
    formatter = None        # OutputFormatter used to render return values.
    exit_code = 0           # Process exit status; set by test commands.

    def __init__(self):
        if not self.argument_parser:
            self.argument_parser = argparse.ArgumentParser(description='Perform common charm tasks')
        if not self.formatter:
            self.formatter = OutputFormatter()
            self.formatter.add_arguments(self.argument_parser)
        if not self.subparsers:
            self.subparsers = self.argument_parser.add_subparsers(help='Commands')

    def subcommand(self, command_name=None):
        """
        Decorate a function as a subcommand. Use its arguments as the
        command-line arguments"""
        def wrapper(decorated):
            cmd_name = command_name or decorated.__name__
            subparser = self.subparsers.add_parser(cmd_name,
                                                   description=decorated.__doc__)
            for args, kwargs in describe_arguments(decorated):
                subparser.add_argument(*args, **kwargs)
            subparser.set_defaults(func=decorated)
            return decorated
        return wrapper

    def test_command(self, decorated):
        """
        Subcommand is a boolean test function, so bool return values should be
        converted to a 0/1 exit code.
        """
        decorated._cli_test_command = True
        return decorated

    def no_output(self, decorated):
        """
        Subcommand is not expected to return a value, so don't print a spurious None.
        """
        decorated._cli_no_output = True
        return decorated

    def subcommand_builder(self, command_name, description=None):
        """
        Decorate a function that builds a subcommand. Builders should accept a
        single argument (the subparser instance) and return the function to be
        run as the command."""
        def wrapper(decorated):
            subparser = self.subparsers.add_parser(command_name)
            func = decorated(subparser)
            subparser.set_defaults(func=func)
            subparser.description = description or func.__doc__
            # FIX: previously this wrapper returned None, silently rebinding
            # the decorated name to None at the call site.
            return decorated
        return wrapper

    def run(self):
        "Run cli, processing arguments and executing subcommands."
        arguments = self.argument_parser.parse_args()
        # inspect.getargspec was removed in Python 3.11; prefer the
        # compatible getfullargspec when present, falling back for Python 2.
        getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
        argspec = getargspec(arguments.func)
        vargs = []
        for arg in argspec.args:
            vargs.append(getattr(arguments, arg))
        if argspec.varargs:
            vargs.extend(getattr(arguments, argspec.varargs))
        output = arguments.func(*vargs)
        if getattr(arguments.func, '_cli_test_command', False):
            self.exit_code = 0 if output else 1
            output = ''
        if getattr(arguments.func, '_cli_no_output', False):
            output = ''
        self.formatter.format_output(output, arguments.format)
        # Flush any dirty unitdata so subcommand side effects persist.
        if charmhelpers.core.unitdata._KV:
            charmhelpers.core.unitdata._KV.flush()
|
|
||||||
|
|
||||||
|
|
||||||
cmdline = CommandLine()
|
|
||||||
|
|
||||||
|
|
||||||
def describe_arguments(func):
    """
    Analyze a function's signature and return a data structure suitable for
    passing in as arguments to an argparse parser's add_argument() method.

    Yields (args, kwargs) pairs: defaulted parameters first as --options,
    then positional parameters, then *varargs mapped to nargs='*'.
    """
    # inspect.getargspec was removed in Python 3.11; use getfullargspec
    # where available, falling back for Python 2.
    getargspec = getattr(inspect, 'getfullargspec', None) or inspect.getargspec
    argspec = getargspec(func)
    # we should probably raise an exception somewhere if func includes **kwargs
    if argspec.defaults:
        positional_args = argspec.args[:-len(argspec.defaults)]
        keyword_names = argspec.args[-len(argspec.defaults):]
        for arg, default in zip(keyword_names, argspec.defaults):
            yield ('--{}'.format(arg),), {'default': default}
    else:
        positional_args = argspec.args

    for arg in positional_args:
        yield (arg,), {}
    if argspec.varargs:
        yield (argspec.varargs,), {'nargs': '*'}
|
|
@ -1,36 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.contrib.benchmark import Benchmark
|
|
||||||
|
|
||||||
|
|
||||||
# Registered as the ``benchmark-start`` subcommand; no docstring on purpose
# (the subcommand decorator would surface it as the argparse description).
@cmdline.subcommand(command_name='benchmark-start')
def start():
    Benchmark.start()
|
|
||||||
|
|
||||||
|
|
||||||
# Registered as the ``benchmark-finish`` subcommand.
@cmdline.subcommand(command_name='benchmark-finish')
def finish():
    Benchmark.finish()
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('benchmark-composite', description="Set the benchmark composite score")
def service(subparser):
    # Builder for the ``benchmark-composite`` subcommand: declares its
    # positional arguments and returns the callable argparse will invoke.
    subparser.add_argument("value", help="The composite score.")
    subparser.add_argument("units", help="The units the composite score represents, i.e., 'reads/sec'.")
    subparser.add_argument("direction", help="'asc' if a lower score is better, 'desc' if a higher score is better.")
    return Benchmark.set_composite_score
|
|
@ -1,32 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""
|
|
||||||
This module loads sub-modules into the python runtime so they can be
|
|
||||||
discovered via the inspect module. In order to prevent flake8 from (rightfully)
|
|
||||||
telling us these are unused modules, throw a ' # noqa' at the end of each import
|
|
||||||
so that the warning is suppressed.
|
|
||||||
"""
|
|
||||||
|
|
||||||
from . import CommandLine # noqa
|
|
||||||
|
|
||||||
"""
|
|
||||||
Import the sub-modules which have decorated subcommands to register with chlp.
|
|
||||||
"""
|
|
||||||
from . import host # noqa
|
|
||||||
from . import benchmark # noqa
|
|
||||||
from . import unitdata # noqa
|
|
||||||
from . import hookenv # noqa
|
|
@ -1,23 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
# Expose selected hookenv helpers as chlp subcommands. NOTE(review):
# ``._wrapped`` presumably unwraps a caching decorator on these helpers —
# verify against charmhelpers.core.hookenv.
cmdline.subcommand('relation-id')(hookenv.relation_id._wrapped)
cmdline.subcommand('service-name')(hookenv.service_name)
cmdline.subcommand('remote-service-name')(hookenv.remote_service_name._wrapped)
|
|
@ -1,31 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import host
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand()
def mounts():
    "List mounts"
    # Delegates to charmhelpers.core.host.mounts(); the docstring above is
    # surfaced as the subcommand description by the decorator.
    return host.mounts()
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('service', description="Control system services")
def service(subparser):
    # Builder for the ``service`` subcommand: declares its positional
    # arguments and returns charmhelpers.core.host.service as the command.
    subparser.add_argument("action", help="The action to perform (start, stop, etc...)")
    subparser.add_argument("service_name", help="Name of the service to control")
    return host.service
|
|
@ -1,39 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from . import cmdline
|
|
||||||
from charmhelpers.core import unitdata
|
|
||||||
|
|
||||||
|
|
||||||
@cmdline.subcommand_builder('unitdata', description="Store and retrieve data")
def unitdata_cmd(subparser):
    """Builder for the ``unitdata`` subcommand with nested get/set parsers."""
    nested = subparser.add_subparsers()

    getter = nested.add_parser('get', help='Retrieve data')
    getter.add_argument('key', help='Key to retrieve the value of')
    getter.set_defaults(action='get', value=None)

    setter = nested.add_parser('set', help='Store data')
    setter.add_argument('key', help='Key to set')
    setter.add_argument('value', help='Value to store')
    setter.set_defaults(action='set')

    def _unitdata_cmd(action, key, value):
        # Dispatch on the action selected by the nested parser above.
        if action == 'get':
            return unitdata.kv().get(key)
        elif action == 'set':
            unitdata.kv().set(key, value)
            unitdata.kv().flush()
            return ''

    return _unitdata_cmd
|
|
@ -1,206 +0,0 @@
|
|||||||
# Copyright 2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
'''
|
|
||||||
A Pythonic API to interact with the charm hook environment.
|
|
||||||
|
|
||||||
:author: Stuart Bishop <stuart.bishop@canonical.com>
|
|
||||||
'''
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
from collections import OrderedDict
|
|
||||||
if six.PY3:
|
|
||||||
from collections import UserDict # pragma: nocover
|
|
||||||
else:
|
|
||||||
from UserDict import IterableUserDict as UserDict # pragma: nocover
|
|
||||||
|
|
||||||
|
|
||||||
class Relations(OrderedDict):
    '''Mapping relation name -> relation id -> Relation.

    >>> rels = Relations()
    >>> rels['sprog']['sprog:12']['client/6']['widget']
    'remote widget'
    >>> rels['sprog']['sprog:12'].local['widget'] = 'local widget'
    >>> rels['sprog']['sprog:12'].local['widget']
    'local widget'
    >>> rels.peer.local['widget']
    'local widget on the peer relation'
    '''
    def __init__(self):
        super(Relations, self).__init__()
        for relname in sorted(hookenv.relation_types()):
            self[relname] = OrderedDict()
            # Order relation ids numerically by their trailing integer so
            # 'db:10' sorts after 'db:9'.
            relids = sorted(hookenv.relation_ids(relname),
                            key=lambda x: int(x.split(':', 1)[-1]))
            for relid in relids:
                self[relname][relid] = Relation(relid)

    @property
    def peer(self):
        # The Relation whose id matches the peer relation, if any.
        peer_relid = hookenv.peer_relation_id()
        for rels in self.values():
            if peer_relid in rels:
                return rels[peer_relid]
|
|
||||||
|
|
||||||
|
|
||||||
class Relation(OrderedDict):
    '''Mapping of unit -> remote RelationInfo for a relation.

    This is an OrderedDict mapping, ordered numerically by
    by unit number.

    Also provides access to the local RelationInfo, and peer RelationInfo
    instances by the 'local' and 'peers' attributes.

    >>> r = Relation('sprog:12')
    >>> r.keys()
    ['client/9', 'client/10'] # Ordered numerically
    >>> r['client/10']['widget'] # A remote RelationInfo setting
    'remote widget'
    >>> r.local['widget'] # The local RelationInfo setting
    'local widget'
    '''
    relid = None    # The relation id.
    relname = None  # The relation name (also known as relation type).
    service = None  # The remote service name, if known.
    local = None    # The local end's RelationInfo.
    peers = None    # Map of peer -> RelationInfo. None if no peer relation.

    def __init__(self, relid):
        # Order remote units numerically by unit number.
        units = sorted(hookenv.related_units(relid),
                       key=lambda u: int(u.split('/', 1)[-1]))
        super(Relation, self).__init__((unit, RelationInfo(relid, unit))
                                       for unit in units)

        self.relname = relid.split(':', 1)[0]
        self.relid = relid
        self.local = RelationInfo(relid, hookenv.local_unit())

        # All remote units belong to the same service; take it from the
        # first entry if one exists.
        for relinfo in self.values():
            self.service = relinfo.service
            break

        # If we have peers, and they have joined both the provided peer
        # relation and this relation, we can peek at their data too.
        # This is useful for creating consensus without leadership.
        peer_relid = hookenv.peer_relation_id()
        if peer_relid and peer_relid != relid:
            peers = hookenv.related_units(peer_relid)
            if peers:
                peers.sort(key=lambda u: int(u.split('/', 1)[-1]))
                # Peer data is deliberately read on *this* relation (relid),
                # not the peer relation — see the comment above.
                self.peers = OrderedDict((peer, RelationInfo(relid, peer))
                                         for peer in peers)
            else:
                self.peers = OrderedDict()
        else:
            self.peers = None

    def __str__(self):
        return '{} ({})'.format(self.relid, self.service)
|
|
||||||
|
|
||||||
|
|
||||||
class RelationInfo(UserDict):
    '''The bag of data at an end of a relation.

    Every unit participating in a relation has a single bag of
    data associated with that relation. This is that bag.

    The bag of data for the local unit may be updated. Remote data
    is immutable and will remain static for the duration of the hook.

    Changes made to the local units relation data only become visible
    to other units after the hook completes successfully. If the hook
    does not complete successfully, the changes are rolled back.

    Unlike standard Python mappings, setting an item to None is the
    same as deleting it.

    >>> relinfo = RelationInfo('db:12') # Default is the local unit.
    >>> relinfo['user'] = 'fred'
    >>> relinfo['user']
    'fred'
    >>> relinfo['user'] = None
    >>> 'fred' in relinfo
    False

    This class wraps hookenv.relation_get and hookenv.relation_set.
    All caching is left up to these two methods to avoid synchronization
    issues. Data is only loaded on demand.
    '''
    relid = None    # The relation id.
    relname = None  # The relation name (also know as the relation type).
    unit = None     # The unit id.
    number = None   # The unit number (integer).
    service = None  # The service name.

    def __init__(self, relid, unit):
        self.relname = relid.partition(':')[0]
        self.relid = relid
        self.unit = unit
        self.service, num = unit.split('/', 1)
        self.number = int(num)

    def __str__(self):
        return '{} ({})'.format(self.relid, self.unit)

    @property
    def data(self):
        # Loaded on demand; caching is delegated to hookenv.relation_get.
        return hookenv.relation_get(rid=self.relid, unit=self.unit)

    def __setitem__(self, key, value):
        if self.unit != hookenv.local_unit():
            raise TypeError('Attempting to set {} on remote unit {}'
                            ''.format(key, self.unit))
        if value is not None and not isinstance(value, six.string_types):
            # We don't do implicit casting. This would cause simple
            # types like integers to be read back as strings in subsequent
            # hooks, and mutable types would require a lot of wrapping
            # to ensure relation-set gets called when they are mutated.
            raise ValueError('Only string values allowed')
        hookenv.relation_set(self.relid, {key: value})

    def __delitem__(self, key):
        # Deleting a key and setting it to null is the same thing in
        # Juju relations.
        self[key] = None
|
|
||||||
|
|
||||||
|
|
||||||
class Leader(UserDict):
    '''Juju leadership settings exposed as a mapping.

    Reads via hookenv.leader_get; writes via hookenv.leader_set and are
    only permitted on the leader. Setting a key to None deletes it.
    '''
    def __init__(self):
        pass  # Don't call superclass initializer, as it will nuke self.data

    @property
    def data(self):
        return hookenv.leader_get()

    def __setitem__(self, key, value):
        if not hookenv.is_leader():
            raise TypeError('Not the leader. Cannot change leader settings.')
        if value is not None and not isinstance(value, six.string_types):
            # We don't do implicit casting. This would cause simple
            # types like integers to be read back as strings in subsequent
            # hooks, and mutable types would require a lot of wrapping
            # to ensure leader-set gets called when they are mutated.
            raise ValueError('Only string values allowed')
        hookenv.leader_set({key: value})

    def __delitem__(self, key):
        # Deleting a key and setting it to null is the same thing in
        # Juju leadership settings.
        self[key] = None
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,95 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import amulet
|
|
||||||
import os
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
class AmuletDeployment(object):
|
|
||||||
"""Amulet deployment.
|
|
||||||
|
|
||||||
This class provides generic Amulet deployment and test runner
|
|
||||||
methods.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, series=None):
|
|
||||||
"""Initialize the deployment environment."""
|
|
||||||
self.series = None
|
|
||||||
|
|
||||||
if series:
|
|
||||||
self.series = series
|
|
||||||
self.d = amulet.Deployment(series=self.series)
|
|
||||||
else:
|
|
||||||
self.d = amulet.Deployment()
|
|
||||||
|
|
||||||
def _add_services(self, this_service, other_services):
|
|
||||||
"""Add services.
|
|
||||||
|
|
||||||
Add services to the deployment where this_service is the local charm
|
|
||||||
that we're testing and other_services are the other services that
|
|
||||||
are being used in the local amulet tests.
|
|
||||||
"""
|
|
||||||
if this_service['name'] != os.path.basename(os.getcwd()):
|
|
||||||
s = this_service['name']
|
|
||||||
msg = "The charm's root directory name needs to be {}".format(s)
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
if 'units' not in this_service:
|
|
||||||
this_service['units'] = 1
|
|
||||||
|
|
||||||
self.d.add(this_service['name'], units=this_service['units'],
|
|
||||||
constraints=this_service.get('constraints'))
|
|
||||||
|
|
||||||
for svc in other_services:
|
|
||||||
if 'location' in svc:
|
|
||||||
branch_location = svc['location']
|
|
||||||
elif self.series:
|
|
||||||
branch_location = 'cs:{}/{}'.format(self.series, svc['name']),
|
|
||||||
else:
|
|
||||||
branch_location = None
|
|
||||||
|
|
||||||
if 'units' not in svc:
|
|
||||||
svc['units'] = 1
|
|
||||||
|
|
||||||
self.d.add(svc['name'], charm=branch_location, units=svc['units'],
|
|
||||||
constraints=svc.get('constraints'))
|
|
||||||
|
|
||||||
def _add_relations(self, relations):
|
|
||||||
"""Add all of the relations for the services."""
|
|
||||||
for k, v in six.iteritems(relations):
|
|
||||||
self.d.relate(k, v)
|
|
||||||
|
|
||||||
def _configure_services(self, configs):
|
|
||||||
"""Configure all of the services."""
|
|
||||||
for service, config in six.iteritems(configs):
|
|
||||||
self.d.configure(service, config)
|
|
||||||
|
|
||||||
def _deploy(self):
|
|
||||||
"""Deploy environment and wait for all hooks to finish executing."""
|
|
||||||
try:
|
|
||||||
self.d.setup(timeout=900)
|
|
||||||
self.d.sentry.wait(timeout=900)
|
|
||||||
except amulet.helpers.TimeoutError:
|
|
||||||
amulet.raise_status(amulet.FAIL, msg="Deployment timed out")
|
|
||||||
except Exception:
|
|
||||||
raise
|
|
||||||
|
|
||||||
def run_tests(self):
|
|
||||||
"""Run all of the methods that are prefixed with 'test_'."""
|
|
||||||
for test in dir(self):
|
|
||||||
if test.startswith('test_'):
|
|
||||||
getattr(self, test)()
|
|
@ -1,818 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import io
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import socket
|
|
||||||
import subprocess
|
|
||||||
import sys
|
|
||||||
import time
|
|
||||||
import uuid
|
|
||||||
|
|
||||||
import amulet
|
|
||||||
import distro_info
|
|
||||||
import six
|
|
||||||
from six.moves import configparser
|
|
||||||
if six.PY3:
|
|
||||||
from urllib import parse as urlparse
|
|
||||||
else:
|
|
||||||
import urlparse
|
|
||||||
|
|
||||||
|
|
||||||
class AmuletUtils(object):
|
|
||||||
"""Amulet utilities.
|
|
||||||
|
|
||||||
This class provides common utility functions that are used by Amulet
|
|
||||||
tests.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, log_level=logging.ERROR):
|
|
||||||
self.log = self.get_logger(level=log_level)
|
|
||||||
self.ubuntu_releases = self.get_ubuntu_releases()
|
|
||||||
|
|
||||||
def get_logger(self, name="amulet-logger", level=logging.DEBUG):
|
|
||||||
"""Get a logger object that will log to stdout."""
|
|
||||||
log = logging
|
|
||||||
logger = log.getLogger(name)
|
|
||||||
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
|
||||||
"%(levelname)s: %(message)s")
|
|
||||||
|
|
||||||
handler = log.StreamHandler(stream=sys.stdout)
|
|
||||||
handler.setLevel(level)
|
|
||||||
handler.setFormatter(fmt)
|
|
||||||
|
|
||||||
logger.addHandler(handler)
|
|
||||||
logger.setLevel(level)
|
|
||||||
|
|
||||||
return logger
|
|
||||||
|
|
||||||
def valid_ip(self, ip):
|
|
||||||
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
def valid_url(self, url):
|
|
||||||
p = re.compile(
|
|
||||||
r'^(?:http|ftp)s?://'
|
|
||||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # noqa
|
|
||||||
r'localhost|'
|
|
||||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'
|
|
||||||
r'(?::\d+)?'
|
|
||||||
r'(?:/?|[/?]\S+)$',
|
|
||||||
re.IGNORECASE)
|
|
||||||
if p.match(url):
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
def get_ubuntu_release_from_sentry(self, sentry_unit):
|
|
||||||
"""Get Ubuntu release codename from sentry unit.
|
|
||||||
|
|
||||||
:param sentry_unit: amulet sentry/service unit pointer
|
|
||||||
:returns: list of strings - release codename, failure message
|
|
||||||
"""
|
|
||||||
msg = None
|
|
||||||
cmd = 'lsb_release -cs'
|
|
||||||
release, code = sentry_unit.run(cmd)
|
|
||||||
if code == 0:
|
|
||||||
self.log.debug('{} lsb_release: {}'.format(
|
|
||||||
sentry_unit.info['unit_name'], release))
|
|
||||||
else:
|
|
||||||
msg = ('{} `{}` returned {} '
|
|
||||||
'{}'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, release, code))
|
|
||||||
if release not in self.ubuntu_releases:
|
|
||||||
msg = ("Release ({}) not found in Ubuntu releases "
|
|
||||||
"({})".format(release, self.ubuntu_releases))
|
|
||||||
return release, msg
|
|
||||||
|
|
||||||
def validate_services(self, commands):
|
|
||||||
"""Validate that lists of commands succeed on service units. Can be
|
|
||||||
used to verify system services are running on the corresponding
|
|
||||||
service units.
|
|
||||||
|
|
||||||
:param commands: dict with sentry keys and arbitrary command list vals
|
|
||||||
:returns: None if successful, Failure string message otherwise
|
|
||||||
"""
|
|
||||||
self.log.debug('Checking status of system services...')
|
|
||||||
|
|
||||||
# /!\ DEPRECATION WARNING (beisner):
|
|
||||||
# New and existing tests should be rewritten to use
|
|
||||||
# validate_services_by_name() as it is aware of init systems.
|
|
||||||
self.log.warn('DEPRECATION WARNING: use '
|
|
||||||
'validate_services_by_name instead of validate_services '
|
|
||||||
'due to init system differences.')
|
|
||||||
|
|
||||||
for k, v in six.iteritems(commands):
|
|
||||||
for cmd in v:
|
|
||||||
output, code = k.run(cmd)
|
|
||||||
self.log.debug('{} `{}` returned '
|
|
||||||
'{}'.format(k.info['unit_name'],
|
|
||||||
cmd, code))
|
|
||||||
if code != 0:
|
|
||||||
return "command `{}` returned {}".format(cmd, str(code))
|
|
||||||
return None
|
|
||||||
|
|
||||||
def validate_services_by_name(self, sentry_services):
|
|
||||||
"""Validate system service status by service name, automatically
|
|
||||||
detecting init system based on Ubuntu release codename.
|
|
||||||
|
|
||||||
:param sentry_services: dict with sentry keys and svc list values
|
|
||||||
:returns: None if successful, Failure string message otherwise
|
|
||||||
"""
|
|
||||||
self.log.debug('Checking status of system services...')
|
|
||||||
|
|
||||||
# Point at which systemd became a thing
|
|
||||||
systemd_switch = self.ubuntu_releases.index('vivid')
|
|
||||||
|
|
||||||
for sentry_unit, services_list in six.iteritems(sentry_services):
|
|
||||||
# Get lsb_release codename from unit
|
|
||||||
release, ret = self.get_ubuntu_release_from_sentry(sentry_unit)
|
|
||||||
if ret:
|
|
||||||
return ret
|
|
||||||
|
|
||||||
for service_name in services_list:
|
|
||||||
if (self.ubuntu_releases.index(release) >= systemd_switch or
|
|
||||||
service_name in ['rabbitmq-server', 'apache2']):
|
|
||||||
# init is systemd (or regular sysv)
|
|
||||||
cmd = 'sudo service {} status'.format(service_name)
|
|
||||||
output, code = sentry_unit.run(cmd)
|
|
||||||
service_running = code == 0
|
|
||||||
elif self.ubuntu_releases.index(release) < systemd_switch:
|
|
||||||
# init is upstart
|
|
||||||
cmd = 'sudo status {}'.format(service_name)
|
|
||||||
output, code = sentry_unit.run(cmd)
|
|
||||||
service_running = code == 0 and "start/running" in output
|
|
||||||
|
|
||||||
self.log.debug('{} `{}` returned '
|
|
||||||
'{}'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, code))
|
|
||||||
if not service_running:
|
|
||||||
return u"command `{}` returned {} {}".format(
|
|
||||||
cmd, output, str(code))
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _get_config(self, unit, filename):
|
|
||||||
"""Get a ConfigParser object for parsing a unit's config file."""
|
|
||||||
file_contents = unit.file_contents(filename)
|
|
||||||
|
|
||||||
# NOTE(beisner): by default, ConfigParser does not handle options
|
|
||||||
# with no value, such as the flags used in the mysql my.cnf file.
|
|
||||||
# https://bugs.python.org/issue7005
|
|
||||||
config = configparser.ConfigParser(allow_no_value=True)
|
|
||||||
config.readfp(io.StringIO(file_contents))
|
|
||||||
return config
|
|
||||||
|
|
||||||
def validate_config_data(self, sentry_unit, config_file, section,
|
|
||||||
expected):
|
|
||||||
"""Validate config file data.
|
|
||||||
|
|
||||||
Verify that the specified section of the config file contains
|
|
||||||
the expected option key:value pairs.
|
|
||||||
|
|
||||||
Compare expected dictionary data vs actual dictionary data.
|
|
||||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
|
||||||
longs, or can be a function that evaluates a variable and returns a
|
|
||||||
bool.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating config file data ({} in {} on {})'
|
|
||||||
'...'.format(section, config_file,
|
|
||||||
sentry_unit.info['unit_name']))
|
|
||||||
config = self._get_config(sentry_unit, config_file)
|
|
||||||
|
|
||||||
if section != 'DEFAULT' and not config.has_section(section):
|
|
||||||
return "section [{}] does not exist".format(section)
|
|
||||||
|
|
||||||
for k in expected.keys():
|
|
||||||
if not config.has_option(section, k):
|
|
||||||
return "section [{}] is missing option {}".format(section, k)
|
|
||||||
|
|
||||||
actual = config.get(section, k)
|
|
||||||
v = expected[k]
|
|
||||||
if (isinstance(v, six.string_types) or
|
|
||||||
isinstance(v, bool) or
|
|
||||||
isinstance(v, six.integer_types)):
|
|
||||||
# handle explicit values
|
|
||||||
if actual != v:
|
|
||||||
return "section [{}] {}:{} != expected {}:{}".format(
|
|
||||||
section, k, actual, k, expected[k])
|
|
||||||
# handle function pointers, such as not_null or valid_ip
|
|
||||||
elif not v(actual):
|
|
||||||
return "section [{}] {}:{} != expected {}:{}".format(
|
|
||||||
section, k, actual, k, expected[k])
|
|
||||||
return None
|
|
||||||
|
|
||||||
def _validate_dict_data(self, expected, actual):
|
|
||||||
"""Validate dictionary data.
|
|
||||||
|
|
||||||
Compare expected dictionary data vs actual dictionary data.
|
|
||||||
The values in the 'expected' dictionary can be strings, bools, ints,
|
|
||||||
longs, or can be a function that evaluates a variable and returns a
|
|
||||||
bool.
|
|
||||||
"""
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
self.log.debug('expected: {}'.format(repr(expected)))
|
|
||||||
|
|
||||||
for k, v in six.iteritems(expected):
|
|
||||||
if k in actual:
|
|
||||||
if (isinstance(v, six.string_types) or
|
|
||||||
isinstance(v, bool) or
|
|
||||||
isinstance(v, six.integer_types)):
|
|
||||||
# handle explicit values
|
|
||||||
if v != actual[k]:
|
|
||||||
return "{}:{}".format(k, actual[k])
|
|
||||||
# handle function pointers, such as not_null or valid_ip
|
|
||||||
elif not v(actual[k]):
|
|
||||||
return "{}:{}".format(k, actual[k])
|
|
||||||
else:
|
|
||||||
return "key '{}' does not exist".format(k)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def validate_relation_data(self, sentry_unit, relation, expected):
|
|
||||||
"""Validate actual relation data based on expected relation data."""
|
|
||||||
actual = sentry_unit.relation(relation[0], relation[1])
|
|
||||||
return self._validate_dict_data(expected, actual)
|
|
||||||
|
|
||||||
def _validate_list_data(self, expected, actual):
|
|
||||||
"""Compare expected list vs actual list data."""
|
|
||||||
for e in expected:
|
|
||||||
if e not in actual:
|
|
||||||
return "expected item {} not found in actual list".format(e)
|
|
||||||
return None
|
|
||||||
|
|
||||||
def not_null(self, string):
|
|
||||||
if string is not None:
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
def _get_file_mtime(self, sentry_unit, filename):
|
|
||||||
"""Get last modification time of file."""
|
|
||||||
return sentry_unit.file_stat(filename)['mtime']
|
|
||||||
|
|
||||||
def _get_dir_mtime(self, sentry_unit, directory):
|
|
||||||
"""Get last modification time of directory."""
|
|
||||||
return sentry_unit.directory_stat(directory)['mtime']
|
|
||||||
|
|
||||||
def _get_proc_start_time(self, sentry_unit, service, pgrep_full=None):
|
|
||||||
"""Get start time of a process based on the last modification time
|
|
||||||
of the /proc/pid directory.
|
|
||||||
|
|
||||||
:sentry_unit: The sentry unit to check for the service on
|
|
||||||
:service: service name to look for in process table
|
|
||||||
:pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
|
||||||
:returns: epoch time of service process start
|
|
||||||
:param commands: list of bash commands
|
|
||||||
:param sentry_units: list of sentry unit pointers
|
|
||||||
:returns: None if successful; Failure message otherwise
|
|
||||||
"""
|
|
||||||
if pgrep_full is not None:
|
|
||||||
# /!\ DEPRECATION WARNING (beisner):
|
|
||||||
# No longer implemented, as pidof is now used instead of pgrep.
|
|
||||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
|
||||||
self.log.warn('DEPRECATION WARNING: pgrep_full bool is no '
|
|
||||||
'longer implemented re: lp 1474030.')
|
|
||||||
|
|
||||||
pid_list = self.get_process_id_list(sentry_unit, service)
|
|
||||||
pid = pid_list[0]
|
|
||||||
proc_dir = '/proc/{}'.format(pid)
|
|
||||||
self.log.debug('Pid for {} on {}: {}'.format(
|
|
||||||
service, sentry_unit.info['unit_name'], pid))
|
|
||||||
|
|
||||||
return self._get_dir_mtime(sentry_unit, proc_dir)
|
|
||||||
|
|
||||||
def service_restarted(self, sentry_unit, service, filename,
|
|
||||||
pgrep_full=None, sleep_time=20):
|
|
||||||
"""Check if service was restarted.
|
|
||||||
|
|
||||||
Compare a service's start time vs a file's last modification time
|
|
||||||
(such as a config file for that service) to determine if the service
|
|
||||||
has been restarted.
|
|
||||||
"""
|
|
||||||
# /!\ DEPRECATION WARNING (beisner):
|
|
||||||
# This method is prone to races in that no before-time is known.
|
|
||||||
# Use validate_service_config_changed instead.
|
|
||||||
|
|
||||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
|
||||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
|
||||||
# deprecation WARNS. lp1474030
|
|
||||||
self.log.warn('DEPRECATION WARNING: use '
|
|
||||||
'validate_service_config_changed instead of '
|
|
||||||
'service_restarted due to known races.')
|
|
||||||
|
|
||||||
time.sleep(sleep_time)
|
|
||||||
if (self._get_proc_start_time(sentry_unit, service, pgrep_full) >=
|
|
||||||
self._get_file_mtime(sentry_unit, filename)):
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
return False
|
|
||||||
|
|
||||||
def service_restarted_since(self, sentry_unit, mtime, service,
|
|
||||||
pgrep_full=None, sleep_time=20,
|
|
||||||
retry_count=30, retry_sleep_time=10):
|
|
||||||
"""Check if service was been started after a given time.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
sentry_unit (sentry): The sentry unit to check for the service on
|
|
||||||
mtime (float): The epoch time to check against
|
|
||||||
service (string): service name to look for in process table
|
|
||||||
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
|
||||||
sleep_time (int): Initial sleep time (s) before looking for file
|
|
||||||
retry_sleep_time (int): Time (s) to sleep between retries
|
|
||||||
retry_count (int): If file is not found, how many times to retry
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
bool: True if service found and its start time it newer than mtime,
|
|
||||||
False if service is older than mtime or if service was
|
|
||||||
not found.
|
|
||||||
"""
|
|
||||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
|
||||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
|
||||||
# deprecation WARNS. lp1474030
|
|
||||||
|
|
||||||
unit_name = sentry_unit.info['unit_name']
|
|
||||||
self.log.debug('Checking that %s service restarted since %s on '
|
|
||||||
'%s' % (service, mtime, unit_name))
|
|
||||||
time.sleep(sleep_time)
|
|
||||||
proc_start_time = None
|
|
||||||
tries = 0
|
|
||||||
while tries <= retry_count and not proc_start_time:
|
|
||||||
try:
|
|
||||||
proc_start_time = self._get_proc_start_time(sentry_unit,
|
|
||||||
service,
|
|
||||||
pgrep_full)
|
|
||||||
self.log.debug('Attempt {} to get {} proc start time on {} '
|
|
||||||
'OK'.format(tries, service, unit_name))
|
|
||||||
except IOError as e:
|
|
||||||
# NOTE(beisner) - race avoidance, proc may not exist yet.
|
|
||||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
|
||||||
self.log.debug('Attempt {} to get {} proc start time on {} '
|
|
||||||
'failed\n{}'.format(tries, service,
|
|
||||||
unit_name, e))
|
|
||||||
time.sleep(retry_sleep_time)
|
|
||||||
tries += 1
|
|
||||||
|
|
||||||
if not proc_start_time:
|
|
||||||
self.log.warn('No proc start time found, assuming service did '
|
|
||||||
'not start')
|
|
||||||
return False
|
|
||||||
if proc_start_time >= mtime:
|
|
||||||
self.log.debug('Proc start time is newer than provided mtime'
|
|
||||||
'(%s >= %s) on %s (OK)' % (proc_start_time,
|
|
||||||
mtime, unit_name))
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
self.log.warn('Proc start time (%s) is older than provided mtime '
|
|
||||||
'(%s) on %s, service did not '
|
|
||||||
'restart' % (proc_start_time, mtime, unit_name))
|
|
||||||
return False
|
|
||||||
|
|
||||||
def config_updated_since(self, sentry_unit, filename, mtime,
|
|
||||||
sleep_time=20, retry_count=30,
|
|
||||||
retry_sleep_time=10):
|
|
||||||
"""Check if file was modified after a given time.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
sentry_unit (sentry): The sentry unit to check the file mtime on
|
|
||||||
filename (string): The file to check mtime of
|
|
||||||
mtime (float): The epoch time to check against
|
|
||||||
sleep_time (int): Initial sleep time (s) before looking for file
|
|
||||||
retry_sleep_time (int): Time (s) to sleep between retries
|
|
||||||
retry_count (int): If file is not found, how many times to retry
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
bool: True if file was modified more recently than mtime, False if
|
|
||||||
file was modified before mtime, or if file not found.
|
|
||||||
"""
|
|
||||||
unit_name = sentry_unit.info['unit_name']
|
|
||||||
self.log.debug('Checking that %s updated since %s on '
|
|
||||||
'%s' % (filename, mtime, unit_name))
|
|
||||||
time.sleep(sleep_time)
|
|
||||||
file_mtime = None
|
|
||||||
tries = 0
|
|
||||||
while tries <= retry_count and not file_mtime:
|
|
||||||
try:
|
|
||||||
file_mtime = self._get_file_mtime(sentry_unit, filename)
|
|
||||||
self.log.debug('Attempt {} to get {} file mtime on {} '
|
|
||||||
'OK'.format(tries, filename, unit_name))
|
|
||||||
except IOError as e:
|
|
||||||
# NOTE(beisner) - race avoidance, file may not exist yet.
|
|
||||||
# https://bugs.launchpad.net/charm-helpers/+bug/1474030
|
|
||||||
self.log.debug('Attempt {} to get {} file mtime on {} '
|
|
||||||
'failed\n{}'.format(tries, filename,
|
|
||||||
unit_name, e))
|
|
||||||
time.sleep(retry_sleep_time)
|
|
||||||
tries += 1
|
|
||||||
|
|
||||||
if not file_mtime:
|
|
||||||
self.log.warn('Could not determine file mtime, assuming '
|
|
||||||
'file does not exist')
|
|
||||||
return False
|
|
||||||
|
|
||||||
if file_mtime >= mtime:
|
|
||||||
self.log.debug('File mtime is newer than provided mtime '
|
|
||||||
'(%s >= %s) on %s (OK)' % (file_mtime,
|
|
||||||
mtime, unit_name))
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
self.log.warn('File mtime is older than provided mtime'
|
|
||||||
'(%s < on %s) on %s' % (file_mtime,
|
|
||||||
mtime, unit_name))
|
|
||||||
return False
|
|
||||||
|
|
||||||
def validate_service_config_changed(self, sentry_unit, mtime, service,
|
|
||||||
filename, pgrep_full=None,
|
|
||||||
sleep_time=20, retry_count=30,
|
|
||||||
retry_sleep_time=10):
|
|
||||||
"""Check service and file were updated after mtime
|
|
||||||
|
|
||||||
Args:
|
|
||||||
sentry_unit (sentry): The sentry unit to check for the service on
|
|
||||||
mtime (float): The epoch time to check against
|
|
||||||
service (string): service name to look for in process table
|
|
||||||
filename (string): The file to check mtime of
|
|
||||||
pgrep_full: [Deprecated] Use full command line search mode with pgrep
|
|
||||||
sleep_time (int): Initial sleep in seconds to pass to test helpers
|
|
||||||
retry_count (int): If service is not found, how many times to retry
|
|
||||||
retry_sleep_time (int): Time in seconds to wait between retries
|
|
||||||
|
|
||||||
Typical Usage:
|
|
||||||
u = OpenStackAmuletUtils(ERROR)
|
|
||||||
...
|
|
||||||
mtime = u.get_sentry_time(self.cinder_sentry)
|
|
||||||
self.d.configure('cinder', {'verbose': 'True', 'debug': 'True'})
|
|
||||||
if not u.validate_service_config_changed(self.cinder_sentry,
|
|
||||||
mtime,
|
|
||||||
'cinder-api',
|
|
||||||
'/etc/cinder/cinder.conf')
|
|
||||||
amulet.raise_status(amulet.FAIL, msg='update failed')
|
|
||||||
Returns:
|
|
||||||
bool: True if both service and file where updated/restarted after
|
|
||||||
mtime, False if service is older than mtime or if service was
|
|
||||||
not found or if filename was modified before mtime.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# NOTE(beisner) pgrep_full is no longer implemented, as pidof is now
|
|
||||||
# used instead of pgrep. pgrep_full is still passed through to ensure
|
|
||||||
# deprecation WARNS. lp1474030
|
|
||||||
|
|
||||||
service_restart = self.service_restarted_since(
|
|
||||||
sentry_unit, mtime,
|
|
||||||
service,
|
|
||||||
pgrep_full=pgrep_full,
|
|
||||||
sleep_time=sleep_time,
|
|
||||||
retry_count=retry_count,
|
|
||||||
retry_sleep_time=retry_sleep_time)
|
|
||||||
|
|
||||||
config_update = self.config_updated_since(
|
|
||||||
sentry_unit,
|
|
||||||
filename,
|
|
||||||
mtime,
|
|
||||||
sleep_time=sleep_time,
|
|
||||||
retry_count=retry_count,
|
|
||||||
retry_sleep_time=retry_sleep_time)
|
|
||||||
|
|
||||||
return service_restart and config_update
|
|
||||||
|
|
||||||
def get_sentry_time(self, sentry_unit):
|
|
||||||
"""Return current epoch time on a sentry"""
|
|
||||||
cmd = "date +'%s'"
|
|
||||||
return float(sentry_unit.run(cmd)[0])
|
|
||||||
|
|
||||||
def relation_error(self, name, data):
|
|
||||||
return 'unexpected relation data in {} - {}'.format(name, data)
|
|
||||||
|
|
||||||
def endpoint_error(self, name, data):
|
|
||||||
return 'unexpected endpoint data in {} - {}'.format(name, data)
|
|
||||||
|
|
||||||
def get_ubuntu_releases(self):
|
|
||||||
"""Return a list of all Ubuntu releases in order of release."""
|
|
||||||
_d = distro_info.UbuntuDistroInfo()
|
|
||||||
_release_list = _d.all
|
|
||||||
return _release_list
|
|
||||||
|
|
||||||
def file_to_url(self, file_rel_path):
|
|
||||||
"""Convert a relative file path to a file URL."""
|
|
||||||
_abs_path = os.path.abspath(file_rel_path)
|
|
||||||
return urlparse.urlparse(_abs_path, scheme='file').geturl()
|
|
||||||
|
|
||||||
def check_commands_on_units(self, commands, sentry_units):
|
|
||||||
"""Check that all commands in a list exit zero on all
|
|
||||||
sentry units in a list.
|
|
||||||
|
|
||||||
:param commands: list of bash commands
|
|
||||||
:param sentry_units: list of sentry unit pointers
|
|
||||||
:returns: None if successful; Failure message otherwise
|
|
||||||
"""
|
|
||||||
self.log.debug('Checking exit codes for {} commands on {} '
|
|
||||||
'sentry units...'.format(len(commands),
|
|
||||||
len(sentry_units)))
|
|
||||||
for sentry_unit in sentry_units:
|
|
||||||
for cmd in commands:
|
|
||||||
output, code = sentry_unit.run(cmd)
|
|
||||||
if code == 0:
|
|
||||||
self.log.debug('{} `{}` returned {} '
|
|
||||||
'(OK)'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, code))
|
|
||||||
else:
|
|
||||||
return ('{} `{}` returned {} '
|
|
||||||
'{}'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, code, output))
|
|
||||||
return None
|
|
||||||
|
|
||||||
def get_process_id_list(self, sentry_unit, process_name,
|
|
||||||
expect_success=True):
|
|
||||||
"""Get a list of process ID(s) from a single sentry juju unit
|
|
||||||
for a single process name.
|
|
||||||
|
|
||||||
:param sentry_unit: Amulet sentry instance (juju unit)
|
|
||||||
:param process_name: Process name
|
|
||||||
:param expect_success: If False, expect the PID to be missing,
|
|
||||||
raise if it is present.
|
|
||||||
:returns: List of process IDs
|
|
||||||
"""
|
|
||||||
cmd = 'pidof -x {}'.format(process_name)
|
|
||||||
if not expect_success:
|
|
||||||
cmd += " || exit 0 && exit 1"
|
|
||||||
output, code = sentry_unit.run(cmd)
|
|
||||||
if code != 0:
|
|
||||||
msg = ('{} `{}` returned {} '
|
|
||||||
'{}'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, code, output))
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
return str(output).split()
|
|
||||||
|
|
||||||
def get_unit_process_ids(self, unit_processes, expect_success=True):
|
|
||||||
"""Construct a dict containing unit sentries, process names, and
|
|
||||||
process IDs.
|
|
||||||
|
|
||||||
:param unit_processes: A dictionary of Amulet sentry instance
|
|
||||||
to list of process names.
|
|
||||||
:param expect_success: if False expect the processes to not be
|
|
||||||
running, raise if they are.
|
|
||||||
:returns: Dictionary of Amulet sentry instance to dictionary
|
|
||||||
of process names to PIDs.
|
|
||||||
"""
|
|
||||||
pid_dict = {}
|
|
||||||
for sentry_unit, process_list in six.iteritems(unit_processes):
|
|
||||||
pid_dict[sentry_unit] = {}
|
|
||||||
for process in process_list:
|
|
||||||
pids = self.get_process_id_list(
|
|
||||||
sentry_unit, process, expect_success=expect_success)
|
|
||||||
pid_dict[sentry_unit].update({process: pids})
|
|
||||||
return pid_dict
|
|
||||||
|
|
||||||
def validate_unit_process_ids(self, expected, actual):
|
|
||||||
"""Validate process id quantities for services on units."""
|
|
||||||
self.log.debug('Checking units for running processes...')
|
|
||||||
self.log.debug('Expected PIDs: {}'.format(expected))
|
|
||||||
self.log.debug('Actual PIDs: {}'.format(actual))
|
|
||||||
|
|
||||||
if len(actual) != len(expected):
|
|
||||||
return ('Unit count mismatch. expected, actual: {}, '
|
|
||||||
'{} '.format(len(expected), len(actual)))
|
|
||||||
|
|
||||||
for (e_sentry, e_proc_names) in six.iteritems(expected):
|
|
||||||
e_sentry_name = e_sentry.info['unit_name']
|
|
||||||
if e_sentry in actual.keys():
|
|
||||||
a_proc_names = actual[e_sentry]
|
|
||||||
else:
|
|
||||||
return ('Expected sentry ({}) not found in actual dict data.'
|
|
||||||
'{}'.format(e_sentry_name, e_sentry))
|
|
||||||
|
|
||||||
if len(e_proc_names.keys()) != len(a_proc_names.keys()):
|
|
||||||
return ('Process name count mismatch. expected, actual: {}, '
|
|
||||||
'{}'.format(len(expected), len(actual)))
|
|
||||||
|
|
||||||
for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
|
|
||||||
zip(e_proc_names.items(), a_proc_names.items()):
|
|
||||||
if e_proc_name != a_proc_name:
|
|
||||||
return ('Process name mismatch. expected, actual: {}, '
|
|
||||||
'{}'.format(e_proc_name, a_proc_name))
|
|
||||||
|
|
||||||
a_pids_length = len(a_pids)
|
|
||||||
fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
|
|
||||||
'{}, {} ({})'.format(e_sentry_name, e_proc_name,
|
|
||||||
e_pids_length, a_pids_length,
|
|
||||||
a_pids))
|
|
||||||
|
|
||||||
# If expected is not bool, ensure PID quantities match
|
|
||||||
if not isinstance(e_pids_length, bool) and \
|
|
||||||
a_pids_length != e_pids_length:
|
|
||||||
return fail_msg
|
|
||||||
# If expected is bool True, ensure 1 or more PIDs exist
|
|
||||||
elif isinstance(e_pids_length, bool) and \
|
|
||||||
e_pids_length is True and a_pids_length < 1:
|
|
||||||
return fail_msg
|
|
||||||
# If expected is bool False, ensure 0 PIDs exist
|
|
||||||
elif isinstance(e_pids_length, bool) and \
|
|
||||||
e_pids_length is False and a_pids_length != 0:
|
|
||||||
return fail_msg
|
|
||||||
else:
|
|
||||||
self.log.debug('PID check OK: {} {} {}: '
|
|
||||||
'{}'.format(e_sentry_name, e_proc_name,
|
|
||||||
e_pids_length, a_pids))
|
|
||||||
return None
|
|
||||||
|
|
||||||
def validate_list_of_identical_dicts(self, list_of_dicts):
|
|
||||||
"""Check that all dicts within a list are identical."""
|
|
||||||
hashes = []
|
|
||||||
for _dict in list_of_dicts:
|
|
||||||
hashes.append(hash(frozenset(_dict.items())))
|
|
||||||
|
|
||||||
self.log.debug('Hashes: {}'.format(hashes))
|
|
||||||
if len(set(hashes)) == 1:
|
|
||||||
self.log.debug('Dicts within list are identical')
|
|
||||||
else:
|
|
||||||
return 'Dicts within list are not identical'
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
def validate_sectionless_conf(self, file_contents, expected):
|
|
||||||
"""A crude conf parser. Useful to inspect configuration files which
|
|
||||||
do not have section headers (as would be necessary in order to use
|
|
||||||
the configparser). Such as openstack-dashboard or rabbitmq confs."""
|
|
||||||
for line in file_contents.split('\n'):
|
|
||||||
if '=' in line:
|
|
||||||
args = line.split('=')
|
|
||||||
if len(args) <= 1:
|
|
||||||
continue
|
|
||||||
key = args[0].strip()
|
|
||||||
value = args[1].strip()
|
|
||||||
if key in expected.keys():
|
|
||||||
if expected[key] != value:
|
|
||||||
msg = ('Config mismatch. Expected, actual: {}, '
|
|
||||||
'{}'.format(expected[key], value))
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
def get_unit_hostnames(self, units):
|
|
||||||
"""Return a dict of juju unit names to hostnames."""
|
|
||||||
host_names = {}
|
|
||||||
for unit in units:
|
|
||||||
host_names[unit.info['unit_name']] = \
|
|
||||||
str(unit.file_contents('/etc/hostname').strip())
|
|
||||||
self.log.debug('Unit host names: {}'.format(host_names))
|
|
||||||
return host_names
|
|
||||||
|
|
||||||
def run_cmd_unit(self, sentry_unit, cmd):
|
|
||||||
"""Run a command on a unit, return the output and exit code."""
|
|
||||||
output, code = sentry_unit.run(cmd)
|
|
||||||
if code == 0:
|
|
||||||
self.log.debug('{} `{}` command returned {} '
|
|
||||||
'(OK)'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, code))
|
|
||||||
else:
|
|
||||||
msg = ('{} `{}` command returned {} '
|
|
||||||
'{}'.format(sentry_unit.info['unit_name'],
|
|
||||||
cmd, code, output))
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
return str(output), code
|
|
||||||
|
|
||||||
def file_exists_on_unit(self, sentry_unit, file_name):
    """Check if a file exists on a unit."""
    try:
        sentry_unit.file_stat(file_name)
    except IOError:
        # stat failed -> the file is not present on the unit
        return False
    except Exception as e:
        # Anything else (transport failure, etc.) aborts the test run.
        msg = 'Error checking file {}: {}'.format(file_name, e)
        amulet.raise_status(amulet.FAIL, msg=msg)
    else:
        return True
def file_contents_safe(self, sentry_unit, file_name,
                       max_wait=60, fatal=False):
    """Get file contents from a sentry unit.

    Wraps amulet file_contents with retry logic to address races where
    a file checks as existing, but no longer exists by the time
    file_contents is called.

    :param sentry_unit: sentry unit to read from
    :param file_name: path of the file to read
    :param max_wait: approximate seconds to keep retrying (4s per try)
    :param fatal: if True, raise amulet FAIL instead of returning None
    :returns: file contents, or None if the file could not be read
    """
    unit_name = sentry_unit.info['unit_name']
    # Use None as the "not read yet" sentinel so that a legitimately
    # empty file ('') is returned immediately rather than being
    # mistaken for a miss and retried until timeout.
    file_contents = None
    tries = 0
    while tries < (max_wait / 4):
        try:
            file_contents = sentry_unit.file_contents(file_name)
            break
        except IOError:
            self.log.debug('Attempt {} to open file {} from {} '
                           'failed'.format(tries, file_name,
                                           unit_name))
            time.sleep(4)
            tries += 1

    if file_contents is not None:
        return file_contents
    elif not fatal:
        return None
    else:
        msg = 'Failed to get file contents from unit.'
        amulet.raise_status(amulet.FAIL, msg)
def port_knock_tcp(self, host="localhost", port=22, timeout=15):
    """Open a TCP socket to check for a listening service on a host.

    :param host: host name or IP address, default to localhost
    :param port: TCP port number, default to 22
    :param timeout: Connect timeout, default to 15 seconds
    :returns: True if successful, False if connect failed
    """
    # Resolve host name if possible; fall back to the raw name.
    try:
        connect_host = socket.gethostbyname(host)
        host_human = "{} ({})".format(connect_host, host)
    except socket.error as e:
        self.log.warn('Unable to resolve address: '
                      '{} ({}) Trying anyway!'.format(host, e))
        connect_host = host
        host_human = connect_host

    # Attempt socket connection
    try:
        knock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        knock.settimeout(timeout)
        knock.connect((connect_host, port))
        knock.close()
    except socket.error as e:
        self.log.debug('Socket connect FAIL for'
                       ' {} port {} ({})'.format(host_human, port, e))
        return False
    self.log.debug('Socket connect OK for host '
                   '{} on port {}.'.format(host_human, port))
    return True
def port_knock_units(self, sentry_units, port=22,
                     timeout=15, expect_success=True):
    """Check each listed juju unit for a listening TCP service.

    :param sentry_units: list of sentry unit pointers
    :param port: TCP port number, default to 22
    :param timeout: Connect timeout, default to 15 seconds
    :param expect_success: True by default, set False to invert logic
    :returns: None if successful, Failure message otherwise
    """
    for sentry in sentry_units:
        reachable = self.port_knock_tcp(
            sentry.info['public-address'], port, timeout)
        if expect_success and not reachable:
            return 'Socket connect failed.'
        if reachable and not expect_success:
            return 'Socket connected unexpectedly.'
def get_uuid_epoch_stamp(self):
    """Return a stamp string built from uuid4 and the epoch time.

    Useful in generating test messages which need to be unique-ish.
    """
    return '[%s-%s]' % (uuid.uuid4(), time.time())
# amulet juju action helpers:
|
|
||||||
def run_action(self, unit_sentry, action,
               _check_output=subprocess.check_output):
    """Run the named action on a given unit sentry.

    _check_output parameter is used for dependency injection.

    @return action_id.
    """
    command = ["juju", "action", "do", "--format=json",
               unit_sentry.info["unit_name"], action]
    self.log.info("Running command: %s\n" % " ".join(command))
    data = json.loads(_check_output(command, universal_newlines=True))
    return data[u'Action queued with id']
def wait_on_action(self, action_id, _check_output=subprocess.check_output):
    """Wait for a given action, returning if it completed or not.

    _check_output parameter is used for dependency injection.
    """
    command = ["juju", "action", "fetch", "--format=json", "--wait=0",
               action_id]
    raw = _check_output(command, universal_newlines=True)
    return json.loads(raw).get(u"status") == "completed"
def status_get(self, unit):
    """Return the current (status, message) pair for this unit."""
    raw_status, return_code = unit.run(
        "status-get --format=json --include-data")
    if return_code == 0:
        decoded = json.loads(raw_status)
        return (decoded["status"], decoded["message"])
    # status-get unavailable (non-zero exit): report unknown.
    return ("unknown", "")
@ -1,254 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
"""Charm Helpers ansible - declare the state of your machines.
|
|
||||||
|
|
||||||
This helper enables you to declare your machine state, rather than
|
|
||||||
program it procedurally (and have to test each change to your procedures).
|
|
||||||
Your install hook can be as simple as::
|
|
||||||
|
|
||||||
{{{
|
|
||||||
import charmhelpers.contrib.ansible
|
|
||||||
|
|
||||||
|
|
||||||
def install():
|
|
||||||
charmhelpers.contrib.ansible.install_ansible_support()
|
|
||||||
charmhelpers.contrib.ansible.apply_playbook('playbooks/install.yaml')
|
|
||||||
}}}
|
|
||||||
|
|
||||||
and won't need to change (nor will its tests) when you change the machine
|
|
||||||
state.
|
|
||||||
|
|
||||||
All of your juju config and relation-data are available as template
|
|
||||||
variables within your playbooks and templates. An install playbook looks
|
|
||||||
something like::
|
|
||||||
|
|
||||||
{{{
|
|
||||||
---
|
|
||||||
- hosts: localhost
|
|
||||||
user: root
|
|
||||||
|
|
||||||
tasks:
|
|
||||||
- name: Add private repositories.
|
|
||||||
template:
|
|
||||||
src: ../templates/private-repositories.list.jinja2
|
|
||||||
dest: /etc/apt/sources.list.d/private.list
|
|
||||||
|
|
||||||
- name: Update the cache.
|
|
||||||
apt: update_cache=yes
|
|
||||||
|
|
||||||
- name: Install dependencies.
|
|
||||||
apt: pkg={{ item }}
|
|
||||||
with_items:
|
|
||||||
- python-mimeparse
|
|
||||||
- python-webob
|
|
||||||
- sunburnt
|
|
||||||
|
|
||||||
- name: Setup groups.
|
|
||||||
group: name={{ item.name }} gid={{ item.gid }}
|
|
||||||
with_items:
|
|
||||||
- { name: 'deploy_user', gid: 1800 }
|
|
||||||
- { name: 'service_user', gid: 1500 }
|
|
||||||
|
|
||||||
...
|
|
||||||
}}}
|
|
||||||
|
|
||||||
Read more online about `playbooks`_ and standard ansible `modules`_.
|
|
||||||
|
|
||||||
.. _playbooks: http://www.ansibleworks.com/docs/playbooks.html
|
|
||||||
.. _modules: http://www.ansibleworks.com/docs/modules.html
|
|
||||||
|
|
||||||
A further feature of the ansible hooks is to provide a light weight "action"
|
|
||||||
scripting tool. This is a decorator that you apply to a function, and that
|
|
||||||
function can now receive cli args, and can pass extra args to the playbook.
|
|
||||||
|
|
||||||
e.g.
|
|
||||||
|
|
||||||
|
|
||||||
@hooks.action()
|
|
||||||
def some_action(amount, force="False"):
|
|
||||||
"Usage: some-action AMOUNT [force=True]" # <-- shown on error
|
|
||||||
# process the arguments
|
|
||||||
# do some calls
|
|
||||||
# return extra-vars to be passed to ansible-playbook
|
|
||||||
return {
|
|
||||||
'amount': int(amount),
|
|
||||||
'type': force,
|
|
||||||
}
|
|
||||||
|
|
||||||
You can now create a symlink to hooks.py that can be invoked like a hook, but
|
|
||||||
with cli params:
|
|
||||||
|
|
||||||
# link actions/some-action to hooks/hooks.py
|
|
||||||
|
|
||||||
actions/some-action amount=10 force=true
|
|
||||||
|
|
||||||
"""
|
|
||||||
import os
|
|
||||||
import stat
|
|
||||||
import subprocess
|
|
||||||
import functools
|
|
||||||
|
|
||||||
import charmhelpers.contrib.templating.contexts
|
|
||||||
import charmhelpers.core.host
|
|
||||||
import charmhelpers.core.hookenv
|
|
||||||
import charmhelpers.fetch
|
|
||||||
|
|
||||||
|
|
||||||
charm_dir = os.environ.get('CHARM_DIR', '')
|
|
||||||
ansible_hosts_path = '/etc/ansible/hosts'
|
|
||||||
# Ansible will automatically include any vars in the following
|
|
||||||
# file in its inventory when run locally.
|
|
||||||
ansible_vars_path = '/etc/ansible/host_vars/localhost'
|
|
||||||
|
|
||||||
|
|
||||||
def install_ansible_support(from_ppa=True, ppa_location='ppa:rquillo/ansible'):
    """Installs the ansible package.

    By default it is installed from the `PPA`_ linked from
    the ansible `website`_ or from a ppa specified by a charm config..

    .. _PPA: https://launchpad.net/~rquillo/+archive/ansible
    .. _website: http://docs.ansible.com/intro_installation.html#latest-releases-via-apt-ubuntu

    If from_ppa is empty, you must ensure that the package is available
    from a configured repository.
    """
    fetch = charmhelpers.fetch
    if from_ppa:
        fetch.add_source(ppa_location)
        fetch.apt_update(fatal=True)
    fetch.apt_install('ansible')
    # In a charm context ansible only ever targets the local machine.
    with open(ansible_hosts_path, 'w+') as hosts_file:
        hosts_file.write('localhost ansible_connection=local')
def apply_playbook(playbook, tags=None, extra_vars=None):
    """Run ansible-playbook against localhost with juju state as vars.

    :param playbook: path of the playbook to run
    :param tags: optional iterable of tags limiting which tasks run
    :param extra_vars: optional dict passed through as --extra-vars
    """
    # Dump config/relation state where ansible's local inventory
    # picks it up automatically (see ansible_vars_path above).
    charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
        ansible_vars_path, namespace_separator='__',
        allow_hyphens_in_keys=False, mode=(stat.S_IRUSR | stat.S_IWUSR))

    command = [
        'ansible-playbook',
        '-c',
        'local',
        playbook,
    ]
    tag_expr = ",".join(tags or [])
    if tag_expr:
        command.extend(['--tags', '{}'.format(tag_expr)])
    if extra_vars:
        pairs = ["%s=%s" % (k, v) for k, v in extra_vars.items()]
        command.extend(['--extra-vars', " ".join(pairs)])

    # we want ansible's log output to be unbuffered
    env = os.environ.copy()
    env['PYTHONUNBUFFERED'] = "1"
    subprocess.check_call(command, env=env)
class AnsibleHooks(charmhelpers.core.hookenv.Hooks):
    """Run a playbook with the hook-name as the tag.

    This helper builds on the standard hookenv.Hooks helper,
    but additionally runs the playbook with the hook-name specified
    using --tags (ie. running all the tasks tagged with the hook-name).

    Example::

        hooks = AnsibleHooks(playbook_path='playbooks/my_machine_state.yaml')

        # All the tasks within my_machine_state.yaml tagged with 'install'
        # will be run automatically after do_custom_work()
        @hooks.hook()
        def install():
            do_custom_work()

        # For most of your hooks, you won't need to do anything other
        # than run the tagged tasks for the hook:
        @hooks.hook('config-changed', 'start', 'stop')
        def just_use_playbook():
            pass

        # As a convenience, you can avoid the above noop function by specifying
        # the hooks which are handled by ansible-only and they'll be registered
        # for you:
        # hooks = AnsibleHooks(
        #     'playbooks/my_machine_state.yaml',
        #     default_hooks=['config-changed', 'start', 'stop'])

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)

    """

    def __init__(self, playbook_path, default_hooks=None):
        """Register any hooks handled by ansible.

        :param playbook_path: playbook executed after every hook, with
            the hook name passed as the ansible tag
        :param default_hooks: hook names handled purely by the playbook;
            a noop python handler is registered for each of them
        """
        super(AnsibleHooks, self).__init__()

        self._actions = {}
        self.playbook_path = playbook_path

        default_hooks = default_hooks or []

        def noop(*args, **kwargs):
            pass

        for hook in default_hooks:
            self.register(hook, noop)

    def register_action(self, name, function):
        """Register a hook"""
        self._actions[name] = function

    def execute(self, args):
        """Execute the hook followed by the playbook using the hook as tag."""
        hook_name = os.path.basename(args[0])
        extra_vars = None
        if hook_name in self._actions:
            extra_vars = self._actions[hook_name](args[1:])
        else:
            super(AnsibleHooks, self).execute(args)

        charmhelpers.contrib.ansible.apply_playbook(
            self.playbook_path, tags=[hook_name], extra_vars=extra_vars)

    def action(self, *action_names):
        """Decorator, registering them as actions"""
        # NOTE(review): action_names is accepted but unused; the action
        # is registered under the decorated function's own name (and a
        # hyphenated alias). Confirm callers before changing this.
        def action_wrapper(decorated):

            @functools.wraps(decorated)
            def wrapper(argv):
                # Split each cli arg on the FIRST '=' only so that
                # values containing '=' survive intact.
                kwargs = dict(arg.split('=', 1) for arg in argv)
                try:
                    return decorated(**kwargs)
                except TypeError as e:
                    if decorated.__doc__:
                        # Surface the usage docstring on bad arguments.
                        e.args += (decorated.__doc__,)
                    raise

            self.register_action(decorated.__name__, wrapper)
            if '_' in decorated.__name__:
                # Also register the hyphenated alias used by symlinked
                # action scripts (actions/some-action).
                self.register_action(
                    decorated.__name__.replace('_', '-'), wrapper)

            return wrapper

        return action_wrapper
@ -1,126 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import time
|
|
||||||
import os
|
|
||||||
from distutils.spawn import find_executable
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
in_relation_hook,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relation_get,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def action_set(key, val):
    """Publish an action result via the action-set hook tool.

    Dict values are flattened recursively into dotted keys. Returns
    True when action-set is available, False otherwise.
    """
    if not find_executable('action-set'):
        # Not running in an action context (or tool unavailable).
        return False

    if isinstance(val, dict):
        for subkey, subval in val.items():
            action_set('%s.%s' % (key, subkey), subval)
        return True

    subprocess.check_call(['action-set', '%s=%s' % (key, val)])
    return True
class Benchmark():
    """
    Helper class for the `benchmark` interface.

    :param list actions: Define the actions that are also benchmarks

    From inside the benchmark-relation-changed hook, you would
    Benchmark(['memory', 'cpu', 'disk', 'smoke', 'custom'])

    Examples:

        siege = Benchmark(['siege'])
        siege.start()
        [... run siege ...]
        # The higher the score, the better the benchmark
        siege.set_composite_score(16.70, 'trans/sec', 'desc')
        siege.finish()


    """

    BENCHMARK_CONF = '/etc/benchmark.conf'  # Replaced in testing

    # Relation keys that must all be present before the conf is written.
    required_keys = [
        'hostname',
        'port',
        'graphite_port',
        'graphite_endpoint',
        'api_port'
    ]

    def __init__(self, benchmarks=None):
        if not in_relation_hook():
            return

        if benchmarks is not None:
            for rid in sorted(relation_ids('benchmark')):
                relation_set(relation_id=rid, relation_settings={
                    'benchmarks': ",".join(benchmarks)
                })

        # Check the relation data; every required key must be present,
        # otherwise nothing is written.
        config = {}
        for key in self.required_keys:
            val = relation_get(key)
            if val is None:
                # We don't have all of the required keys
                config = {}
                break
            config[key] = val

        if config:
            with open(self.BENCHMARK_CONF, 'w') as f:
                for key, val in config.items():
                    f.write("%s=%s\n" % (key, val))

    @staticmethod
    def start():
        action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

        # If the collectd charm is also installed, tell it to send a
        # snapshot of the current profile data.
        COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
        if os.path.exists(COLLECT_PROFILE_DATA):
            subprocess.check_output([COLLECT_PROFILE_DATA])

    @staticmethod
    def finish():
        action_set('meta.stop', time.strftime('%Y-%m-%dT%H:%M:%SZ'))

    @staticmethod
    def set_composite_score(value, units, direction='asc'):
        """
        Set the composite score for a benchmark run. This is a single number
        representative of the benchmark results. This could be the most
        important metric, or an amalgamation of metric scores.
        """
        return action_set(
            "meta.composite",
            {'value': value, 'units': units, 'direction': direction}
        )
@ -1,4 +0,0 @@
|
|||||||
Source lp:charm-tools/trunk
|
|
||||||
|
|
||||||
charm-tools/helpers/python/charmhelpers/__init__.py -> charmhelpers/charmhelpers/contrib/charmhelpers/__init__.py
|
|
||||||
charm-tools/helpers/python/charmhelpers/tests/test_charmhelpers.py -> charmhelpers/tests/contrib/charmhelpers/test_charmhelpers.py
|
|
@ -1,208 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Copyright 2012 Canonical Ltd. This software is licensed under the
|
|
||||||
# GNU Affero General Public License version 3 (see the file LICENSE).
|
|
||||||
|
|
||||||
import warnings
|
|
||||||
warnings.warn("contrib.charmhelpers is deprecated", DeprecationWarning) # noqa
|
|
||||||
|
|
||||||
import operator
|
|
||||||
import tempfile
|
|
||||||
import time
|
|
||||||
import yaml
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import six
|
|
||||||
if six.PY3:
|
|
||||||
from urllib.request import urlopen
|
|
||||||
from urllib.error import (HTTPError, URLError)
|
|
||||||
else:
|
|
||||||
from urllib2 import (urlopen, HTTPError, URLError)
|
|
||||||
|
|
||||||
"""Helper functions for writing Juju charms in Python."""
|
|
||||||
|
|
||||||
__metaclass__ = type
|
|
||||||
__all__ = [
|
|
||||||
# 'get_config', # core.hookenv.config()
|
|
||||||
# 'log', # core.hookenv.log()
|
|
||||||
# 'log_entry', # core.hookenv.log()
|
|
||||||
# 'log_exit', # core.hookenv.log()
|
|
||||||
# 'relation_get', # core.hookenv.relation_get()
|
|
||||||
# 'relation_set', # core.hookenv.relation_set()
|
|
||||||
# 'relation_ids', # core.hookenv.relation_ids()
|
|
||||||
# 'relation_list', # core.hookenv.relation_units()
|
|
||||||
# 'config_get', # core.hookenv.config()
|
|
||||||
# 'unit_get', # core.hookenv.unit_get()
|
|
||||||
# 'open_port', # core.hookenv.open_port()
|
|
||||||
# 'close_port', # core.hookenv.close_port()
|
|
||||||
# 'service_control', # core.host.service()
|
|
||||||
'unit_info', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_machine', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_page_contents', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_relation', # client-side, NOT IMPLEMENTED
|
|
||||||
'wait_for_unit', # client-side, NOT IMPLEMENTED
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
SLEEP_AMOUNT = 0.1
|
|
||||||
|
|
||||||
|
|
||||||
# We create a juju_status Command here because it makes testing much,
|
|
||||||
# much easier.
|
|
||||||
# We create a juju_status function here because it makes testing much,
# much easier.
def juju_status():
    """Return the raw output of `juju status` as text.

    Callers feed this result to yaml.safe_load(), so the command's
    stdout must be captured and returned; check_call() would print the
    status to stdout and return only the exit code.
    """
    return subprocess.check_output(['juju', 'status'],
                                   universal_newlines=True)
# re-implemented as charmhelpers.fetch.configure_sources()
|
|
||||||
# def configure_source(update=False):
|
|
||||||
# source = config_get('source')
|
|
||||||
# if ((source.startswith('ppa:') or
|
|
||||||
# source.startswith('cloud:') or
|
|
||||||
# source.startswith('http:'))):
|
|
||||||
# run('add-apt-repository', source)
|
|
||||||
# if source.startswith("http:"):
|
|
||||||
# run('apt-key', 'import', config_get('key'))
|
|
||||||
# if update:
|
|
||||||
# run('apt-get', 'update')
|
|
||||||
|
|
||||||
|
|
||||||
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def make_charm_config_file(charm_config):
    """Dump *charm_config* as YAML to a NamedTemporaryFile and return it.

    The NamedTemporaryFile instance is returned instead of just the name
    because we want to take advantage of garbage collection-triggered
    deletion of the temp file when it goes out of scope in the caller.
    """
    config_file = tempfile.NamedTemporaryFile(mode='w+')
    config_file.write(yaml.dump(charm_config))
    config_file.flush()
    return config_file
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def unit_info(service_name, item_name, data=None, unit=None):
    """Look up *item_name* for a unit of *service_name* in juju status.

    :param data: pre-parsed `juju status` dict; fetched live when None
    :param unit: specific unit name; defaults to the lowest-sorted unit
    :returns: the requested item, or '' if the service is not up yet
    """
    if data is None:
        data = yaml.safe_load(juju_status())
    service = data['services'].get(service_name)
    if service is None:
        # XXX 2012-02-08 gmb:
        #     This allows us to cope with the race condition that we
        #     have between deploying a service and having it come up in
        #     `juju status`. We could probably do with cleaning it up so
        #     that it fails a bit more noisily after a while.
        return ''
    units = service['units']
    if unit is None:
        # It might seem odd to sort the units here, but we do it to
        # ensure that when no unit is specified, the first unit for the
        # service (or at least the one with the lowest number) is the
        # one whose data gets returned.
        unit = sorted(units.keys())[0]
    return units[unit][item_name]
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def get_machine_data():
    """Return the 'machines' section of parsed `juju status` output."""
    status = yaml.safe_load(juju_status())
    return status['machines']
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def wait_for_machine(num_machines=1, timeout=300):
    """Wait `timeout` seconds for `num_machines` machines to come up.

    This wait_for... function can be called by other wait_for functions
    whose timeouts might be too short in situations where only a bare
    Juju setup has been bootstrapped.

    :return: A tuple of (num_machines, time_taken). This is used for
             testing.
    """
    # You may think this is a hack, and you'd be right. The easiest way
    # to tell what environment we're working in (LXC vs EC2) is to check
    # the dns-name of the first machine. If it's localhost we're in LXC
    # and we can just return here.
    if get_machine_data()[0]['dns-name'] == 'localhost':
        return 1, 0
    start_time = time.time()
    while True:
        # Drop the first machine, since it's the Zookeeper and that's
        # not a machine that we need to wait for. This will only work
        # for EC2 environments, which is why we return early above if
        # we're in LXC.
        machine_data = get_machine_data()
        workers = [machine_data[key]
                   for key in list(machine_data.keys())[1:]]
        if len(workers) >= num_machines and all(
                m.get('instance-state') == 'running' for m in workers):
            break
        if time.time() - start_time >= timeout:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    return num_machines, time.time() - start_time
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def wait_for_unit(service_name, timeout=480):
    """Wait `timeout` seconds for a given service name to come up."""
    wait_for_machine(num_machines=1)
    deadline = time.time() + timeout
    while True:
        state = unit_info(service_name, 'agent-state')
        if state == 'started' or 'error' in state:
            break
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for service to start')
        time.sleep(SLEEP_AMOUNT)
    if state != 'started':
        raise RuntimeError('unit did not start, agent-state: ' + state)
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def wait_for_relation(service_name, relation_name, timeout=120):
    """Wait `timeout` seconds for a given relation to come up."""
    deadline = time.time() + timeout
    while True:
        relation = unit_info(service_name, 'relations').get(relation_name)
        if relation is not None and relation['state'] == 'up':
            return
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for relation to be up')
        time.sleep(SLEEP_AMOUNT)
# DEPRECATED: client-side only
|
|
||||||
# DEPRECATED: client-side only
def wait_for_page_contents(url, contents, timeout=120, validate=None):
    """Poll *url* until *validate(page, contents)* is true; return page.

    :param validate: two-argument predicate; defaults to
        operator.contains (i.e. *contents* appears in the page)
    :raises RuntimeError: when `timeout` seconds pass without a match
    """
    if validate is None:
        validate = operator.contains
    deadline = time.time() + timeout
    while True:
        try:
            stream = urlopen(url)
        except (HTTPError, URLError):
            # Server not up yet (or transient error): keep polling.
            pass
        else:
            page = stream.read()
            if validate(page, contents):
                return page
        if time.time() >= deadline:
            raise RuntimeError('timeout waiting for contents of ' + url)
        time.sleep(SLEEP_AMOUNT)
@ -1,14 +0,0 @@
|
|||||||
Source: lp:charmsupport/trunk
|
|
||||||
|
|
||||||
charmsupport/charmsupport/execd.py -> charm-helpers/charmhelpers/contrib/charmsupport/execd.py
|
|
||||||
charmsupport/charmsupport/hookenv.py -> charm-helpers/charmhelpers/contrib/charmsupport/hookenv.py
|
|
||||||
charmsupport/charmsupport/host.py -> charm-helpers/charmhelpers/contrib/charmsupport/host.py
|
|
||||||
charmsupport/charmsupport/nrpe.py -> charm-helpers/charmhelpers/contrib/charmsupport/nrpe.py
|
|
||||||
charmsupport/charmsupport/volumes.py -> charm-helpers/charmhelpers/contrib/charmsupport/volumes.py
|
|
||||||
|
|
||||||
charmsupport/tests/test_execd.py -> charm-helpers/tests/contrib/charmsupport/test_execd.py
|
|
||||||
charmsupport/tests/test_hookenv.py -> charm-helpers/tests/contrib/charmsupport/test_hookenv.py
|
|
||||||
charmsupport/tests/test_host.py -> charm-helpers/tests/contrib/charmsupport/test_host.py
|
|
||||||
charmsupport/tests/test_nrpe.py -> charm-helpers/tests/contrib/charmsupport/test_nrpe.py
|
|
||||||
|
|
||||||
charmsupport/bin/charmsupport -> charm-helpers/bin/contrib/charmsupport/charmsupport
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,398 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Compatibility with the nrpe-external-master charm"""
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import pwd
|
|
||||||
import grp
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
import shutil
|
|
||||||
import re
|
|
||||||
import shlex
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
local_unit,
|
|
||||||
log,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
relations_of_type,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.host import service
|
|
||||||
|
|
||||||
# This module adds compatibility with the nrpe-external-master and plain nrpe
|
|
||||||
# subordinate charms. To use it in your charm:
|
|
||||||
#
|
|
||||||
# 1. Update metadata.yaml
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# nrpe-external-master:
|
|
||||||
# interface: nrpe-external-master
|
|
||||||
# scope: container
|
|
||||||
#
|
|
||||||
# and/or
|
|
||||||
#
|
|
||||||
# provides:
|
|
||||||
# (...)
|
|
||||||
# local-monitors:
|
|
||||||
# interface: local-monitors
|
|
||||||
# scope: container
|
|
||||||
|
|
||||||
#
|
|
||||||
# 2. Add the following to config.yaml
|
|
||||||
#
|
|
||||||
# nagios_context:
|
|
||||||
# default: "juju"
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# Used by the nrpe subordinate charms.
|
|
||||||
# A string that will be prepended to instance name to set the host name
|
|
||||||
# in nagios. So for instance the hostname would be something like:
|
|
||||||
# juju-myservice-0
|
|
||||||
# If you're running multiple environments with the same services in them
|
|
||||||
# this allows you to differentiate between them.
|
|
||||||
# nagios_servicegroups:
|
|
||||||
# default: ""
|
|
||||||
# type: string
|
|
||||||
# description: |
|
|
||||||
# A comma-separated list of nagios servicegroups.
|
|
||||||
# If left empty, the nagios_context will be used as the servicegroup
|
|
||||||
#
|
|
||||||
# 3. Add custom checks (Nagios plugins) to files/nrpe-external-master
|
|
||||||
#
|
|
||||||
# 4. Update your hooks.py with something like this:
|
|
||||||
#
|
|
||||||
# from charmsupport.nrpe import NRPE
|
|
||||||
# (...)
|
|
||||||
# def update_nrpe_config():
|
|
||||||
# nrpe_compat = NRPE()
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# shortname = "myservice",
|
|
||||||
# description = "Check MyService",
|
|
||||||
# check_cmd = "check_http -w 2 -c 10 http://localhost"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.add_check(
|
|
||||||
# "myservice_other",
|
|
||||||
# "Check for widget failures",
|
|
||||||
# check_cmd = "/srv/myapp/scripts/widget_check"
|
|
||||||
# )
|
|
||||||
# nrpe_compat.write()
|
|
||||||
#
|
|
||||||
# def config_changed():
|
|
||||||
# (...)
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def nrpe_external_master_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# def local_monitors_relation_changed():
|
|
||||||
# update_nrpe_config()
|
|
||||||
#
|
|
||||||
# 5. ln -s hooks.py nrpe-external-master-relation-changed
|
|
||||||
# ln -s hooks.py local-monitors-relation-changed
|
|
||||||
|
|
||||||
|
|
||||||
class CheckException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
class Check(object):
|
|
||||||
shortname_re = '[A-Za-z0-9-_]+$'
|
|
||||||
service_template = ("""
|
|
||||||
#---------------------------------------------------
|
|
||||||
# This file is Juju managed
|
|
||||||
#---------------------------------------------------
|
|
||||||
define service {{
|
|
||||||
use active-service
|
|
||||||
host_name {nagios_hostname}
|
|
||||||
service_description {nagios_hostname}[{shortname}] """
|
|
||||||
"""{description}
|
|
||||||
check_command check_nrpe!{command}
|
|
||||||
servicegroups {nagios_servicegroup}
|
|
||||||
}}
|
|
||||||
""")
|
|
||||||
|
|
||||||
def __init__(self, shortname, description, check_cmd):
|
|
||||||
super(Check, self).__init__()
|
|
||||||
# XXX: could be better to calculate this from the service name
|
|
||||||
if not re.match(self.shortname_re, shortname):
|
|
||||||
raise CheckException("shortname must match {}".format(
|
|
||||||
Check.shortname_re))
|
|
||||||
self.shortname = shortname
|
|
||||||
self.command = "check_{}".format(shortname)
|
|
||||||
# Note: a set of invalid characters is defined by the
|
|
||||||
# Nagios server config
|
|
||||||
# The default is: illegal_object_name_chars=`~!$%^&*"|'<>?,()=
|
|
||||||
self.description = description
|
|
||||||
self.check_cmd = self._locate_cmd(check_cmd)
|
|
||||||
|
|
||||||
def _get_check_filename(self):
|
|
||||||
return os.path.join(NRPE.nrpe_confdir, '{}.cfg'.format(self.command))
|
|
||||||
|
|
||||||
def _get_service_filename(self, hostname):
|
|
||||||
return os.path.join(NRPE.nagios_exportdir,
|
|
||||||
'service__{}_{}.cfg'.format(hostname, self.command))
|
|
||||||
|
|
||||||
def _locate_cmd(self, check_cmd):
|
|
||||||
search_path = (
|
|
||||||
'/usr/lib/nagios/plugins',
|
|
||||||
'/usr/local/lib/nagios/plugins',
|
|
||||||
)
|
|
||||||
parts = shlex.split(check_cmd)
|
|
||||||
for path in search_path:
|
|
||||||
if os.path.exists(os.path.join(path, parts[0])):
|
|
||||||
command = os.path.join(path, parts[0])
|
|
||||||
if len(parts) > 1:
|
|
||||||
command += " " + " ".join(parts[1:])
|
|
||||||
return command
|
|
||||||
log('Check command not found: {}'.format(parts[0]))
|
|
||||||
return ''
|
|
||||||
|
|
||||||
def _remove_service_files(self):
|
|
||||||
if not os.path.exists(NRPE.nagios_exportdir):
|
|
||||||
return
|
|
||||||
for f in os.listdir(NRPE.nagios_exportdir):
|
|
||||||
if f.endswith('_{}.cfg'.format(self.command)):
|
|
||||||
os.remove(os.path.join(NRPE.nagios_exportdir, f))
|
|
||||||
|
|
||||||
def remove(self, hostname):
|
|
||||||
nrpe_check_file = self._get_check_filename()
|
|
||||||
if os.path.exists(nrpe_check_file):
|
|
||||||
os.remove(nrpe_check_file)
|
|
||||||
self._remove_service_files()
|
|
||||||
|
|
||||||
def write(self, nagios_context, hostname, nagios_servicegroups):
|
|
||||||
nrpe_check_file = self._get_check_filename()
|
|
||||||
with open(nrpe_check_file, 'w') as nrpe_check_config:
|
|
||||||
nrpe_check_config.write("# check {}\n".format(self.shortname))
|
|
||||||
nrpe_check_config.write("command[{}]={}\n".format(
|
|
||||||
self.command, self.check_cmd))
|
|
||||||
|
|
||||||
if not os.path.exists(NRPE.nagios_exportdir):
|
|
||||||
log('Not writing service config as {} is not accessible'.format(
|
|
||||||
NRPE.nagios_exportdir))
|
|
||||||
else:
|
|
||||||
self.write_service_config(nagios_context, hostname,
|
|
||||||
nagios_servicegroups)
|
|
||||||
|
|
||||||
def write_service_config(self, nagios_context, hostname,
|
|
||||||
nagios_servicegroups):
|
|
||||||
self._remove_service_files()
|
|
||||||
|
|
||||||
templ_vars = {
|
|
||||||
'nagios_hostname': hostname,
|
|
||||||
'nagios_servicegroup': nagios_servicegroups,
|
|
||||||
'description': self.description,
|
|
||||||
'shortname': self.shortname,
|
|
||||||
'command': self.command,
|
|
||||||
}
|
|
||||||
nrpe_service_text = Check.service_template.format(**templ_vars)
|
|
||||||
nrpe_service_file = self._get_service_filename(hostname)
|
|
||||||
with open(nrpe_service_file, 'w') as nrpe_service_config:
|
|
||||||
nrpe_service_config.write(str(nrpe_service_text))
|
|
||||||
|
|
||||||
def run(self):
|
|
||||||
subprocess.call(self.check_cmd)
|
|
||||||
|
|
||||||
|
|
||||||
class NRPE(object):
|
|
||||||
nagios_logdir = '/var/log/nagios'
|
|
||||||
nagios_exportdir = '/var/lib/nagios/export'
|
|
||||||
nrpe_confdir = '/etc/nagios/nrpe.d'
|
|
||||||
|
|
||||||
def __init__(self, hostname=None):
|
|
||||||
super(NRPE, self).__init__()
|
|
||||||
self.config = config()
|
|
||||||
self.nagios_context = self.config['nagios_context']
|
|
||||||
if 'nagios_servicegroups' in self.config and self.config['nagios_servicegroups']:
|
|
||||||
self.nagios_servicegroups = self.config['nagios_servicegroups']
|
|
||||||
else:
|
|
||||||
self.nagios_servicegroups = self.nagios_context
|
|
||||||
self.unit_name = local_unit().replace('/', '-')
|
|
||||||
if hostname:
|
|
||||||
self.hostname = hostname
|
|
||||||
else:
|
|
||||||
nagios_hostname = get_nagios_hostname()
|
|
||||||
if nagios_hostname:
|
|
||||||
self.hostname = nagios_hostname
|
|
||||||
else:
|
|
||||||
self.hostname = "{}-{}".format(self.nagios_context, self.unit_name)
|
|
||||||
self.checks = []
|
|
||||||
|
|
||||||
def add_check(self, *args, **kwargs):
|
|
||||||
self.checks.append(Check(*args, **kwargs))
|
|
||||||
|
|
||||||
def remove_check(self, *args, **kwargs):
|
|
||||||
if kwargs.get('shortname') is None:
|
|
||||||
raise ValueError('shortname of check must be specified')
|
|
||||||
|
|
||||||
# Use sensible defaults if they're not specified - these are not
|
|
||||||
# actually used during removal, but they're required for constructing
|
|
||||||
# the Check object; check_disk is chosen because it's part of the
|
|
||||||
# nagios-plugins-basic package.
|
|
||||||
if kwargs.get('check_cmd') is None:
|
|
||||||
kwargs['check_cmd'] = 'check_disk'
|
|
||||||
if kwargs.get('description') is None:
|
|
||||||
kwargs['description'] = ''
|
|
||||||
|
|
||||||
check = Check(*args, **kwargs)
|
|
||||||
check.remove(self.hostname)
|
|
||||||
|
|
||||||
def write(self):
|
|
||||||
try:
|
|
||||||
nagios_uid = pwd.getpwnam('nagios').pw_uid
|
|
||||||
nagios_gid = grp.getgrnam('nagios').gr_gid
|
|
||||||
except:
|
|
||||||
log("Nagios user not set up, nrpe checks not updated")
|
|
||||||
return
|
|
||||||
|
|
||||||
if not os.path.exists(NRPE.nagios_logdir):
|
|
||||||
os.mkdir(NRPE.nagios_logdir)
|
|
||||||
os.chown(NRPE.nagios_logdir, nagios_uid, nagios_gid)
|
|
||||||
|
|
||||||
nrpe_monitors = {}
|
|
||||||
monitors = {"monitors": {"remote": {"nrpe": nrpe_monitors}}}
|
|
||||||
for nrpecheck in self.checks:
|
|
||||||
nrpecheck.write(self.nagios_context, self.hostname,
|
|
||||||
self.nagios_servicegroups)
|
|
||||||
nrpe_monitors[nrpecheck.shortname] = {
|
|
||||||
"command": nrpecheck.command,
|
|
||||||
}
|
|
||||||
|
|
||||||
service('restart', 'nagios-nrpe-server')
|
|
||||||
|
|
||||||
monitor_ids = relation_ids("local-monitors") + \
|
|
||||||
relation_ids("nrpe-external-master")
|
|
||||||
for rid in monitor_ids:
|
|
||||||
relation_set(relation_id=rid, monitors=yaml.dump(monitors))
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostcontext(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Query relation with nrpe subordinate, return the nagios_host_context
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
for rel in relations_of_type(relation_name):
|
|
||||||
if 'nagios_host_context' in rel:
|
|
||||||
return rel['nagios_host_context']
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_hostname(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Query relation with nrpe subordinate, return the nagios_hostname
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
for rel in relations_of_type(relation_name):
|
|
||||||
if 'nagios_hostname' in rel:
|
|
||||||
return rel['nagios_hostname']
|
|
||||||
|
|
||||||
|
|
||||||
def get_nagios_unit_name(relation_name='nrpe-external-master'):
|
|
||||||
"""
|
|
||||||
Return the nagios unit name prepended with host_context if needed
|
|
||||||
|
|
||||||
:param str relation_name: Name of relation nrpe sub joined to
|
|
||||||
"""
|
|
||||||
host_context = get_nagios_hostcontext(relation_name)
|
|
||||||
if host_context:
|
|
||||||
unit = "%s:%s" % (host_context, local_unit())
|
|
||||||
else:
|
|
||||||
unit = local_unit()
|
|
||||||
return unit
|
|
||||||
|
|
||||||
|
|
||||||
def add_init_service_checks(nrpe, services, unit_name):
|
|
||||||
"""
|
|
||||||
Add checks for each service in list
|
|
||||||
|
|
||||||
:param NRPE nrpe: NRPE object to add check to
|
|
||||||
:param list services: List of services to check
|
|
||||||
:param str unit_name: Unit name to use in check description
|
|
||||||
"""
|
|
||||||
for svc in services:
|
|
||||||
upstart_init = '/etc/init/%s.conf' % svc
|
|
||||||
sysv_init = '/etc/init.d/%s' % svc
|
|
||||||
if os.path.exists(upstart_init):
|
|
||||||
# Don't add a check for these services from neutron-gateway
|
|
||||||
if svc not in ['ext-port', 'os-charm-phy-nic-mtu']:
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='process check {%s}' % unit_name,
|
|
||||||
check_cmd='check_upstart_job %s' % svc
|
|
||||||
)
|
|
||||||
elif os.path.exists(sysv_init):
|
|
||||||
cronpath = '/etc/cron.d/nagios-service-check-%s' % svc
|
|
||||||
cron_file = ('*/5 * * * * root '
|
|
||||||
'/usr/local/lib/nagios/plugins/check_exit_status.pl '
|
|
||||||
'-s /etc/init.d/%s status > '
|
|
||||||
'/var/lib/nagios/service-check-%s.txt\n' % (svc,
|
|
||||||
svc)
|
|
||||||
)
|
|
||||||
f = open(cronpath, 'w')
|
|
||||||
f.write(cron_file)
|
|
||||||
f.close()
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname=svc,
|
|
||||||
description='process check {%s}' % unit_name,
|
|
||||||
check_cmd='check_status_file.py -f '
|
|
||||||
'/var/lib/nagios/service-check-%s.txt' % svc,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def copy_nrpe_checks():
|
|
||||||
"""
|
|
||||||
Copy the nrpe checks into place
|
|
||||||
|
|
||||||
"""
|
|
||||||
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
|
|
||||||
nrpe_files_dir = os.path.join(os.getenv('CHARM_DIR'), 'hooks',
|
|
||||||
'charmhelpers', 'contrib', 'openstack',
|
|
||||||
'files')
|
|
||||||
|
|
||||||
if not os.path.exists(NAGIOS_PLUGINS):
|
|
||||||
os.makedirs(NAGIOS_PLUGINS)
|
|
||||||
for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
|
|
||||||
if os.path.isfile(fname):
|
|
||||||
shutil.copy2(fname,
|
|
||||||
os.path.join(NAGIOS_PLUGINS, os.path.basename(fname)))
|
|
||||||
|
|
||||||
|
|
||||||
def add_haproxy_checks(nrpe, unit_name):
|
|
||||||
"""
|
|
||||||
Add checks for each service in list
|
|
||||||
|
|
||||||
:param NRPE nrpe: NRPE object to add check to
|
|
||||||
:param str unit_name: Unit name to use in check description
|
|
||||||
"""
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname='haproxy_servers',
|
|
||||||
description='Check HAProxy {%s}' % unit_name,
|
|
||||||
check_cmd='check_haproxy.sh')
|
|
||||||
nrpe.add_check(
|
|
||||||
shortname='haproxy_queue',
|
|
||||||
description='Check HAProxy queue depth {%s}' % unit_name,
|
|
||||||
check_cmd='check_haproxy_queue_depth.sh')
|
|
@ -1,175 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
'''
|
|
||||||
Functions for managing volumes in juju units. One volume is supported per unit.
|
|
||||||
Subordinates may have their own storage, provided it is on its own partition.
|
|
||||||
|
|
||||||
Configuration stanzas::
|
|
||||||
|
|
||||||
volume-ephemeral:
|
|
||||||
type: boolean
|
|
||||||
default: true
|
|
||||||
description: >
|
|
||||||
If false, a volume is mounted as sepecified in "volume-map"
|
|
||||||
If true, ephemeral storage will be used, meaning that log data
|
|
||||||
will only exist as long as the machine. YOU HAVE BEEN WARNED.
|
|
||||||
volume-map:
|
|
||||||
type: string
|
|
||||||
default: {}
|
|
||||||
description: >
|
|
||||||
YAML map of units to device names, e.g:
|
|
||||||
"{ rsyslog/0: /dev/vdb, rsyslog/1: /dev/vdb }"
|
|
||||||
Service units will raise a configure-error if volume-ephemeral
|
|
||||||
is 'true' and no volume-map value is set. Use 'juju set' to set a
|
|
||||||
value and 'juju resolved' to complete configuration.
|
|
||||||
|
|
||||||
Usage::
|
|
||||||
|
|
||||||
from charmsupport.volumes import configure_volume, VolumeConfigurationError
|
|
||||||
from charmsupport.hookenv import log, ERROR
|
|
||||||
def post_mount_hook():
|
|
||||||
stop_service('myservice')
|
|
||||||
def post_mount_hook():
|
|
||||||
start_service('myservice')
|
|
||||||
|
|
||||||
if __name__ == '__main__':
|
|
||||||
try:
|
|
||||||
configure_volume(before_change=pre_mount_hook,
|
|
||||||
after_change=post_mount_hook)
|
|
||||||
except VolumeConfigurationError:
|
|
||||||
log('Storage could not be configured', ERROR)
|
|
||||||
|
|
||||||
'''
|
|
||||||
|
|
||||||
# XXX: Known limitations
|
|
||||||
# - fstab is neither consulted nor updated
|
|
||||||
|
|
||||||
import os
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core import host
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
|
|
||||||
MOUNT_BASE = '/srv/juju/volumes'
|
|
||||||
|
|
||||||
|
|
||||||
class VolumeConfigurationError(Exception):
|
|
||||||
'''Volume configuration data is missing or invalid'''
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_config():
|
|
||||||
'''Gather and sanity-check volume configuration data'''
|
|
||||||
volume_config = {}
|
|
||||||
config = hookenv.config()
|
|
||||||
|
|
||||||
errors = False
|
|
||||||
|
|
||||||
if config.get('volume-ephemeral') in (True, 'True', 'true', 'Yes', 'yes'):
|
|
||||||
volume_config['ephemeral'] = True
|
|
||||||
else:
|
|
||||||
volume_config['ephemeral'] = False
|
|
||||||
|
|
||||||
try:
|
|
||||||
volume_map = yaml.safe_load(config.get('volume-map', '{}'))
|
|
||||||
except yaml.YAMLError as e:
|
|
||||||
hookenv.log("Error parsing YAML volume-map: {}".format(e),
|
|
||||||
hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
if volume_map is None:
|
|
||||||
# probably an empty string
|
|
||||||
volume_map = {}
|
|
||||||
elif not isinstance(volume_map, dict):
|
|
||||||
hookenv.log("Volume-map should be a dictionary, not {}".format(
|
|
||||||
type(volume_map)))
|
|
||||||
errors = True
|
|
||||||
|
|
||||||
volume_config['device'] = volume_map.get(os.environ['JUJU_UNIT_NAME'])
|
|
||||||
if volume_config['device'] and volume_config['ephemeral']:
|
|
||||||
# asked for ephemeral storage but also defined a volume ID
|
|
||||||
hookenv.log('A volume is defined for this unit, but ephemeral '
|
|
||||||
'storage was requested', hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
elif not volume_config['device'] and not volume_config['ephemeral']:
|
|
||||||
# asked for permanent storage but did not define volume ID
|
|
||||||
hookenv.log('Ephemeral storage was requested, but there is no volume '
|
|
||||||
'defined for this unit.', hookenv.ERROR)
|
|
||||||
errors = True
|
|
||||||
|
|
||||||
unit_mount_name = hookenv.local_unit().replace('/', '-')
|
|
||||||
volume_config['mountpoint'] = os.path.join(MOUNT_BASE, unit_mount_name)
|
|
||||||
|
|
||||||
if errors:
|
|
||||||
return None
|
|
||||||
return volume_config
|
|
||||||
|
|
||||||
|
|
||||||
def mount_volume(config):
|
|
||||||
if os.path.exists(config['mountpoint']):
|
|
||||||
if not os.path.isdir(config['mountpoint']):
|
|
||||||
hookenv.log('Not a directory: {}'.format(config['mountpoint']))
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
else:
|
|
||||||
host.mkdir(config['mountpoint'])
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
unmount_volume(config)
|
|
||||||
if not host.mount(config['device'], config['mountpoint'], persist=True):
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def unmount_volume(config):
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
if not host.umount(config['mountpoint'], persist=True):
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
|
|
||||||
def managed_mounts():
|
|
||||||
'''List of all mounted managed volumes'''
|
|
||||||
return filter(lambda mount: mount[0].startswith(MOUNT_BASE), host.mounts())
|
|
||||||
|
|
||||||
|
|
||||||
def configure_volume(before_change=lambda: None, after_change=lambda: None):
|
|
||||||
'''Set up storage (or don't) according to the charm's volume configuration.
|
|
||||||
Returns the mount point or "ephemeral". before_change and after_change
|
|
||||||
are optional functions to be called if the volume configuration changes.
|
|
||||||
'''
|
|
||||||
|
|
||||||
config = get_config()
|
|
||||||
if not config:
|
|
||||||
hookenv.log('Failed to read volume configuration', hookenv.CRITICAL)
|
|
||||||
raise VolumeConfigurationError()
|
|
||||||
|
|
||||||
if config['ephemeral']:
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
before_change()
|
|
||||||
unmount_volume(config)
|
|
||||||
after_change()
|
|
||||||
return 'ephemeral'
|
|
||||||
else:
|
|
||||||
# persistent storage
|
|
||||||
if os.path.ismount(config['mountpoint']):
|
|
||||||
mounts = dict(managed_mounts())
|
|
||||||
if mounts.get(config['mountpoint']) != config['device']:
|
|
||||||
before_change()
|
|
||||||
unmount_volume(config)
|
|
||||||
mount_volume(config)
|
|
||||||
after_change()
|
|
||||||
else:
|
|
||||||
before_change()
|
|
||||||
mount_volume(config)
|
|
||||||
after_change()
|
|
||||||
return config['mountpoint']
|
|
@ -1,412 +0,0 @@
|
|||||||
"""Helper for working with a MySQL database"""
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import platform
|
|
||||||
import os
|
|
||||||
import glob
|
|
||||||
|
|
||||||
# from string import upper
|
|
||||||
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
mkdir,
|
|
||||||
pwgen,
|
|
||||||
write_file
|
|
||||||
)
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config as config_get,
|
|
||||||
relation_get,
|
|
||||||
related_units,
|
|
||||||
unit_get,
|
|
||||||
log,
|
|
||||||
DEBUG,
|
|
||||||
INFO,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_update,
|
|
||||||
filter_installed_packages,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.peerstorage import (
|
|
||||||
peer_store,
|
|
||||||
peer_retrieve,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.network.ip import get_host_ip
|
|
||||||
|
|
||||||
try:
|
|
||||||
import MySQLdb
|
|
||||||
except ImportError:
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install(filter_installed_packages(['python-mysqldb']), fatal=True)
|
|
||||||
import MySQLdb
|
|
||||||
|
|
||||||
|
|
||||||
class MySQLHelper(object):
|
|
||||||
|
|
||||||
def __init__(self, rpasswdf_template, upasswdf_template, host='localhost',
|
|
||||||
migrate_passwd_to_peer_relation=True,
|
|
||||||
delete_ondisk_passwd_file=True):
|
|
||||||
self.host = host
|
|
||||||
# Password file path templates
|
|
||||||
self.root_passwd_file_template = rpasswdf_template
|
|
||||||
self.user_passwd_file_template = upasswdf_template
|
|
||||||
|
|
||||||
self.migrate_passwd_to_peer_relation = migrate_passwd_to_peer_relation
|
|
||||||
# If we migrate we have the option to delete local copy of root passwd
|
|
||||||
self.delete_ondisk_passwd_file = delete_ondisk_passwd_file
|
|
||||||
|
|
||||||
def connect(self, user='root', password=None):
|
|
||||||
log("Opening db connection for %s@%s" % (user, self.host), level=DEBUG)
|
|
||||||
self.connection = MySQLdb.connect(user=user, host=self.host,
|
|
||||||
passwd=password)
|
|
||||||
|
|
||||||
def database_exists(self, db_name):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("SHOW DATABASES")
|
|
||||||
databases = [i[0] for i in cursor.fetchall()]
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
return db_name in databases
|
|
||||||
|
|
||||||
def create_database(self, db_name):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("CREATE DATABASE {} CHARACTER SET UTF8"
|
|
||||||
.format(db_name))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def grant_exists(self, db_name, db_user, remote_ip):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
priv_string = "GRANT ALL PRIVILEGES ON `{}`.* " \
|
|
||||||
"TO '{}'@'{}'".format(db_name, db_user, remote_ip)
|
|
||||||
try:
|
|
||||||
cursor.execute("SHOW GRANTS for '{}'@'{}'".format(db_user,
|
|
||||||
remote_ip))
|
|
||||||
grants = [i[0] for i in cursor.fetchall()]
|
|
||||||
except MySQLdb.OperationalError:
|
|
||||||
return False
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
# TODO: review for different grants
|
|
||||||
return priv_string in grants
|
|
||||||
|
|
||||||
def create_grant(self, db_name, db_user, remote_ip, password):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
# TODO: review for different grants
|
|
||||||
cursor.execute("GRANT ALL PRIVILEGES ON {}.* TO '{}'@'{}' "
|
|
||||||
"IDENTIFIED BY '{}'".format(db_name,
|
|
||||||
db_user,
|
|
||||||
remote_ip,
|
|
||||||
password))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def create_admin_grant(self, db_user, remote_ip, password):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("GRANT ALL PRIVILEGES ON *.* TO '{}'@'{}' "
|
|
||||||
"IDENTIFIED BY '{}'".format(db_user,
|
|
||||||
remote_ip,
|
|
||||||
password))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def cleanup_grant(self, db_user, remote_ip):
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute("DROP FROM mysql.user WHERE user='{}' "
|
|
||||||
"AND HOST='{}'".format(db_user,
|
|
||||||
remote_ip))
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def execute(self, sql):
|
|
||||||
"""Execute arbitary SQL against the database."""
|
|
||||||
cursor = self.connection.cursor()
|
|
||||||
try:
|
|
||||||
cursor.execute(sql)
|
|
||||||
finally:
|
|
||||||
cursor.close()
|
|
||||||
|
|
||||||
def migrate_passwords_to_peer_relation(self, excludes=None):
|
|
||||||
"""Migrate any passwords storage on disk to cluster peer relation."""
|
|
||||||
dirname = os.path.dirname(self.root_passwd_file_template)
|
|
||||||
path = os.path.join(dirname, '*.passwd')
|
|
||||||
for f in glob.glob(path):
|
|
||||||
if excludes and f in excludes:
|
|
||||||
log("Excluding %s from peer migration" % (f), level=DEBUG)
|
|
||||||
continue
|
|
||||||
|
|
||||||
key = os.path.basename(f)
|
|
||||||
with open(f, 'r') as passwd:
|
|
||||||
_value = passwd.read().strip()
|
|
||||||
|
|
||||||
try:
|
|
||||||
peer_store(key, _value)
|
|
||||||
|
|
||||||
if self.delete_ondisk_passwd_file:
|
|
||||||
os.unlink(f)
|
|
||||||
except ValueError:
|
|
||||||
# NOTE cluster relation not yet ready - skip for now
|
|
||||||
pass
|
|
||||||
|
|
||||||
def get_mysql_password_on_disk(self, username=None, password=None):
|
|
||||||
"""Retrieve, generate or store a mysql password for the provided
|
|
||||||
username on disk."""
|
|
||||||
if username:
|
|
||||||
template = self.user_passwd_file_template
|
|
||||||
passwd_file = template.format(username)
|
|
||||||
else:
|
|
||||||
passwd_file = self.root_passwd_file_template
|
|
||||||
|
|
||||||
_password = None
|
|
||||||
if os.path.exists(passwd_file):
|
|
||||||
log("Using existing password file '%s'" % passwd_file, level=DEBUG)
|
|
||||||
with open(passwd_file, 'r') as passwd:
|
|
||||||
_password = passwd.read().strip()
|
|
||||||
else:
|
|
||||||
log("Generating new password file '%s'" % passwd_file, level=DEBUG)
|
|
||||||
if not os.path.isdir(os.path.dirname(passwd_file)):
|
|
||||||
# NOTE: need to ensure this is not mysql root dir (which needs
|
|
||||||
# to be mysql readable)
|
|
||||||
mkdir(os.path.dirname(passwd_file), owner='root', group='root',
|
|
||||||
perms=0o770)
|
|
||||||
# Force permissions - for some reason the chmod in makedirs
|
|
||||||
# fails
|
|
||||||
os.chmod(os.path.dirname(passwd_file), 0o770)
|
|
||||||
|
|
||||||
_password = password or pwgen(length=32)
|
|
||||||
write_file(passwd_file, _password, owner='root', group='root',
|
|
||||||
perms=0o660)
|
|
||||||
|
|
||||||
return _password
|
|
||||||
|
|
||||||
def passwd_keys(self, username):
|
|
||||||
"""Generator to return keys used to store passwords in peer store.
|
|
||||||
|
|
||||||
NOTE: we support both legacy and new format to support mysql
|
|
||||||
charm prior to refactor. This is necessary to avoid LP 1451890.
|
|
||||||
"""
|
|
||||||
keys = []
|
|
||||||
if username == 'mysql':
|
|
||||||
log("Bad username '%s'" % (username), level=WARNING)
|
|
||||||
|
|
||||||
if username:
|
|
||||||
# IMPORTANT: *newer* format must be returned first
|
|
||||||
keys.append('mysql-%s.passwd' % (username))
|
|
||||||
keys.append('%s.passwd' % (username))
|
|
||||||
else:
|
|
||||||
keys.append('mysql.passwd')
|
|
||||||
|
|
||||||
for key in keys:
|
|
||||||
yield key
|
|
||||||
|
|
||||||
def get_mysql_password(self, username=None, password=None):
|
|
||||||
"""Retrieve, generate or store a mysql password for the provided
|
|
||||||
username using peer relation cluster."""
|
|
||||||
excludes = []
|
|
||||||
|
|
||||||
# First check peer relation.
|
|
||||||
try:
|
|
||||||
for key in self.passwd_keys(username):
|
|
||||||
_password = peer_retrieve(key)
|
|
||||||
if _password:
|
|
||||||
break
|
|
||||||
|
|
||||||
# If root password available don't update peer relation from local
|
|
||||||
if _password and not username:
|
|
||||||
excludes.append(self.root_passwd_file_template)
|
|
||||||
|
|
||||||
except ValueError:
|
|
||||||
# cluster relation is not yet started; use on-disk
|
|
||||||
_password = None
|
|
||||||
|
|
||||||
# If none available, generate new one
|
|
||||||
if not _password:
|
|
||||||
_password = self.get_mysql_password_on_disk(username, password)
|
|
||||||
|
|
||||||
# Put on wire if required
|
|
||||||
if self.migrate_passwd_to_peer_relation:
|
|
||||||
self.migrate_passwords_to_peer_relation(excludes=excludes)
|
|
||||||
|
|
||||||
return _password
|
|
||||||
|
|
||||||
def get_mysql_root_password(self, password=None):
|
|
||||||
"""Retrieve or generate mysql root password for service units."""
|
|
||||||
return self.get_mysql_password(username=None, password=password)
|
|
||||||
|
|
||||||
def normalize_address(self, hostname):
|
|
||||||
"""Ensure that address returned is an IP address (i.e. not fqdn)"""
|
|
||||||
if config_get('prefer-ipv6'):
|
|
||||||
# TODO: add support for ipv6 dns
|
|
||||||
return hostname
|
|
||||||
|
|
||||||
if hostname != unit_get('private-address'):
|
|
||||||
return get_host_ip(hostname, fallback=hostname)
|
|
||||||
|
|
||||||
# Otherwise assume localhost
|
|
||||||
return '127.0.0.1'
|
|
||||||
|
|
||||||
def get_allowed_units(self, database, username, relation_id=None):
|
|
||||||
"""Get list of units with access grants for database with username.
|
|
||||||
|
|
||||||
This is typically used to provide shared-db relations with a list of
|
|
||||||
which units have been granted access to the given database.
|
|
||||||
"""
|
|
||||||
self.connect(password=self.get_mysql_root_password())
|
|
||||||
allowed_units = set()
|
|
||||||
for unit in related_units(relation_id):
|
|
||||||
settings = relation_get(rid=relation_id, unit=unit)
|
|
||||||
# First check for setting with prefix, then without
|
|
||||||
for attr in ["%s_hostname" % (database), 'hostname']:
|
|
||||||
hosts = settings.get(attr, None)
|
|
||||||
if hosts:
|
|
||||||
break
|
|
||||||
|
|
||||||
if hosts:
|
|
||||||
# hostname can be json-encoded list of hostnames
|
|
||||||
try:
|
|
||||||
hosts = json.loads(hosts)
|
|
||||||
except ValueError:
|
|
||||||
hosts = [hosts]
|
|
||||||
else:
|
|
||||||
hosts = [settings['private-address']]
|
|
||||||
|
|
||||||
if hosts:
|
|
||||||
for host in hosts:
|
|
||||||
host = self.normalize_address(host)
|
|
||||||
if self.grant_exists(database, username, host):
|
|
||||||
log("Grant exists for host '%s' on db '%s'" %
|
|
||||||
(host, database), level=DEBUG)
|
|
||||||
if unit not in allowed_units:
|
|
||||||
allowed_units.add(unit)
|
|
||||||
else:
|
|
||||||
log("Grant does NOT exist for host '%s' on db '%s'" %
|
|
||||||
(host, database), level=DEBUG)
|
|
||||||
else:
|
|
||||||
log("No hosts found for grant check", level=INFO)
|
|
||||||
|
|
||||||
return allowed_units
|
|
||||||
|
|
||||||
def configure_db(self, hostname, database, username, admin=False):
|
|
||||||
"""Configure access to database for username from hostname."""
|
|
||||||
self.connect(password=self.get_mysql_root_password())
|
|
||||||
if not self.database_exists(database):
|
|
||||||
self.create_database(database)
|
|
||||||
|
|
||||||
remote_ip = self.normalize_address(hostname)
|
|
||||||
password = self.get_mysql_password(username)
|
|
||||||
if not self.grant_exists(database, username, remote_ip):
|
|
||||||
if not admin:
|
|
||||||
self.create_grant(database, username, remote_ip, password)
|
|
||||||
else:
|
|
||||||
self.create_admin_grant(username, remote_ip, password)
|
|
||||||
|
|
||||||
return password
|
|
||||||
|
|
||||||
|
|
||||||
class PerconaClusterHelper(object):
    """Helpers for sizing and rendering Percona cluster mysql configuration."""

    # Going for the biggest page size to avoid wasted bytes.
    # InnoDB page size is 16MB

    DEFAULT_PAGE_SIZE = 16 * 1024 * 1024
    # Fraction of total RAM given to the InnoDB buffer pool by default.
    DEFAULT_INNODB_BUFFER_FACTOR = 0.50

    def human_to_bytes(self, human):
        """Convert human readable configuration options to bytes.

        Accepts a plain digit string, a K/M/G/T suffixed size, or a
        percentage of total system RAM (page-size aligned).
        NOTE(review): a pure-digit input is returned as the original
        string, while suffixed inputs return int — callers appear to
        tolerate both; confirm before changing.
        """
        num_re = re.compile('^[0-9]+$')
        if num_re.match(human):
            return human

        factors = {
            'K': 1024,
            'M': 1048576,
            'G': 1073741824,
            'T': 1099511627776
        }
        modifier = human[-1]
        if modifier in factors:
            return int(human[:-1]) * factors[modifier]

        if modifier == '%':
            # Percentage of total RAM, capped on 32-bit systems and rounded
            # down to a whole number of InnoDB pages.
            total_ram = self.human_to_bytes(self.get_mem_total())
            if self.is_32bit_system() and total_ram > self.sys_mem_limit():
                total_ram = self.sys_mem_limit()
            factor = int(human[:-1]) * 0.01
            pctram = total_ram * factor
            return int(pctram - (pctram % self.DEFAULT_PAGE_SIZE))

        raise ValueError("Can only convert K,M,G, or T")

    def is_32bit_system(self):
        """Determine whether system is 32 or 64 bit."""
        try:
            # sys.maxsize is 2**31-1 on 32-bit CPython builds.
            return sys.maxsize < 2 ** 32
        except OverflowError:
            return False

    def sys_mem_limit(self):
        """Determine the default memory limit for the current service unit."""
        if platform.machine() in ['armv7l']:
            _mem_limit = self.human_to_bytes('2700M')  # experimentally determined
        else:
            # Limit for x86 based 32bit systems
            _mem_limit = self.human_to_bytes('4G')

        return _mem_limit

    def get_mem_total(self):
        """Calculate the total memory in the current service unit.

        Returns a human-readable size string such as '16384256K', or
        None if /proc/meminfo has no MemTotal line.
        """
        with open('/proc/meminfo') as meminfo_file:
            for line in meminfo_file:
                key, mem = line.split(':', 2)
                if key == 'MemTotal':
                    mtot, modifier = mem.strip().split(' ')
                    return '%s%s' % (mtot, modifier[0].upper())

    def parse_config(self):
        """Parse charm configuration and calculate values for config files."""
        config = config_get()
        mysql_config = {}
        if 'max-connections' in config:
            mysql_config['max_connections'] = config['max-connections']

        if 'wait-timeout' in config:
            mysql_config['wait_timeout'] = config['wait-timeout']

        if 'innodb-flush-log-at-trx-commit' in config:
            mysql_config['innodb_flush_log_at_trx_commit'] = config['innodb-flush-log-at-trx-commit']

        # Set a sane default key_buffer size
        mysql_config['key_buffer'] = self.human_to_bytes('32M')
        total_memory = self.human_to_bytes(self.get_mem_total())

        # 'dataset-size' is the deprecated predecessor of
        # 'innodb-buffer-pool-size'; the latter wins when both are set.
        dataset_bytes = config.get('dataset-size', None)
        innodb_buffer_pool_size = config.get('innodb-buffer-pool-size', None)

        if innodb_buffer_pool_size:
            innodb_buffer_pool_size = self.human_to_bytes(
                innodb_buffer_pool_size)
        elif dataset_bytes:
            log("Option 'dataset-size' has been deprecated, please use"
                "innodb_buffer_pool_size option instead", level="WARN")
            innodb_buffer_pool_size = self.human_to_bytes(
                dataset_bytes)
        else:
            innodb_buffer_pool_size = int(
                total_memory * self.DEFAULT_INNODB_BUFFER_FACTOR)

        # Warn (but do not fail) on over-allocation.
        if innodb_buffer_pool_size > total_memory:
            log("innodb_buffer_pool_size; {} is greater than system available memory:{}".format(
                innodb_buffer_pool_size,
                total_memory), level='WARN')

        mysql_config['innodb_buffer_pool_size'] = innodb_buffer_pool_size
        return mysql_config
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,82 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# This file is sourced from lp:openstack-charm-helpers
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config as config_get,
|
|
||||||
relation_get,
|
|
||||||
relation_ids,
|
|
||||||
related_units as relation_list,
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def get_cert(cn=None):
    """Return an (ssl_cert, ssl_key) pair for this service.

    Charm config wins; otherwise each identity-service relation unit is
    queried until both pieces are found. Either element may be None.
    """
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)

    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    cert = key = None
    if cn:
        ssl_cert_attr = 'ssl_cert_{}'.format(cn)
        ssl_key_attr = 'ssl_key_{}'.format(cn)
    else:
        ssl_cert_attr = 'ssl_cert'
        ssl_key_attr = 'ssl_key'
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            cert = cert or relation_get(ssl_cert_attr, rid=r_id, unit=unit)
            key = key or relation_get(ssl_key_attr, rid=r_id, unit=unit)
    return (cert, key)
|
|
||||||
|
|
||||||
|
|
||||||
def get_ca_cert():
    """Return the CA certificate from charm config, or the first one
    advertised on an identity-service relation; None if not found."""
    ca_cert = config_get('ssl_ca')
    if ca_cert is not None:
        return ca_cert

    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            if ca_cert is None:
                ca_cert = relation_get('ca_cert',
                                       rid=r_id, unit=unit)
    return ca_cert
|
|
||||||
|
|
||||||
|
|
||||||
def install_ca_cert(ca_cert):
    """Write ca_cert into the system CA store and refresh the trust DB.

    A falsy ca_cert is a no-op.
    """
    if not ca_cert:
        return
    crt_path = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
    with open(crt_path, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])
|
|
@ -1,316 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
#
|
|
||||||
# Copyright 2012 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# James Page <james.page@ubuntu.com>
|
|
||||||
# Adam Gandelman <adamg@ubuntu.com>
|
|
||||||
#
|
|
||||||
|
|
||||||
"""
|
|
||||||
Helpers for clustering and determining "cluster leadership" and other
|
|
||||||
clustering-related helpers.
|
|
||||||
"""
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import os
|
|
||||||
|
|
||||||
from socket import gethostname as get_unit_hostname
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
relation_ids,
|
|
||||||
related_units as relation_list,
|
|
||||||
relation_get,
|
|
||||||
config as config_get,
|
|
||||||
INFO,
|
|
||||||
ERROR,
|
|
||||||
WARNING,
|
|
||||||
unit_get,
|
|
||||||
is_leader as juju_is_leader
|
|
||||||
)
|
|
||||||
from charmhelpers.core.decorators import (
|
|
||||||
retry_on_exception,
|
|
||||||
)
|
|
||||||
from charmhelpers.core.strutils import (
|
|
||||||
bool_from_string,
|
|
||||||
)
|
|
||||||
|
|
||||||
# Name of the pacemaker resource representing the Designated Controller.
DC_RESOURCE_NAME = 'DC'


class HAIncompleteConfig(Exception):
    # Raised when required hacluster settings are missing from charm config.
    pass


class CRMResourceNotFound(Exception):
    # Raised when a queried CRM resource is reported as not running.
    pass


class CRMDCNotFound(Exception):
    # Raised when the pacemaker Designated Controller cannot be determined.
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    It relies on the following mechanisms to determine leadership:
    1. If juju is sufficiently new and leadership election is supported,
    the is_leader command will be used.
    2. If the charm is part of a corosync cluster, call corosync to
    determine leadership.
    3. If the charm is not part of a corosync cluster, the leader is
    determined as being "the alive unit with the lowest unit number". In
    other words, the oldest surviving unit.
    """
    try:
        return juju_is_leader()
    except NotImplementedError:
        # Older juju: fall through to corosync / unit-number heuristics.
        log('Juju leadership election feature not enabled'
            ', using fallback support',
            level=WARNING)

    if is_clustered():
        if not is_crm_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def is_clustered():
    """Return True if any unit on an 'ha' relation reports being clustered."""
    return any(
        relation_get('clustered', rid=r_id, unit=unit)
        for r_id in (relation_ids('ha') or [])
        for unit in (relation_list(r_id) or [])
    )
|
|
||||||
|
|
||||||
|
|
||||||
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller

    :returns: True if this host is the current DC.
    :raises CRMDCNotFound: when `crm status` fails or reports DC 'NONE'.
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # check_output returns bytes on Python 3; normalise to text.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))

    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')

    return False
|
|
||||||
|
|
||||||
|
|
||||||
@retry_on_exception(5, base_delay=2,
                    exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.

    NOTE(review): the `retry` parameter is unused here (retries come from
    the decorator); kept for API compatibility — confirm before removing.
    """
    if resource == DC_RESOURCE_NAME:
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # check_output returns bytes on Python 3; normalise to text.
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None

    if status and get_unit_hostname() in status:
        return True

    # A "NOT running" report triggers the decorator's retry path.
    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))

    return False
|
|
||||||
|
|
||||||
|
|
||||||
def is_leader(resource):
    """Deprecated wrapper kept for backwards compatibility."""
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_units(peer_relation="cluster"):
    """Return every unit participating in the given peer relation."""
    return [
        unit
        for r_id in (relation_ids(peer_relation) or [])
        for unit in (relation_list(r_id) or [])
    ]
|
|
||||||
|
|
||||||
|
|
||||||
def peer_ips(peer_relation='cluster', addr_key='private-address'):
    '''Return a dict of peers and their private-address'''
    return {
        unit: relation_get(addr_key, rid=r_id, unit=unit)
        for r_id in relation_ids(peer_relation)
        for unit in relation_list(r_id)
    }
|
|
||||||
|
|
||||||
|
|
||||||
def oldest_peer(peers):
    """Return True when this unit's number is <= every peer's unit number.

    Unit names have the form 'service/N'; the lowest N is the oldest
    surviving unit.
    """
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_unit_no for peer in peers)
|
|
||||||
|
|
||||||
|
|
||||||
def eligible_leader(resource):
    """Deprecated wrapper kept for backwards compatibility."""
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)
|
|
||||||
|
|
||||||
|
|
||||||
def https():
    """Determine whether enough data has been provided in configuration
    or relation data to configure HTTPS.

    returns: boolean
    """
    use_https = config_get('use-https')
    if use_https and bool_from_string(use_https):
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
            if all(v not in (None, '') for v in rel_state):
                return True
    return False
|
|
||||||
|
|
||||||
|
|
||||||
def determine_api_port(public_port, singlenode_mode=False):
    """Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the API service
    """
    offset = 0
    # haproxy in front (single node mode, or clustered peers) shifts by 10.
    if singlenode_mode or peer_units() or is_clustered():
        offset += 10
    # apache SSL termination in front shifts by a further 10.
    if https():
        offset += 10
    return public_port - offset
|
|
||||||
|
|
||||||
|
|
||||||
def determine_apache_port(public_port, singlenode_mode=False):
    """Determine correct apache listening port based on public IP +
    state of the cluster.

    public_port: int: standard public port for given service

    singlenode_mode: boolean: Shuffle ports when only a single unit is present

    returns: int: the correct listening port for the HAProxy service
    """
    offset = 0
    # haproxy in front (single node mode, or clustered peers) shifts by 10.
    if singlenode_mode or peer_units() or is_clustered():
        offset = 10
    return public_port - offset
|
|
||||||
|
|
||||||
|
|
||||||
def get_hacluster_config(exclude_keys=None):
    """Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

        ha-bindiface, ha-mcastport, vip

    param: exclude_keys: list of setting key(s) to be excluded.
    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    """
    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        if exclude_keys and setting in exclude_keys:
            continue

        conf[setting] = config_get(setting)

    # Collect unset keys with a plain comprehension rather than the previous
    # side-effecting list comprehension ([missing.append(...)]).
    missing = [s for s, v in six.iteritems(conf) if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
|
|
||||||
|
|
||||||
|
|
||||||
def canonical_url(configs, vip_setting='vip'):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs : OSTemplateRenderer: A config templating object to inspect for
               a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    """
    scheme = 'https' if 'https' in configs.complete_contexts() else 'http'
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)
|
|
@ -1,151 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# -*- coding: utf-8 -*-
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
|
|
||||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
|
||||||
|
|
||||||
from charmhelpers.fetch import (
|
|
||||||
apt_install,
|
|
||||||
apt_update,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
INFO,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
from netifaces import interfaces as network_interfaces
|
|
||||||
except ImportError:
|
|
||||||
apt_install('python-netifaces')
|
|
||||||
from netifaces import interfaces as network_interfaces
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.kernel import modprobe
|
|
||||||
|
|
||||||
# Kernel modules that must be loaded for infiniband support.
# BUG FIX: a missing comma previously fused "ib_uverbs" and "ib_umad" into
# the single bogus module name "ib_uverbsib_umad" via implicit string
# concatenation, so neither module was ever loaded.
REQUIRED_MODULES = (
    "mlx4_ib",
    "mlx4_en",
    "mlx4_core",
    "ib_ipath",
    "ib_mthca",
    "ib_srpt",
    "ib_srp",
    "ib_ucm",
    "ib_isert",
    "ib_iser",
    "ib_ipoib",
    "ib_cm",
    "ib_uverbs",
    "ib_umad",
    "ib_sa",
    "ib_mad",
    "ib_core",
    "ib_addr",
    "rdma_ucm",
)

# Userspace packages providing the IB diagnostic tooling.
REQUIRED_PACKAGES = (
    "ibutils",
    "infiniband-diags",
    "ibverbs-utils",
)

# Kernel drivers that mark an interface as IPOIB capable.
IPOIB_DRIVERS = (
    "ib_ipoib",
)

# Presence of this sysfs file indicates the IB MAD stack is loaded.
ABI_VERSION_FILE = "/sys/class/infiniband_mad/abi_version"
|
|
||||||
|
|
||||||
|
|
||||||
class DeviceInfo(object):
    """Anonymous attribute bag populated by device_info() with parsed
    ibstat fields."""
|
|
||||||
|
|
||||||
|
|
||||||
def install_packages():
    """Refresh the apt indexes and install the IB userspace tooling."""
    apt_update()
    apt_install(REQUIRED_PACKAGES, fatal=True)
|
|
||||||
|
|
||||||
|
|
||||||
def load_modules():
    """Modprobe each required IB kernel module, persisting across boots."""
    for module in REQUIRED_MODULES:
        modprobe(module, persist=True)
|
|
||||||
|
|
||||||
|
|
||||||
def is_enabled():
    """Check if infiniband is loaded on the system.

    The MAD ABI version file only exists once the IB stack is loaded.
    """
    return os.path.exists(ABI_VERSION_FILE)
|
|
||||||
|
|
||||||
|
|
||||||
def stat():
    """Return the full output of the ``ibstat`` diagnostic command."""
    return subprocess.check_output(["ibstat"])
|
|
||||||
|
|
||||||
|
|
||||||
def devices():
    """Return the list of IB enabled devices, one per ``ibstat -l`` line."""
    output = subprocess.check_output(['ibstat', '-l'])
    return output.splitlines()
|
|
||||||
|
|
||||||
|
|
||||||
def device_info(device):
    """Return a DeviceInfo with attributes parsed from ``ibstat <device> -s``.

    :param device: IB device name as reported by devices().
    :returns: DeviceInfo whose attributes (device_type, num_ports, fw_ver,
              hw_ver, node_guid, sys_guid) are set for every field present
              in the ibstat output.
    """
    status = subprocess.check_output([
        'ibstat', device, '-s']).splitlines()

    regexes = {
        "CA type: (.*)": "device_type",
        "Number of ports: (.*)": "num_ports",
        "Firmware version: (.*)": "fw_ver",
        "Hardware version: (.*)": "hw_ver",
        "Node GUID: (.*)": "node_guid",
        "System image GUID: (.*)": "sys_guid",
    }

    # Use a distinct name for the result instead of shadowing the `device`
    # parameter as the original code did.
    info = DeviceInfo()

    for line in status:
        for expression, key in regexes.items():
            matches = re.search(expression, line)
            if matches:
                setattr(info, key, matches.group(1))

    return info
|
|
||||||
|
|
||||||
|
|
||||||
def ipoib_interfaces():
    """Return a list of IPOIB capable ethernet interfaces.

    Interfaces whose driver cannot be determined (ethtool missing, no
    driver line, device errors) are skipped so the scan stays best-effort.
    """
    interfaces = []

    for interface in network_interfaces():
        try:
            output = subprocess.check_output(['ethtool', '-i', interface])
            driver = re.search('^driver: (.+)$', output, re.M).group(1)

            if driver in IPOIB_DRIVERS:
                interfaces.append(interface)
        except Exception:
            # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
            # still propagate; per-interface failures remain non-fatal.
            log("Skipping interface %s" % interface, level=INFO)
            continue

    return interfaces
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,456 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import glob
|
|
||||||
import re
|
|
||||||
import subprocess
|
|
||||||
import six
|
|
||||||
import socket
|
|
||||||
|
|
||||||
from functools import partial
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import unit_get
|
|
||||||
from charmhelpers.fetch import apt_install, apt_update
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
WARNING,
|
|
||||||
)
|
|
||||||
|
|
||||||
try:
|
|
||||||
import netifaces
|
|
||||||
except ImportError:
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install('python-netifaces', fatal=True)
|
|
||||||
import netifaces
|
|
||||||
|
|
||||||
try:
|
|
||||||
import netaddr
|
|
||||||
except ImportError:
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install('python-netaddr', fatal=True)
|
|
||||||
import netaddr
|
|
||||||
|
|
||||||
|
|
||||||
def _validate_cidr(network):
    """Raise ValueError unless ``network`` parses as CIDR notation."""
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
|
|
||||||
|
|
||||||
|
|
||||||
def no_ip_found_error_out(network):
    """Raise ValueError reporting that no IP was found in ``network``."""
    raise ValueError("No IP address found in network: %s" % network)
|
|
||||||
|
|
||||||
|
|
||||||
def get_address_in_network(network, fallback=None, fatal=False):
    """Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).
    """
    if network is None:
        if fallback is not None:
            return fallback

        if fatal:
            no_ip_found_error_out(network)
        else:
            return None

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            # NOTE: only the first IPv4 address of each interface is checked.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)

        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::/10) addresses.
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                       addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        no_ip_found_error_out(network)

    return None
|
|
||||||
|
|
||||||
|
|
||||||
def is_ipv6(address):
    """Determine whether provided address is IPv6 or not."""
    try:
        parsed = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
    return parsed.version == 6
|
|
||||||
|
|
||||||
|
|
||||||
def is_address_in_network(network, address):
    """Check whether *address* falls inside the CIDR range *network*.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    :raises ValueError: if either argument is malformed.
    """
    try:
        cidr = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)

    try:
        ip = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)

    # netaddr's containment test already yields a bool.
    return ip in cidr
|
|
||||||
|
|
||||||
|
|
||||||
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    # Walk every interface and compare the requested address against the
    # network each configured address implies.
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            # Only the first IPv4 entry of the interface is considered.
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            network = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            cidr = network.cidr
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]

        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                # Skip link-local (fe80::/10) entries.
                if not addr['addr'].startswith('fe80'):
                    network = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                           addr['netmask']))
                    cidr = network.cidr
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        elif key == 'netmask' and cidr:
                            # Return only the prefix length part of the CIDR.
                            return str(cidr).split('/')[1]
                        else:
                            return addr[key]

    return None
|
|
||||||
|
|
||||||
|
|
||||||
# Convenience wrappers around _get_for_address for the two common lookups.
get_iface_for_address = partial(_get_for_address, key='iface')

get_netmask_for_address = partial(_get_for_address, key='netmask')
|
|
||||||
|
|
||||||
|
|
||||||
def format_ipv6_addr(address):
    """Wrap an IPv6 address in '[]'; return None for anything else.

    This is required by most configuration files when specifying IPv6
    addresses.
    """
    if not is_ipv6(address):
        return None
    return "[%s]" % address
|
|
||||||
|
|
||||||
|
|
||||||
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """Return the assigned IP address for a given interface, if any.

    :param iface: interface name, or a device path such as '/dev/eth0'.
    :param inet_type: netifaces address family name, e.g. 'AF_INET'.
    :param inc_aliases: also match alias interfaces such as 'eth0:1'.
    :param fatal: raise instead of returning an empty list on failure.
    :param exc_list: addresses to exclude from the result.
    :returns: sorted list of address strings.
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]

    if not exc_list:
        exc_list = []

    try:
        # Resolve the name (e.g. 'AF_INET') to netifaces' numeric constant.
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception("Unknown inet type '%s'" % str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            # Alias interfaces are named '<iface>:<n>'.
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)

        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)

        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("Interface '%s' not found " % (iface))
            else:
                return []

        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                # Skip excluded addresses and entries with no 'addr' field.
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])

    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))

    return sorted(addresses)
|
|
||||||
|
|
||||||
|
|
||||||
# AF_INET specialisation of get_iface_addr.
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
|
|
||||||
|
|
||||||
|
|
||||||
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured.

    :param addr: IPv4 or IPv6 address to look for.
    :returns str: name of the interface carrying the address.
    :raises Exception: if no interface carries the address.
    """
    # Strips the '%<scope>' suffix from link-local addresses. Compiled once
    # here instead of on every iteration of the innermost loop (the original
    # re-compiled it per address).
    ll_key = re.compile("(.+)%.*")
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # link local
                raw = ll_key.match(_addr)
                if raw:
                    _addr = raw.group(1)

                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)
|
|
||||||
|
|
||||||
|
|
||||||
def sniff_iface(f):
    """Ensure decorated function is called with a value for iface.

    If no iface provided, inject net iface inferred from unit private address.
    """
    def iface_sniffer(*args, **kwargs):
        if kwargs.get('iface', None):
            return f(*args, **kwargs)
        kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))
        return f(*args, **kwargs)

    return iface_sniffer
|
|
||||||
|
|
||||||
|
|
||||||
@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.

    :raises Exception: if fatal is True and no qualifying address is found.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        # EUI-64 suffix extracted from a link-local address, if one exists.
        # Initialised to None so the dynamic check below cannot raise
        # NameError when no link-local address was seen (previously it was
        # only ever assigned inside the loop).
        eui_64_mac = None
        # Compiled once instead of per address.
        key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
        for addr in addresses:
            m = key_scope_link_local.match(addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd).decode('UTF-8')
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if (not dynamic_only or
                                    (eui_64_mac is not None and
                                     m.group(1).endswith(eui_64_mac))):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' does not have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
|
|
||||||
|
|
||||||
|
|
||||||
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """Return a list of bridges on the system."""
    pattern = "%s/*/bridge" % vnic_dir
    bridges = []
    for path in glob.glob(pattern):
        # '<vnic_dir>/<name>/bridge' -> '<name>'
        bridges.append(path.replace(vnic_dir, '').split('/')[1])
    return bridges
|
|
||||||
|
|
||||||
|
|
||||||
def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """Return a list of nics comprising a given bridge on the system."""
    nics = []
    for path in glob.glob("%s/%s/brif/*" % (vnic_dir, bridge)):
        # Each entry under brif/ is named after the member nic.
        nics.append(path.split('/')[-1])
    return nics
|
|
||||||
|
|
||||||
|
|
||||||
def is_bridge_member(nic):
    """Check if a given nic is a member of a bridge."""
    return any(nic in get_bridge_nics(bridge) for bridge in get_bridges())
|
|
||||||
|
|
||||||
|
|
||||||
def is_ip(address):
    """Return True if *address* is a valid IP address.

    NOTE(review): socket.inet_aton only understands IPv4, so IPv6
    literals return False here - confirm that is the intended contract.
    """
    try:
        socket.inet_aton(address)
    except socket.error:
        return False
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def ns_query(address):
    """Resolve *address* via DNS and return the first answer as a string.

    Accepts either a dns.name.Name (reverse/PTR lookup) or a plain string
    (forward/A lookup); returns None for any other type or when there is
    no answer.
    """
    try:
        import dns.resolver
    except ImportError:
        # dnspython is not guaranteed to be present; install on demand.
        apt_install('python-dnspython')
        import dns.resolver

    if isinstance(address, dns.name.Name):
        rtype = 'PTR'
    elif isinstance(address, six.string_types):
        rtype = 'A'
    else:
        return None

    answers = dns.resolver.query(address, rtype)
    if answers:
        return str(answers[0])
    return None
|
|
||||||
|
|
||||||
|
|
||||||
def get_host_ip(hostname, fallback=None):
    """Resolve the IP for a given hostname, or return the input if it is
    already an IP.

    :param hostname: hostname or IP address.
    :param fallback: value to return when resolution fails.
    :returns: resolved IP address string, or *fallback* on failure.
    """
    if is_ip(hostname):
        return hostname

    ip_addr = ns_query(hostname)
    if not ip_addr:
        try:
            ip_addr = socket.gethostbyname(hostname)
        except Exception:
            # Was a bare 'except:', which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            log("Failed to resolve hostname '%s'" % (hostname),
                level=WARNING)
            return fallback
    return ip_addr
|
|
||||||
|
|
||||||
|
|
||||||
def get_hostname(address, fqdn=True):
    """Resolve the hostname for a given IP, or return the input if it is
    already a hostname.

    :param address: IP address or hostname.
    :param fqdn: if True return the fully-qualified name, otherwise only
        the short host part.
    :returns: hostname string, or None if reverse resolution fails.
    """
    if is_ip(address):
        try:
            import dns.reversename
        except ImportError:
            # dnspython is not guaranteed to be present; install on demand.
            apt_install("python-dnspython")
            import dns.reversename

        rev = dns.reversename.from_address(address)
        result = ns_query(rev)

        if not result:
            try:
                result = socket.gethostbyaddr(address)[0]
            except Exception:
                # Was a bare 'except:', which also swallowed SystemExit and
                # KeyboardInterrupt; narrowed to Exception.
                return None
    else:
        result = address

    if fqdn:
        # strip trailing .
        if result.endswith('.'):
            return result[:-1]
        else:
            return result
    else:
        return result.split('.')[0]
|
|
@ -1,96 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
''' Helpers for interacting with OpenvSwitch '''
|
|
||||||
import subprocess
|
|
||||||
import os
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log, WARNING
|
|
||||||
)
|
|
||||||
from charmhelpers.core.host import (
|
|
||||||
service
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def add_bridge(name):
    ''' Add the named bridge to openvswitch '''
    log('Creating bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--may-exist", "add-br", name]
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def del_bridge(name):
    ''' Delete the named bridge from openvswitch '''
    log('Deleting bridge {}'.format(name))
    cmd = ["ovs-vsctl", "--", "--if-exists", "del-br", name]
    subprocess.check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def add_bridge_port(name, port, promisc=False):
    ''' Add a port to the named openvswitch bridge '''
    log('Adding port {} to bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--may-exist", "add-port",
                           name, port])
    subprocess.check_call(["ip", "link", "set", port, "up"])
    # Promiscuous mode is toggled explicitly either way.
    mode = "on" if promisc else "off"
    subprocess.check_call(["ip", "link", "set", port, "promisc", mode])
|
|
||||||
|
|
||||||
|
|
||||||
def del_bridge_port(name, port):
    ''' Delete a port from the named openvswitch bridge '''
    log('Deleting port {} from bridge {}'.format(port, name))
    subprocess.check_call(["ovs-vsctl", "--", "--if-exists", "del-port",
                           name, port])
    # Bring the link down and drop promiscuous mode.
    for tail in (["down"], ["promisc", "off"]):
        subprocess.check_call(["ip", "link", "set", port] + tail)
|
|
||||||
|
|
||||||
|
|
||||||
def set_manager(manager):
    ''' Set the controller for the local openvswitch '''
    log('Setting manager for local ovs to {}'.format(manager))
    target = 'ssl:{}'.format(manager)
    subprocess.check_call(['ovs-vsctl', 'set-manager', target])
|
|
||||||
|
|
||||||
|
|
||||||
# Location of the client certificate installed by openvswitch.
CERT_PATH = '/etc/openvswitch/ovsclient-cert.pem'
|
|
||||||
|
|
||||||
|
|
||||||
def get_certificate():
    ''' Read openvswitch certificate from disk '''
    if not os.path.exists(CERT_PATH):
        log('Certificate not found', level=WARNING)
        return None

    log('Reading ovs certificate from {}'.format(CERT_PATH))
    with open(CERT_PATH, 'r') as cert:
        content = cert.read()

    begin_marker = "-----BEGIN CERTIFICATE-----"
    end_marker = "-----END CERTIFICATE-----"
    start = content.find(begin_marker)
    end = content.rfind(end_marker)
    if start == -1 or end == -1:
        raise RuntimeError("Certificate does not contain valid begin"
                           " and end markers.")
    # Trim anything outside the PEM markers.
    return content[start:(end + len(end_marker))]
|
|
||||||
|
|
||||||
|
|
||||||
def full_restart():
    ''' Full restart and reload of openvswitch '''
    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
        action, svc = 'start', 'openvswitch-force-reload-kmod'
    else:
        action, svc = 'force-reload-kmod', 'openvswitch-switch'
    service(action, svc)
|
|
@ -1,318 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""
|
|
||||||
This module contains helpers to add and remove ufw rules.
|
|
||||||
|
|
||||||
Examples:
|
|
||||||
|
|
||||||
- open SSH port for subnet 10.0.3.0/24:
|
|
||||||
|
|
||||||
>>> from charmhelpers.contrib.network import ufw
|
|
||||||
>>> ufw.enable()
|
|
||||||
>>> ufw.grant_access(src='10.0.3.0/24', dst='any', port='22', proto='tcp')
|
|
||||||
|
|
||||||
- open service by name as defined in /etc/services:
|
|
||||||
|
|
||||||
>>> from charmhelpers.contrib.network import ufw
|
|
||||||
>>> ufw.enable()
|
|
||||||
>>> ufw.service('ssh', 'open')
|
|
||||||
|
|
||||||
- close service by port number:
|
|
||||||
|
|
||||||
>>> from charmhelpers.contrib.network import ufw
|
|
||||||
>>> ufw.enable()
|
|
||||||
>>> ufw.service('4949', 'close') # munin
|
|
||||||
"""
|
|
||||||
import re
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
from charmhelpers.core.kernel import modprobe, is_module_loaded
|
|
||||||
|
|
||||||
__author__ = "Felipe Reyes <felipe.reyes@canonical.com>"
|
|
||||||
|
|
||||||
|
|
||||||
class UFWError(Exception):
    """Base exception for errors raised by the ufw helpers."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
class UFWIPv6Error(UFWError):
    """Raised when the host's IPv6 firewall support is broken."""
    pass
|
|
||||||
|
|
||||||
|
|
||||||
def is_enabled():
    """
    Check if `ufw` is enabled

    :returns: True if ufw is enabled
    """
    # LANG is pinned so the status text is predictable.
    output = subprocess.check_output(['ufw', 'status'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    # 'Status: active' on its own line means the firewall is up.
    return bool(re.findall(r'^Status: active\n', output, re.M))
|
|
||||||
|
|
||||||
|
|
||||||
def is_ipv6_ok(soft_fail=False):
    """
    Check if IPv6 support is present and ip6tables functional

    :param soft_fail: If set to True and IPv6 support is broken, then reports
                      that the host doesn't have IPv6 support, otherwise a
                      UFWIPv6Error exception is raised.
    :returns: True if IPv6 is working, False otherwise
    """
    # No IPv6 support on this machine at all.
    if not os.path.isdir('/proc/sys/net/ipv6'):
        return False

    # ip6tables kernel module already loaded?
    if is_module_loaded('ip6_tables'):
        return True

    # ip6tables support isn't complete, let's try to load it
    try:
        modprobe('ip6_tables')
        # great, we can load the module
        return True
    except subprocess.CalledProcessError as ex:
        hookenv.log("Couldn't load ip6_tables module: %s" % ex.output,
                    level="WARN")
        # we are in a world where ip6tables isn't working
        if soft_fail:
            # so we inform that the machine doesn't have IPv6
            return False
        raise UFWIPv6Error("IPv6 firewall support broken")
|
|
||||||
|
|
||||||
|
|
||||||
def disable_ipv6():
    """
    Disable ufw IPv6 support in /etc/default/ufw
    """
    cmd = ['sed', '-i', 's/IPV6=.*/IPV6=no/g', '/etc/default/ufw']
    if subprocess.call(cmd) == 0:
        hookenv.log('IPv6 support in ufw disabled', level='INFO')
    else:
        hookenv.log("Couldn't disable IPv6 support in ufw", level="ERROR")
        raise UFWError("Couldn't disable IPv6 support in ufw")
|
|
||||||
|
|
||||||
|
|
||||||
def enable(soft_fail=False):
    """
    Enable ufw

    :param soft_fail: If set to True silently disables IPv6 support in ufw,
                      otherwise a UFWIPv6Error exception is raised when IP6
                      support is broken.
    :returns: True if ufw is successfully enabled
    """
    # Nothing to do if the firewall is already up.
    if is_enabled():
        return True

    if not is_ipv6_ok(soft_fail):
        disable_ipv6()

    # LANG is pinned so the output matching below is locale-independent.
    output = subprocess.check_output(['ufw', 'enable'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})

    m = re.findall('^Firewall is active and enabled on system startup\n',
                   output, re.M)
    hookenv.log(output, level='DEBUG')

    if len(m) == 0:
        hookenv.log("ufw couldn't be enabled", level='WARN')
        return False
    else:
        hookenv.log("ufw enabled", level='INFO')
        return True
|
|
||||||
|
|
||||||
|
|
||||||
def disable():
    """
    Disable ufw

    :returns: True if ufw is successfully disabled
    """
    if not is_enabled():
        return True

    # LANG is pinned so the output matching below is locale-independent.
    output = subprocess.check_output(['ufw', 'disable'],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})
    hookenv.log(output, level='DEBUG')

    stopped = re.findall(r'^Firewall stopped and disabled on system startup\n',
                         output, re.M)
    if not stopped:
        hookenv.log("ufw couldn't be disabled", level='WARN')
        return False
    hookenv.log("ufw disabled", level='INFO')
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def default_policy(policy='deny', direction='incoming'):
    """
    Changes the default policy for traffic `direction`

    :param policy: allow, deny or reject
    :param direction: traffic direction, possible values: incoming, outgoing,
                      routed
    """
    if policy not in ('allow', 'deny', 'reject'):
        raise UFWError(('Unknown policy %s, valid values: '
                        'allow, deny, reject') % policy)

    if direction not in ('incoming', 'outgoing', 'routed'):
        raise UFWError(('Unknown direction %s, valid values: '
                        'incoming, outgoing, routed') % direction)

    output = subprocess.check_output(['ufw', 'default', policy, direction],
                                     universal_newlines=True,
                                     env={'LANG': 'en_US',
                                          'PATH': os.environ['PATH']})
    hookenv.log(output, level='DEBUG')

    changed = re.findall("^Default %s policy changed to '%s'\n" % (direction,
                                                                   policy),
                         output, re.M)
    if not changed:
        hookenv.log("ufw couldn't change the default policy to %s for %s"
                    % (policy, direction), level='WARN')
        return False
    hookenv.log("ufw default policy for %s changed to %s"
                % (direction, policy), level='INFO')
    return True
|
|
||||||
|
|
||||||
|
|
||||||
def modify_access(src, dst='any', port=None, proto=None, action='allow',
                  index=None):
    """
    Grant access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    :param action: `allow` or `delete`
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    if not is_enabled():
        hookenv.log('ufw is disabled, skipping modify_access()', level='WARN')
        return

    if action == 'delete':
        cmd = ['ufw', 'delete', 'allow']
    elif index is not None:
        cmd = ['ufw', 'insert', str(index), action]
    else:
        cmd = ['ufw', action]

    if src is not None:
        cmd += ['from', src]

    if dst is not None:
        cmd += ['to', dst]

    if port is not None:
        cmd += ['port', str(port)]

    if proto is not None:
        cmd += ['proto', proto]

    hookenv.log('ufw {}: {}'.format(action, ' '.join(cmd)), level='DEBUG')
    # Bug fix: stderr must also be piped; previously only stdout was
    # captured, so 'stderr' from communicate() was always None and the
    # ERROR log below logged nothing useful.
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    (stdout, stderr) = p.communicate()

    hookenv.log(stdout, level='INFO')

    if p.returncode != 0:
        hookenv.log(stderr, level='ERROR')
        hookenv.log('Error running: {}, exit code: {}'.format(' '.join(cmd),
                                                              p.returncode),
                    level='ERROR')
|
|
||||||
|
|
||||||
|
|
||||||
def grant_access(src, dst='any', port=None, proto=None, index=None):
    """
    Grant access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    :param index: if different from None the rule is inserted at the given
                  `index`.
    """
    # Thin wrapper around modify_access with the 'allow' action fixed.
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='allow', index=index)
|
|
||||||
|
|
||||||
|
|
||||||
def revoke_access(src, dst='any', port=None, proto=None):
    """
    Revoke access to an address or subnet

    :param src: address (e.g. 192.168.1.234) or subnet
                (e.g. 192.168.1.0/24).
    :param dst: destiny of the connection, if the machine has multiple IPs and
                connections to only one of those have to accepted this is the
                field has to be set.
    :param port: destiny port
    :param proto: protocol (tcp or udp)
    """
    # Thin wrapper around modify_access with the 'delete' action fixed.
    return modify_access(src, dst=dst, port=port, proto=proto,
                         action='delete')
|
|
||||||
|
|
||||||
|
|
||||||
def service(name, action):
    """
    Open/close access to a service

    :param name: could be a service name defined in `/etc/services` or a port
                 number.
    :param action: `open` or `close`
    :raises UFWError: if *action* is neither 'open' nor 'close'.
    """
    if action == 'open':
        subprocess.check_output(['ufw', 'allow', str(name)],
                                universal_newlines=True)
    elif action == 'close':
        subprocess.check_output(['ufw', 'delete', 'allow', str(name)],
                                universal_newlines=True)
    else:
        # Bug fix: the message previously said "use 'allow' or 'delete'",
        # but the actions this function actually accepts are 'open' and
        # 'close'.
        raise UFWError(("'{}' not supported, use 'open' "
                        "or 'close'").format(action))
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,33 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
''' Helper for managing alternatives for file conflict resolution '''
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import shutil
|
|
||||||
import os
|
|
||||||
|
|
||||||
|
|
||||||
def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    if os.path.exists(target) and not os.path.islink(target):
        # Move existing file/directory away before installing
        shutil.move(target, '{}.bak'.format(target))
    subprocess.check_call([
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority)
    ])
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,297 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import logging
|
|
||||||
import re
|
|
||||||
import sys
|
|
||||||
import six
|
|
||||||
from collections import OrderedDict
|
|
||||||
from charmhelpers.contrib.amulet.deployment import (
|
|
||||||
AmuletDeployment
|
|
||||||
)
|
|
||||||
|
|
||||||
DEBUG = logging.DEBUG
|
|
||||||
ERROR = logging.ERROR
|
|
||||||
|
|
||||||
|
|
||||||
class OpenStackAmuletDeployment(AmuletDeployment):
|
|
||||||
"""OpenStack amulet deployment.
|
|
||||||
|
|
||||||
This class inherits from AmuletDeployment and has additional support
|
|
||||||
that is specifically for use by OpenStack charms.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, series=None, openstack=None, source=None,
|
|
||||||
stable=True, log_level=DEBUG):
|
|
||||||
"""Initialize the deployment environment."""
|
|
||||||
super(OpenStackAmuletDeployment, self).__init__(series)
|
|
||||||
self.log = self.get_logger(level=log_level)
|
|
||||||
self.log.info('OpenStackAmuletDeployment: init')
|
|
||||||
self.openstack = openstack
|
|
||||||
self.source = source
|
|
||||||
self.stable = stable
|
|
||||||
# Note(coreycb): this needs to be changed when new next branches come
|
|
||||||
# out.
|
|
||||||
self.current_next = "trusty"
|
|
||||||
|
|
||||||
def get_logger(self, name="deployment-logger", level=logging.DEBUG):
|
|
||||||
"""Get a logger object that will log to stdout."""
|
|
||||||
log = logging
|
|
||||||
logger = log.getLogger(name)
|
|
||||||
fmt = log.Formatter("%(asctime)s %(funcName)s "
|
|
||||||
"%(levelname)s: %(message)s")
|
|
||||||
|
|
||||||
handler = log.StreamHandler(stream=sys.stdout)
|
|
||||||
handler.setLevel(level)
|
|
||||||
handler.setFormatter(fmt)
|
|
||||||
|
|
||||||
logger.addHandler(handler)
|
|
||||||
logger.setLevel(level)
|
|
||||||
|
|
||||||
return logger
|
|
||||||
|
|
||||||
def _determine_branch_locations(self, other_services):
|
|
||||||
"""Determine the branch locations for the other services.
|
|
||||||
|
|
||||||
Determine if the local branch being tested is derived from its
|
|
||||||
stable or next (dev) branch, and based on this, use the corresonding
|
|
||||||
stable or next branches for the other_services."""
|
|
||||||
|
|
||||||
self.log.info('OpenStackAmuletDeployment: determine branch locations')
|
|
||||||
|
|
||||||
# Charms outside the lp:~openstack-charmers namespace
|
|
||||||
base_charms = ['mysql', 'mongodb', 'nrpe']
|
|
||||||
|
|
||||||
# Force these charms to current series even when using an older series.
|
|
||||||
# ie. Use trusty/nrpe even when series is precise, as the P charm
|
|
||||||
# does not possess the necessary external master config and hooks.
|
|
||||||
force_series_current = ['nrpe']
|
|
||||||
|
|
||||||
if self.series in ['precise', 'trusty']:
|
|
||||||
base_series = self.series
|
|
||||||
else:
|
|
||||||
base_series = self.current_next
|
|
||||||
|
|
||||||
for svc in other_services:
|
|
||||||
if svc['name'] in force_series_current:
|
|
||||||
base_series = self.current_next
|
|
||||||
# If a location has been explicitly set, use it
|
|
||||||
if svc.get('location'):
|
|
||||||
continue
|
|
||||||
if self.stable:
|
|
||||||
temp = 'lp:charms/{}/{}'
|
|
||||||
svc['location'] = temp.format(base_series,
|
|
||||||
svc['name'])
|
|
||||||
else:
|
|
||||||
if svc['name'] in base_charms:
|
|
||||||
temp = 'lp:charms/{}/{}'
|
|
||||||
svc['location'] = temp.format(base_series,
|
|
||||||
svc['name'])
|
|
||||||
else:
|
|
||||||
temp = 'lp:~openstack-charmers/charms/{}/{}/next'
|
|
||||||
svc['location'] = temp.format(self.current_next,
|
|
||||||
svc['name'])
|
|
||||||
|
|
||||||
return other_services
|
|
||||||
|
|
||||||
def _add_services(self, this_service, other_services):
|
|
||||||
"""Add services to the deployment and set openstack-origin/source."""
|
|
||||||
self.log.info('OpenStackAmuletDeployment: adding services')
|
|
||||||
|
|
||||||
other_services = self._determine_branch_locations(other_services)
|
|
||||||
|
|
||||||
super(OpenStackAmuletDeployment, self)._add_services(this_service,
|
|
||||||
other_services)
|
|
||||||
|
|
||||||
services = other_services
|
|
||||||
services.append(this_service)
|
|
||||||
|
|
||||||
# Charms which should use the source config option
|
|
||||||
use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
|
|
||||||
'ceph-osd', 'ceph-radosgw']
|
|
||||||
|
|
||||||
# Charms which can not use openstack-origin, ie. many subordinates
|
|
||||||
no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',
|
|
||||||
'openvswitch-odl', 'neutron-api-odl', 'odl-controller']
|
|
||||||
|
|
||||||
if self.openstack:
|
|
||||||
for svc in services:
|
|
||||||
if svc['name'] not in use_source + no_origin:
|
|
||||||
config = {'openstack-origin': self.openstack}
|
|
||||||
self.d.configure(svc['name'], config)
|
|
||||||
|
|
||||||
if self.source:
|
|
||||||
for svc in services:
|
|
||||||
if svc['name'] in use_source and svc['name'] not in no_origin:
|
|
||||||
config = {'source': self.source}
|
|
||||||
self.d.configure(svc['name'], config)
|
|
||||||
|
|
||||||
def _configure_services(self, configs):
|
|
||||||
"""Configure all of the services."""
|
|
||||||
self.log.info('OpenStackAmuletDeployment: configure services')
|
|
||||||
for service, config in six.iteritems(configs):
|
|
||||||
self.d.configure(service, config)
|
|
||||||
|
|
||||||
def _auto_wait_for_status(self, message=None, exclude_services=None,
|
|
||||||
include_only=None, timeout=1800):
|
|
||||||
"""Wait for all units to have a specific extended status, except
|
|
||||||
for any defined as excluded. Unless specified via message, any
|
|
||||||
status containing any case of 'ready' will be considered a match.
|
|
||||||
|
|
||||||
Examples of message usage:
|
|
||||||
|
|
||||||
Wait for all unit status to CONTAIN any case of 'ready' or 'ok':
|
|
||||||
message = re.compile('.*ready.*|.*ok.*', re.IGNORECASE)
|
|
||||||
|
|
||||||
Wait for all units to reach this status (exact match):
|
|
||||||
message = re.compile('^Unit is ready and clustered$')
|
|
||||||
|
|
||||||
Wait for all units to reach any one of these (exact match):
|
|
||||||
message = re.compile('Unit is ready|OK|Ready')
|
|
||||||
|
|
||||||
Wait for at least one unit to reach this status (exact match):
|
|
||||||
message = {'ready'}
|
|
||||||
|
|
||||||
See Amulet's sentry.wait_for_messages() for message usage detail.
|
|
||||||
https://github.com/juju/amulet/blob/master/amulet/sentry.py
|
|
||||||
|
|
||||||
:param message: Expected status match
|
|
||||||
:param exclude_services: List of juju service names to ignore,
|
|
||||||
not to be used in conjuction with include_only.
|
|
||||||
:param include_only: List of juju service names to exclusively check,
|
|
||||||
not to be used in conjuction with exclude_services.
|
|
||||||
:param timeout: Maximum time in seconds to wait for status match
|
|
||||||
:returns: None. Raises if timeout is hit.
|
|
||||||
"""
|
|
||||||
self.log.info('Waiting for extended status on units...')
|
|
||||||
|
|
||||||
all_services = self.d.services.keys()
|
|
||||||
|
|
||||||
if exclude_services and include_only:
|
|
||||||
raise ValueError('exclude_services can not be used '
|
|
||||||
'with include_only')
|
|
||||||
|
|
||||||
if message:
|
|
||||||
if isinstance(message, re._pattern_type):
|
|
||||||
match = message.pattern
|
|
||||||
else:
|
|
||||||
match = message
|
|
||||||
|
|
||||||
self.log.debug('Custom extended status wait match: '
|
|
||||||
'{}'.format(match))
|
|
||||||
else:
|
|
||||||
self.log.debug('Default extended status wait match: contains '
|
|
||||||
'READY (case-insensitive)')
|
|
||||||
message = re.compile('.*ready.*', re.IGNORECASE)
|
|
||||||
|
|
||||||
if exclude_services:
|
|
||||||
self.log.debug('Excluding services from extended status match: '
|
|
||||||
'{}'.format(exclude_services))
|
|
||||||
else:
|
|
||||||
exclude_services = []
|
|
||||||
|
|
||||||
if include_only:
|
|
||||||
services = include_only
|
|
||||||
else:
|
|
||||||
services = list(set(all_services) - set(exclude_services))
|
|
||||||
|
|
||||||
self.log.debug('Waiting up to {}s for extended status on services: '
|
|
||||||
'{}'.format(timeout, services))
|
|
||||||
service_messages = {service: message for service in services}
|
|
||||||
self.d.sentry.wait_for_messages(service_messages, timeout=timeout)
|
|
||||||
self.log.info('OK')
|
|
||||||
|
|
||||||
def _get_openstack_release(self):
|
|
||||||
"""Get openstack release.
|
|
||||||
|
|
||||||
Return an integer representing the enum value of the openstack
|
|
||||||
release.
|
|
||||||
"""
|
|
||||||
# Must be ordered by OpenStack release (not by Ubuntu release):
|
|
||||||
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
|
|
||||||
self.precise_havana, self.precise_icehouse,
|
|
||||||
self.trusty_icehouse, self.trusty_juno, self.utopic_juno,
|
|
||||||
self.trusty_kilo, self.vivid_kilo, self.trusty_liberty,
|
|
||||||
self.wily_liberty) = range(12)
|
|
||||||
|
|
||||||
releases = {
|
|
||||||
('precise', None): self.precise_essex,
|
|
||||||
('precise', 'cloud:precise-folsom'): self.precise_folsom,
|
|
||||||
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
|
|
||||||
('precise', 'cloud:precise-havana'): self.precise_havana,
|
|
||||||
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
|
|
||||||
('trusty', None): self.trusty_icehouse,
|
|
||||||
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
|
|
||||||
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo,
|
|
||||||
('trusty', 'cloud:trusty-liberty'): self.trusty_liberty,
|
|
||||||
('utopic', None): self.utopic_juno,
|
|
||||||
('vivid', None): self.vivid_kilo,
|
|
||||||
('wily', None): self.wily_liberty}
|
|
||||||
return releases[(self.series, self.openstack)]
|
|
||||||
|
|
||||||
def _get_openstack_release_string(self):
|
|
||||||
"""Get openstack release string.
|
|
||||||
|
|
||||||
Return a string representing the openstack release.
|
|
||||||
"""
|
|
||||||
releases = OrderedDict([
|
|
||||||
('precise', 'essex'),
|
|
||||||
('quantal', 'folsom'),
|
|
||||||
('raring', 'grizzly'),
|
|
||||||
('saucy', 'havana'),
|
|
||||||
('trusty', 'icehouse'),
|
|
||||||
('utopic', 'juno'),
|
|
||||||
('vivid', 'kilo'),
|
|
||||||
('wily', 'liberty'),
|
|
||||||
])
|
|
||||||
if self.openstack:
|
|
||||||
os_origin = self.openstack.split(':')[1]
|
|
||||||
return os_origin.split('%s-' % self.series)[1].split('/')[0]
|
|
||||||
else:
|
|
||||||
return releases[self.series]
|
|
||||||
|
|
||||||
def get_ceph_expected_pools(self, radosgw=False):
|
|
||||||
"""Return a list of expected ceph pools in a ceph + cinder + glance
|
|
||||||
test scenario, based on OpenStack release and whether ceph radosgw
|
|
||||||
is flagged as present or not."""
|
|
||||||
|
|
||||||
if self._get_openstack_release() >= self.trusty_kilo:
|
|
||||||
# Kilo or later
|
|
||||||
pools = [
|
|
||||||
'rbd',
|
|
||||||
'cinder',
|
|
||||||
'glance'
|
|
||||||
]
|
|
||||||
else:
|
|
||||||
# Juno or earlier
|
|
||||||
pools = [
|
|
||||||
'data',
|
|
||||||
'metadata',
|
|
||||||
'rbd',
|
|
||||||
'cinder',
|
|
||||||
'glance'
|
|
||||||
]
|
|
||||||
|
|
||||||
if radosgw:
|
|
||||||
pools.extend([
|
|
||||||
'.rgw.root',
|
|
||||||
'.rgw.control',
|
|
||||||
'.rgw',
|
|
||||||
'.rgw.gc',
|
|
||||||
'.users.uid'
|
|
||||||
])
|
|
||||||
|
|
||||||
return pools
|
|
@ -1,985 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import amulet
|
|
||||||
import json
|
|
||||||
import logging
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import six
|
|
||||||
import time
|
|
||||||
import urllib
|
|
||||||
|
|
||||||
import cinderclient.v1.client as cinder_client
|
|
||||||
import glanceclient.v1.client as glance_client
|
|
||||||
import heatclient.v1.client as heat_client
|
|
||||||
import keystoneclient.v2_0 as keystone_client
|
|
||||||
import novaclient.v1_1.client as nova_client
|
|
||||||
import pika
|
|
||||||
import swiftclient
|
|
||||||
|
|
||||||
from charmhelpers.contrib.amulet.utils import (
|
|
||||||
AmuletUtils
|
|
||||||
)
|
|
||||||
|
|
||||||
DEBUG = logging.DEBUG
|
|
||||||
ERROR = logging.ERROR
|
|
||||||
|
|
||||||
|
|
||||||
class OpenStackAmuletUtils(AmuletUtils):
|
|
||||||
"""OpenStack amulet utilities.
|
|
||||||
|
|
||||||
This class inherits from AmuletUtils and has additional support
|
|
||||||
that is specifically for use by OpenStack charm tests.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, log_level=ERROR):
|
|
||||||
"""Initialize the deployment environment."""
|
|
||||||
super(OpenStackAmuletUtils, self).__init__(log_level)
|
|
||||||
|
|
||||||
def validate_endpoint_data(self, endpoints, admin_port, internal_port,
|
|
||||||
public_port, expected):
|
|
||||||
"""Validate endpoint data.
|
|
||||||
|
|
||||||
Validate actual endpoint data vs expected endpoint data. The ports
|
|
||||||
are used to find the matching endpoint.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating endpoint data...')
|
|
||||||
self.log.debug('actual: {}'.format(repr(endpoints)))
|
|
||||||
found = False
|
|
||||||
for ep in endpoints:
|
|
||||||
self.log.debug('endpoint: {}'.format(repr(ep)))
|
|
||||||
if (admin_port in ep.adminurl and
|
|
||||||
internal_port in ep.internalurl and
|
|
||||||
public_port in ep.publicurl):
|
|
||||||
found = True
|
|
||||||
actual = {'id': ep.id,
|
|
||||||
'region': ep.region,
|
|
||||||
'adminurl': ep.adminurl,
|
|
||||||
'internalurl': ep.internalurl,
|
|
||||||
'publicurl': ep.publicurl,
|
|
||||||
'service_id': ep.service_id}
|
|
||||||
ret = self._validate_dict_data(expected, actual)
|
|
||||||
if ret:
|
|
||||||
return 'unexpected endpoint data - {}'.format(ret)
|
|
||||||
|
|
||||||
if not found:
|
|
||||||
return 'endpoint not found'
|
|
||||||
|
|
||||||
def validate_svc_catalog_endpoint_data(self, expected, actual):
|
|
||||||
"""Validate service catalog endpoint data.
|
|
||||||
|
|
||||||
Validate a list of actual service catalog endpoints vs a list of
|
|
||||||
expected service catalog endpoints.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating service catalog endpoint data...')
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for k, v in six.iteritems(expected):
|
|
||||||
if k in actual:
|
|
||||||
ret = self._validate_dict_data(expected[k][0], actual[k][0])
|
|
||||||
if ret:
|
|
||||||
return self.endpoint_error(k, ret)
|
|
||||||
else:
|
|
||||||
return "endpoint {} does not exist".format(k)
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_tenant_data(self, expected, actual):
|
|
||||||
"""Validate tenant data.
|
|
||||||
|
|
||||||
Validate a list of actual tenant data vs list of expected tenant
|
|
||||||
data.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating tenant data...')
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for e in expected:
|
|
||||||
found = False
|
|
||||||
for act in actual:
|
|
||||||
a = {'enabled': act.enabled, 'description': act.description,
|
|
||||||
'name': act.name, 'id': act.id}
|
|
||||||
if e['name'] == a['name']:
|
|
||||||
found = True
|
|
||||||
ret = self._validate_dict_data(e, a)
|
|
||||||
if ret:
|
|
||||||
return "unexpected tenant data - {}".format(ret)
|
|
||||||
if not found:
|
|
||||||
return "tenant {} does not exist".format(e['name'])
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_role_data(self, expected, actual):
|
|
||||||
"""Validate role data.
|
|
||||||
|
|
||||||
Validate a list of actual role data vs a list of expected role
|
|
||||||
data.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating role data...')
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for e in expected:
|
|
||||||
found = False
|
|
||||||
for act in actual:
|
|
||||||
a = {'name': act.name, 'id': act.id}
|
|
||||||
if e['name'] == a['name']:
|
|
||||||
found = True
|
|
||||||
ret = self._validate_dict_data(e, a)
|
|
||||||
if ret:
|
|
||||||
return "unexpected role data - {}".format(ret)
|
|
||||||
if not found:
|
|
||||||
return "role {} does not exist".format(e['name'])
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_user_data(self, expected, actual):
|
|
||||||
"""Validate user data.
|
|
||||||
|
|
||||||
Validate a list of actual user data vs a list of expected user
|
|
||||||
data.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating user data...')
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
for e in expected:
|
|
||||||
found = False
|
|
||||||
for act in actual:
|
|
||||||
a = {'enabled': act.enabled, 'name': act.name,
|
|
||||||
'email': act.email, 'tenantId': act.tenantId,
|
|
||||||
'id': act.id}
|
|
||||||
if e['name'] == a['name']:
|
|
||||||
found = True
|
|
||||||
ret = self._validate_dict_data(e, a)
|
|
||||||
if ret:
|
|
||||||
return "unexpected user data - {}".format(ret)
|
|
||||||
if not found:
|
|
||||||
return "user {} does not exist".format(e['name'])
|
|
||||||
return ret
|
|
||||||
|
|
||||||
def validate_flavor_data(self, expected, actual):
|
|
||||||
"""Validate flavor data.
|
|
||||||
|
|
||||||
Validate a list of actual flavors vs a list of expected flavors.
|
|
||||||
"""
|
|
||||||
self.log.debug('Validating flavor data...')
|
|
||||||
self.log.debug('actual: {}'.format(repr(actual)))
|
|
||||||
act = [a.name for a in actual]
|
|
||||||
return self._validate_list_data(expected, act)
|
|
||||||
|
|
||||||
def tenant_exists(self, keystone, tenant):
|
|
||||||
"""Return True if tenant exists."""
|
|
||||||
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
|
||||||
return tenant in [t.name for t in keystone.tenants.list()]
|
|
||||||
|
|
||||||
def authenticate_cinder_admin(self, keystone_sentry, username,
|
|
||||||
password, tenant):
|
|
||||||
"""Authenticates admin user with cinder."""
|
|
||||||
# NOTE(beisner): cinder python client doesn't accept tokens.
|
|
||||||
service_ip = \
|
|
||||||
keystone_sentry.relation('shared-db',
|
|
||||||
'mysql:shared-db')['private-address']
|
|
||||||
ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
|
|
||||||
return cinder_client.Client(username, password, tenant, ept)
|
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
|
||||||
tenant):
|
|
||||||
"""Authenticates admin user with the keystone admin endpoint."""
|
|
||||||
self.log.debug('Authenticating keystone admin...')
|
|
||||||
unit = keystone_sentry
|
|
||||||
service_ip = unit.relation('shared-db',
|
|
||||||
'mysql:shared-db')['private-address']
|
|
||||||
ep = "http://{}:35357/v2.0".format(service_ip.strip().decode('utf-8'))
|
|
||||||
return keystone_client.Client(username=user, password=password,
|
|
||||||
tenant_name=tenant, auth_url=ep)
|
|
||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
|
||||||
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
return keystone_client.Client(username=user, password=password,
|
|
||||||
tenant_name=tenant, auth_url=ep)
|
|
||||||
|
|
||||||
def authenticate_glance_admin(self, keystone):
|
|
||||||
"""Authenticates admin user with glance."""
|
|
||||||
self.log.debug('Authenticating glance admin...')
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='image',
|
|
||||||
endpoint_type='adminURL')
|
|
||||||
return glance_client.Client(ep, token=keystone.auth_token)
|
|
||||||
|
|
||||||
def authenticate_heat_admin(self, keystone):
|
|
||||||
"""Authenticates the admin user with heat."""
|
|
||||||
self.log.debug('Authenticating heat admin...')
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='orchestration',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
return heat_client.Client(endpoint=ep, token=keystone.auth_token)
|
|
||||||
|
|
||||||
def authenticate_nova_user(self, keystone, user, password, tenant):
|
|
||||||
"""Authenticates a regular user with nova-api."""
|
|
||||||
self.log.debug('Authenticating nova user ({})...'.format(user))
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
return nova_client.Client(username=user, api_key=password,
|
|
||||||
project_id=tenant, auth_url=ep)
|
|
||||||
|
|
||||||
def authenticate_swift_user(self, keystone, user, password, tenant):
|
|
||||||
"""Authenticates a regular user with swift api."""
|
|
||||||
self.log.debug('Authenticating swift user ({})...'.format(user))
|
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
|
||||||
endpoint_type='publicURL')
|
|
||||||
return swiftclient.Connection(authurl=ep,
|
|
||||||
user=user,
|
|
||||||
key=password,
|
|
||||||
tenant_name=tenant,
|
|
||||||
auth_version='2.0')
|
|
||||||
|
|
||||||
def create_cirros_image(self, glance, image_name):
|
|
||||||
"""Download the latest cirros image and upload it to glance,
|
|
||||||
validate and return a resource pointer.
|
|
||||||
|
|
||||||
:param glance: pointer to authenticated glance connection
|
|
||||||
:param image_name: display name for new image
|
|
||||||
:returns: glance image pointer
|
|
||||||
"""
|
|
||||||
self.log.debug('Creating glance cirros image '
|
|
||||||
'({})...'.format(image_name))
|
|
||||||
|
|
||||||
# Download cirros image
|
|
||||||
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
|
||||||
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
|
||||||
if http_proxy:
|
|
||||||
proxies = {'http': http_proxy}
|
|
||||||
opener = urllib.FancyURLopener(proxies)
|
|
||||||
else:
|
|
||||||
opener = urllib.FancyURLopener()
|
|
||||||
|
|
||||||
f = opener.open('http://download.cirros-cloud.net/version/released')
|
|
||||||
version = f.read().strip()
|
|
||||||
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
|
|
||||||
local_path = os.path.join('tests', cirros_img)
|
|
||||||
|
|
||||||
if not os.path.exists(local_path):
|
|
||||||
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
|
|
||||||
version, cirros_img)
|
|
||||||
opener.retrieve(cirros_url, local_path)
|
|
||||||
f.close()
|
|
||||||
|
|
||||||
# Create glance image
|
|
||||||
with open(local_path) as f:
|
|
||||||
image = glance.images.create(name=image_name, is_public=True,
|
|
||||||
disk_format='qcow2',
|
|
||||||
container_format='bare', data=f)
|
|
||||||
|
|
||||||
# Wait for image to reach active status
|
|
||||||
img_id = image.id
|
|
||||||
ret = self.resource_reaches_status(glance.images, img_id,
|
|
||||||
expected_stat='active',
|
|
||||||
msg='Image status wait')
|
|
||||||
if not ret:
|
|
||||||
msg = 'Glance image failed to reach expected state.'
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
# Re-validate new image
|
|
||||||
self.log.debug('Validating image attributes...')
|
|
||||||
val_img_name = glance.images.get(img_id).name
|
|
||||||
val_img_stat = glance.images.get(img_id).status
|
|
||||||
val_img_pub = glance.images.get(img_id).is_public
|
|
||||||
val_img_cfmt = glance.images.get(img_id).container_format
|
|
||||||
val_img_dfmt = glance.images.get(img_id).disk_format
|
|
||||||
msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
|
|
||||||
'container fmt:{} disk fmt:{}'.format(
|
|
||||||
val_img_name, val_img_pub, img_id,
|
|
||||||
val_img_stat, val_img_cfmt, val_img_dfmt))
|
|
||||||
|
|
||||||
if val_img_name == image_name and val_img_stat == 'active' \
|
|
||||||
and val_img_pub is True and val_img_cfmt == 'bare' \
|
|
||||||
and val_img_dfmt == 'qcow2':
|
|
||||||
self.log.debug(msg_attr)
|
|
||||||
else:
|
|
||||||
msg = ('Volume validation failed, {}'.format(msg_attr))
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
return image
|
|
||||||
|
|
||||||
def delete_image(self, glance, image):
|
|
||||||
"""Delete the specified image."""
|
|
||||||
|
|
||||||
# /!\ DEPRECATION WARNING
|
|
||||||
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
|
||||||
'delete_resource instead of delete_image.')
|
|
||||||
self.log.debug('Deleting glance image ({})...'.format(image))
|
|
||||||
return self.delete_resource(glance.images, image, msg='glance image')
|
|
||||||
|
|
||||||
def create_instance(self, nova, image_name, instance_name, flavor):
|
|
||||||
"""Create the specified instance."""
|
|
||||||
self.log.debug('Creating instance '
|
|
||||||
'({}|{}|{})'.format(instance_name, image_name, flavor))
|
|
||||||
image = nova.images.find(name=image_name)
|
|
||||||
flavor = nova.flavors.find(name=flavor)
|
|
||||||
instance = nova.servers.create(name=instance_name, image=image,
|
|
||||||
flavor=flavor)
|
|
||||||
|
|
||||||
count = 1
|
|
||||||
status = instance.status
|
|
||||||
while status != 'ACTIVE' and count < 60:
|
|
||||||
time.sleep(3)
|
|
||||||
instance = nova.servers.get(instance.id)
|
|
||||||
status = instance.status
|
|
||||||
self.log.debug('instance status: {}'.format(status))
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
if status != 'ACTIVE':
|
|
||||||
self.log.error('instance creation timed out')
|
|
||||||
return None
|
|
||||||
|
|
||||||
return instance
|
|
||||||
|
|
||||||
def delete_instance(self, nova, instance):
|
|
||||||
"""Delete the specified instance."""
|
|
||||||
|
|
||||||
# /!\ DEPRECATION WARNING
|
|
||||||
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
|
||||||
'delete_resource instead of delete_instance.')
|
|
||||||
self.log.debug('Deleting instance ({})...'.format(instance))
|
|
||||||
return self.delete_resource(nova.servers, instance,
|
|
||||||
msg='nova instance')
|
|
||||||
|
|
||||||
def create_or_get_keypair(self, nova, keypair_name="testkey"):
|
|
||||||
"""Create a new keypair, or return pointer if it already exists."""
|
|
||||||
try:
|
|
||||||
_keypair = nova.keypairs.get(keypair_name)
|
|
||||||
self.log.debug('Keypair ({}) already exists, '
|
|
||||||
'using it.'.format(keypair_name))
|
|
||||||
return _keypair
|
|
||||||
except:
|
|
||||||
self.log.debug('Keypair ({}) does not exist, '
|
|
||||||
'creating it.'.format(keypair_name))
|
|
||||||
|
|
||||||
_keypair = nova.keypairs.create(name=keypair_name)
|
|
||||||
return _keypair
|
|
||||||
|
|
||||||
def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
|
|
||||||
img_id=None, src_vol_id=None, snap_id=None):
|
|
||||||
"""Create cinder volume, optionally from a glance image, OR
|
|
||||||
optionally as a clone of an existing volume, OR optionally
|
|
||||||
from a snapshot. Wait for the new volume status to reach
|
|
||||||
the expected status, validate and return a resource pointer.
|
|
||||||
|
|
||||||
:param vol_name: cinder volume display name
|
|
||||||
:param vol_size: size in gigabytes
|
|
||||||
:param img_id: optional glance image id
|
|
||||||
:param src_vol_id: optional source volume id to clone
|
|
||||||
:param snap_id: optional snapshot id to use
|
|
||||||
:returns: cinder volume pointer
|
|
||||||
"""
|
|
||||||
# Handle parameter input and avoid impossible combinations
|
|
||||||
if img_id and not src_vol_id and not snap_id:
|
|
||||||
# Create volume from image
|
|
||||||
self.log.debug('Creating cinder volume from glance image...')
|
|
||||||
bootable = 'true'
|
|
||||||
elif src_vol_id and not img_id and not snap_id:
|
|
||||||
# Clone an existing volume
|
|
||||||
self.log.debug('Cloning cinder volume...')
|
|
||||||
bootable = cinder.volumes.get(src_vol_id).bootable
|
|
||||||
elif snap_id and not src_vol_id and not img_id:
|
|
||||||
# Create volume from snapshot
|
|
||||||
self.log.debug('Creating cinder volume from snapshot...')
|
|
||||||
snap = cinder.volume_snapshots.find(id=snap_id)
|
|
||||||
vol_size = snap.size
|
|
||||||
snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
|
|
||||||
bootable = cinder.volumes.get(snap_vol_id).bootable
|
|
||||||
elif not img_id and not src_vol_id and not snap_id:
|
|
||||||
# Create volume
|
|
||||||
self.log.debug('Creating cinder volume...')
|
|
||||||
bootable = 'false'
|
|
||||||
else:
|
|
||||||
# Impossible combination of parameters
|
|
||||||
msg = ('Invalid method use - name:{} size:{} img_id:{} '
|
|
||||||
'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
|
|
||||||
img_id, src_vol_id,
|
|
||||||
snap_id))
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
# Create new volume
|
|
||||||
try:
|
|
||||||
vol_new = cinder.volumes.create(display_name=vol_name,
|
|
||||||
imageRef=img_id,
|
|
||||||
size=vol_size,
|
|
||||||
source_volid=src_vol_id,
|
|
||||||
snapshot_id=snap_id)
|
|
||||||
vol_id = vol_new.id
|
|
||||||
except Exception as e:
|
|
||||||
msg = 'Failed to create volume: {}'.format(e)
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
# Wait for volume to reach available status
|
|
||||||
ret = self.resource_reaches_status(cinder.volumes, vol_id,
|
|
||||||
expected_stat="available",
|
|
||||||
msg="Volume status wait")
|
|
||||||
if not ret:
|
|
||||||
msg = 'Cinder volume failed to reach expected state.'
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
# Re-validate new volume
|
|
||||||
self.log.debug('Validating volume attributes...')
|
|
||||||
val_vol_name = cinder.volumes.get(vol_id).display_name
|
|
||||||
val_vol_boot = cinder.volumes.get(vol_id).bootable
|
|
||||||
val_vol_stat = cinder.volumes.get(vol_id).status
|
|
||||||
val_vol_size = cinder.volumes.get(vol_id).size
|
|
||||||
msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
|
|
||||||
'{} size:{}'.format(val_vol_name, vol_id,
|
|
||||||
val_vol_stat, val_vol_boot,
|
|
||||||
val_vol_size))
|
|
||||||
|
|
||||||
if val_vol_boot == bootable and val_vol_stat == 'available' \
|
|
||||||
and val_vol_name == vol_name and val_vol_size == vol_size:
|
|
||||||
self.log.debug(msg_attr)
|
|
||||||
else:
|
|
||||||
msg = ('Volume validation failed, {}'.format(msg_attr))
|
|
||||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
return vol_new
|
|
||||||
|
|
||||||
def delete_resource(self, resource, resource_id,
|
|
||||||
msg="resource", max_wait=120):
|
|
||||||
"""Delete one openstack resource, such as one instance, keypair,
|
|
||||||
image, volume, stack, etc., and confirm deletion within max wait time.
|
|
||||||
|
|
||||||
:param resource: pointer to os resource type, ex:glance_client.images
|
|
||||||
:param resource_id: unique name or id for the openstack resource
|
|
||||||
:param msg: text to identify purpose in logging
|
|
||||||
:param max_wait: maximum wait time in seconds
|
|
||||||
:returns: True if successful, otherwise False
|
|
||||||
"""
|
|
||||||
self.log.debug('Deleting OpenStack resource '
|
|
||||||
'{} ({})'.format(resource_id, msg))
|
|
||||||
num_before = len(list(resource.list()))
|
|
||||||
resource.delete(resource_id)
|
|
||||||
|
|
||||||
tries = 0
|
|
||||||
num_after = len(list(resource.list()))
|
|
||||||
while num_after != (num_before - 1) and tries < (max_wait / 4):
|
|
||||||
self.log.debug('{} delete check: '
|
|
||||||
'{} [{}:{}] {}'.format(msg, tries,
|
|
||||||
num_before,
|
|
||||||
num_after,
|
|
||||||
resource_id))
|
|
||||||
time.sleep(4)
|
|
||||||
num_after = len(list(resource.list()))
|
|
||||||
tries += 1
|
|
||||||
|
|
||||||
self.log.debug('{}: expected, actual count = {}, '
|
|
||||||
'{}'.format(msg, num_before - 1, num_after))
|
|
||||||
|
|
||||||
if num_after == (num_before - 1):
|
|
||||||
return True
|
|
||||||
else:
|
|
||||||
self.log.error('{} delete timed out'.format(msg))
|
|
||||||
return False
|
|
||||||
|
|
||||||
def resource_reaches_status(self, resource, resource_id,
                            expected_stat='available',
                            msg='resource', max_wait=120):
    """Wait for an openstack resources status to reach an
    expected status within a specified time.  Useful to confirm that
    nova instances, cinder vols, snapshots, glance images, heat stacks
    and other resources eventually reach the expected status.

    :param resource: pointer to os resource type, ex: heat_client.stacks
    :param resource_id: unique id for the openstack resource
    :param expected_stat: status to expect resource to reach
    :param msg: text to identify purpose in logging
    :param max_wait: maximum wait time in seconds
    :returns: True if successful, False if status is not reached
    """
    attempt = 0
    current_stat = resource.get(resource_id).status
    # Poll every 4s until the resource reports the expected status.
    while current_stat != expected_stat and attempt < (max_wait / 4):
        self.log.debug('{} status check: '
                       '{} [{}:{}] {}'.format(msg, attempt,
                                              current_stat,
                                              expected_stat,
                                              resource_id))
        time.sleep(4)
        current_stat = resource.get(resource_id).status
        attempt += 1

    self.log.debug('{}: expected, actual status = {}, '
                   '{}'.format(msg, current_stat, expected_stat))

    if current_stat == expected_stat:
        return True
    self.log.debug('{} never reached expected status: '
                   '{}'.format(resource_id, expected_stat))
    return False
|
|
||||||
|
|
||||||
def get_ceph_osd_id_cmd(self, index):
    """Produce a shell command that will return a ceph-osd id."""
    row = index + 1  # awk NR is 1-based
    return ("`initctl list | grep 'ceph-osd ' | "
            "awk 'NR=={} {{ print $2 }}' | "
            "grep -o '[0-9]*'`".format(row))
|
|
||||||
|
|
||||||
def get_ceph_pools(self, sentry_unit):
    """Return a dict of ceph pools from a single ceph unit, with
    pool name as keys, pool id as vals."""
    cmd = 'sudo ceph osd lspools'
    output, code = sentry_unit.run(cmd)
    if code != 0:
        msg = ('{} `{}` returned {} '
               '{}'.format(sentry_unit.info['unit_name'],
                           cmd, code, output))
        amulet.raise_status(amulet.FAIL, msg=msg)

    # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
    pools = {}
    for entry in str(output).split(','):
        fields = entry.split(' ')
        # Skip fragments (e.g. the empty trailing element) that are
        # not an "<id> <name>" pair.
        if len(fields) == 2:
            pools[fields[1]] = int(fields[0])

    self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
                                            pools))
    return pools
|
|
||||||
|
|
||||||
def get_ceph_df(self, sentry_unit):
    """Return dict of ceph df json output, including ceph pool state.

    :param sentry_unit: Pointer to amulet sentry instance (juju unit)
    :returns: Dict of ceph df output
    """
    cmd = 'sudo ceph df --format=json'
    output, code = sentry_unit.run(cmd)
    if code == 0:
        return json.loads(output)
    # Non-zero exit: fail the amulet test with full command context.
    msg = ('{} `{}` returned {} '
           '{}'.format(sentry_unit.info['unit_name'],
                       cmd, code, output))
    amulet.raise_status(amulet.FAIL, msg=msg)
|
|
||||||
|
|
||||||
def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
    """Take a sample of attributes of a ceph pool, returning ceph
    pool name, object count and disk space used for the specified
    pool ID number.

    :param sentry_unit: Pointer to amulet sentry instance (juju unit)
    :param pool_id: Ceph pool ID
    :returns: List of pool name, object count, kb disk space used
    """
    pool = self.get_ceph_df(sentry_unit)['pools'][pool_id]
    pool_name = pool['name']
    obj_count = pool['stats']['objects']
    kb_used = pool['stats']['kb_used']
    self.log.debug('Ceph {} pool (ID {}): {} objects, '
                   '{} kb used'.format(pool_name, pool_id,
                                       obj_count, kb_used))
    return pool_name, obj_count, kb_used
|
|
||||||
|
|
||||||
def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
    """Validate ceph pool samples taken over time, such as pool
    object counts or pool kb used, before adding, after adding, and
    after deleting items which affect those pool attributes.  The
    2nd element is expected to be greater than the 1st; 3rd is expected
    to be less than the 2nd.

    :param samples: List containing 3 data samples
    :param sample_type: String for logging and usage context
    :returns: None if successful, Failure message otherwise
    """
    original, created, deleted = range(3)
    grew = samples[created] > samples[original]
    shrank = samples[deleted] < samples[created]
    if grew and shrank:
        self.log.debug('Ceph {} samples (OK): '
                       '{}'.format(sample_type, samples))
        return None
    return ('Ceph {} samples ({}) '
            'unexpected.'.format(sample_type, samples))
|
|
||||||
|
|
||||||
# rabbitmq/amqp specific helpers:
|
|
||||||
|
|
||||||
def rmq_wait_for_cluster(self, deployment, init_sleep=15, timeout=1200):
    """Wait for rmq units extended status to show cluster readiness,
    after an optional initial sleep period.  Initial sleep is likely
    necessary to be effective following a config change, as status
    message may not instantly update to non-ready."""
    if init_sleep:
        time.sleep(init_sleep)

    ready_msg = re.compile('^Unit is ready and clustered$')
    # Block until every rabbitmq-server unit reports the ready message.
    deployment._auto_wait_for_status(message=ready_msg,
                                     timeout=timeout,
                                     include_only=['rabbitmq-server'])
|
|
||||||
|
|
||||||
def add_rmq_test_user(self, sentry_units,
                      username="testuser1", password="changeme"):
    """Add a test user via the first rmq juju unit, check connection as
    the new user against all sentry units.

    :param sentry_units: list of sentry unit pointers
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :returns: None if successful.  Raise on error.
    """
    self.log.debug('Adding rmq user ({})...'.format(username))

    # Check that user does not already exist; adding is skipped (not an
    # error) when the username shows up in `rabbitmqctl list_users`.
    cmd_user_list = 'rabbitmqctl list_users'
    output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)
    if username in output:
        self.log.warning('User ({}) already exists, returning '
                         'gracefully.'.format(username))
        return

    # Grant full configure/write/read permissions to the new user.
    perms = '".*" ".*" ".*"'
    cmds = ['rabbitmqctl add_user {} {}'.format(username, password),
            'rabbitmqctl set_permissions {} {}'.format(username, perms)]

    # Add user via first unit
    for cmd in cmds:
        output, _ = self.run_cmd_unit(sentry_units[0], cmd)

    # Check connection against the other sentry_units
    # NOTE(review): connect_amqp_by_unit defaults to fatal=True, so a
    # failed connection is expected to raise -- confirm in its docs.
    self.log.debug('Checking user connect against units...')
    for sentry_unit in sentry_units:
        connection = self.connect_amqp_by_unit(sentry_unit, ssl=False,
                                               username=username,
                                               password=password)
        connection.close()
|
|
||||||
|
|
||||||
def delete_rmq_test_user(self, sentry_units, username="testuser1"):
    """Delete a rabbitmq user via the first rmq juju unit.

    :param sentry_units: list of sentry unit pointers
    :param username: amqp user name, default to testuser1
    :returns: None if successful or no such user.
    """
    self.log.debug('Deleting rmq user ({})...'.format(username))

    # Check that the user exists; deleting a missing user is treated
    # as success, not as an error.
    cmd_user_list = 'rabbitmqctl list_users'
    output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_list)

    if username not in output:
        self.log.warning('User ({}) does not exist, returning '
                         'gracefully.'.format(username))
        return

    # Delete the user
    cmd_user_del = 'rabbitmqctl delete_user {}'.format(username)
    output, _ = self.run_cmd_unit(sentry_units[0], cmd_user_del)
|
|
||||||
|
|
||||||
def get_rmq_cluster_status(self, sentry_unit):
    """Execute rabbitmq cluster status command on a unit and return
    the full output.

    :param sentry_unit: sentry unit pointer
    :returns: String containing console output of cluster status command
    """
    cmd = 'rabbitmqctl cluster_status'
    output, _ = self.run_cmd_unit(sentry_unit, cmd)
    self.log.debug('{} cluster_status:\n{}'.format(
        sentry_unit.info['unit_name'], output))
    return str(output)
|
|
||||||
|
|
||||||
def get_rmq_cluster_running_nodes(self, sentry_unit):
    """Parse rabbitmqctl cluster_status output string, return list of
    running rabbitmq cluster nodes.

    :param sentry_unit: sentry unit
    :returns: List containing node names of running nodes
    """
    # NOTE(beisner): rabbitmqctl cluster_status output is not
    # json-parsable, do string chop foo, then json.loads that.
    status = self.get_rmq_cluster_status(sentry_unit)
    if 'running_nodes' not in status:
        return []
    # Slice out the Erlang term list after "{running_nodes," up to and
    # including its closing "]", then swap quotes for json.
    start = status.find("{running_nodes,") + 15
    end = status.find("]},", start) + 1
    node_list_json = status[start:end].replace("'", '"')
    return json.loads(node_list_json)
|
|
||||||
|
|
||||||
def validate_rmq_cluster_running_nodes(self, sentry_units):
    """Check that all rmq unit hostnames are represented in the
    cluster_status output of all units.

    :param sentry_units: list of sentry unit pointers (all rmq units)
    :returns: None if successful, otherwise return error message
    """
    host_names = self.get_unit_hostnames(sentry_units)
    errors = []

    # Query every unit for cluster_status running nodes
    for query_unit in sentry_units:
        query_unit_name = query_unit.info['unit_name']
        running_nodes = self.get_rmq_cluster_running_nodes(query_unit)

        # Confirm that every unit is represented in the queried unit's
        # cluster_status running nodes output.
        for validate_unit in sentry_units:
            val_host_name = host_names[validate_unit.info['unit_name']]
            # rabbitmq node names take the form rabbit@<hostname>.
            val_node_name = 'rabbit@{}'.format(val_host_name)

            if val_node_name not in running_nodes:
                errors.append('Cluster member check failed on {}: {} not '
                              'in {}\n'.format(query_unit_name,
                                               val_node_name,
                                               running_nodes))
    # Falls through to an implicit None return when no errors collected.
    if errors:
        return ''.join(errors)
|
|
||||||
|
|
||||||
def rmq_ssl_is_enabled_on_unit(self, sentry_unit, port=None):
    """Check a single juju rmq unit for ssl and port in the config file.

    :param sentry_unit: sentry unit pointer
    :param port: optional ssl port to require; when None only the
                 presence of ssl config is checked
    :returns: True or False; raises via amulet on an unexpected state
    """
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    conf_file = '/etc/rabbitmq/rabbitmq.config'
    conf_contents = str(self.file_contents_safe(sentry_unit,
                                                conf_file, max_wait=16))
    # Checks
    conf_ssl = 'ssl' in conf_contents
    # NOTE: when port is None this evaluates "str(None) in contents";
    # the result is only consulted in branches guarded by a truthy port.
    conf_port = str(port) in conf_contents

    # Port explicitly checked in config
    if port and conf_port and conf_ssl:
        self.log.debug('SSL is enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return True
    elif port and not conf_port and conf_ssl:
        self.log.debug('SSL is enabled @{} but not on port {} '
                       '({})'.format(host, port, unit_name))
        return False
    # Port not checked (useful when checking that ssl is disabled)
    elif not port and conf_ssl:
        self.log.debug('SSL is enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return True
    elif not conf_ssl:
        self.log.debug('SSL not enabled @{}:{} '
                       '({})'.format(host, port, unit_name))
        return False
    else:
        # All combinations of port/conf_port/conf_ssl are covered above,
        # so this branch indicates a logic error.
        msg = ('Unknown condition when checking SSL status @{}:{} '
               '({})'.format(host, port, unit_name))
        amulet.raise_status(amulet.FAIL, msg)
|
|
||||||
|
|
||||||
def validate_rmq_ssl_enabled_units(self, sentry_units, port=None):
    """Check that ssl is enabled on rmq juju sentry units.

    :param sentry_units: list of all rmq sentry units
    :param port: optional ssl port override to validate
    :returns: None if successful, otherwise return error message
    """
    for unit in sentry_units:
        if self.rmq_ssl_is_enabled_on_unit(unit, port=port):
            continue
        # Stop at the first unit found without ssl enabled.
        return ('Unexpected condition: ssl is disabled on unit '
                '({})'.format(unit.info['unit_name']))
    return None
|
|
||||||
|
|
||||||
def validate_rmq_ssl_disabled_units(self, sentry_units):
    """Check that ssl is disabled on the listed rmq juju sentry units.

    :param sentry_units: list of all rmq sentry units
    :returns: None if successful, otherwise an error message string.
    """
    for sentry_unit in sentry_units:
        # Called without a port so only the presence of ssl config
        # is checked on each unit.
        if self.rmq_ssl_is_enabled_on_unit(sentry_unit):
            return ('Unexpected condition: ssl is enabled on unit '
                    '({})'.format(sentry_unit.info['unit_name']))
    return None
|
|
||||||
|
|
||||||
def configure_rmq_ssl_on(self, sentry_units, deployment,
                         port=None, max_wait=60):
    """Turn ssl charm config option on, with optional non-default
    ssl port specification.  Confirm that it is enabled on every
    unit.

    :param sentry_units: list of sentry units
    :param deployment: amulet deployment object pointer
    :param port: amqp port, use defaults if None
    :param max_wait: maximum time to wait in seconds to confirm
    :returns: None if successful.  Raise on error.
    """
    self.log.debug('Setting ssl charm config option: on')

    # Enable RMQ SSL
    config = {'ssl': 'on'}
    if port:
        config['ssl_port'] = port

    deployment.d.configure('rabbitmq-server', config)

    # Wait for unit status
    self.rmq_wait_for_cluster(deployment)

    # Confirm: poll every 4s until validation passes or max_wait
    # seconds are exhausted.
    tries = 0
    ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
    while ret and tries < (max_wait / 4):
        time.sleep(4)
        self.log.debug('Attempt {}: {}'.format(tries, ret))
        ret = self.validate_rmq_ssl_enabled_units(sentry_units, port=port)
        tries += 1

    # A non-None ret is the validation failure message.
    if ret:
        amulet.raise_status(amulet.FAIL, ret)
|
|
||||||
|
|
||||||
def configure_rmq_ssl_off(self, sentry_units, deployment, max_wait=60):
    """Turn ssl charm config option off, confirm that it is disabled
    on every unit.

    :param sentry_units: list of sentry units
    :param deployment: amulet deployment object pointer
    :param max_wait: maximum time to wait in seconds to confirm
    :returns: None if successful.  Raise on error.
    """
    self.log.debug('Setting ssl charm config option: off')

    # Disable RMQ SSL
    config = {'ssl': 'off'}
    deployment.d.configure('rabbitmq-server', config)

    # Wait for unit status
    self.rmq_wait_for_cluster(deployment)

    # Confirm: poll every 4s until validation passes or max_wait
    # seconds are exhausted.
    tries = 0
    ret = self.validate_rmq_ssl_disabled_units(sentry_units)
    while ret and tries < (max_wait / 4):
        time.sleep(4)
        self.log.debug('Attempt {}: {}'.format(tries, ret))
        ret = self.validate_rmq_ssl_disabled_units(sentry_units)
        tries += 1

    # A non-None ret is the validation failure message.
    if ret:
        amulet.raise_status(amulet.FAIL, ret)
|
|
||||||
|
|
||||||
def connect_amqp_by_unit(self, sentry_unit, ssl=False,
                         port=None, fatal=True,
                         username="testuser1", password="changeme"):
    """Establish and return a pika amqp connection to the rabbitmq service
    running on a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param ssl: boolean, default to False
    :param port: amqp port, use defaults if None
    :param fatal: boolean, default to True (raises on connect error)
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :returns: pika amqp connection pointer or None if failed and non-fatal
    """
    host = sentry_unit.info['public-address']
    unit_name = sentry_unit.info['unit_name']

    # Default port logic if port is not specified:
    # 5671 for amqps (ssl), 5672 for plain amqp.
    if ssl and not port:
        port = 5671
    elif not ssl and not port:
        port = 5672

    self.log.debug('Connecting to amqp on {}:{} ({}) as '
                   '{}...'.format(host, port, unit_name, username))

    try:
        credentials = pika.PlainCredentials(username, password)
        # Short socket timeout plus retries keeps an unreachable unit
        # from blocking the test run for long.
        parameters = pika.ConnectionParameters(host=host, port=port,
                                               credentials=credentials,
                                               ssl=ssl,
                                               connection_attempts=3,
                                               retry_delay=5,
                                               socket_timeout=1)
        connection = pika.BlockingConnection(parameters)
        # Sanity-check that the peer really is RabbitMQ.
        assert connection.server_properties['product'] == 'RabbitMQ'
        self.log.debug('Connect OK')
        return connection
    except Exception as e:
        msg = ('amqp connection failed to {}:{} as '
               '{} ({})'.format(host, port, username, str(e)))
        if fatal:
            amulet.raise_status(amulet.FAIL, msg)
        else:
            self.log.warn(msg)
            return None
|
|
||||||
|
|
||||||
def publish_amqp_message_by_unit(self, sentry_unit, message,
                                 queue="test", ssl=False,
                                 username="testuser1",
                                 password="changeme",
                                 port=None):
    """Publish an amqp message to a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param message: amqp message string
    :param queue: message queue, default to test
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :param ssl: boolean, default to False
    :param port: amqp port, use defaults if None
    :returns: None.  Raises exception if publish failed.
    """
    self.log.debug('Publishing message to {} queue:\n{}'.format(queue,
                                                                message))
    connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                           port=port,
                                           username=username,
                                           password=password)

    # NOTE(beisner): extra debug here re: pika hang potential:
    #   https://github.com/pika/pika/issues/297
    #   https://groups.google.com/forum/#!topic/rabbitmq-users/Ja0iyfF0Szw
    self.log.debug('Defining channel...')
    channel = connection.channel()
    self.log.debug('Declaring queue...')
    # durable=True so the queue survives a broker restart.
    channel.queue_declare(queue=queue, auto_delete=False, durable=True)
    self.log.debug('Publishing message...')
    channel.basic_publish(exchange='', routing_key=queue, body=message)
    self.log.debug('Closing channel...')
    channel.close()
    self.log.debug('Closing connection...')
    connection.close()
|
|
||||||
|
|
||||||
def get_amqp_message_by_unit(self, sentry_unit, queue="test",
                             username="testuser1",
                             password="changeme",
                             ssl=False, port=None):
    """Get an amqp message from a rmq juju unit.

    :param sentry_unit: sentry unit pointer
    :param queue: message queue, default to test
    :param username: amqp user name, default to testuser1
    :param password: amqp user password
    :param ssl: boolean, default to False
    :param port: amqp port, use defaults if None
    :returns: amqp message body as string.  Raise if get fails.
    """
    connection = self.connect_amqp_by_unit(sentry_unit, ssl=ssl,
                                           port=port,
                                           username=username,
                                           password=password)
    channel = connection.channel()
    method_frame, _, body = channel.basic_get(queue)

    if method_frame:
        # Log message typo fixed: 'Retreived' -> 'Retrieved'.
        self.log.debug('Retrieved message from {} queue:\n{}'.format(queue,
                                                                     body))
        # Ack before closing so the broker does not requeue the message.
        channel.basic_ack(method_frame.delivery_tag)
        channel.close()
        connection.close()
        return body
    else:
        msg = 'No message retrieved.'
        amulet.raise_status(amulet.FAIL, msg)
|
|
File diff suppressed because it is too large
Load Diff
@ -1,18 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
|
|
||||||
# module
|
|
@ -1,32 +0,0 @@
|
|||||||
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

# Nagios check: confirm every server listed in haproxy.cfg reports as
# active/backup via the haproxy stats page on localhost:8888.
# Exits 2 (CRITICAL) listing any servers not active, 0 otherwise.

CRITICAL=0
NOTACTIVE=''
LOGFILE=/var/log/nagios/check_haproxy.log
AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')

# Fixed misplaced quote: was awk '{print $2'} -- that only worked
# because the shell concatenated the unquoted closing brace.
for appserver in $(grep ' server' /etc/haproxy/haproxy.cfg | awk '{print $2}');
do
    output=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 --regex="class=\"(active|backup)(2|3).*${appserver}" -e ' 200 OK')
    if [ $? != 0 ]; then
        # Record the failure plus verbose stats output for diagnosis.
        date >> $LOGFILE
        echo $output >> $LOGFILE
        /usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -v | grep $appserver >> $LOGFILE 2>&1
        CRITICAL=1
        NOTACTIVE="${NOTACTIVE} $appserver"
    fi
done

if [ $CRITICAL = 1 ]; then
    echo "CRITICAL:${NOTACTIVE}"
    exit 2
fi

echo "OK: All haproxy instances looking good"
exit 0
|
|
@ -1,30 +0,0 @@
|
|||||||
#!/bin/bash
#--------------------------------------------
# This file is managed by Juju
#--------------------------------------------
#
# Copyright 2009,2012 Canonical Ltd.
# Author: Tom Haddon

# Nagios check: alert when any haproxy backend queue depth exceeds the
# thresholds below, read from the stats CSV on localhost:8888.

# These should be config options at some stage
CURRQthrsh=0
MAXQthrsh=100

AUTH=$(grep -r "stats auth" /etc/haproxy | head -1 | awk '{print $4}')

HAPROXYSTATS=$(/usr/lib/nagios/plugins/check_http -a ${AUTH} -I 127.0.0.1 -p 8888 -u '/;csv' -v)

# CSV field 1 is the proxy name; field 3 is qcur, field 4 is qmax.
for BACKEND in $(echo $HAPROXYSTATS| xargs -n1 | grep BACKEND | awk -F , '{print $1}')
do
    CURRQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 3)
    MAXQ=$(echo "$HAPROXYSTATS" | grep $BACKEND | grep BACKEND | cut -d , -f 4)

    if [[ $CURRQ -gt $CURRQthrsh || $MAXQ -gt $MAXQthrsh ]] ; then
        echo "CRITICAL: queue depth for $BACKEND - CURRENT:$CURRQ MAX:$MAXQ"
        exit 2
    fi
done

echo "OK: All haproxy queue depths looking good"
exit 0
|
|
||||||
|
|
@ -1,151 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
unit_get,
|
|
||||||
service_name,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.network.ip import (
|
|
||||||
get_address_in_network,
|
|
||||||
is_address_in_network,
|
|
||||||
is_ipv6,
|
|
||||||
get_ipv6_addr,
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.hahelpers.cluster import is_clustered
|
|
||||||
|
|
||||||
PUBLIC = 'public'
|
|
||||||
INTERNAL = 'int'
|
|
||||||
ADMIN = 'admin'
|
|
||||||
|
|
||||||
ADDRESS_MAP = {
|
|
||||||
PUBLIC: {
|
|
||||||
'config': 'os-public-network',
|
|
||||||
'fallback': 'public-address',
|
|
||||||
'override': 'os-public-hostname',
|
|
||||||
},
|
|
||||||
INTERNAL: {
|
|
||||||
'config': 'os-internal-network',
|
|
||||||
'fallback': 'private-address',
|
|
||||||
'override': 'os-internal-hostname',
|
|
||||||
},
|
|
||||||
ADMIN: {
|
|
||||||
'config': 'os-admin-network',
|
|
||||||
'fallback': 'private-address',
|
|
||||||
'override': 'os-admin-hostname',
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def canonical_url(configs, endpoint_type=PUBLIC):
    """Returns the correct HTTP URL to this host given the state of HTTPS
    configuration, hacluster and charm configuration.

    :param configs: OSTemplateRenderer config templating object to inspect
                    for a complete https context.
    :param endpoint_type: str endpoint type to resolve.
    :param returns: str base URL for services on the current service unit.
    """
    address = resolve_address(endpoint_type)
    if is_ipv6(address):
        # IPv6 literals must be bracketed inside a URL.
        address = "[{}]".format(address)

    return '%s://%s' % (_get_scheme(configs), address)
|
|
||||||
|
|
||||||
|
|
||||||
def _get_scheme(configs):
|
|
||||||
"""Returns the scheme to use for the url (either http or https)
|
|
||||||
depending upon whether https is in the configs value.
|
|
||||||
|
|
||||||
:param configs: OSTemplateRenderer config templating object to inspect
|
|
||||||
for a complete https context.
|
|
||||||
:returns: either 'http' or 'https' depending on whether https is
|
|
||||||
configured within the configs context.
|
|
||||||
"""
|
|
||||||
scheme = 'http'
|
|
||||||
if configs and 'https' in configs.complete_contexts():
|
|
||||||
scheme = 'https'
|
|
||||||
return scheme
|
|
||||||
|
|
||||||
|
|
||||||
def _get_address_override(endpoint_type=PUBLIC):
    """Returns any address overrides that the user has defined based on the
    endpoint type.

    Note: this function allows for the service name to be inserted into the
    address if the user specifies {service_name}.somehost.org.

    :param endpoint_type: the type of endpoint to retrieve the override
                          value for.
    :returns: any endpoint address or hostname that the user has overridden
              or None if an override is not present.
    """
    addr_override = config(ADDRESS_MAP[endpoint_type]['override'])
    if addr_override:
        # Allow '{service_name}' templating within the override value.
        return addr_override.format(service_name=service_name())
    return None
|
|
||||||
|
|
||||||
|
|
||||||
def resolve_address(endpoint_type=PUBLIC):
    """Return unit address depending on net config.

    If unit is clustered with vip(s) and has net splits defined, return vip on
    correct network.  If clustered with no nets defined, return primary vip.

    If not clustered, return unit address ensuring address is on configured net
    split if one is configured.

    :param endpoint_type: Network endpoint type
    """
    # A user-supplied hostname/address override always wins.
    resolved_address = _get_address_override(endpoint_type)
    if resolved_address:
        return resolved_address

    vips = config('vip')
    if vips:
        vips = vips.split()

    net_type = ADDRESS_MAP[endpoint_type]['config']
    net_addr = config(net_type)
    net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
    clustered = is_clustered()
    if clustered:
        if not net_addr:
            # If no net-splits defined, we expect a single vip
            resolved_address = vips[0]
        else:
            # Pick the vip that falls within the configured net split.
            for vip in vips:
                if is_address_in_network(net_addr, vip):
                    resolved_address = vip
                    break
    else:
        if config('prefer-ipv6'):
            # Exclude vips so a unit-local ipv6 address is chosen,
            # not a floating/cluster address.
            fallback_addr = get_ipv6_addr(exc_list=vips)[0]
        else:
            fallback_addr = unit_get(net_fallback)

        resolved_address = get_address_in_network(net_addr, fallback_addr)

    if resolved_address is None:
        raise ValueError("Unable to resolve a suitable IP address based on "
                         "charm state and configuration. (net_type=%s, "
                         "clustered=%s)" % (net_type, clustered))

    return resolved_address
|
|
@ -1,370 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Various utilies for dealing with Neutron and the renaming from Quantum.
|
|
||||||
|
|
||||||
import six
|
|
||||||
from subprocess import check_output
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
config,
|
|
||||||
log,
|
|
||||||
ERROR,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.openstack.utils import os_release
|
|
||||||
|
|
||||||
|
|
||||||
def headers_package():
|
|
||||||
"""Ensures correct linux-headers for running kernel are installed,
|
|
||||||
for building DKMS package"""
|
|
||||||
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
|
|
||||||
return 'linux-headers-%s' % kver
|
|
||||||
|
|
||||||
QUANTUM_CONF_DIR = '/etc/quantum'
|
|
||||||
|
|
||||||
|
|
||||||
def kernel_version():
|
|
||||||
""" Retrieve the current major kernel version as a tuple e.g. (3, 13) """
|
|
||||||
kver = check_output(['uname', '-r']).decode('UTF-8').strip()
|
|
||||||
kver = kver.split('.')
|
|
||||||
return (int(kver[0]), int(kver[1]))
|
|
||||||
|
|
||||||
|
|
||||||
def determine_dkms_package():
|
|
||||||
""" Determine which DKMS package should be used based on kernel version """
|
|
||||||
# NOTE: 3.13 kernels have support for GRE and VXLAN native
|
|
||||||
if kernel_version() >= (3, 13):
|
|
||||||
return []
|
|
||||||
else:
|
|
||||||
return ['openvswitch-datapath-dkms']
|
|
||||||
|
|
||||||
|
|
||||||
# legacy
|
|
||||||
|
|
||||||
|
|
||||||
def quantum_plugins():
|
|
||||||
from charmhelpers.contrib.openstack import context
|
|
||||||
return {
|
|
||||||
'ovs': {
|
|
||||||
'config': '/etc/quantum/plugins/openvswitch/'
|
|
||||||
'ovs_quantum_plugin.ini',
|
|
||||||
'driver': 'quantum.plugins.openvswitch.ovs_quantum_plugin.'
|
|
||||||
'OVSQuantumPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=QUANTUM_CONF_DIR)],
|
|
||||||
'services': ['quantum-plugin-openvswitch-agent'],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['quantum-plugin-openvswitch-agent']],
|
|
||||||
'server_packages': ['quantum-server',
|
|
||||||
'quantum-plugin-openvswitch'],
|
|
||||||
'server_services': ['quantum-server']
|
|
||||||
},
|
|
||||||
'nvp': {
|
|
||||||
'config': '/etc/quantum/plugins/nicira/nvp.ini',
|
|
||||||
'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
|
|
||||||
'QuantumPlugin.NvpPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=QUANTUM_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['quantum-server',
|
|
||||||
'quantum-plugin-nicira'],
|
|
||||||
'server_services': ['quantum-server']
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
NEUTRON_CONF_DIR = '/etc/neutron'
|
|
||||||
|
|
||||||
|
|
||||||
def neutron_plugins():
|
|
||||||
from charmhelpers.contrib.openstack import context
|
|
||||||
release = os_release('nova-common')
|
|
||||||
plugins = {
|
|
||||||
'ovs': {
|
|
||||||
'config': '/etc/neutron/plugins/openvswitch/'
|
|
||||||
'ovs_neutron_plugin.ini',
|
|
||||||
'driver': 'neutron.plugins.openvswitch.ovs_neutron_plugin.'
|
|
||||||
'OVSNeutronPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': ['neutron-plugin-openvswitch-agent'],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['neutron-plugin-openvswitch-agent']],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-openvswitch'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'nvp': {
|
|
||||||
'config': '/etc/neutron/plugins/nicira/nvp.ini',
|
|
||||||
'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
|
|
||||||
'NeutronPlugin.NvpPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-nicira'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'nsx': {
|
|
||||||
'config': '/etc/neutron/plugins/vmware/nsx.ini',
|
|
||||||
'driver': 'vmware',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-vmware'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'n1kv': {
|
|
||||||
'config': '/etc/neutron/plugins/cisco/cisco_plugins.ini',
|
|
||||||
'driver': 'neutron.plugins.cisco.network_plugin.PluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['neutron-plugin-cisco']],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-cisco'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'Calico': {
|
|
||||||
'config': '/etc/neutron/plugins/ml2/ml2_conf.ini',
|
|
||||||
'driver': 'neutron.plugins.ml2.plugin.Ml2Plugin',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': ['calico-felix',
|
|
||||||
'bird',
|
|
||||||
'neutron-dhcp-agent',
|
|
||||||
'nova-api-metadata',
|
|
||||||
'etcd'],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package(),
|
|
||||||
['calico-compute',
|
|
||||||
'bird',
|
|
||||||
'neutron-dhcp-agent',
|
|
||||||
'nova-api-metadata',
|
|
||||||
'etcd']],
|
|
||||||
'server_packages': ['neutron-server', 'calico-control', 'etcd'],
|
|
||||||
'server_services': ['neutron-server', 'etcd']
|
|
||||||
},
|
|
||||||
'vsp': {
|
|
||||||
'config': '/etc/neutron/plugins/nuage/nuage_plugin.ini',
|
|
||||||
'driver': 'neutron.plugins.nuage.plugin.NuagePlugin',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [],
|
|
||||||
'server_packages': ['neutron-server', 'neutron-plugin-nuage'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'plumgrid': {
|
|
||||||
'config': '/etc/neutron/plugins/plumgrid/plumgrid.ini',
|
|
||||||
'driver': 'neutron.plugins.plumgrid.plumgrid_plugin.plumgrid_plugin.NeutronPluginPLUMgridV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('database-user'),
|
|
||||||
database=config('database'),
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': ['plumgrid-lxc',
|
|
||||||
'iovisor-dkms'],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'neutron-plugin-plumgrid'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
},
|
|
||||||
'midonet': {
|
|
||||||
'config': '/etc/neutron/plugins/midonet/midonet.ini',
|
|
||||||
'driver': 'midonet.neutron.plugin.MidonetPluginV2',
|
|
||||||
'contexts': [
|
|
||||||
context.SharedDBContext(user=config('neutron-database-user'),
|
|
||||||
database=config('neutron-database'),
|
|
||||||
relation_prefix='neutron',
|
|
||||||
ssl_dir=NEUTRON_CONF_DIR)],
|
|
||||||
'services': [],
|
|
||||||
'packages': [[headers_package()] + determine_dkms_package()],
|
|
||||||
'server_packages': ['neutron-server',
|
|
||||||
'python-neutron-plugin-midonet'],
|
|
||||||
'server_services': ['neutron-server']
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if release >= 'icehouse':
|
|
||||||
# NOTE: patch in ml2 plugin for icehouse onwards
|
|
||||||
plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
|
|
||||||
plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
|
|
||||||
plugins['ovs']['server_packages'] = ['neutron-server',
|
|
||||||
'neutron-plugin-ml2']
|
|
||||||
# NOTE: patch in vmware renames nvp->nsx for icehouse onwards
|
|
||||||
plugins['nvp'] = plugins['nsx']
|
|
||||||
return plugins
|
|
||||||
|
|
||||||
|
|
||||||
def neutron_plugin_attribute(plugin, attr, net_manager=None):
|
|
||||||
manager = net_manager or network_manager()
|
|
||||||
if manager == 'quantum':
|
|
||||||
plugins = quantum_plugins()
|
|
||||||
elif manager == 'neutron':
|
|
||||||
plugins = neutron_plugins()
|
|
||||||
else:
|
|
||||||
log("Network manager '%s' does not support plugins." % (manager),
|
|
||||||
level=ERROR)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
try:
|
|
||||||
_plugin = plugins[plugin]
|
|
||||||
except KeyError:
|
|
||||||
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
|
|
||||||
raise Exception
|
|
||||||
|
|
||||||
try:
|
|
||||||
return _plugin[attr]
|
|
||||||
except KeyError:
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def network_manager():
|
|
||||||
'''
|
|
||||||
Deals with the renaming of Quantum to Neutron in H and any situations
|
|
||||||
that require compatability (eg, deploying H with network-manager=quantum,
|
|
||||||
upgrading from G).
|
|
||||||
'''
|
|
||||||
release = os_release('nova-common')
|
|
||||||
manager = config('network-manager').lower()
|
|
||||||
|
|
||||||
if manager not in ['quantum', 'neutron']:
|
|
||||||
return manager
|
|
||||||
|
|
||||||
if release in ['essex']:
|
|
||||||
# E does not support neutron
|
|
||||||
log('Neutron networking not supported in Essex.', level=ERROR)
|
|
||||||
raise Exception
|
|
||||||
elif release in ['folsom', 'grizzly']:
|
|
||||||
# neutron is named quantum in F and G
|
|
||||||
return 'quantum'
|
|
||||||
else:
|
|
||||||
# ensure accurate naming for all releases post-H
|
|
||||||
return 'neutron'
|
|
||||||
|
|
||||||
|
|
||||||
def parse_mappings(mappings, key_rvalue=False):
|
|
||||||
"""By default mappings are lvalue keyed.
|
|
||||||
|
|
||||||
If key_rvalue is True, the mapping will be reversed to allow multiple
|
|
||||||
configs for the same lvalue.
|
|
||||||
"""
|
|
||||||
parsed = {}
|
|
||||||
if mappings:
|
|
||||||
mappings = mappings.split()
|
|
||||||
for m in mappings:
|
|
||||||
p = m.partition(':')
|
|
||||||
|
|
||||||
if key_rvalue:
|
|
||||||
key_index = 2
|
|
||||||
val_index = 0
|
|
||||||
# if there is no rvalue skip to next
|
|
||||||
if not p[1]:
|
|
||||||
continue
|
|
||||||
else:
|
|
||||||
key_index = 0
|
|
||||||
val_index = 2
|
|
||||||
|
|
||||||
key = p[key_index].strip()
|
|
||||||
parsed[key] = p[val_index].strip()
|
|
||||||
|
|
||||||
return parsed
|
|
||||||
|
|
||||||
|
|
||||||
def parse_bridge_mappings(mappings):
|
|
||||||
"""Parse bridge mappings.
|
|
||||||
|
|
||||||
Mappings must be a space-delimited list of provider:bridge mappings.
|
|
||||||
|
|
||||||
Returns dict of the form {provider:bridge}.
|
|
||||||
"""
|
|
||||||
return parse_mappings(mappings)
|
|
||||||
|
|
||||||
|
|
||||||
def parse_data_port_mappings(mappings, default_bridge='br-data'):
|
|
||||||
"""Parse data port mappings.
|
|
||||||
|
|
||||||
Mappings must be a space-delimited list of bridge:port.
|
|
||||||
|
|
||||||
Returns dict of the form {port:bridge} where ports may be mac addresses or
|
|
||||||
interface names.
|
|
||||||
"""
|
|
||||||
|
|
||||||
# NOTE(dosaboy): we use rvalue for key to allow multiple values to be
|
|
||||||
# proposed for <port> since it may be a mac address which will differ
|
|
||||||
# across units this allowing first-known-good to be chosen.
|
|
||||||
_mappings = parse_mappings(mappings, key_rvalue=True)
|
|
||||||
if not _mappings or list(_mappings.values()) == ['']:
|
|
||||||
if not mappings:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
# For backwards-compatibility we need to support port-only provided in
|
|
||||||
# config.
|
|
||||||
_mappings = {mappings.split()[0]: default_bridge}
|
|
||||||
|
|
||||||
ports = _mappings.keys()
|
|
||||||
if len(set(ports)) != len(ports):
|
|
||||||
raise Exception("It is not allowed to have the same port configured "
|
|
||||||
"on more than one bridge")
|
|
||||||
|
|
||||||
return _mappings
|
|
||||||
|
|
||||||
|
|
||||||
def parse_vlan_range_mappings(mappings):
|
|
||||||
"""Parse vlan range mappings.
|
|
||||||
|
|
||||||
Mappings must be a space-delimited list of provider:start:end mappings.
|
|
||||||
|
|
||||||
The start:end range is optional and may be omitted.
|
|
||||||
|
|
||||||
Returns dict of the form {provider: (start, end)}.
|
|
||||||
"""
|
|
||||||
_mappings = parse_mappings(mappings)
|
|
||||||
if not _mappings:
|
|
||||||
return {}
|
|
||||||
|
|
||||||
mappings = {}
|
|
||||||
for p, r in six.iteritems(_mappings):
|
|
||||||
mappings[p] = tuple(r.split(':'))
|
|
||||||
|
|
||||||
return mappings
|
|
@ -1,18 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# dummy __init__.py to fool syncer into thinking this is a syncable python
|
|
||||||
# module
|
|
@ -1,21 +0,0 @@
|
|||||||
###############################################################################
|
|
||||||
# [ WARNING ]
|
|
||||||
# cinder configuration file maintained by Juju
|
|
||||||
# local changes may be overwritten.
|
|
||||||
###############################################################################
|
|
||||||
[global]
|
|
||||||
{% if auth -%}
|
|
||||||
auth_supported = {{ auth }}
|
|
||||||
keyring = /etc/ceph/$cluster.$name.keyring
|
|
||||||
mon host = {{ mon_hosts }}
|
|
||||||
{% endif -%}
|
|
||||||
log to syslog = {{ use_syslog }}
|
|
||||||
err to syslog = {{ use_syslog }}
|
|
||||||
clog to syslog = {{ use_syslog }}
|
|
||||||
|
|
||||||
[client]
|
|
||||||
{% if rbd_client_cache_settings -%}
|
|
||||||
{% for key, value in rbd_client_cache_settings.iteritems() -%}
|
|
||||||
{{ key }} = {{ value }}
|
|
||||||
{% endfor -%}
|
|
||||||
{%- endif %}
|
|
@ -1,17 +0,0 @@
|
|||||||
description "{{ service_description }}"
|
|
||||||
author "Juju {{ service_name }} Charm <juju@localhost>"
|
|
||||||
|
|
||||||
start on runlevel [2345]
|
|
||||||
stop on runlevel [!2345]
|
|
||||||
|
|
||||||
respawn
|
|
||||||
|
|
||||||
exec start-stop-daemon --start --chuid {{ user_name }} \
|
|
||||||
--chdir {{ start_dir }} --name {{ process_name }} \
|
|
||||||
--exec {{ executable_name }} -- \
|
|
||||||
{% for config_file in config_files -%}
|
|
||||||
--config-file={{ config_file }} \
|
|
||||||
{% endfor -%}
|
|
||||||
{% if log_file -%}
|
|
||||||
--log-file={{ log_file }}
|
|
||||||
{% endif -%}
|
|
@ -1,65 +0,0 @@
|
|||||||
global
|
|
||||||
log {{ local_host }} local0
|
|
||||||
log {{ local_host }} local1 notice
|
|
||||||
maxconn 20000
|
|
||||||
user haproxy
|
|
||||||
group haproxy
|
|
||||||
spread-checks 0
|
|
||||||
|
|
||||||
defaults
|
|
||||||
log global
|
|
||||||
mode tcp
|
|
||||||
option tcplog
|
|
||||||
option dontlognull
|
|
||||||
retries 3
|
|
||||||
{%- if haproxy_queue_timeout %}
|
|
||||||
timeout queue {{ haproxy_queue_timeout }}
|
|
||||||
{%- else %}
|
|
||||||
timeout queue 5000
|
|
||||||
{%- endif %}
|
|
||||||
{%- if haproxy_connect_timeout %}
|
|
||||||
timeout connect {{ haproxy_connect_timeout }}
|
|
||||||
{%- else %}
|
|
||||||
timeout connect 5000
|
|
||||||
{%- endif %}
|
|
||||||
{%- if haproxy_client_timeout %}
|
|
||||||
timeout client {{ haproxy_client_timeout }}
|
|
||||||
{%- else %}
|
|
||||||
timeout client 30000
|
|
||||||
{%- endif %}
|
|
||||||
{%- if haproxy_server_timeout %}
|
|
||||||
timeout server {{ haproxy_server_timeout }}
|
|
||||||
{%- else %}
|
|
||||||
timeout server 30000
|
|
||||||
{%- endif %}
|
|
||||||
|
|
||||||
listen stats {{ stat_port }}
|
|
||||||
mode http
|
|
||||||
stats enable
|
|
||||||
stats hide-version
|
|
||||||
stats realm Haproxy\ Statistics
|
|
||||||
stats uri /
|
|
||||||
stats auth admin:password
|
|
||||||
|
|
||||||
{% if frontends -%}
|
|
||||||
{% for service, ports in service_ports.items() -%}
|
|
||||||
frontend tcp-in_{{ service }}
|
|
||||||
bind *:{{ ports[0] }}
|
|
||||||
{% if ipv6 -%}
|
|
||||||
bind :::{{ ports[0] }}
|
|
||||||
{% endif -%}
|
|
||||||
{% for frontend in frontends -%}
|
|
||||||
acl net_{{ frontend }} dst {{ frontends[frontend]['network'] }}
|
|
||||||
use_backend {{ service }}_{{ frontend }} if net_{{ frontend }}
|
|
||||||
{% endfor -%}
|
|
||||||
default_backend {{ service }}_{{ default_backend }}
|
|
||||||
|
|
||||||
{% for frontend in frontends -%}
|
|
||||||
backend {{ service }}_{{ frontend }}
|
|
||||||
balance leastconn
|
|
||||||
{% for unit, address in frontends[frontend]['backends'].items() -%}
|
|
||||||
server {{ unit }} {{ address }}:{{ ports[1] }} check
|
|
||||||
{% endfor %}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endfor -%}
|
|
||||||
{% endif -%}
|
|
@ -1,24 +0,0 @@
|
|||||||
{% if endpoints -%}
|
|
||||||
{% for ext_port in ext_ports -%}
|
|
||||||
Listen {{ ext_port }}
|
|
||||||
{% endfor -%}
|
|
||||||
{% for address, endpoint, ext, int in endpoints -%}
|
|
||||||
<VirtualHost {{ address }}:{{ ext }}>
|
|
||||||
ServerName {{ endpoint }}
|
|
||||||
SSLEngine on
|
|
||||||
SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
|
|
||||||
SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
|
|
||||||
ProxyPass / http://localhost:{{ int }}/
|
|
||||||
ProxyPassReverse / http://localhost:{{ int }}/
|
|
||||||
ProxyPreserveHost on
|
|
||||||
</VirtualHost>
|
|
||||||
{% endfor -%}
|
|
||||||
<Proxy *>
|
|
||||||
Order deny,allow
|
|
||||||
Allow from all
|
|
||||||
</Proxy>
|
|
||||||
<Location />
|
|
||||||
Order allow,deny
|
|
||||||
Allow from all
|
|
||||||
</Location>
|
|
||||||
{% endif -%}
|
|
@ -1 +0,0 @@
|
|||||||
openstack_https_frontend
|
|
@ -1,9 +0,0 @@
|
|||||||
{% if auth_host -%}
|
|
||||||
[keystone_authtoken]
|
|
||||||
identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
|
|
||||||
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
|
|
||||||
admin_tenant_name = {{ admin_tenant_name }}
|
|
||||||
admin_user = {{ admin_user }}
|
|
||||||
admin_password = {{ admin_password }}
|
|
||||||
signing_dir = {{ signing_dir }}
|
|
||||||
{% endif -%}
|
|
@ -1,22 +0,0 @@
|
|||||||
{% if rabbitmq_host or rabbitmq_hosts -%}
|
|
||||||
[oslo_messaging_rabbit]
|
|
||||||
rabbit_userid = {{ rabbitmq_user }}
|
|
||||||
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
|
|
||||||
rabbit_password = {{ rabbitmq_password }}
|
|
||||||
{% if rabbitmq_hosts -%}
|
|
||||||
rabbit_hosts = {{ rabbitmq_hosts }}
|
|
||||||
{% if rabbitmq_ha_queues -%}
|
|
||||||
rabbit_ha_queues = True
|
|
||||||
rabbit_durable_queues = False
|
|
||||||
{% endif -%}
|
|
||||||
{% else -%}
|
|
||||||
rabbit_host = {{ rabbitmq_host }}
|
|
||||||
{% endif -%}
|
|
||||||
{% if rabbit_ssl_port -%}
|
|
||||||
rabbit_use_ssl = True
|
|
||||||
rabbit_port = {{ rabbit_ssl_port }}
|
|
||||||
{% if rabbit_ssl_ca -%}
|
|
||||||
kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
@ -1,14 +0,0 @@
|
|||||||
{% if zmq_host -%}
|
|
||||||
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
|
|
||||||
rpc_backend = zmq
|
|
||||||
rpc_zmq_host = {{ zmq_host }}
|
|
||||||
{% if zmq_redis_address -%}
|
|
||||||
rpc_zmq_matchmaker = redis
|
|
||||||
matchmaker_heartbeat_freq = 15
|
|
||||||
matchmaker_heartbeat_ttl = 30
|
|
||||||
[matchmaker_redis]
|
|
||||||
host = {{ zmq_redis_address }}
|
|
||||||
{% else -%}
|
|
||||||
rpc_zmq_matchmaker = ring
|
|
||||||
{% endif -%}
|
|
||||||
{% endif -%}
|
|
@ -1,323 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install, apt_update
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
log,
|
|
||||||
ERROR,
|
|
||||||
INFO
|
|
||||||
)
|
|
||||||
from charmhelpers.contrib.openstack.utils import OPENSTACK_CODENAMES
|
|
||||||
|
|
||||||
try:
|
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
|
||||||
except ImportError:
|
|
||||||
apt_update(fatal=True)
|
|
||||||
apt_install('python-jinja2', fatal=True)
|
|
||||||
from jinja2 import FileSystemLoader, ChoiceLoader, Environment, exceptions
|
|
||||||
|
|
||||||
|
|
||||||
class OSConfigException(Exception):
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def get_loader(templates_dir, os_release):
|
|
||||||
"""
|
|
||||||
Create a jinja2.ChoiceLoader containing template dirs up to
|
|
||||||
and including os_release. If directory template directory
|
|
||||||
is missing at templates_dir, it will be omitted from the loader.
|
|
||||||
templates_dir is added to the bottom of the search list as a base
|
|
||||||
loading dir.
|
|
||||||
|
|
||||||
A charm may also ship a templates dir with this module
|
|
||||||
and it will be appended to the bottom of the search list, eg::
|
|
||||||
|
|
||||||
hooks/charmhelpers/contrib/openstack/templates
|
|
||||||
|
|
||||||
:param templates_dir (str): Base template directory containing release
|
|
||||||
sub-directories.
|
|
||||||
:param os_release (str): OpenStack release codename to construct template
|
|
||||||
loader.
|
|
||||||
:returns: jinja2.ChoiceLoader constructed with a list of
|
|
||||||
jinja2.FilesystemLoaders, ordered in descending
|
|
||||||
order by OpenStack release.
|
|
||||||
"""
|
|
||||||
tmpl_dirs = [(rel, os.path.join(templates_dir, rel))
|
|
||||||
for rel in six.itervalues(OPENSTACK_CODENAMES)]
|
|
||||||
|
|
||||||
if not os.path.isdir(templates_dir):
|
|
||||||
log('Templates directory not found @ %s.' % templates_dir,
|
|
||||||
level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
|
|
||||||
# the bottom contains tempaltes_dir and possibly a common templates dir
|
|
||||||
# shipped with the helper.
|
|
||||||
loaders = [FileSystemLoader(templates_dir)]
|
|
||||||
helper_templates = os.path.join(os.path.dirname(__file__), 'templates')
|
|
||||||
if os.path.isdir(helper_templates):
|
|
||||||
loaders.append(FileSystemLoader(helper_templates))
|
|
||||||
|
|
||||||
for rel, tmpl_dir in tmpl_dirs:
|
|
||||||
if os.path.isdir(tmpl_dir):
|
|
||||||
loaders.insert(0, FileSystemLoader(tmpl_dir))
|
|
||||||
if rel == os_release:
|
|
||||||
break
|
|
||||||
log('Creating choice loader with dirs: %s' %
|
|
||||||
[l.searchpath for l in loaders], level=INFO)
|
|
||||||
return ChoiceLoader(loaders)
|
|
||||||
|
|
||||||
|
|
||||||
class OSConfigTemplate(object):
|
|
||||||
"""
|
|
||||||
Associates a config file template with a list of context generators.
|
|
||||||
Responsible for constructing a template context based on those generators.
|
|
||||||
"""
|
|
||||||
def __init__(self, config_file, contexts):
|
|
||||||
self.config_file = config_file
|
|
||||||
|
|
||||||
if hasattr(contexts, '__call__'):
|
|
||||||
self.contexts = [contexts]
|
|
||||||
else:
|
|
||||||
self.contexts = contexts
|
|
||||||
|
|
||||||
self._complete_contexts = []
|
|
||||||
|
|
||||||
def context(self):
|
|
||||||
ctxt = {}
|
|
||||||
for context in self.contexts:
|
|
||||||
_ctxt = context()
|
|
||||||
if _ctxt:
|
|
||||||
ctxt.update(_ctxt)
|
|
||||||
# track interfaces for every complete context.
|
|
||||||
[self._complete_contexts.append(interface)
|
|
||||||
for interface in context.interfaces
|
|
||||||
if interface not in self._complete_contexts]
|
|
||||||
return ctxt
|
|
||||||
|
|
||||||
def complete_contexts(self):
|
|
||||||
'''
|
|
||||||
Return a list of interfaces that have satisfied contexts.
|
|
||||||
'''
|
|
||||||
if self._complete_contexts:
|
|
||||||
return self._complete_contexts
|
|
||||||
self.context()
|
|
||||||
return self._complete_contexts
|
|
||||||
|
|
||||||
|
|
||||||
class OSConfigRenderer(object):
|
|
||||||
"""
|
|
||||||
This class provides a common templating system to be used by OpenStack
|
|
||||||
charms. It is intended to help charms share common code and templates,
|
|
||||||
and ease the burden of managing config templates across multiple OpenStack
|
|
||||||
releases.
|
|
||||||
|
|
||||||
Basic usage::
|
|
||||||
|
|
||||||
# import some common context generates from charmhelpers
|
|
||||||
from charmhelpers.contrib.openstack import context
|
|
||||||
|
|
||||||
# Create a renderer object for a specific OS release.
|
|
||||||
configs = OSConfigRenderer(templates_dir='/tmp/templates',
|
|
||||||
openstack_release='folsom')
|
|
||||||
# register some config files with context generators.
|
|
||||||
configs.register(config_file='/etc/nova/nova.conf',
|
|
||||||
contexts=[context.SharedDBContext(),
|
|
||||||
context.AMQPContext()])
|
|
||||||
configs.register(config_file='/etc/nova/api-paste.ini',
|
|
||||||
contexts=[context.IdentityServiceContext()])
|
|
||||||
configs.register(config_file='/etc/haproxy/haproxy.conf',
|
|
||||||
contexts=[context.HAProxyContext()])
|
|
||||||
# write out a single config
|
|
||||||
configs.write('/etc/nova/nova.conf')
|
|
||||||
# write out all registered configs
|
|
||||||
configs.write_all()
|
|
||||||
|
|
||||||
**OpenStack Releases and template loading**
|
|
||||||
|
|
||||||
When the object is instantiated, it is associated with a specific OS
|
|
||||||
release. This dictates how the template loader will be constructed.
|
|
||||||
|
|
||||||
The constructed loader attempts to load the template from several places
|
|
||||||
in the following order:
|
|
||||||
- from the most recent OS release-specific template dir (if one exists)
|
|
||||||
- the base templates_dir
|
|
||||||
- a template directory shipped in the charm with this helper file.
|
|
||||||
|
|
||||||
For the example above, '/tmp/templates' contains the following structure::
|
|
||||||
|
|
||||||
/tmp/templates/nova.conf
|
|
||||||
/tmp/templates/api-paste.ini
|
|
||||||
/tmp/templates/grizzly/api-paste.ini
|
|
||||||
/tmp/templates/havana/api-paste.ini
|
|
||||||
|
|
||||||
Since it was registered with the grizzly release, it first seraches
|
|
||||||
the grizzly directory for nova.conf, then the templates dir.
|
|
||||||
|
|
||||||
When writing api-paste.ini, it will find the template in the grizzly
|
|
||||||
directory.
|
|
||||||
|
|
||||||
If the object were created with folsom, it would fall back to the
|
|
||||||
base templates dir for its api-paste.ini template.
|
|
||||||
|
|
||||||
This system should help manage changes in config files through
|
|
||||||
openstack releases, allowing charms to fall back to the most recently
|
|
||||||
updated config template for a given release
|
|
||||||
|
|
||||||
The haproxy.conf, since it is not shipped in the templates dir, will
|
|
||||||
be loaded from the module directory's template directory, eg
|
|
||||||
$CHARM/hooks/charmhelpers/contrib/openstack/templates. This allows
|
|
||||||
us to ship common templates (haproxy, apache) with the helpers.
|
|
||||||
|
|
||||||
**Context generators**
|
|
||||||
|
|
||||||
Context generators are used to generate template contexts during hook
|
|
||||||
execution. Doing so may require inspecting service relations, charm
|
|
||||||
config, etc. When registered, a config file is associated with a list
|
|
||||||
of generators. When a template is rendered and written, all context
|
|
||||||
generates are called in a chain to generate the context dictionary
|
|
||||||
passed to the jinja2 template. See context.py for more info.
|
|
||||||
"""
|
|
||||||
def __init__(self, templates_dir, openstack_release):
|
|
||||||
if not os.path.isdir(templates_dir):
|
|
||||||
log('Could not locate templates dir %s' % templates_dir,
|
|
||||||
level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
|
|
||||||
self.templates_dir = templates_dir
|
|
||||||
self.openstack_release = openstack_release
|
|
||||||
self.templates = {}
|
|
||||||
self._tmpl_env = None
|
|
||||||
|
|
||||||
if None in [Environment, ChoiceLoader, FileSystemLoader]:
|
|
||||||
# if this code is running, the object is created pre-install hook.
|
|
||||||
# jinja2 shouldn't get touched until the module is reloaded on next
|
|
||||||
# hook execution, with proper jinja2 bits successfully imported.
|
|
||||||
apt_install('python-jinja2')
|
|
||||||
|
|
||||||
def register(self, config_file, contexts):
|
|
||||||
"""
|
|
||||||
Register a config file with a list of context generators to be called
|
|
||||||
during rendering.
|
|
||||||
"""
|
|
||||||
self.templates[config_file] = OSConfigTemplate(config_file=config_file,
|
|
||||||
contexts=contexts)
|
|
||||||
log('Registered config file: %s' % config_file, level=INFO)
|
|
||||||
|
|
||||||
def _get_tmpl_env(self):
|
|
||||||
if not self._tmpl_env:
|
|
||||||
loader = get_loader(self.templates_dir, self.openstack_release)
|
|
||||||
self._tmpl_env = Environment(loader=loader)
|
|
||||||
|
|
||||||
def _get_template(self, template):
|
|
||||||
self._get_tmpl_env()
|
|
||||||
template = self._tmpl_env.get_template(template)
|
|
||||||
log('Loaded template from %s' % template.filename, level=INFO)
|
|
||||||
return template
|
|
||||||
|
|
||||||
def render(self, config_file):
|
|
||||||
if config_file not in self.templates:
|
|
||||||
log('Config not registered: %s' % config_file, level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
ctxt = self.templates[config_file].context()
|
|
||||||
|
|
||||||
_tmpl = os.path.basename(config_file)
|
|
||||||
try:
|
|
||||||
template = self._get_template(_tmpl)
|
|
||||||
except exceptions.TemplateNotFound:
|
|
||||||
# if no template is found with basename, try looking for it
|
|
||||||
# using a munged full path, eg:
|
|
||||||
# /etc/apache2/apache2.conf -> etc_apache2_apache2.conf
|
|
||||||
_tmpl = '_'.join(config_file.split('/')[1:])
|
|
||||||
try:
|
|
||||||
template = self._get_template(_tmpl)
|
|
||||||
except exceptions.TemplateNotFound as e:
|
|
||||||
log('Could not load template from %s by %s or %s.' %
|
|
||||||
(self.templates_dir, os.path.basename(config_file), _tmpl),
|
|
||||||
level=ERROR)
|
|
||||||
raise e
|
|
||||||
|
|
||||||
log('Rendering from template: %s' % _tmpl, level=INFO)
|
|
||||||
return template.render(ctxt)
|
|
||||||
|
|
||||||
def write(self, config_file):
|
|
||||||
"""
|
|
||||||
Write a single config file, raises if config file is not registered.
|
|
||||||
"""
|
|
||||||
if config_file not in self.templates:
|
|
||||||
log('Config not registered: %s' % config_file, level=ERROR)
|
|
||||||
raise OSConfigException
|
|
||||||
|
|
||||||
_out = self.render(config_file)
|
|
||||||
|
|
||||||
with open(config_file, 'wb') as out:
|
|
||||||
out.write(_out)
|
|
||||||
|
|
||||||
log('Wrote template %s.' % config_file, level=INFO)
|
|
||||||
|
|
||||||
def write_all(self):
|
|
||||||
"""
|
|
||||||
Write out all registered config files.
|
|
||||||
"""
|
|
||||||
[self.write(k) for k in six.iterkeys(self.templates)]
|
|
||||||
|
|
||||||
def set_release(self, openstack_release):
|
|
||||||
"""
|
|
||||||
Resets the template environment and generates a new template loader
|
|
||||||
based on a the new openstack release.
|
|
||||||
"""
|
|
||||||
self._tmpl_env = None
|
|
||||||
self.openstack_release = openstack_release
|
|
||||||
self._get_tmpl_env()
|
|
||||||
|
|
||||||
def complete_contexts(self):
|
|
||||||
'''
|
|
||||||
Returns a list of context interfaces that yield a complete context.
|
|
||||||
'''
|
|
||||||
interfaces = []
|
|
||||||
[interfaces.extend(i.complete_contexts())
|
|
||||||
for i in six.itervalues(self.templates)]
|
|
||||||
return interfaces
|
|
||||||
|
|
||||||
def get_incomplete_context_data(self, interfaces):
|
|
||||||
'''
|
|
||||||
Return dictionary of relation status of interfaces and any missing
|
|
||||||
required context data. Example:
|
|
||||||
{'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
|
|
||||||
'zeromq-configuration': {'related': False}}
|
|
||||||
'''
|
|
||||||
incomplete_context_data = {}
|
|
||||||
|
|
||||||
for i in six.itervalues(self.templates):
|
|
||||||
for context in i.contexts:
|
|
||||||
for interface in interfaces:
|
|
||||||
related = False
|
|
||||||
if interface in context.interfaces:
|
|
||||||
related = context.get_related()
|
|
||||||
missing_data = context.missing_data
|
|
||||||
if missing_data:
|
|
||||||
incomplete_context_data[interface] = {'missing_data': missing_data}
|
|
||||||
if related:
|
|
||||||
if incomplete_context_data.get(interface):
|
|
||||||
incomplete_context_data[interface].update({'related': True})
|
|
||||||
else:
|
|
||||||
incomplete_context_data[interface] = {'related': True}
|
|
||||||
else:
|
|
||||||
incomplete_context_data[interface] = {'related': False}
|
|
||||||
return incomplete_context_data
|
|
@ -1,998 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
# Common python helper functions used for OpenStack charms.
|
|
||||||
from collections import OrderedDict
|
|
||||||
from functools import wraps
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
import json
|
|
||||||
import os
|
|
||||||
import sys
|
|
||||||
import re
|
|
||||||
|
|
||||||
import six
|
|
||||||
import traceback
|
|
||||||
import uuid
|
|
||||||
import yaml
|
|
||||||
|
|
||||||
from charmhelpers.contrib.network import ip
|
|
||||||
|
|
||||||
from charmhelpers.core import (
|
|
||||||
unitdata,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
action_fail,
|
|
||||||
action_set,
|
|
||||||
config,
|
|
||||||
log as juju_log,
|
|
||||||
charm_dir,
|
|
||||||
INFO,
|
|
||||||
related_units,
|
|
||||||
relation_ids,
|
|
||||||
relation_set,
|
|
||||||
status_set,
|
|
||||||
hook_name
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.storage.linux.lvm import (
|
|
||||||
deactivate_lvm_volume_group,
|
|
||||||
is_lvm_physical_volume,
|
|
||||||
remove_lvm_physical_volume,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.network.ip import (
|
|
||||||
get_ipv6_addr,
|
|
||||||
is_ipv6,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.contrib.python.packages import (
|
|
||||||
pip_create_virtualenv,
|
|
||||||
pip_install,
|
|
||||||
)
|
|
||||||
|
|
||||||
from charmhelpers.core.host import lsb_release, mounts, umount
|
|
||||||
from charmhelpers.fetch import apt_install, apt_cache, install_remote
|
|
||||||
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
|
|
||||||
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
|
|
||||||
|
|
||||||
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
|
|
||||||
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
|
|
||||||
|
|
||||||
DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
|
|
||||||
'restricted main multiverse universe')
|
|
||||||
|
|
||||||
UBUNTU_OPENSTACK_RELEASE = OrderedDict([
|
|
||||||
('oneiric', 'diablo'),
|
|
||||||
('precise', 'essex'),
|
|
||||||
('quantal', 'folsom'),
|
|
||||||
('raring', 'grizzly'),
|
|
||||||
('saucy', 'havana'),
|
|
||||||
('trusty', 'icehouse'),
|
|
||||||
('utopic', 'juno'),
|
|
||||||
('vivid', 'kilo'),
|
|
||||||
('wily', 'liberty'),
|
|
||||||
])
|
|
||||||
|
|
||||||
|
|
||||||
OPENSTACK_CODENAMES = OrderedDict([
|
|
||||||
('2011.2', 'diablo'),
|
|
||||||
('2012.1', 'essex'),
|
|
||||||
('2012.2', 'folsom'),
|
|
||||||
('2013.1', 'grizzly'),
|
|
||||||
('2013.2', 'havana'),
|
|
||||||
('2014.1', 'icehouse'),
|
|
||||||
('2014.2', 'juno'),
|
|
||||||
('2015.1', 'kilo'),
|
|
||||||
('2015.2', 'liberty'),
|
|
||||||
])
|
|
||||||
|
|
||||||
# The ugly duckling
|
|
||||||
SWIFT_CODENAMES = OrderedDict([
|
|
||||||
('1.4.3', 'diablo'),
|
|
||||||
('1.4.8', 'essex'),
|
|
||||||
('1.7.4', 'folsom'),
|
|
||||||
('1.8.0', 'grizzly'),
|
|
||||||
('1.7.7', 'grizzly'),
|
|
||||||
('1.7.6', 'grizzly'),
|
|
||||||
('1.10.0', 'havana'),
|
|
||||||
('1.9.1', 'havana'),
|
|
||||||
('1.9.0', 'havana'),
|
|
||||||
('1.13.1', 'icehouse'),
|
|
||||||
('1.13.0', 'icehouse'),
|
|
||||||
('1.12.0', 'icehouse'),
|
|
||||||
('1.11.0', 'icehouse'),
|
|
||||||
('2.0.0', 'juno'),
|
|
||||||
('2.1.0', 'juno'),
|
|
||||||
('2.2.0', 'juno'),
|
|
||||||
('2.2.1', 'kilo'),
|
|
||||||
('2.2.2', 'kilo'),
|
|
||||||
('2.3.0', 'liberty'),
|
|
||||||
('2.4.0', 'liberty'),
|
|
||||||
('2.5.0', 'liberty'),
|
|
||||||
])
|
|
||||||
|
|
||||||
# >= Liberty version->codename mapping
|
|
||||||
PACKAGE_CODENAMES = {
|
|
||||||
'nova-common': OrderedDict([
|
|
||||||
('12.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'neutron-common': OrderedDict([
|
|
||||||
('7.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'cinder-common': OrderedDict([
|
|
||||||
('7.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'keystone': OrderedDict([
|
|
||||||
('8.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'horizon-common': OrderedDict([
|
|
||||||
('8.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'ceilometer-common': OrderedDict([
|
|
||||||
('5.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'heat-common': OrderedDict([
|
|
||||||
('5.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'glance-common': OrderedDict([
|
|
||||||
('11.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
'openstack-dashboard': OrderedDict([
|
|
||||||
('8.0.0', 'liberty'),
|
|
||||||
]),
|
|
||||||
}
|
|
||||||
|
|
||||||
DEFAULT_LOOPBACK_SIZE = '5G'
|
|
||||||
|
|
||||||
|
|
||||||
def error_out(msg):
|
|
||||||
juju_log("FATAL ERROR: %s" % msg, level='ERROR')
|
|
||||||
sys.exit(1)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_codename_install_source(src):
|
|
||||||
'''Derive OpenStack release codename from a given installation source.'''
|
|
||||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
|
||||||
rel = ''
|
|
||||||
if src is None:
|
|
||||||
return rel
|
|
||||||
if src in ['distro', 'distro-proposed']:
|
|
||||||
try:
|
|
||||||
rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Could not derive openstack release for '\
|
|
||||||
'this Ubuntu release: %s' % ubuntu_rel
|
|
||||||
error_out(e)
|
|
||||||
return rel
|
|
||||||
|
|
||||||
if src.startswith('cloud:'):
|
|
||||||
ca_rel = src.split(':')[1]
|
|
||||||
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
|
|
||||||
return ca_rel
|
|
||||||
|
|
||||||
# Best guess match based on deb string provided
|
|
||||||
if src.startswith('deb') or src.startswith('ppa'):
|
|
||||||
for k, v in six.iteritems(OPENSTACK_CODENAMES):
|
|
||||||
if v in src:
|
|
||||||
return v
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_install_source(src):
|
|
||||||
codename = get_os_codename_install_source(src)
|
|
||||||
return get_os_version_codename(codename)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_codename_version(vers):
|
|
||||||
'''Determine OpenStack codename from version number.'''
|
|
||||||
try:
|
|
||||||
return OPENSTACK_CODENAMES[vers]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_codename(codename, version_map=OPENSTACK_CODENAMES):
|
|
||||||
'''Determine OpenStack version number from codename.'''
|
|
||||||
for k, v in six.iteritems(version_map):
|
|
||||||
if v == codename:
|
|
||||||
return k
|
|
||||||
e = 'Could not derive OpenStack version for '\
|
|
||||||
'codename: %s' % codename
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_codename_package(package, fatal=True):
|
|
||||||
'''Derive OpenStack release codename from an installed package.'''
|
|
||||||
import apt_pkg as apt
|
|
||||||
|
|
||||||
cache = apt_cache()
|
|
||||||
|
|
||||||
try:
|
|
||||||
pkg = cache[package]
|
|
||||||
except:
|
|
||||||
if not fatal:
|
|
||||||
return None
|
|
||||||
# the package is unknown to the current apt cache.
|
|
||||||
e = 'Could not determine version of package with no installation '\
|
|
||||||
'candidate: %s' % package
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
if not pkg.current_ver:
|
|
||||||
if not fatal:
|
|
||||||
return None
|
|
||||||
# package is known, but no version is currently installed.
|
|
||||||
e = 'Could not determine version of uninstalled package: %s' % package
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
|
||||||
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
|
|
||||||
if match:
|
|
||||||
vers = match.group(0)
|
|
||||||
|
|
||||||
# >= Liberty independent project versions
|
|
||||||
if (package in PACKAGE_CODENAMES and
|
|
||||||
vers in PACKAGE_CODENAMES[package]):
|
|
||||||
return PACKAGE_CODENAMES[package][vers]
|
|
||||||
else:
|
|
||||||
# < Liberty co-ordinated project versions
|
|
||||||
try:
|
|
||||||
if 'swift' in pkg.name:
|
|
||||||
swift_vers = vers[:5]
|
|
||||||
if swift_vers not in SWIFT_CODENAMES:
|
|
||||||
# Deal with 1.10.0 upward
|
|
||||||
swift_vers = vers[:6]
|
|
||||||
return SWIFT_CODENAMES[swift_vers]
|
|
||||||
else:
|
|
||||||
vers = vers[:6]
|
|
||||||
return OPENSTACK_CODENAMES[vers]
|
|
||||||
except KeyError:
|
|
||||||
if not fatal:
|
|
||||||
return None
|
|
||||||
e = 'Could not determine OpenStack codename for version %s' % vers
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
def get_os_version_package(pkg, fatal=True):
|
|
||||||
'''Derive OpenStack version number from an installed package.'''
|
|
||||||
codename = get_os_codename_package(pkg, fatal=fatal)
|
|
||||||
|
|
||||||
if not codename:
|
|
||||||
return None
|
|
||||||
|
|
||||||
if 'swift' in pkg:
|
|
||||||
vers_map = SWIFT_CODENAMES
|
|
||||||
else:
|
|
||||||
vers_map = OPENSTACK_CODENAMES
|
|
||||||
|
|
||||||
for version, cname in six.iteritems(vers_map):
|
|
||||||
if cname == codename:
|
|
||||||
return version
|
|
||||||
# e = "Could not determine OpenStack version for package: %s" % pkg
|
|
||||||
# error_out(e)
|
|
||||||
|
|
||||||
|
|
||||||
os_rel = None
|
|
||||||
|
|
||||||
|
|
||||||
def os_release(package, base='essex'):
|
|
||||||
'''
|
|
||||||
Returns OpenStack release codename from a cached global.
|
|
||||||
If the codename can not be determined from either an installed package or
|
|
||||||
the installation source, the earliest release supported by the charm should
|
|
||||||
be returned.
|
|
||||||
'''
|
|
||||||
global os_rel
|
|
||||||
if os_rel:
|
|
||||||
return os_rel
|
|
||||||
os_rel = (get_os_codename_package(package, fatal=False) or
|
|
||||||
get_os_codename_install_source(config('openstack-origin')) or
|
|
||||||
base)
|
|
||||||
return os_rel
|
|
||||||
|
|
||||||
|
|
||||||
def import_key(keyid):
|
|
||||||
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
|
|
||||||
"--recv-keys %s" % keyid
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd.split(' '))
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
error_out("Error importing repo key %s" % keyid)
|
|
||||||
|
|
||||||
|
|
||||||
def configure_installation_source(rel):
|
|
||||||
'''Configure apt installation source.'''
|
|
||||||
if rel == 'distro':
|
|
||||||
return
|
|
||||||
elif rel == 'distro-proposed':
|
|
||||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
|
||||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
|
||||||
f.write(DISTRO_PROPOSED % ubuntu_rel)
|
|
||||||
elif rel[:4] == "ppa:":
|
|
||||||
src = rel
|
|
||||||
subprocess.check_call(["add-apt-repository", "-y", src])
|
|
||||||
elif rel[:3] == "deb":
|
|
||||||
l = len(rel.split('|'))
|
|
||||||
if l == 2:
|
|
||||||
src, key = rel.split('|')
|
|
||||||
juju_log("Importing PPA key from keyserver for %s" % src)
|
|
||||||
import_key(key)
|
|
||||||
elif l == 1:
|
|
||||||
src = rel
|
|
||||||
with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
|
|
||||||
f.write(src)
|
|
||||||
elif rel[:6] == 'cloud:':
|
|
||||||
ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
|
|
||||||
rel = rel.split(':')[1]
|
|
||||||
u_rel = rel.split('-')[0]
|
|
||||||
ca_rel = rel.split('-')[1]
|
|
||||||
|
|
||||||
if u_rel != ubuntu_rel:
|
|
||||||
e = 'Cannot install from Cloud Archive pocket %s on this Ubuntu '\
|
|
||||||
'version (%s)' % (ca_rel, ubuntu_rel)
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
if 'staging' in ca_rel:
|
|
||||||
# staging is just a regular PPA.
|
|
||||||
os_rel = ca_rel.split('/')[0]
|
|
||||||
ppa = 'ppa:ubuntu-cloud-archive/%s-staging' % os_rel
|
|
||||||
cmd = 'add-apt-repository -y %s' % ppa
|
|
||||||
subprocess.check_call(cmd.split(' '))
|
|
||||||
return
|
|
||||||
|
|
||||||
# map charm config options to actual archive pockets.
|
|
||||||
pockets = {
|
|
||||||
'folsom': 'precise-updates/folsom',
|
|
||||||
'folsom/updates': 'precise-updates/folsom',
|
|
||||||
'folsom/proposed': 'precise-proposed/folsom',
|
|
||||||
'grizzly': 'precise-updates/grizzly',
|
|
||||||
'grizzly/updates': 'precise-updates/grizzly',
|
|
||||||
'grizzly/proposed': 'precise-proposed/grizzly',
|
|
||||||
'havana': 'precise-updates/havana',
|
|
||||||
'havana/updates': 'precise-updates/havana',
|
|
||||||
'havana/proposed': 'precise-proposed/havana',
|
|
||||||
'icehouse': 'precise-updates/icehouse',
|
|
||||||
'icehouse/updates': 'precise-updates/icehouse',
|
|
||||||
'icehouse/proposed': 'precise-proposed/icehouse',
|
|
||||||
'juno': 'trusty-updates/juno',
|
|
||||||
'juno/updates': 'trusty-updates/juno',
|
|
||||||
'juno/proposed': 'trusty-proposed/juno',
|
|
||||||
'kilo': 'trusty-updates/kilo',
|
|
||||||
'kilo/updates': 'trusty-updates/kilo',
|
|
||||||
'kilo/proposed': 'trusty-proposed/kilo',
|
|
||||||
'liberty': 'trusty-updates/liberty',
|
|
||||||
'liberty/updates': 'trusty-updates/liberty',
|
|
||||||
'liberty/proposed': 'trusty-proposed/liberty',
|
|
||||||
}
|
|
||||||
|
|
||||||
try:
|
|
||||||
pocket = pockets[ca_rel]
|
|
||||||
except KeyError:
|
|
||||||
e = 'Invalid Cloud Archive release specified: %s' % rel
|
|
||||||
error_out(e)
|
|
||||||
|
|
||||||
src = "deb %s %s main" % (CLOUD_ARCHIVE_URL, pocket)
|
|
||||||
apt_install('ubuntu-cloud-keyring', fatal=True)
|
|
||||||
|
|
||||||
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as f:
|
|
||||||
f.write(src)
|
|
||||||
else:
|
|
||||||
error_out("Invalid openstack-release specified: %s" % rel)
|
|
||||||
|
|
||||||
|
|
||||||
def config_value_changed(option):
|
|
||||||
"""
|
|
||||||
Determine if config value changed since last call to this function.
|
|
||||||
"""
|
|
||||||
hook_data = unitdata.HookData()
|
|
||||||
with hook_data():
|
|
||||||
db = unitdata.kv()
|
|
||||||
current = config(option)
|
|
||||||
saved = db.get(option)
|
|
||||||
db.set(option, current)
|
|
||||||
if saved is None:
|
|
||||||
return False
|
|
||||||
return current != saved
|
|
||||||
|
|
||||||
|
|
||||||
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
|
|
||||||
"""
|
|
||||||
Write an rc file in the charm-delivered directory containing
|
|
||||||
exported environment variables provided by env_vars. Any charm scripts run
|
|
||||||
outside the juju hook environment can source this scriptrc to obtain
|
|
||||||
updated config information necessary to perform health checks or
|
|
||||||
service changes.
|
|
||||||
"""
|
|
||||||
juju_rc_path = "%s/%s" % (charm_dir(), script_path)
|
|
||||||
if not os.path.exists(os.path.dirname(juju_rc_path)):
|
|
||||||
os.mkdir(os.path.dirname(juju_rc_path))
|
|
||||||
with open(juju_rc_path, 'wb') as rc_script:
|
|
||||||
rc_script.write(
|
|
||||||
"#!/bin/bash\n")
|
|
||||||
[rc_script.write('export %s=%s\n' % (u, p))
|
|
||||||
for u, p in six.iteritems(env_vars) if u != "script_path"]
|
|
||||||
|
|
||||||
|
|
||||||
def openstack_upgrade_available(package):
|
|
||||||
"""
|
|
||||||
Determines if an OpenStack upgrade is available from installation
|
|
||||||
source, based on version of installed package.
|
|
||||||
|
|
||||||
:param package: str: Name of installed package.
|
|
||||||
|
|
||||||
:returns: bool: : Returns True if configured installation source offers
|
|
||||||
a newer version of package.
|
|
||||||
|
|
||||||
"""
|
|
||||||
|
|
||||||
import apt_pkg as apt
|
|
||||||
src = config('openstack-origin')
|
|
||||||
cur_vers = get_os_version_package(package)
|
|
||||||
if "swift" in package:
|
|
||||||
codename = get_os_codename_install_source(src)
|
|
||||||
available_vers = get_os_version_codename(codename, SWIFT_CODENAMES)
|
|
||||||
else:
|
|
||||||
available_vers = get_os_version_install_source(src)
|
|
||||||
apt.init()
|
|
||||||
return apt.version_compare(available_vers, cur_vers) == 1
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_block_device(block_device):
|
|
||||||
'''
|
|
||||||
Confirm block_device, create as loopback if necessary.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to ensure.
|
|
||||||
|
|
||||||
:returns: str: Full path of ensured block device.
|
|
||||||
'''
|
|
||||||
_none = ['None', 'none', None]
|
|
||||||
if (block_device in _none):
|
|
||||||
error_out('prepare_storage(): Missing required input: block_device=%s.'
|
|
||||||
% block_device)
|
|
||||||
|
|
||||||
if block_device.startswith('/dev/'):
|
|
||||||
bdev = block_device
|
|
||||||
elif block_device.startswith('/'):
|
|
||||||
_bd = block_device.split('|')
|
|
||||||
if len(_bd) == 2:
|
|
||||||
bdev, size = _bd
|
|
||||||
else:
|
|
||||||
bdev = block_device
|
|
||||||
size = DEFAULT_LOOPBACK_SIZE
|
|
||||||
bdev = ensure_loopback_device(bdev, size)
|
|
||||||
else:
|
|
||||||
bdev = '/dev/%s' % block_device
|
|
||||||
|
|
||||||
if not is_block_device(bdev):
|
|
||||||
error_out('Failed to locate valid block device at %s' % bdev)
|
|
||||||
|
|
||||||
return bdev
|
|
||||||
|
|
||||||
|
|
||||||
def clean_storage(block_device):
|
|
||||||
'''
|
|
||||||
Ensures a block device is clean. That is:
|
|
||||||
- unmounted
|
|
||||||
- any lvm volume groups are deactivated
|
|
||||||
- any lvm physical device signatures removed
|
|
||||||
- partition table wiped
|
|
||||||
|
|
||||||
:param block_device: str: Full path to block device to clean.
|
|
||||||
'''
|
|
||||||
for mp, d in mounts():
|
|
||||||
if d == block_device:
|
|
||||||
juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
|
|
||||||
(d, mp), level=INFO)
|
|
||||||
umount(mp, persist=True)
|
|
||||||
|
|
||||||
if is_lvm_physical_volume(block_device):
|
|
||||||
deactivate_lvm_volume_group(block_device)
|
|
||||||
remove_lvm_physical_volume(block_device)
|
|
||||||
else:
|
|
||||||
zap_disk(block_device)
|
|
||||||
|
|
||||||
is_ip = ip.is_ip
|
|
||||||
ns_query = ip.ns_query
|
|
||||||
get_host_ip = ip.get_host_ip
|
|
||||||
get_hostname = ip.get_hostname
|
|
||||||
|
|
||||||
|
|
||||||
def get_matchmaker_map(mm_file='/etc/oslo/matchmaker_ring.json'):
|
|
||||||
mm_map = {}
|
|
||||||
if os.path.isfile(mm_file):
|
|
||||||
with open(mm_file, 'r') as f:
|
|
||||||
mm_map = json.load(f)
|
|
||||||
return mm_map
|
|
||||||
|
|
||||||
|
|
||||||
def sync_db_with_multi_ipv6_addresses(database, database_user,
|
|
||||||
relation_prefix=None):
|
|
||||||
hosts = get_ipv6_addr(dynamic_only=False)
|
|
||||||
|
|
||||||
if config('vip'):
|
|
||||||
vips = config('vip').split()
|
|
||||||
for vip in vips:
|
|
||||||
if vip and is_ipv6(vip):
|
|
||||||
hosts.append(vip)
|
|
||||||
|
|
||||||
kwargs = {'database': database,
|
|
||||||
'username': database_user,
|
|
||||||
'hostname': json.dumps(hosts)}
|
|
||||||
|
|
||||||
if relation_prefix:
|
|
||||||
for key in list(kwargs.keys()):
|
|
||||||
kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
|
|
||||||
del kwargs[key]
|
|
||||||
|
|
||||||
for rid in relation_ids('shared-db'):
|
|
||||||
relation_set(relation_id=rid, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def os_requires_version(ostack_release, pkg):
|
|
||||||
"""
|
|
||||||
Decorator for hook to specify minimum supported release
|
|
||||||
"""
|
|
||||||
def wrap(f):
|
|
||||||
@wraps(f)
|
|
||||||
def wrapped_f(*args):
|
|
||||||
if os_release(pkg) < ostack_release:
|
|
||||||
raise Exception("This hook is not supported on releases"
|
|
||||||
" before %s" % ostack_release)
|
|
||||||
f(*args)
|
|
||||||
return wrapped_f
|
|
||||||
return wrap
|
|
||||||
|
|
||||||
|
|
||||||
def git_install_requested():
|
|
||||||
"""
|
|
||||||
Returns true if openstack-origin-git is specified.
|
|
||||||
"""
|
|
||||||
return config('openstack-origin-git') is not None
|
|
||||||
|
|
||||||
|
|
||||||
requirements_dir = None
|
|
||||||
|
|
||||||
|
|
||||||
def _git_yaml_load(projects_yaml):
|
|
||||||
"""
|
|
||||||
Load the specified yaml into a dictionary.
|
|
||||||
"""
|
|
||||||
if not projects_yaml:
|
|
||||||
return None
|
|
||||||
|
|
||||||
return yaml.load(projects_yaml)
|
|
||||||
|
|
||||||
|
|
||||||
def git_clone_and_install(projects_yaml, core_project, depth=1):
|
|
||||||
"""
|
|
||||||
Clone/install all specified OpenStack repositories.
|
|
||||||
|
|
||||||
The expected format of projects_yaml is:
|
|
||||||
|
|
||||||
repositories:
|
|
||||||
- {name: keystone,
|
|
||||||
repository: 'git://git.openstack.org/openstack/keystone.git',
|
|
||||||
branch: 'stable/icehouse'}
|
|
||||||
- {name: requirements,
|
|
||||||
repository: 'git://git.openstack.org/openstack/requirements.git',
|
|
||||||
branch: 'stable/icehouse'}
|
|
||||||
|
|
||||||
directory: /mnt/openstack-git
|
|
||||||
http_proxy: squid-proxy-url
|
|
||||||
https_proxy: squid-proxy-url
|
|
||||||
|
|
||||||
The directory, http_proxy, and https_proxy keys are optional.
|
|
||||||
|
|
||||||
"""
|
|
||||||
global requirements_dir
|
|
||||||
parent_dir = '/mnt/openstack-git'
|
|
||||||
http_proxy = None
|
|
||||||
|
|
||||||
projects = _git_yaml_load(projects_yaml)
|
|
||||||
_git_validate_projects_yaml(projects, core_project)
|
|
||||||
|
|
||||||
old_environ = dict(os.environ)
|
|
||||||
|
|
||||||
if 'http_proxy' in projects.keys():
|
|
||||||
http_proxy = projects['http_proxy']
|
|
||||||
os.environ['http_proxy'] = projects['http_proxy']
|
|
||||||
if 'https_proxy' in projects.keys():
|
|
||||||
os.environ['https_proxy'] = projects['https_proxy']
|
|
||||||
|
|
||||||
if 'directory' in projects.keys():
|
|
||||||
parent_dir = projects['directory']
|
|
||||||
|
|
||||||
pip_create_virtualenv(os.path.join(parent_dir, 'venv'))
|
|
||||||
|
|
||||||
# Upgrade setuptools and pip from default virtualenv versions. The default
|
|
||||||
# versions in trusty break master OpenStack branch deployments.
|
|
||||||
for p in ['pip', 'setuptools']:
|
|
||||||
pip_install(p, upgrade=True, proxy=http_proxy,
|
|
||||||
venv=os.path.join(parent_dir, 'venv'))
|
|
||||||
|
|
||||||
for p in projects['repositories']:
|
|
||||||
repo = p['repository']
|
|
||||||
branch = p['branch']
|
|
||||||
if p['name'] == 'requirements':
|
|
||||||
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
|
||||||
parent_dir, http_proxy,
|
|
||||||
update_requirements=False)
|
|
||||||
requirements_dir = repo_dir
|
|
||||||
else:
|
|
||||||
repo_dir = _git_clone_and_install_single(repo, branch, depth,
|
|
||||||
parent_dir, http_proxy,
|
|
||||||
update_requirements=True)
|
|
||||||
|
|
||||||
os.environ = old_environ
|
|
||||||
|
|
||||||
|
|
||||||
def _git_validate_projects_yaml(projects, core_project):
|
|
||||||
"""
|
|
||||||
Validate the projects yaml.
|
|
||||||
"""
|
|
||||||
_git_ensure_key_exists('repositories', projects)
|
|
||||||
|
|
||||||
for project in projects['repositories']:
|
|
||||||
_git_ensure_key_exists('name', project.keys())
|
|
||||||
_git_ensure_key_exists('repository', project.keys())
|
|
||||||
_git_ensure_key_exists('branch', project.keys())
|
|
||||||
|
|
||||||
if projects['repositories'][0]['name'] != 'requirements':
|
|
||||||
error_out('{} git repo must be specified first'.format('requirements'))
|
|
||||||
|
|
||||||
if projects['repositories'][-1]['name'] != core_project:
|
|
||||||
error_out('{} git repo must be specified last'.format(core_project))
|
|
||||||
|
|
||||||
|
|
||||||
def _git_ensure_key_exists(key, keys):
|
|
||||||
"""
|
|
||||||
Ensure that key exists in keys.
|
|
||||||
"""
|
|
||||||
if key not in keys:
|
|
||||||
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
|
|
||||||
|
|
||||||
|
|
||||||
def _git_clone_and_install_single(repo, branch, depth, parent_dir, http_proxy,
|
|
||||||
update_requirements):
|
|
||||||
"""
|
|
||||||
Clone and install a single git repository.
|
|
||||||
"""
|
|
||||||
dest_dir = os.path.join(parent_dir, os.path.basename(repo))
|
|
||||||
|
|
||||||
if not os.path.exists(parent_dir):
|
|
||||||
juju_log('Directory already exists at {}. '
|
|
||||||
'No need to create directory.'.format(parent_dir))
|
|
||||||
os.mkdir(parent_dir)
|
|
||||||
|
|
||||||
if not os.path.exists(dest_dir):
|
|
||||||
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
|
|
||||||
repo_dir = install_remote(repo, dest=parent_dir, branch=branch,
|
|
||||||
depth=depth)
|
|
||||||
else:
|
|
||||||
repo_dir = dest_dir
|
|
||||||
|
|
||||||
venv = os.path.join(parent_dir, 'venv')
|
|
||||||
|
|
||||||
if update_requirements:
|
|
||||||
if not requirements_dir:
|
|
||||||
error_out('requirements repo must be cloned before '
|
|
||||||
'updating from global requirements.')
|
|
||||||
_git_update_requirements(venv, repo_dir, requirements_dir)
|
|
||||||
|
|
||||||
juju_log('Installing git repo from dir: {}'.format(repo_dir))
|
|
||||||
if http_proxy:
|
|
||||||
pip_install(repo_dir, proxy=http_proxy, venv=venv)
|
|
||||||
else:
|
|
||||||
pip_install(repo_dir, venv=venv)
|
|
||||||
|
|
||||||
return repo_dir
|
|
||||||
|
|
||||||
|
|
||||||
def _git_update_requirements(venv, package_dir, reqs_dir):
|
|
||||||
"""
|
|
||||||
Update from global requirements.
|
|
||||||
|
|
||||||
Update an OpenStack git directory's requirements.txt and
|
|
||||||
test-requirements.txt from global-requirements.txt.
|
|
||||||
"""
|
|
||||||
orig_dir = os.getcwd()
|
|
||||||
os.chdir(reqs_dir)
|
|
||||||
python = os.path.join(venv, 'bin/python')
|
|
||||||
cmd = [python, 'update.py', package_dir]
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
except subprocess.CalledProcessError:
|
|
||||||
package = os.path.basename(package_dir)
|
|
||||||
error_out("Error updating {} from "
|
|
||||||
"global-requirements.txt".format(package))
|
|
||||||
os.chdir(orig_dir)
|
|
||||||
|
|
||||||
|
|
||||||
def git_pip_venv_dir(projects_yaml):
|
|
||||||
"""
|
|
||||||
Return the pip virtualenv path.
|
|
||||||
"""
|
|
||||||
parent_dir = '/mnt/openstack-git'
|
|
||||||
|
|
||||||
projects = _git_yaml_load(projects_yaml)
|
|
||||||
|
|
||||||
if 'directory' in projects.keys():
|
|
||||||
parent_dir = projects['directory']
|
|
||||||
|
|
||||||
return os.path.join(parent_dir, 'venv')
|
|
||||||
|
|
||||||
|
|
||||||
def git_src_dir(projects_yaml, project):
|
|
||||||
"""
|
|
||||||
Return the directory where the specified project's source is located.
|
|
||||||
"""
|
|
||||||
parent_dir = '/mnt/openstack-git'
|
|
||||||
|
|
||||||
projects = _git_yaml_load(projects_yaml)
|
|
||||||
|
|
||||||
if 'directory' in projects.keys():
|
|
||||||
parent_dir = projects['directory']
|
|
||||||
|
|
||||||
for p in projects['repositories']:
|
|
||||||
if p['name'] == project:
|
|
||||||
return os.path.join(parent_dir, os.path.basename(p['repository']))
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def git_yaml_value(projects_yaml, key):
|
|
||||||
"""
|
|
||||||
Return the value in projects_yaml for the specified key.
|
|
||||||
"""
|
|
||||||
projects = _git_yaml_load(projects_yaml)
|
|
||||||
|
|
||||||
if key in projects.keys():
|
|
||||||
return projects[key]
|
|
||||||
|
|
||||||
return None
|
|
||||||
|
|
||||||
|
|
||||||
def os_workload_status(configs, required_interfaces, charm_func=None):
|
|
||||||
"""
|
|
||||||
Decorator to set workload status based on complete contexts
|
|
||||||
"""
|
|
||||||
def wrap(f):
|
|
||||||
@wraps(f)
|
|
||||||
def wrapped_f(*args, **kwargs):
|
|
||||||
# Run the original function first
|
|
||||||
f(*args, **kwargs)
|
|
||||||
# Set workload status now that contexts have been
|
|
||||||
# acted on
|
|
||||||
set_os_workload_status(configs, required_interfaces, charm_func)
|
|
||||||
return wrapped_f
|
|
||||||
return wrap
|
|
||||||
|
|
||||||
|
|
||||||
def set_os_workload_status(configs, required_interfaces, charm_func=None):
    """
    Set workload status based on complete contexts.
    status-set missing or incomplete contexts
    and juju-log details of missing required data.
    charm_func is a charm specific function to run checking
    for charm specific requirements such as a VIP setting.
    """
    incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
    state = 'active'
    missing_relations = []
    incomplete_relations = []
    message = None
    charm_state = None
    charm_message = None

    for generic_interface in incomplete_rel_data.keys():
        related_interface = None
        missing_data = {}
        # Related or not?
        for interface in incomplete_rel_data[generic_interface]:
            if incomplete_rel_data[generic_interface][interface].get('related'):
                related_interface = interface
                missing_data = incomplete_rel_data[generic_interface][interface].get('missing_data')
        # No relation ID for the generic_interface
        if not related_interface:
            juju_log("{} relation is missing and must be related for "
                     "functionality. ".format(generic_interface), 'WARN')
            state = 'blocked'
            if generic_interface not in missing_relations:
                missing_relations.append(generic_interface)
        else:
            # Relation ID exists but no related unit
            if not missing_data:
                # Edge case relation ID exists but departing
                if ('departed' in hook_name() or 'broken' in hook_name()) \
                        and related_interface in hook_name():
                    state = 'blocked'
                    if generic_interface not in missing_relations:
                        missing_relations.append(generic_interface)
                    juju_log("{} relation's interface, {}, "
                             "relationship is departed or broken "
                             "and is required for functionality."
                             "".format(generic_interface, related_interface), "WARN")
                # Normal case relation ID exists but no related unit
                # (joining)
                else:
                    # Fixed log message typo: "relations's" -> "relation's".
                    juju_log("{} relation's interface, {}, is related but has "
                             "no units in the relation."
                             "".format(generic_interface, related_interface), "INFO")
            # Related unit exists and data missing on the relation
            else:
                juju_log("{} relation's interface, {}, is related awaiting "
                         "the following data from the relationship: {}. "
                         "".format(generic_interface, related_interface,
                                   ", ".join(missing_data)), "INFO")
            if state != 'blocked':
                state = 'waiting'
            if generic_interface not in incomplete_relations \
                    and generic_interface not in missing_relations:
                incomplete_relations.append(generic_interface)

    if missing_relations:
        message = "Missing relations: {}".format(", ".join(missing_relations))
        if incomplete_relations:
            message += "; incomplete relations: {}" \
                       "".format(", ".join(incomplete_relations))
        state = 'blocked'
    elif incomplete_relations:
        message = "Incomplete relations: {}" \
                  "".format(", ".join(incomplete_relations))
        state = 'waiting'

    # Run charm specific checks
    if charm_func:
        charm_state, charm_message = charm_func(configs)
        if charm_state != 'active' and charm_state != 'unknown':
            state = workload_state_compare(state, charm_state)
            if message:
                charm_message = charm_message.replace("Incomplete relations: ",
                                                      "")
                message = "{}, {}".format(message, charm_message)
            else:
                message = charm_message

    # Set to active if all requirements have been met
    if state == 'active':
        message = "Unit is ready"
        juju_log(message, "INFO")

    status_set(state, message)
|
|
||||||
|
|
||||||
|
|
||||||
def workload_state_compare(current_workload_state, workload_state):
    """Return the higher-priority of two workload states.

    Unrecognised states are treated as 'unknown' (lowest priority).
    On a tie the second argument (`workload_state`) is returned, matching
    the original implementation.
    """
    priority = {'unknown': -1,
                'active': 0,
                'maintenance': 1,
                'waiting': 2,
                'blocked': 3}

    if workload_state not in priority:
        workload_state = 'unknown'
    if current_workload_state not in priority:
        current_workload_state = 'unknown'

    # max() returns its first argument on a tie, so list workload_state
    # first to reproduce the original else-branch behaviour.
    return max(workload_state, current_workload_state, key=priority.get)
|
|
||||||
|
|
||||||
|
|
||||||
def incomplete_relation_data(configs, required_interfaces):
|
|
||||||
"""
|
|
||||||
Check complete contexts against required_interfaces
|
|
||||||
Return dictionary of incomplete relation data.
|
|
||||||
|
|
||||||
configs is an OSConfigRenderer object with configs registered
|
|
||||||
|
|
||||||
required_interfaces is a dictionary of required general interfaces
|
|
||||||
with dictionary values of possible specific interfaces.
|
|
||||||
Example:
|
|
||||||
required_interfaces = {'database': ['shared-db', 'pgsql-db']}
|
|
||||||
|
|
||||||
The interface is said to be satisfied if anyone of the interfaces in the
|
|
||||||
list has a complete context.
|
|
||||||
|
|
||||||
Return dictionary of incomplete or missing required contexts with relation
|
|
||||||
status of interfaces and any missing data points. Example:
|
|
||||||
{'message':
|
|
||||||
{'amqp': {'missing_data': ['rabbitmq_password'], 'related': True},
|
|
||||||
'zeromq-configuration': {'related': False}},
|
|
||||||
'identity':
|
|
||||||
{'identity-service': {'related': False}},
|
|
||||||
'database':
|
|
||||||
{'pgsql-db': {'related': False},
|
|
||||||
'shared-db': {'related': True}}}
|
|
||||||
"""
|
|
||||||
complete_ctxts = configs.complete_contexts()
|
|
||||||
incomplete_relations = []
|
|
||||||
for svc_type in required_interfaces.keys():
|
|
||||||
# Avoid duplicates
|
|
||||||
found_ctxt = False
|
|
||||||
for interface in required_interfaces[svc_type]:
|
|
||||||
if interface in complete_ctxts:
|
|
||||||
found_ctxt = True
|
|
||||||
if not found_ctxt:
|
|
||||||
incomplete_relations.append(svc_type)
|
|
||||||
incomplete_context_data = {}
|
|
||||||
for i in incomplete_relations:
|
|
||||||
incomplete_context_data[i] = configs.get_incomplete_context_data(required_interfaces[i])
|
|
||||||
return incomplete_context_data
|
|
||||||
|
|
||||||
|
|
||||||
def do_action_openstack_upgrade(package, upgrade_callback, configs):
    """Perform action-managed OpenStack upgrade.

    Upgrades packages to the configured openstack-origin version and sets
    the corresponding action status as a result.

    If the charm was installed from source we cannot upgrade it.
    For backwards compatibility a config flag (action-managed-upgrade) must
    be set for this code to run, otherwise a full service level upgrade will
    fire on config-changed.

    @param package: package name for determining if upgrade available
    @param upgrade_callback: function callback to charm's upgrade function
    @param configs: templating object derived from OSConfigRenderer class

    @return: True if upgrade successful; False if upgrade failed or skipped
    """
    ret = False

    if git_install_requested():
        action_set({'outcome': 'installed from source, skipped upgrade.'})
    else:
        if openstack_upgrade_available(package):
            if config('action-managed-upgrade'):
                juju_log('Upgrading OpenStack release')

                try:
                    upgrade_callback(configs=configs)
                    action_set({'outcome': 'success, upgrade completed.'})
                    ret = True
                # Narrowed from a bare "except:" so SystemExit and
                # KeyboardInterrupt still propagate; any other failure is
                # reported through the action framework with a traceback.
                except Exception:
                    action_set({'outcome': 'upgrade failed, see traceback.'})
                    action_set({'traceback': traceback.format_exc()})
                    action_fail('do_openstack_upgrade resulted in an '
                                'unexpected error')
            else:
                action_set({'outcome': 'action-managed-upgrade config is '
                                       'False, skipped upgrade.'})
        else:
            action_set({'outcome': 'no upgrade available.'})

    return ret
|
|
||||||
|
|
||||||
|
|
||||||
def remote_restart(rel_name, remote_service=None):
    """Trigger a restart on related units by setting a fresh nonce.

    Publishes a new UUID under 'restart-trigger' on every populated
    relation of `rel_name`; `remote_service`, when given, names the
    service the remote side should restart.
    """
    trigger = {'restart-trigger': str(uuid.uuid4())}
    if remote_service:
        trigger['remote-service'] = remote_service
    for rid in relation_ids(rel_name):
        # This subordinate can be related to two separate services using
        # different subordinate relations, so only issue the restart if
        # the principal is connected down the relation we think it is.
        if related_units(relid=rid):
            relation_set(relation_id=rid,
                         relation_settings=trigger)
|
|
@ -1,269 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import json
|
|
||||||
import six
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import relation_id as current_relation_id
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
is_relation_made,
|
|
||||||
relation_ids,
|
|
||||||
relation_get as _relation_get,
|
|
||||||
local_unit,
|
|
||||||
relation_set as _relation_set,
|
|
||||||
leader_get as _leader_get,
|
|
||||||
leader_set,
|
|
||||||
is_leader,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
"""
|
|
||||||
This helper provides functions to support use of a peer relation
|
|
||||||
for basic key/value storage, with the added benefit that all storage
|
|
||||||
can be replicated across peer units.
|
|
||||||
|
|
||||||
Requirement to use:
|
|
||||||
|
|
||||||
To use this, the "peer_echo()" method has to be called form the peer
|
|
||||||
relation's relation-changed hook:
|
|
||||||
|
|
||||||
@hooks.hook("cluster-relation-changed") # Adapt the to your peer relation name
|
|
||||||
def cluster_relation_changed():
|
|
||||||
peer_echo()
|
|
||||||
|
|
||||||
Once this is done, you can use peer storage from anywhere:
|
|
||||||
|
|
||||||
@hooks.hook("some-hook")
|
|
||||||
def some_hook():
|
|
||||||
# You can store and retrieve key/values this way:
|
|
||||||
if is_relation_made("cluster"): # from charmhelpers.core.hookenv
|
|
||||||
# There are peers available so we can work with peer storage
|
|
||||||
peer_store("mykey", "myvalue")
|
|
||||||
value = peer_retrieve("mykey")
|
|
||||||
print value
|
|
||||||
else:
|
|
||||||
print "No peers joind the relation, cannot share key/values :("
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
def leader_get(attribute=None, rid=None):
    """Wrapper to ensure that settings are migrated from the peer relation.

    This supports upgrading an environment that does not have Juju
    leadership election to one that does: a setting present on the peer
    relation but absent from the leader datastore is copied across once,
    and recorded in a migration marker key so it is never re-migrated.
    """
    migration_key = '__leader_get_migrated_settings__'
    # Only the leader may write to the leader datastore; everyone else
    # just reads.
    if not is_leader():
        return _leader_get(attribute=attribute)

    did_migrate = False
    leader_settings = _leader_get(attribute=attribute)
    prior = _leader_get(attribute=migration_key)
    migrated = set(json.loads(prior)) if prior else set()

    # Hide the bookkeeping key from callers. leader_settings may be a
    # plain value rather than a dict when `attribute` was supplied,
    # hence the TypeError guard.
    try:
        if migration_key in leader_settings:
            del leader_settings[migration_key]
    except TypeError:
        pass

    if attribute:
        if attribute in migrated:
            return leader_settings

        # Attribute not yet migrated: when the leader db has no value,
        # check whether this unit published one on the peer relation and
        # promote it into the leader db.
        if not leader_settings:
            peer_setting = _relation_get(attribute=attribute,
                                         unit=local_unit(), rid=rid)
            if peer_setting:
                leader_set(settings={attribute: peer_setting})
                leader_settings = peer_setting

        if leader_settings:
            did_migrate = True
            migrated.add(attribute)
    else:
        peer_data = _relation_get(unit=local_unit(), rid=rid)
        if peer_data:
            for key in set(peer_data.keys()).difference(migrated):
                # An existing leader setting wins over the peer value.
                if not leader_settings.get(key):
                    leader_settings[key] = peer_data[key]

                did_migrate = True
                migrated.add(key)

    if did_migrate:
        leader_set(**leader_settings)

    if migrated and did_migrate:
        leader_set(settings={migration_key: json.dumps(list(migrated))})

    return leader_settings
|
|
||||||
|
|
||||||
|
|
||||||
def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Write settings via leader-set when possible, else relation-set.

    leader-set is only attempted when relation_id is a peer ('cluster')
    relation id or None (in which case we assume we are already inside the
    peer relation context). On Juju versions without leadership support,
    leader_set raises NotImplementedError and we fall back to plain
    relation-set.
    """
    try:
        if relation_id not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_set(settings=relation_settings, **kwargs)
    except NotImplementedError:
        return _relation_set(relation_id=relation_id,
                             relation_settings=relation_settings, **kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
def relation_get(attribute=None, unit=None, rid=None):
    """Read via leader-get when possible, else fall back to relation-get.

    leader-get is only attempted when rid is a peer ('cluster') relation id
    or None (i.e. we assume we are within the peer relation context). On
    Juju versions without leadership support, NotImplementedError drops us
    back to plain relation-get.
    """
    try:
        if rid not in relation_ids('cluster'):
            raise NotImplementedError
        return leader_get(attribute, rid)
    except NotImplementedError:
        return _relation_get(attribute=attribute, rid=rid, unit=unit)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_retrieve(key, relation_name='cluster'):
    """Retrieve a named key from peer relation `relation_name`.

    @raises ValueError: when no relation of `relation_name` exists.
    """
    cluster_rels = relation_ids(relation_name)
    if not cluster_rels:
        # Fixed: the two string literals previously concatenated without a
        # space, producing "Unable to detectpeer relation ..." (sibling
        # peer_store already had the space).
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    # Read from the first (and normally only) peer relation id.
    return relation_get(attribute=key, rid=cluster_rels[0],
                        unit=local_unit())
|
|
||||||
|
|
||||||
|
|
||||||
def peer_retrieve_by_prefix(prefix, relation_name='cluster', delimiter='_',
                            inc_list=None, exc_list=None):
    """Retrieve k/v pairs given a prefix and filter using {inc,exc}_list.

    Keys starting with `prefix` + `delimiter` are returned with the prefix
    stripped; keys whose stripped form is in exc_list are dropped, and when
    inc_list is non-empty only stripped keys listed there are kept.
    """
    inc_list = inc_list or []
    exc_list = exc_list or []
    peerdb_settings = peer_retrieve('-', relation_name=relation_name)
    if peerdb_settings is None:
        return {}

    full_prefix = prefix + delimiter
    matched = {}
    for key, value in peerdb_settings.items():
        if not key.startswith(full_prefix):
            continue
        # NOTE: .replace strips every occurrence of the prefix, matching
        # the original implementation.
        stripped = key.replace(full_prefix, '')
        if stripped in exc_list:
            continue
        if not inc_list or stripped in inc_list:
            matched[stripped] = value
    return matched
|
|
||||||
|
|
||||||
|
|
||||||
def peer_store(key, value, relation_name='cluster'):
    """Store the key/value pair on the named peer relation `relation_name`.

    @raises ValueError: when no relation of `relation_name` exists.
    """
    rels = relation_ids(relation_name)
    if not rels:
        raise ValueError('Unable to detect '
                         'peer relation {}'.format(relation_name))
    # Write to the first (and normally only) peer relation id.
    relation_set(relation_id=rels[0],
                 relation_settings={key: value})
|
|
||||||
|
|
||||||
|
|
||||||
def peer_echo(includes=None, force=False):
    """Echo filtered attributes back onto the same relation for storage.

    This is a requirement to use the peerstorage module - it needs to be
    called from the peer relation's changed hook.

    If Juju leader support exists this will be a noop unless force is True.
    """
    try:
        is_leader()
    except NotImplementedError:
        # No leadership support: carry on and echo via the relation.
        pass
    else:
        if not force:
            return  # NOOP if leader-election is supported

    # Bypass the leader-aware wrappers defined in this module and use the
    # raw hook tools directly.
    relation_get = _relation_get
    relation_set = _relation_set

    rdata = relation_get()
    if includes is None:
        # Echo everything except the unit address keys.
        echo_data = {k: v for k, v in rdata.items()
                     if k not in ('private-address', 'public-address')}
    else:
        echo_data = {attr: val for attr, val in six.iteritems(rdata)
                     if any(inc in attr for inc in includes)}
    if echo_data:
        relation_set(relation_settings=echo_data)
|
|
||||||
|
|
||||||
|
|
||||||
def peer_store_and_set(relation_id=None, peer_relation_name='cluster',
                       peer_store_fatal=False, relation_settings=None,
                       delimiter='_', **kwargs):
    """Store passed-in arguments both on the relation and in peer storage.

    It functions like doing relation_set() and peer_store() at the same
    time, with the same data.

    @param relation_id: the id of the relation to store the data on.
        Defaults to the current relation.
    @param peer_store_fatal: when True, raise ValueError if peer storage
        is not available.
    """
    relation_settings = relation_settings or {}
    relation_set(relation_id=relation_id,
                 relation_settings=relation_settings,
                 **kwargs)

    if not is_relation_made(peer_relation_name):
        if peer_store_fatal:
            raise ValueError('Unable to detect '
                             'peer relation {}'.format(peer_relation_name))
        return

    # Keys in peer storage are namespaced by the relation id they were
    # originally set on.
    key_prefix = relation_id or current_relation_id()
    combined = dict(list(kwargs.items()) + list(relation_settings.items()))
    for key, value in six.iteritems(combined):
        peer_store(key_prefix + delimiter + key,
                   value,
                   relation_name=peer_relation_name)
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,56 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
|
|
||||||
import atexit
|
|
||||||
import sys
|
|
||||||
|
|
||||||
from charmhelpers.contrib.python.rpdb import Rpdb
|
|
||||||
from charmhelpers.core.hookenv import (
|
|
||||||
open_port,
|
|
||||||
close_port,
|
|
||||||
ERROR,
|
|
||||||
log
|
|
||||||
)
|
|
||||||
|
|
||||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
|
||||||
|
|
||||||
DEFAULT_ADDR = "0.0.0.0"
|
|
||||||
DEFAULT_PORT = 4444
|
|
||||||
|
|
||||||
|
|
||||||
def _error(message):
    """Log *message* at ERROR level via the charm log."""
    log(message, level=ERROR)
|
|
||||||
|
|
||||||
|
|
||||||
def set_trace(addr=DEFAULT_ADDR, port=DEFAULT_PORT):
    """
    Set a trace point using the remote debugger.

    Opens `port` on the unit, starts an Rpdb session bound to
    (addr, port) and attaches it to the caller's frame. The port is
    closed again at interpreter exit.
    """
    atexit.register(close_port, port)
    try:
        log("Starting a remote python debugger session on %s:%s" % (addr,
                                                                    port))
        open_port(port)
        debugger = Rpdb(addr=addr, port=port)
        # Attach the debugger to the caller's frame, not this helper's.
        debugger.set_trace(sys._getframe().f_back)
    # Narrowed from a bare "except:" so SystemExit/KeyboardInterrupt are
    # not swallowed; any other failure is logged as an error.
    except Exception:
        _error("Cannot start a remote debug session on %s:%s" % (addr,
                                                                 port))
|
|
@ -1,130 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.fetch import apt_install, apt_update
|
|
||||||
from charmhelpers.core.hookenv import charm_dir, log
|
|
||||||
|
|
||||||
try:
|
|
||||||
from pip import main as pip_execute
|
|
||||||
except ImportError:
|
|
||||||
apt_update()
|
|
||||||
apt_install('python-pip')
|
|
||||||
from pip import main as pip_execute
|
|
||||||
|
|
||||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
|
||||||
|
|
||||||
|
|
||||||
def parse_options(given, available):
    """Yield "--key=value" strings for each given option that is available.

    Options with falsy values are skipped; keys are processed in sorted
    order for deterministic output.
    """
    for key in sorted(given):
        value = given[key]
        if value and key in available:
            yield "--{0}={1}".format(key, value)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_install_requirements(requirements, constraints=None, **options):
    """Install the packages listed in a pip requirements file.

    :param requirements: Path to the requirements file.
    :param constraints: Path to pip constraints file.
        http://pip.readthedocs.org/en/stable/user_guide/#constraints-files
    """
    command = ["install"]
    command.extend(parse_options(options, ('proxy', 'src', 'log')))

    command.append("-r {0}".format(requirements))
    if constraints:
        command.append("-c {0}".format(constraints))
        log("Installing from file: {} with constraints {} "
            "and options: {}".format(requirements, constraints, command))
    else:
        log("Installing from file: {} with options: {}".format(requirements,
                                                               command))
    pip_execute(command)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_install(package, fatal=False, upgrade=False, venv=None, **options):
    """Install a python package, optionally into a virtualenv.

    `package` may be a single name or a list of names. When `venv` is
    given, the venv's own pip binary is invoked via subprocess; otherwise
    the in-process pip is used. NOTE: `fatal` is accepted for API
    compatibility but is not consulted here.
    """
    if venv:
        command = [os.path.join(venv, 'bin/pip'), "install"]
    else:
        command = ["install"]

    command.extend(parse_options(options, ('proxy', 'src', 'log',
                                           'index-url')))

    if upgrade:
        command.append('--upgrade')

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Installing {} package with options: {}".format(package,
                                                        command))
    if venv:
        subprocess.check_call(command)
    else:
        pip_execute(command)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_uninstall(package, **options):
    """Uninstall a python package (or list of packages) without prompting."""
    command = ["uninstall", "-q", "-y"]
    command.extend(parse_options(options, ('proxy', 'log')))

    if isinstance(package, list):
        command.extend(package)
    else:
        command.append(package)

    log("Uninstalling {} package with options: {}".format(package,
                                                          command))
    pip_execute(command)
|
|
||||||
|
|
||||||
|
|
||||||
def pip_list():
    """Return pip's listing of the currently installed python packages."""
    return pip_execute(["list"])
|
|
||||||
|
|
||||||
|
|
||||||
def pip_create_virtualenv(path=None):
    """Create an isolated Python environment.

    Defaults to <charm_dir>/venv when no path is given; does nothing if
    the target directory already exists.
    """
    apt_install('python-virtualenv')

    venv_path = path if path else os.path.join(charm_dir(), 'venv')
    if not os.path.exists(venv_path):
        subprocess.check_call(['virtualenv', venv_path])
|
|
@ -1,58 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Remote Python Debugger (pdb wrapper)."""
|
|
||||||
|
|
||||||
import pdb
|
|
||||||
import socket
|
|
||||||
import sys
|
|
||||||
|
|
||||||
__author__ = "Bertrand Janin <b@janin.com>"
|
|
||||||
__version__ = "0.1.3"
|
|
||||||
|
|
||||||
|
|
||||||
class Rpdb(pdb.Pdb):
    """A pdb subclass that serves the debugger session over a TCP socket."""

    def __init__(self, addr="127.0.0.1", port=4444):
        """Listen on (addr, port), accept one client, attach pdb to it."""
        # Backup stdin and stdout before replacing them by the socket handle
        self.old_stdout = sys.stdout
        self.old_stdin = sys.stdin

        # SO_REUSEADDR lets the webapp reload and rebind the same port.
        self.skt = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.skt.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
        self.skt.bind((addr, port))
        self.skt.listen(1)
        client, _client_addr = self.skt.accept()
        stream = client.makefile('rw')
        pdb.Pdb.__init__(self, completekey='tab', stdin=stream,
                         stdout=stream)
        sys.stdout = sys.stdin = stream

    def shutdown(self):
        """Revert stdin and stdout, close the socket."""
        sys.stdout = self.old_stdout
        sys.stdin = self.old_stdin
        self.skt.close()
        self.set_continue()

    def do_continue(self, arg):
        """Stop all operation on ``continue``."""
        self.shutdown()
        return 1

    do_EOF = do_quit = do_exit = do_c = do_cont = do_continue
|
|
@ -1,34 +0,0 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# coding: utf-8
|
|
||||||
|
|
||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import sys
|
|
||||||
|
|
||||||
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
|
||||||
|
|
||||||
|
|
||||||
def current_version():
    """Return the running interpreter's version as a sys.version_info tuple."""
    return sys.version_info
|
|
||||||
|
|
||||||
|
|
||||||
def current_version_string():
|
|
||||||
"""Current system python version as string major.minor.micro"""
|
|
||||||
return "{0}.{1}.{2}".format(sys.version_info.major,
|
|
||||||
sys.version_info.minor,
|
|
||||||
sys.version_info.micro)
|
|
@ -1,118 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
"""Charm Helpers saltstack - declare the state of your machines.
|
|
||||||
|
|
||||||
This helper enables you to declare your machine state, rather than
|
|
||||||
program it procedurally (and have to test each change to your procedures).
|
|
||||||
Your install hook can be as simple as::
|
|
||||||
|
|
||||||
{{{
|
|
||||||
from charmhelpers.contrib.saltstack import (
|
|
||||||
install_salt_support,
|
|
||||||
update_machine_state,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def install():
|
|
||||||
install_salt_support()
|
|
||||||
update_machine_state('machine_states/dependencies.yaml')
|
|
||||||
update_machine_state('machine_states/installed.yaml')
|
|
||||||
}}}
|
|
||||||
|
|
||||||
and won't need to change (nor will its tests) when you change the machine
|
|
||||||
state.
|
|
||||||
|
|
||||||
It's using a python package called salt-minion which allows various formats for
|
|
||||||
specifying resources, such as::
|
|
||||||
|
|
||||||
{{{
|
|
||||||
/srv/{{ basedir }}:
|
|
||||||
file.directory:
|
|
||||||
- group: ubunet
|
|
||||||
- user: ubunet
|
|
||||||
- require:
|
|
||||||
- user: ubunet
|
|
||||||
- recurse:
|
|
||||||
- user
|
|
||||||
- group
|
|
||||||
|
|
||||||
ubunet:
|
|
||||||
group.present:
|
|
||||||
- gid: 1500
|
|
||||||
user.present:
|
|
||||||
- uid: 1500
|
|
||||||
- gid: 1500
|
|
||||||
- createhome: False
|
|
||||||
- require:
|
|
||||||
- group: ubunet
|
|
||||||
}}}
|
|
||||||
|
|
||||||
The docs for all the different state definitions are at:
|
|
||||||
http://docs.saltstack.com/ref/states/all/
|
|
||||||
|
|
||||||
|
|
||||||
TODO:
|
|
||||||
* Add test helpers which will ensure that machine state definitions
|
|
||||||
are functionally (but not necessarily logically) correct (ie. getting
|
|
||||||
salt to parse all state defs.
|
|
||||||
* Add a link to a public bootstrap charm example / blogpost.
|
|
||||||
* Find a way to obviate the need to use the grains['charm_dir'] syntax
|
|
||||||
in templates.
|
|
||||||
"""
|
|
||||||
# Copyright 2013 Canonical Ltd.
|
|
||||||
#
|
|
||||||
# Authors:
|
|
||||||
# Charm Helpers Developers <juju@lists.ubuntu.com>
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
import charmhelpers.contrib.templating.contexts
|
|
||||||
import charmhelpers.core.host
|
|
||||||
import charmhelpers.core.hookenv
|
|
||||||
|
|
||||||
|
|
||||||
salt_grains_path = '/etc/salt/grains'
|
|
||||||
|
|
||||||
|
|
||||||
def install_salt_support(from_ppa=True):
|
|
||||||
"""Installs the salt-minion helper for machine state.
|
|
||||||
|
|
||||||
By default the salt-minion package is installed from
|
|
||||||
the saltstack PPA. If from_ppa is False you must ensure
|
|
||||||
that the salt-minion package is available in the apt cache.
|
|
||||||
"""
|
|
||||||
if from_ppa:
|
|
||||||
subprocess.check_call([
|
|
||||||
'/usr/bin/add-apt-repository',
|
|
||||||
'--yes',
|
|
||||||
'ppa:saltstack/salt',
|
|
||||||
])
|
|
||||||
subprocess.check_call(['/usr/bin/apt-get', 'update'])
|
|
||||||
# We install salt-common as salt-minion would run the salt-minion
|
|
||||||
# daemon.
|
|
||||||
charmhelpers.fetch.apt_install('salt-common')
|
|
||||||
|
|
||||||
|
|
||||||
def update_machine_state(state_path):
|
|
||||||
"""Update the machine state using the provided state declaration."""
|
|
||||||
charmhelpers.contrib.templating.contexts.juju_state_to_yaml(
|
|
||||||
salt_grains_path)
|
|
||||||
subprocess.check_call([
|
|
||||||
'salt-call',
|
|
||||||
'--local',
|
|
||||||
'state.template',
|
|
||||||
state_path,
|
|
||||||
])
|
|
@ -1,94 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import subprocess
|
|
||||||
from charmhelpers.core import hookenv
|
|
||||||
|
|
||||||
|
|
||||||
def generate_selfsigned(keyfile, certfile, keysize="1024", config=None, subject=None, cn=None):
|
|
||||||
"""Generate selfsigned SSL keypair
|
|
||||||
|
|
||||||
You must provide one of the 3 optional arguments:
|
|
||||||
config, subject or cn
|
|
||||||
If more than one is provided the leftmost will be used
|
|
||||||
|
|
||||||
Arguments:
|
|
||||||
keyfile -- (required) full path to the keyfile to be created
|
|
||||||
certfile -- (required) full path to the certfile to be created
|
|
||||||
keysize -- (optional) SSL key length
|
|
||||||
config -- (optional) openssl configuration file
|
|
||||||
subject -- (optional) dictionary with SSL subject variables
|
|
||||||
cn -- (optional) cerfificate common name
|
|
||||||
|
|
||||||
Required keys in subject dict:
|
|
||||||
cn -- Common name (eq. FQDN)
|
|
||||||
|
|
||||||
Optional keys in subject dict
|
|
||||||
country -- Country Name (2 letter code)
|
|
||||||
state -- State or Province Name (full name)
|
|
||||||
locality -- Locality Name (eg, city)
|
|
||||||
organization -- Organization Name (eg, company)
|
|
||||||
organizational_unit -- Organizational Unit Name (eg, section)
|
|
||||||
email -- Email Address
|
|
||||||
"""
|
|
||||||
|
|
||||||
cmd = []
|
|
||||||
if config:
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-config", config]
|
|
||||||
elif subject:
|
|
||||||
ssl_subject = ""
|
|
||||||
if "country" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/C={}".format(subject["country"])
|
|
||||||
if "state" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/ST={}".format(subject["state"])
|
|
||||||
if "locality" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/L={}".format(subject["locality"])
|
|
||||||
if "organization" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/O={}".format(subject["organization"])
|
|
||||||
if "organizational_unit" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/OU={}".format(subject["organizational_unit"])
|
|
||||||
if "cn" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/CN={}".format(subject["cn"])
|
|
||||||
else:
|
|
||||||
hookenv.log("When using \"subject\" argument you must "
|
|
||||||
"provide \"cn\" field at very least")
|
|
||||||
return False
|
|
||||||
if "email" in subject:
|
|
||||||
ssl_subject = ssl_subject + "/emailAddress={}".format(subject["email"])
|
|
||||||
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-subj", ssl_subject]
|
|
||||||
elif cn:
|
|
||||||
cmd = ["/usr/bin/openssl", "req", "-new", "-newkey",
|
|
||||||
"rsa:{}".format(keysize), "-days", "365", "-nodes", "-x509",
|
|
||||||
"-keyout", keyfile,
|
|
||||||
"-out", certfile, "-subj", "/CN={}".format(cn)]
|
|
||||||
|
|
||||||
if not cmd:
|
|
||||||
hookenv.log("No config, subject or cn provided,"
|
|
||||||
"unable to generate self signed SSL certificates")
|
|
||||||
return False
|
|
||||||
try:
|
|
||||||
subprocess.check_call(cmd)
|
|
||||||
return True
|
|
||||||
except Exception as e:
|
|
||||||
print("Execution of openssl command failed:\n{}".format(e))
|
|
||||||
return False
|
|
@ -1,279 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
from os.path import join as path_join
|
|
||||||
from os.path import exists
|
|
||||||
import subprocess
|
|
||||||
|
|
||||||
from charmhelpers.core.hookenv import log, DEBUG
|
|
||||||
|
|
||||||
STD_CERT = "standard"
|
|
||||||
|
|
||||||
# Mysql server is fairly picky about cert creation
|
|
||||||
# and types, spec its creation separately for now.
|
|
||||||
MYSQL_CERT = "mysql"
|
|
||||||
|
|
||||||
|
|
||||||
class ServiceCA(object):
|
|
||||||
|
|
||||||
default_expiry = str(365 * 2)
|
|
||||||
default_ca_expiry = str(365 * 6)
|
|
||||||
|
|
||||||
def __init__(self, name, ca_dir, cert_type=STD_CERT):
|
|
||||||
self.name = name
|
|
||||||
self.ca_dir = ca_dir
|
|
||||||
self.cert_type = cert_type
|
|
||||||
|
|
||||||
###############
|
|
||||||
# Hook Helper API
|
|
||||||
@staticmethod
|
|
||||||
def get_ca(type=STD_CERT):
|
|
||||||
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
|
||||||
ca_path = os.path.join(os.environ['CHARM_DIR'], 'ca')
|
|
||||||
ca = ServiceCA(service_name, ca_path, type)
|
|
||||||
ca.init()
|
|
||||||
return ca
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def get_service_cert(cls, type=STD_CERT):
|
|
||||||
service_name = os.environ['JUJU_UNIT_NAME'].split('/')[0]
|
|
||||||
ca = cls.get_ca()
|
|
||||||
crt, key = ca.get_or_create_cert(service_name)
|
|
||||||
return crt, key, ca.get_ca_bundle()
|
|
||||||
|
|
||||||
###############
|
|
||||||
|
|
||||||
def init(self):
|
|
||||||
log("initializing service ca", level=DEBUG)
|
|
||||||
if not exists(self.ca_dir):
|
|
||||||
self._init_ca_dir(self.ca_dir)
|
|
||||||
self._init_ca()
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_key(self):
|
|
||||||
return path_join(self.ca_dir, 'private', 'cacert.key')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_cert(self):
|
|
||||||
return path_join(self.ca_dir, 'cacert.pem')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def ca_conf(self):
|
|
||||||
return path_join(self.ca_dir, 'ca.cnf')
|
|
||||||
|
|
||||||
@property
|
|
||||||
def signing_conf(self):
|
|
||||||
return path_join(self.ca_dir, 'signing.cnf')
|
|
||||||
|
|
||||||
def _init_ca_dir(self, ca_dir):
|
|
||||||
os.mkdir(ca_dir)
|
|
||||||
for i in ['certs', 'crl', 'newcerts', 'private']:
|
|
||||||
sd = path_join(ca_dir, i)
|
|
||||||
if not exists(sd):
|
|
||||||
os.mkdir(sd)
|
|
||||||
|
|
||||||
if not exists(path_join(ca_dir, 'serial')):
|
|
||||||
with open(path_join(ca_dir, 'serial'), 'w') as fh:
|
|
||||||
fh.write('02\n')
|
|
||||||
|
|
||||||
if not exists(path_join(ca_dir, 'index.txt')):
|
|
||||||
with open(path_join(ca_dir, 'index.txt'), 'w') as fh:
|
|
||||||
fh.write('')
|
|
||||||
|
|
||||||
def _init_ca(self):
|
|
||||||
"""Generate the root ca's cert and key.
|
|
||||||
"""
|
|
||||||
if not exists(path_join(self.ca_dir, 'ca.cnf')):
|
|
||||||
with open(path_join(self.ca_dir, 'ca.cnf'), 'w') as fh:
|
|
||||||
fh.write(
|
|
||||||
CA_CONF_TEMPLATE % (self.get_conf_variables()))
|
|
||||||
|
|
||||||
if not exists(path_join(self.ca_dir, 'signing.cnf')):
|
|
||||||
with open(path_join(self.ca_dir, 'signing.cnf'), 'w') as fh:
|
|
||||||
fh.write(
|
|
||||||
SIGNING_CONF_TEMPLATE % (self.get_conf_variables()))
|
|
||||||
|
|
||||||
if exists(self.ca_cert) or exists(self.ca_key):
|
|
||||||
raise RuntimeError("Initialized called when CA already exists")
|
|
||||||
cmd = ['openssl', 'req', '-config', self.ca_conf,
|
|
||||||
'-x509', '-nodes', '-newkey', 'rsa',
|
|
||||||
'-days', self.default_ca_expiry,
|
|
||||||
'-keyout', self.ca_key, '-out', self.ca_cert,
|
|
||||||
'-outform', 'PEM']
|
|
||||||
output = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
|
|
||||||
log("CA Init:\n %s" % output, level=DEBUG)
|
|
||||||
|
|
||||||
def get_conf_variables(self):
|
|
||||||
return dict(
|
|
||||||
org_name="juju",
|
|
||||||
org_unit_name="%s service" % self.name,
|
|
||||||
common_name=self.name,
|
|
||||||
ca_dir=self.ca_dir)
|
|
||||||
|
|
||||||
def get_or_create_cert(self, common_name):
|
|
||||||
if common_name in self:
|
|
||||||
return self.get_certificate(common_name)
|
|
||||||
return self.create_certificate(common_name)
|
|
||||||
|
|
||||||
def create_certificate(self, common_name):
|
|
||||||
if common_name in self:
|
|
||||||
return self.get_certificate(common_name)
|
|
||||||
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
|
|
||||||
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
|
||||||
csr_p = path_join(self.ca_dir, "certs", "%s.csr" % common_name)
|
|
||||||
self._create_certificate(common_name, key_p, csr_p, crt_p)
|
|
||||||
return self.get_certificate(common_name)
|
|
||||||
|
|
||||||
def get_certificate(self, common_name):
|
|
||||||
if common_name not in self:
|
|
||||||
raise ValueError("No certificate for %s" % common_name)
|
|
||||||
key_p = path_join(self.ca_dir, "certs", "%s.key" % common_name)
|
|
||||||
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
|
||||||
with open(crt_p) as fh:
|
|
||||||
crt = fh.read()
|
|
||||||
with open(key_p) as fh:
|
|
||||||
key = fh.read()
|
|
||||||
return crt, key
|
|
||||||
|
|
||||||
def __contains__(self, common_name):
|
|
||||||
crt_p = path_join(self.ca_dir, "certs", "%s.crt" % common_name)
|
|
||||||
return exists(crt_p)
|
|
||||||
|
|
||||||
def _create_certificate(self, common_name, key_p, csr_p, crt_p):
|
|
||||||
template_vars = self.get_conf_variables()
|
|
||||||
template_vars['common_name'] = common_name
|
|
||||||
subj = '/O=%(org_name)s/OU=%(org_unit_name)s/CN=%(common_name)s' % (
|
|
||||||
template_vars)
|
|
||||||
|
|
||||||
log("CA Create Cert %s" % common_name, level=DEBUG)
|
|
||||||
cmd = ['openssl', 'req', '-sha1', '-newkey', 'rsa:2048',
|
|
||||||
'-nodes', '-days', self.default_expiry,
|
|
||||||
'-keyout', key_p, '-out', csr_p, '-subj', subj]
|
|
||||||
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
|
||||||
cmd = ['openssl', 'rsa', '-in', key_p, '-out', key_p]
|
|
||||||
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
|
||||||
|
|
||||||
log("CA Sign Cert %s" % common_name, level=DEBUG)
|
|
||||||
if self.cert_type == MYSQL_CERT:
|
|
||||||
cmd = ['openssl', 'x509', '-req',
|
|
||||||
'-in', csr_p, '-days', self.default_expiry,
|
|
||||||
'-CA', self.ca_cert, '-CAkey', self.ca_key,
|
|
||||||
'-set_serial', '01', '-out', crt_p]
|
|
||||||
else:
|
|
||||||
cmd = ['openssl', 'ca', '-config', self.signing_conf,
|
|
||||||
'-extensions', 'req_extensions',
|
|
||||||
'-days', self.default_expiry, '-notext',
|
|
||||||
'-in', csr_p, '-out', crt_p, '-subj', subj, '-batch']
|
|
||||||
log("running %s" % " ".join(cmd), level=DEBUG)
|
|
||||||
subprocess.check_call(cmd, stderr=subprocess.PIPE)
|
|
||||||
|
|
||||||
def get_ca_bundle(self):
|
|
||||||
with open(self.ca_cert) as fh:
|
|
||||||
return fh.read()
|
|
||||||
|
|
||||||
|
|
||||||
CA_CONF_TEMPLATE = """
|
|
||||||
[ ca ]
|
|
||||||
default_ca = CA_default
|
|
||||||
|
|
||||||
[ CA_default ]
|
|
||||||
dir = %(ca_dir)s
|
|
||||||
policy = policy_match
|
|
||||||
database = $dir/index.txt
|
|
||||||
serial = $dir/serial
|
|
||||||
certs = $dir/certs
|
|
||||||
crl_dir = $dir/crl
|
|
||||||
new_certs_dir = $dir/newcerts
|
|
||||||
certificate = $dir/cacert.pem
|
|
||||||
private_key = $dir/private/cacert.key
|
|
||||||
RANDFILE = $dir/private/.rand
|
|
||||||
default_md = default
|
|
||||||
|
|
||||||
[ req ]
|
|
||||||
default_bits = 1024
|
|
||||||
default_md = sha1
|
|
||||||
|
|
||||||
prompt = no
|
|
||||||
distinguished_name = ca_distinguished_name
|
|
||||||
|
|
||||||
x509_extensions = ca_extensions
|
|
||||||
|
|
||||||
[ ca_distinguished_name ]
|
|
||||||
organizationName = %(org_name)s
|
|
||||||
organizationalUnitName = %(org_unit_name)s Certificate Authority
|
|
||||||
|
|
||||||
|
|
||||||
[ policy_match ]
|
|
||||||
countryName = optional
|
|
||||||
stateOrProvinceName = optional
|
|
||||||
organizationName = match
|
|
||||||
organizationalUnitName = optional
|
|
||||||
commonName = supplied
|
|
||||||
|
|
||||||
[ ca_extensions ]
|
|
||||||
basicConstraints = critical,CA:true
|
|
||||||
subjectKeyIdentifier = hash
|
|
||||||
authorityKeyIdentifier = keyid:always, issuer
|
|
||||||
keyUsage = cRLSign, keyCertSign
|
|
||||||
"""
|
|
||||||
|
|
||||||
|
|
||||||
SIGNING_CONF_TEMPLATE = """
|
|
||||||
[ ca ]
|
|
||||||
default_ca = CA_default
|
|
||||||
|
|
||||||
[ CA_default ]
|
|
||||||
dir = %(ca_dir)s
|
|
||||||
policy = policy_match
|
|
||||||
database = $dir/index.txt
|
|
||||||
serial = $dir/serial
|
|
||||||
certs = $dir/certs
|
|
||||||
crl_dir = $dir/crl
|
|
||||||
new_certs_dir = $dir/newcerts
|
|
||||||
certificate = $dir/cacert.pem
|
|
||||||
private_key = $dir/private/cacert.key
|
|
||||||
RANDFILE = $dir/private/.rand
|
|
||||||
default_md = default
|
|
||||||
|
|
||||||
[ req ]
|
|
||||||
default_bits = 1024
|
|
||||||
default_md = sha1
|
|
||||||
|
|
||||||
prompt = no
|
|
||||||
distinguished_name = req_distinguished_name
|
|
||||||
|
|
||||||
x509_extensions = req_extensions
|
|
||||||
|
|
||||||
[ req_distinguished_name ]
|
|
||||||
organizationName = %(org_name)s
|
|
||||||
organizationalUnitName = %(org_unit_name)s machine resources
|
|
||||||
commonName = %(common_name)s
|
|
||||||
|
|
||||||
[ policy_match ]
|
|
||||||
countryName = optional
|
|
||||||
stateOrProvinceName = optional
|
|
||||||
organizationName = match
|
|
||||||
organizationalUnitName = optional
|
|
||||||
commonName = supplied
|
|
||||||
|
|
||||||
[ req_extensions ]
|
|
||||||
basicConstraints = CA:false
|
|
||||||
subjectKeyIdentifier = hash
|
|
||||||
authorityKeyIdentifier = keyid:always, issuer
|
|
||||||
keyUsage = digitalSignature, keyEncipherment, keyAgreement
|
|
||||||
extendedKeyUsage = serverAuth, clientAuth
|
|
||||||
"""
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
@ -1,15 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
File diff suppressed because it is too large
Load Diff
@ -1,88 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
from subprocess import (
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
)
|
|
||||||
|
|
||||||
import six
|
|
||||||
|
|
||||||
|
|
||||||
##################################################
|
|
||||||
# loopback device helpers.
|
|
||||||
##################################################
|
|
||||||
def loopback_devices():
|
|
||||||
'''
|
|
||||||
Parse through 'losetup -a' output to determine currently mapped
|
|
||||||
loopback devices. Output is expected to look like:
|
|
||||||
|
|
||||||
/dev/loop0: [0807]:961814 (/tmp/my.img)
|
|
||||||
|
|
||||||
:returns: dict: a dict mapping {loopback_dev: backing_file}
|
|
||||||
'''
|
|
||||||
loopbacks = {}
|
|
||||||
cmd = ['losetup', '-a']
|
|
||||||
devs = [d.strip().split(' ') for d in
|
|
||||||
check_output(cmd).splitlines() if d != '']
|
|
||||||
for dev, _, f in devs:
|
|
||||||
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
|
|
||||||
return loopbacks
|
|
||||||
|
|
||||||
|
|
||||||
def create_loopback(file_path):
|
|
||||||
'''
|
|
||||||
Create a loopback device for a given backing file.
|
|
||||||
|
|
||||||
:returns: str: Full path to new loopback device (eg, /dev/loop0)
|
|
||||||
'''
|
|
||||||
file_path = os.path.abspath(file_path)
|
|
||||||
check_call(['losetup', '--find', file_path])
|
|
||||||
for d, f in six.iteritems(loopback_devices()):
|
|
||||||
if f == file_path:
|
|
||||||
return d
|
|
||||||
|
|
||||||
|
|
||||||
def ensure_loopback_device(path, size):
|
|
||||||
'''
|
|
||||||
Ensure a loopback device exists for a given backing file path and size.
|
|
||||||
If it a loopback device is not mapped to file, a new one will be created.
|
|
||||||
|
|
||||||
TODO: Confirm size of found loopback device.
|
|
||||||
|
|
||||||
:returns: str: Full path to the ensured loopback device (eg, /dev/loop0)
|
|
||||||
'''
|
|
||||||
for d, f in six.iteritems(loopback_devices()):
|
|
||||||
if f == path:
|
|
||||||
return d
|
|
||||||
|
|
||||||
if not os.path.exists(path):
|
|
||||||
cmd = ['truncate', '--size', size, path]
|
|
||||||
check_call(cmd)
|
|
||||||
|
|
||||||
return create_loopback(path)
|
|
||||||
|
|
||||||
|
|
||||||
def is_mapped_loopback_device(device):
|
|
||||||
"""
|
|
||||||
Checks if a given device name is an existing/mapped loopback device.
|
|
||||||
:param device: str: Full path to the device (eg, /dev/loop1).
|
|
||||||
:returns: str: Path to the backing file if is a loopback device
|
|
||||||
empty string otherwise
|
|
||||||
"""
|
|
||||||
return loopback_devices().get(device, "")
|
|
@ -1,105 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
CalledProcessError,
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
Popen,
|
|
||||||
PIPE,
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
##################################################
|
|
||||||
# LVM helpers.
|
|
||||||
##################################################
|
|
||||||
def deactivate_lvm_volume_group(block_device):
|
|
||||||
'''
|
|
||||||
Deactivate any volume gruop associated with an LVM physical volume.
|
|
||||||
|
|
||||||
:param block_device: str: Full path to LVM physical volume
|
|
||||||
'''
|
|
||||||
vg = list_lvm_volume_group(block_device)
|
|
||||||
if vg:
|
|
||||||
cmd = ['vgchange', '-an', vg]
|
|
||||||
check_call(cmd)
|
|
||||||
|
|
||||||
|
|
||||||
def is_lvm_physical_volume(block_device):
|
|
||||||
'''
|
|
||||||
Determine whether a block device is initialized as an LVM PV.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to inspect.
|
|
||||||
|
|
||||||
:returns: boolean: True if block device is a PV, False if not.
|
|
||||||
'''
|
|
||||||
try:
|
|
||||||
check_output(['pvdisplay', block_device])
|
|
||||||
return True
|
|
||||||
except CalledProcessError:
|
|
||||||
return False
|
|
||||||
|
|
||||||
|
|
||||||
def remove_lvm_physical_volume(block_device):
|
|
||||||
'''
|
|
||||||
Remove LVM PV signatures from a given block device.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to scrub.
|
|
||||||
'''
|
|
||||||
p = Popen(['pvremove', '-ff', block_device],
|
|
||||||
stdin=PIPE)
|
|
||||||
p.communicate(input='y\n')
|
|
||||||
|
|
||||||
|
|
||||||
def list_lvm_volume_group(block_device):
|
|
||||||
'''
|
|
||||||
List LVM volume group associated with a given block device.
|
|
||||||
|
|
||||||
Assumes block device is a valid LVM PV.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to inspect.
|
|
||||||
|
|
||||||
:returns: str: Name of volume group associated with block device or None
|
|
||||||
'''
|
|
||||||
vg = None
|
|
||||||
pvd = check_output(['pvdisplay', block_device]).splitlines()
|
|
||||||
for l in pvd:
|
|
||||||
l = l.decode('UTF-8')
|
|
||||||
if l.strip().startswith('VG Name'):
|
|
||||||
vg = ' '.join(l.strip().split()[2:])
|
|
||||||
return vg
|
|
||||||
|
|
||||||
|
|
||||||
def create_lvm_physical_volume(block_device):
|
|
||||||
'''
|
|
||||||
Initialize a block device as an LVM physical volume.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to initialize.
|
|
||||||
|
|
||||||
'''
|
|
||||||
check_call(['pvcreate', block_device])
|
|
||||||
|
|
||||||
|
|
||||||
def create_lvm_volume_group(volume_group, block_device):
|
|
||||||
'''
|
|
||||||
Create an LVM volume group backed by a given block device.
|
|
||||||
|
|
||||||
Assumes block device has already been initialized as an LVM PV.
|
|
||||||
|
|
||||||
:param volume_group: str: Name of volume group to create.
|
|
||||||
:block_device: str: Full path of PV-initialized block device.
|
|
||||||
'''
|
|
||||||
check_call(['vgcreate', volume_group, block_device])
|
|
@ -1,71 +0,0 @@
|
|||||||
# Copyright 2014-2015 Canonical Limited.
|
|
||||||
#
|
|
||||||
# This file is part of charm-helpers.
|
|
||||||
#
|
|
||||||
# charm-helpers is free software: you can redistribute it and/or modify
|
|
||||||
# it under the terms of the GNU Lesser General Public License version 3 as
|
|
||||||
# published by the Free Software Foundation.
|
|
||||||
#
|
|
||||||
# charm-helpers is distributed in the hope that it will be useful,
|
|
||||||
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
||||||
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
||||||
# GNU Lesser General Public License for more details.
|
|
||||||
#
|
|
||||||
# You should have received a copy of the GNU Lesser General Public License
|
|
||||||
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
|
|
||||||
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
from stat import S_ISBLK
|
|
||||||
|
|
||||||
from subprocess import (
|
|
||||||
check_call,
|
|
||||||
check_output,
|
|
||||||
call
|
|
||||||
)
|
|
||||||
|
|
||||||
|
|
||||||
def is_block_device(path):
|
|
||||||
'''
|
|
||||||
Confirm device at path is a valid block device node.
|
|
||||||
|
|
||||||
:returns: boolean: True if path is a block device, False if not.
|
|
||||||
'''
|
|
||||||
if not os.path.exists(path):
|
|
||||||
return False
|
|
||||||
return S_ISBLK(os.stat(path).st_mode)
|
|
||||||
|
|
||||||
|
|
||||||
def zap_disk(block_device):
|
|
||||||
'''
|
|
||||||
Clear a block device of partition table. Relies on sgdisk, which is
|
|
||||||
installed as pat of the 'gdisk' package in Ubuntu.
|
|
||||||
|
|
||||||
:param block_device: str: Full path of block device to clean.
|
|
||||||
'''
|
|
||||||
# https://github.com/ceph/ceph/commit/fdd7f8d83afa25c4e09aaedd90ab93f3b64a677b
|
|
||||||
# sometimes sgdisk exits non-zero; this is OK, dd will clean up
|
|
||||||
call(['sgdisk', '--zap-all', '--', block_device])
|
|
||||||
call(['sgdisk', '--clear', '--mbrtogpt', '--', block_device])
|
|
||||||
dev_end = check_output(['blockdev', '--getsz',
|
|
||||||
block_device]).decode('UTF-8')
|
|
||||||
gpt_end = int(dev_end.split()[0]) - 100
|
|
||||||
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
|
|
||||||
'bs=1M', 'count=1'])
|
|
||||||
check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
|
|
||||||
'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
|
|
||||||
|
|
||||||
|
|
||||||
def is_device_mounted(device):
|
|
||||||
'''Given a device path, return True if that device is mounted, and False
|
|
||||||
if it isn't.
|
|
||||||
|
|
||||||
:param device: str: Full path of the device to check.
|
|
||||||
:returns: boolean: True if the path represents a mounted device, False if
|
|
||||||
it doesn't.
|
|
||||||
'''
|
|
||||||
is_partition = bool(re.search(r".*[0-9]+\b", device))
|
|
||||||
out = check_output(['mount']).decode('UTF-8')
|
|
||||||
if is_partition:
|
|
||||||
return bool(re.search(device + r"\b", out))
|
|
||||||
return bool(re.search(device + r"[0-9]*\b", out))
|
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue
Block a user