Retire metalsmith

This removes all content except an informative README.rst and a
.gitreview, as documented in infra-manual.

Depends-On: https://review.opendev.org/c/openstack/project-config/+/963422
Change-Id: Id44d69caa36e92994ae1eba055068745d59207f7
Signed-off-by: Jay Faulkner <jay@jvf.cc>
Jay Faulkner
2025-10-08 09:58:19 -07:00
parent 935224e709
commit eaf7ec9be3
111 changed files with 10 additions and 10331 deletions


@@ -1,2 +0,0 @@
[DEFAULT]
test_path=./metalsmith/test


@@ -1,184 +0,0 @@
- job:
name: metalsmith-integration-base
description: |
Base job for devstack-based metalsmith jobs.
parent: devstack-minimal
nodeset: openstack-single-node-jammy
post-run: playbooks/integration/post.yaml
run: playbooks/integration/run.yaml
irrelevant-files:
- ^.*\.rst$
- ^api-ref/.*$
- ^doc/.*$
- ^lower-constraints.txt$
- ^install-guide/.*$
- ^ironic/tests/.*$
- ^metalsmith/test/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^test-requirements.txt$
- ^tools/.*$
- ^tox.ini$
timeout: 5400
required-projects:
- openstack/diskimage-builder
- openstack/keystone
- openstack/neutron
- openstack/glance
- openstack/swift
- openstack/ironic
- openstack/ironic-python-agent-builder
- openstack/metalsmith
vars:
ironic_bm_logs: /opt/stack/logs/ironic-bm-logs
devstack_services:
dstat: false
etcd3: true
mysql: true
rabbit: true
tls-proxy: true
# Keystone services
key: true
# Glance services
g-api: true
g-reg: true
# Nova services
n-api: false
n-api-meta: false
n-cauth: false
n-cond: false
n-cpu: false
n-novnc: false
n-obj: false
n-sch: false
placement-api: false
# Neutron services
q-agt: true
q-dhcp: true
q-l3: true
q-meta: true
q-metering: true
q-svc: true
# OVN services
ovn-controller: false
ovn-northd: false
ovs-vswitchd: false
ovsdb-server: false
q-ovn-metadata-agent: false
# Swift services
s-account: true
s-container: true
s-object: true
s-proxy: true
# Cinder services
c-api: false
c-bak: false
c-sch: false
c-vol: false
cinder: false
# Services we don't need.
horizon: false
tempest: false
devstack_plugins:
ironic: https://opendev.org/openstack/ironic
devstack_localrc:
Q_AGENT: openvswitch
Q_ML2_TENANT_NETWORK_TYPE: vxlan
Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch
GLANCE_LIMIT_IMAGE_SIZE_TOTAL: 5000
IRONIC_BAREMETAL_BASIC_OPS: true
IRONIC_BUILD_DEPLOY_RAMDISK: false
IRONIC_BOOT_MODE: uefi
IRONIC_DEFAULT_DEPLOY_INTERFACE: direct
IRONIC_DEPLOY_DRIVER: ipmi
IRONIC_RAMDISK_TYPE: tinyipa
IRONIC_VM_COUNT: 1
IRONIC_VM_LOG_DIR: /opt/stack/logs/ironic-bm-logs
IRONIC_VM_SPECS_CPU: 2
IRONIC_VM_SPECS_DISK: 10
IRONIC_VM_SPECS_RAM: 1024
LIBVIRT_STORAGE_POOL_PATH: /opt/libvirt/images
SWIFT_ENABLE_TEMPURLS: true
SWIFT_HASH: 54bd5642300c4b45-846f8636a70a07d2
SWIFT_START_ALL_SERVICES: false
SWIFT_TEMPURL_KEY: 54bd5642300c4b45846f8636a70a07d2
IRONIC_PXE_BOOT_RETRY_TIMEOUT: 600
# Tell devstack to set an owner project as metalsmith
# testing is executed with a devstack-admin OS_CLOUD
# which means all actions use it, and with newer RBAC,
# the node cannot be seen in that case.
IRONIC_SET_NODE_OWNER: admin
centos_glance_initramds_image: test-centos-initramfs
centos_glance_kernel_image: test-centos-kernel
centos_glance_root_image: test-centos-partition
centos_glance_whole_disk_image: test-centos-wholedisk
metalsmith_netboot: false
metalsmith_root_size: 9
metalsmith_python: python3
- job:
name: metalsmith-integration-glance-centos9-uefi
nodeset: openstack-single-node-jammy
description: |
Integration job using Glance as image source and Centos Stream 9 with
local boot and UEFI. Images are built with diskimage-builder.
parent: metalsmith-integration-base
timeout: 8500
run: playbooks/integration/centos9-integration.yaml
vars:
devstack_localrc:
# NOTE(dtantsur): we need to use streaming, otherwise the image won't
# fit in RAM.
IRONIC_AGENT_IMAGE_DOWNLOAD_SOURCE: http
IRONIC_VM_SPECS_RAM: 3072
devstack_services:
# Since we're using streaming, try non-swift backends.
s-account: false
s-container: false
s-object: false
s-proxy: false
configure_instance_user: metalsmith
metalsmith_precreate_port: false
metalsmith_partition_image: test-centos-partition
metalsmith_whole_disk_image: test-centos-wholedisk
metalsmith_swap_size: 1024
metalsmith_traits: [CUSTOM_GOLD]
- job:
name: metalsmith-integration-http-cirros
description: |
Integration job using HTTP as image source and direct deploy.
parent: metalsmith-integration-base
run: playbooks/integration/run.yaml
vars:
metalsmith_precreate_port: true
metalsmith_use_http: true
- job:
name: metalsmith-tox-codespell
parent: openstack-tox
timeout: 7200
vars:
tox_envlist: codespell
- project:
templates:
- check-requirements
- openstack-python3-jobs
- openstack-cover-jobs
- publish-openstack-docs-pti
- release-notes-jobs-python3
check:
jobs:
- metalsmith-integration-glance-centos9-uefi
- metalsmith-integration-http-cirros
#NOTE(rpittau): disabling ansible linters for the time being
# we need to update ansible-lint to make it work on ubuntu noble
#- openstack-tox-linters
- metalsmith-tox-codespell:
voting: false
gate:
jobs:
- metalsmith-integration-glance-centos9-uefi
- metalsmith-integration-http-cirros
#- openstack-tox-linters

LICENSE

@@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -1,46 +1,13 @@
Deployment and Scheduling tool for Bare Metal
=============================================
This project is no longer maintained.
.. image:: https://governance.openstack.org/badges/metalsmith.svg
:target: https://governance.openstack.org/reference/tags/index.html
The contents of this repository are still available in the Git
source code management system. To see the contents of this
repository before it reached its end of life, please check out the
previous commit with "git checkout HEAD^1".
Overview
--------
Users needing an Ironic client should utilize
`python-ironicclient <https://opendev.org/openstack/python-ironicclient>`_.
This is a simple tool to provision bare metal machines using `OpenStack Bare
Metal Service (ironic) <https://docs.openstack.org/ironic/latest/>`_ and,
optionally, `OpenStack Image Service (glance)
<https://docs.openstack.org/glance/latest/>`_ and `OpenStack Networking
Service (neutron) <https://docs.openstack.org/neutron/latest/>`_.
Please note the Metalsmith project is in maintenance mode and its
functionality is slowly being introduced into Ironic's API and the usual clients.
No additional features are expected to be added to metalsmith directly.
* License: Apache License, Version 2.0
* Documentation: https://docs.openstack.org/metalsmith/
* Source: https://opendev.org/openstack/metalsmith
* Bugs: https://bugs.launchpad.net/metalsmith/+bugs
Installation
------------
::
pip install --user metalsmith
.. note::
The current versions of *metalsmith* require Bare Metal API from the Stein
release or newer. Use the 0.11 release series for older versions.
Contributing
------------
* Pull requests: `Gerrit
<https://review.opendev.org/q/project:openstack/metalsmith>`_
(see `developer's guide
<https://docs.openstack.org/infra/manual/developers.html>`_)
* Bugs and RFEs: `Launchpad
<https://bugs.launchpad.net/metalsmith/+bugs>`_
(please do NOT report bugs to Github)
For any further questions, please email
openstack-discuss@lists.openstack.org or join #openstack-dev on
OFTC.
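
For those migrating away, a minimal openstacksdk sketch for reaching the Bare
Metal API directly (the ``mycloud`` profile name is a placeholder;
python-ironicclient provides the equivalent CLI)::

    import openstack

    # Placeholder clouds.yaml profile with access to the Bare Metal API.
    conn = openstack.connect(cloud='mycloud')
    for node in conn.baremetal.nodes():
        print(node.name, node.provision_state)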


@@ -1,4 +0,0 @@
# This file is needed because readthedocs people refuse to support several
# requirements files at once, and using autodoc requires installing both.
-r requirements.txt
-r ../requirements.txt


@@ -1,9 +0,0 @@
# The order of packages is significant, because pip processes them in the order
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
reno>=3.1.0 # Apache-2.0
sphinx>=2.0.0,!=2.1.0 # BSD
sphinxcontrib-apidoc>=0.2.0 # BSD
sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD
openstackdocstheme>=2.2.1 # Apache-2.0
ansible>=2.8


@@ -1,268 +0,0 @@
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import importlib
import os
from docutils import core
from docutils import nodes
from docutils.parsers import rst
from docutils.parsers.rst import Directive
from docutils.writers.html4css1 import Writer
import yaml
DOCYAML = yaml
DOCYAML.default_flow_style = False
class AnsibleAutoPluginDirective(Directive):
directive_name = "ansibleautoplugin"
has_content = True
option_spec = {
'module': rst.directives.unchanged,
'role': rst.directives.unchanged,
'documentation': rst.directives.unchanged,
'examples': rst.directives.unchanged
}
@staticmethod
def _render_html(source):
return core.publish_parts(
source=source,
writer=Writer(),
writer_name='html',
settings_overrides={'no_system_messages': True}
)
def make_node(self, title, contents, content_type=None):
section = self._section_block(title=title)
if not content_type:
# Doc section
for content in contents['docs']:
for paragraph in content.split('\n'):
retnode = nodes.paragraph()
retnode.append(self._raw_html_block(data=paragraph))
section.append(retnode)
# Options Section
options_list = nodes.field_list()
options_section = self._section_block(title='Options')
for key, value in contents['options'].items():
options_list.append(
self._raw_fields(
data=value['description'],
field_name=key
)
)
else:
options_section.append(options_list)
section.append(options_section)
# Authors Section
authors_list = nodes.field_list()
authors_list.append(
self._raw_fields(
data=contents['author']
)
)
authors_section = self._section_block(title='Authors')
authors_section.append(authors_list)
section.append(authors_section)
elif content_type == 'yaml':
for content in contents:
section.append(
self._literal_block(
data=content,
dump_data=False
)
)
return section
@staticmethod
def load_module(filename):
spec = importlib.util.spec_from_file_location('__ansible_module__',
filename)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
return module
@staticmethod
def build_documentation(module):
docs = DOCYAML.load(module.DOCUMENTATION, Loader=yaml.FullLoader)
doc_data = dict()
doc_data['docs'] = docs['description']
doc_data['author'] = docs.get('author', list())
doc_data['options'] = docs.get('options', dict())
return doc_data
@staticmethod
def build_examples(module):
examples = DOCYAML.load(module.EXAMPLES, Loader=yaml.FullLoader)
return_examples = list()
for example in examples:
return_examples.append(DOCYAML.dump([example], sort_keys=False))
return return_examples
def _raw_html_block(self, data):
html = self._render_html(source=data)
return nodes.raw('', html['body'], format='html')
def _raw_fields(self, data, field_name=''):
body = nodes.field_body()
if isinstance(data, list):
for item in data:
body.append(self._raw_html_block(data=item))
else:
body.append(self._raw_html_block(data=data))
field = nodes.field()
field.append(nodes.field_name(text=field_name))
field.append(body)
return field
@staticmethod
def _literal_block(data, language='yaml', dump_data=True):
if dump_data:
literal = nodes.literal_block(
text=DOCYAML.dump(data)
)
else:
literal = nodes.literal_block(text=data)
literal['language'] = 'yaml'
return literal
@staticmethod
def _section_block(title, text=None):
section = nodes.section(
title,
nodes.title(text=title),
ids=[nodes.make_id('-'.join(title))],
)
if text:
section_body = nodes.field_body()
section_body.append(nodes.paragraph(text=text))
section.append(section_body)
return section
def _yaml_section(self, to_yaml_data, section_title, section_text=None):
yaml_section = self._section_block(
title=section_title,
text=section_text
)
yaml_section.append(self._literal_block(data=to_yaml_data))
return yaml_section
def _run_role(self, role):
section = self._section_block(
title='Role Documentation',
text='Welcome to the "{}" role documentation.'.format(
os.path.basename(role)
)
)
defaults_file = os.path.join(role, 'defaults', 'main.yml')
if os.path.exists(defaults_file):
with open(defaults_file) as f:
role_defaults = DOCYAML.load(f.read())
section.append(
self._yaml_section(
to_yaml_data=role_defaults,
section_title='Role Defaults',
section_text='This section highlights all of the defaults'
' and variables set within the "{}"'
' role.'.format(os.path.basename(role))
)
)
vars_path = os.path.join(role, 'vars')
if os.path.exists(vars_path):
for v_file in os.listdir(vars_path):
vars_file = os.path.join(vars_path, v_file)
with open(vars_file) as f:
vars_values = DOCYAML.load(f.read())
section.append(
self._yaml_section(
to_yaml_data=vars_values,
section_title='Role Variables: {}'.format(v_file)
)
)
self.run_returns.append(section)
# Document any libraries nested within the role
library_path = os.path.join(role, 'library')
if os.path.exists(library_path):
self.options['documentation'] = True
self.options['examples'] = True
for lib in os.listdir(library_path):
if lib.endswith('.py'):
self._run_module(
module=self.load_module(
filename=os.path.join(
library_path,
lib
)
),
module_title='Embedded module: {}'.format(lib),
example_title='Examples for embedded module'
)
def _run_module(self, module, module_title="Module Documentation",
example_title="Example Tasks"):
if self.options.get('documentation'):
docs = self.build_documentation(module=module)
self.run_returns.append(
self.make_node(
title=module_title,
contents=docs
)
)
if self.options.get('examples'):
examples = self.build_examples(module=module)
self.run_returns.append(
self.make_node(
title=example_title,
contents=examples,
content_type='yaml'
)
)
def run(self):
self.run_returns = list()
if self.options.get('module'):
module = self.load_module(filename=self.options['module'])
self._run_module(module=module)
if self.options.get('role'):
self._run_role(role=self.options['role'])
return self.run_returns
def setup(app):
classes = [
AnsibleAutoPluginDirective,
]
for directive_class in classes:
app.add_directive(directive_class.directive_name, directive_class)
return {'version': '0.2'}


@@ -1,33 +0,0 @@
metalsmith CLI
==============
Deploy Command
--------------
Generic usage is as follows::
metalsmith --os-cloud <CLOUD NAME> deploy --image <GLANCE IMAGE> \
--network <NEUTRON NET> --ssh-public-key <PATH TO SSH PUBLIC KEY> \
--resource-class <RESOURCE CLASS>
This is an example suitable for TripleO (replace ``compute`` with the profile
you want to deploy)::
source ~/stackrc
metalsmith deploy --image overcloud-full --network ctlplane \
--capability profile=compute --ssh-public-key ~/.ssh/id_rsa.pub \
--resource-class baremetal
Undeploy Command
----------------
To remove the deployed instance::
metalsmith --os-cloud <CLOUD NAME> undeploy <NODE UUID>
See Also
--------
For all possible options see the built-in help::
metalsmith --help


@@ -1,91 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys
# Add the project
sys.path.insert(0, os.path.abspath('../..'))
# Add the extensions
sys.path.insert(0, os.path.join(os.path.abspath('.'), '_exts'))
# -- General configuration ----------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = [
'sphinxcontrib.apidoc',
'sphinxcontrib.rsvgconverter',
'openstackdocstheme',
'ansible-autodoc'
]
autoclass_content = 'both'
apidoc_module_dir = '../../metalsmith'
apidoc_output_dir = 'reference/api'
apidoc_excluded_paths = ['test']
apidoc_separate_modules = True
# autodoc generation is a bit aggressive and a nuisance when doing heavy
# text edit cycles.
# execute "export SPHINX_DEBUG=1" in your terminal to disable
# The suffix of source filenames.
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
copyright = '2018, MetalSmith Developers '
# If true, '()' will be appended to :func: etc. cross-reference text.
add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = True
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
openstackdocs_repo_name = 'openstack/metalsmith'
openstackdocs_bug_project = 'metalsmith'
openstackdocs_bug_tag = ''
# -- Options for HTML output --------------------------------------------------
# The theme to use for HTML and HTML Help pages. Major themes that come with
# Sphinx are currently 'default' and 'sphinxdoc'.
# html_theme_path = ["."]
# html_theme = '_theme'
# html_static_path = ['static']
html_theme = 'openstackdocs'
# Output file base name for HTML help builder.
htmlhelp_basename = 'metalsmithdoc'
latex_use_xindy = False
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass
# [howto/manual]).
latex_documents = [
('index',
'doc-metalsmith.tex',
'MetalSmith Documentation',
'MetalSmith Developers', 'manual'),
]
# Example configuration for intersphinx: refer to the Python standard library.
# intersphinx_mapping = {'http://docs.python.org/': None}


@@ -1,39 +0,0 @@
.. include:: ../../README.rst
Python API
----------
The main entry point to the API is :py:class:`metalsmith.Provisioner`.
.. toctree::
:maxdepth: 3
reference/api/metalsmith
.. toctree::
:hidden:
reference/api/modules
Command-Line Interface
----------------------
.. toctree::
:maxdepth: 2
cli/index
Ansible Role
------------
.. toctree::
:maxdepth: 2
user/ansible
Indexes
-------
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`


@@ -1,9 +0,0 @@
.. include:: ../../../metalsmith_ansible/roles/metalsmith_deployment/README.rst
Module - metalsmith_instances
=============================
.. ansibleautoplugin::
:module: metalsmith_ansible/ansible_plugins/modules/metalsmith_instances.py
:documentation: true
:examples: true


@@ -1,20 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from metalsmith._instance import Instance
from metalsmith._instance import InstanceState
from metalsmith._provisioner import Provisioner
__all__ = ['Instance', 'InstanceState', 'Provisioner']
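
These three exports were the whole public API. A hedged end-to-end sketch
using them (the cloud name, resource class, image and network below are
placeholders), matching the signatures visible in the modules that follow::

    from openstack import config as os_config

    from metalsmith import Provisioner
    from metalsmith import sources

    # Placeholder clouds.yaml profile.
    region = os_config.OpenStackConfig().get_one(cloud='mycloud')
    provisioner = Provisioner(cloud_region=region)

    # Reserve a node by resource class, then provision an image onto it.
    node = provisioner.reserve_node(resource_class='baremetal')
    instance = provisioner.provision_node(
        node,
        image=sources.detect('my-image'),
        nics=[{'network': 'private'}],
    )
    print(instance.state)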


@@ -1,264 +0,0 @@
# Copyright 2015-2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import logging
import sys
from openstack import config as os_config
from metalsmith import _format
from metalsmith import _provisioner
from metalsmith import _utils
from metalsmith import instance_config
from metalsmith import sources
LOG = logging.getLogger(__name__)
class NICAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
assert option_string in ('--port', '--network', '--ip', '--subnet')
nics = getattr(namespace, self.dest, None) or []
if option_string == '--ip':
try:
network, ip = values.split(':', 1)
except ValueError:
raise argparse.ArgumentError(
self, '--ip format is NETWORK:IP, got %s' % values)
nics.append({'network': network, 'fixed_ip': ip})
else:
nics.append({option_string[2:]: values})
setattr(namespace, self.dest, nics)
def _do_deploy(api, args, formatter):
wait = None if args.no_wait else args.wait
capabilities = dict(item.split('=', 1) for item in args.capability)
if args.ssh_public_key:
with open(args.ssh_public_key) as fp:
ssh_keys = [fp.read().strip()]
else:
ssh_keys = []
if args.hostname and not _utils.is_hostname_safe(args.hostname):
raise RuntimeError("%s cannot be used as a hostname" % args.hostname)
source = sources.detect(args.image,
kernel=args.image_kernel,
ramdisk=args.image_ramdisk,
checksum=args.image_checksum)
if args.user_name:
config = instance_config.CloudInitConfig(ssh_keys=ssh_keys)
config.add_user(args.user_name, sudo=args.passwordless_sudo)
else:
config = instance_config.GenericConfig(ssh_keys=ssh_keys)
node = api.reserve_node(resource_class=args.resource_class,
conductor_group=args.conductor_group,
capabilities=capabilities,
traits=args.trait,
candidates=args.candidate,
hostname=args.hostname)
instance = api.provision_node(node,
image=source,
nics=args.nics,
root_size_gb=args.root_size,
swap_size_mb=args.swap_size,
config=config,
netboot=args.netboot,
wait=wait,
clean_up_on_failure=not args.no_clean_up)
formatter.deploy(instance)
def _do_undeploy(api, args, formatter):
node = api.unprovision_node(args.node, wait=args.wait)
formatter.undeploy(node)
def _do_show(api, args, formatter):
instances = api.show_instances(args.instance)
formatter.show(instances)
def _do_wait(api, args, formatter):
instances = api.wait_for_provisioning(args.instance,
timeout=args.timeout)
formatter.show(instances)
def _do_list(api, args, formatter):
instances = api.list_instances()
formatter.show(instances)
def _parse_args(args, config):
parser = argparse.ArgumentParser(
description='Deployment and Scheduling tool for Bare Metal')
verbosity = parser.add_mutually_exclusive_group()
verbosity.add_argument('-q', '--quiet', action='store_true',
help='output only errors')
verbosity.add_argument('--debug', action='store_true',
help='output extensive logging')
verbosity.add_argument('-v', '--verbose', action='count', default=0,
dest='verbosity',
help='increase output verbosity, can be specified '
'up to three times')
parser.add_argument('--dry-run', action='store_true',
help='do not take any destructive actions')
parser.add_argument('-f', '--format', choices=list(_format.FORMATS),
default=_format.DEFAULT_FORMAT,
help='output format')
parser.add_argument('-c', '--column', action='append', dest='columns',
choices=_format.FIELDS,
help='for table output, specify column(s) to show')
parser.add_argument('--sort-column', choices=_format.FIELDS,
help='for table output, specify a column to use '
'for sorting')
config.register_argparse_arguments(parser, sys.argv[1:])
subparsers = parser.add_subparsers()
deploy = subparsers.add_parser('deploy')
deploy.set_defaults(func=_do_deploy)
wait_grp = deploy.add_mutually_exclusive_group()
wait_grp.add_argument('--wait', type=int, default=1800,
help='time (in seconds) to wait for node to become '
'active')
wait_grp.add_argument('--no-wait', action='store_true',
help='disable waiting for deploy to finish')
deploy.add_argument('--image', help='image to use (name, UUID or URL)',
required=True)
deploy.add_argument('--image-checksum',
help='image SHA256 checksum or URL with checksums')
deploy.add_argument('--image-kernel', help='URL of the image\'s kernel')
deploy.add_argument('--image-ramdisk', help='URL of the image\'s ramdisk')
deploy.add_argument('--network', help='network to create a port on '
'(name or UUID)', dest='nics', action=NICAction)
deploy.add_argument('--subnet', help='subnet to create a port on '
'(name or UUID)', dest='nics', action=NICAction)
deploy.add_argument('--port', help='port to attach (name or UUID)',
dest='nics', action=NICAction)
deploy.add_argument('--ip', help='attach IP from the network',
dest='nics', metavar='NETWORK:IP', action=NICAction)
deploy.add_argument('--netboot', action='store_true',
help='boot from network instead of local disk')
deploy.add_argument('--root-size', type=int,
help='root partition size (in GiB), defaults to '
'(local_gb - 1)')
deploy.add_argument('--swap-size', type=int,
help='swap partition size (in MiB), defaults to '
'no swap')
deploy.add_argument('--capability', action='append', metavar='NAME=VALUE',
default=[], help='capabilities the node should have')
deploy.add_argument('--trait', action='append',
default=[], help='trait the node should have')
deploy.add_argument('--ssh-public-key', help='SSH public key to load')
deploy.add_argument('--hostname', help='Host name to use, defaults to '
'Node\'s name or UUID')
deploy.add_argument('--resource-class', required=True,
help='node resource class to deploy')
deploy.add_argument('--conductor-group',
help='conductor group to pick the node from')
deploy.add_argument('--candidate', action='append',
help='A candidate node to use for scheduling (can be '
'specified several times)')
deploy.add_argument('--user-name', help='Name of the admin user to create')
deploy.add_argument('--passwordless-sudo', action='store_true',
help='allow password-less sudo for the user')
deploy.add_argument('--no-clean-up', help='Prevent clean up on failure',
action='store_true')
undeploy = subparsers.add_parser('undeploy')
undeploy.set_defaults(func=_do_undeploy)
undeploy.add_argument('node', help='node UUID')
undeploy.add_argument('--wait', type=int,
help='time (in seconds) to wait for node to become '
'available for deployment again')
show = subparsers.add_parser('show')
show.set_defaults(func=_do_show)
show.add_argument('instance', nargs='+', help='instance UUID(s)')
show = subparsers.add_parser('list')
show.set_defaults(func=_do_list)
wait = subparsers.add_parser('wait')
wait.set_defaults(func=_do_wait)
wait.add_argument('instance', nargs='+', help='instance UUID(s)')
wait.add_argument('--timeout', type=int,
help='time (in seconds) to wait for provisioning.')
return parser.parse_args(args)
_URLLIB3_LOGGER = 'urllib3.connectionpool'
def _configure_logging(args):
log_fmt = ('%(asctime)s %(levelname)s %(name)s: %(message)s'
if args.debug or args.verbosity
else '[%(asctime)s] %(message)s')
# Verbosity:
# 0 (the default) - warnings and errors
# 1 - info from metalsmith, warnings and errors from everything else
# 2 - debug from metalsmith, info from everything else
# 3 - the same as --debug
base_level = logging.WARNING
metalsmith_level = base_level
urllib_level = logging.CRITICAL
if args.quiet:
base_level = logging.CRITICAL
metalsmith_level = base_level
elif args.debug or args.verbosity > 2:
base_level = logging.DEBUG
metalsmith_level = base_level
urllib_level = logging.INFO
elif args.verbosity == 2:
base_level = logging.INFO
metalsmith_level = logging.DEBUG
elif args.verbosity == 1:
metalsmith_level = logging.INFO
logging.basicConfig(level=base_level, format=log_fmt)
logging.getLogger('metalsmith').setLevel(metalsmith_level)
logging.getLogger(_URLLIB3_LOGGER).setLevel(urllib_level)
def main(args=sys.argv[1:]):
config = os_config.OpenStackConfig()
args = _parse_args(args, config)
_configure_logging(args)
if args.quiet:
formatter = _format.NULL_FORMAT
else:
formatter = _format.FORMATS[args.format](columns=args.columns,
sort_column=args.sort_column)
region = config.get_one(argparse=args)
api = _provisioner.Provisioner(cloud_region=region, dry_run=args.dry_run)
try:
args.func(api, args, formatter)
except Exception as exc:
LOG.critical('%s', exc, exc_info=args.debug)
sys.exit(1)
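
``NICAction`` above is ordinary ``argparse`` machinery: every
``--port``/``--network``/``--subnet``/``--ip`` occurrence appends one dict to
a shared ``nics`` list. A standalone sketch with invented values::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--network', dest='nics', action=NICAction)
    parser.add_argument('--ip', dest='nics', metavar='NETWORK:IP',
                        action=NICAction)
    args = parser.parse_args(
        ['--network', 'private', '--ip', 'private:10.0.0.5'])
    # args.nics == [{'network': 'private'},
    #               {'network': 'private', 'fixed_ip': '10.0.0.5'}]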


@@ -1,168 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import json
import sys
import prettytable
from metalsmith import _utils
def _print(msg, **kwargs):
print(msg % kwargs)
class NullFormat(object):
"""Formatting outputting nothing.
Used implicitly with --quiet.
"""
def __init__(self, columns=None, sort_column=None):
self.columns = columns
self.sort_column = sort_column
def deploy(self, instance):
pass
def undeploy(self, node):
pass
FIELDS = ['UUID', 'Node Name', 'Allocation UUID', 'Hostname',
'State', 'IP Addresses']
class ValueFormat(NullFormat):
""""Simple value formatter."""
def deploy(self, instance):
"""Output result of the deploy."""
self.show([instance])
def undeploy(self, node):
"""Output result of undeploy."""
if node.provision_state == 'available':
message = "Successfully unprovisioned node %(node)s"
else:
message = "Unprovisioning started for node %(node)s"
_print(message, node=_utils.log_res(node))
def _iter_rows(self, instances):
for instance in instances:
if instance.is_deployed:
ips = '\n'.join('%s=%s' % (net, ','.join(ips))
for net, ips in
instance.ip_addresses().items())
else:
ips = ''
row = [instance.uuid, instance.node.name or '',
instance.allocation.id if instance.allocation else '',
instance.hostname or '', instance.state.name, ips]
yield row
def show(self, instances):
allowed_columns = set(self.columns or FIELDS)
rows = (collections.OrderedDict(zip(FIELDS, row))
for row in self._iter_rows(instances))
if self.sort_column:
rows = sorted(rows, key=lambda row: row.get(self.sort_column))
for row in rows:
_print(' '.join(value if value is not None else ''
for key, value in row.items()
if key in allowed_columns))
class DefaultFormat(ValueFormat):
"""Human-readable formatter."""
def show(self, instances):
if not instances:
_print('') # Compatibility with openstackclient - one empty line
return
pt = prettytable.PrettyTable(field_names=FIELDS)
pt.align = 'l'
if self.sort_column:
pt.sortby = self.sort_column
for row in self._iter_rows(instances):
pt.add_row(row)
if self.columns:
value = pt.get_string(fields=self.columns)
else:
value = pt.get_string()
_print(value)
class JsonFormat(NullFormat):
"""JSON formatter."""
def _get_value_dict(self, instance):
if instance.is_deployed:
ips = '\n'.join('%s=%s' % (net, ','.join(ips))
for net, ips in
instance.ip_addresses().items())
else:
ips = ''
dict_values = [instance.uuid, instance.node.name or '',
instance.allocation.id if instance.allocation else '',
instance.hostname or '', instance.state.name, ips]
value_dict = dict(zip(FIELDS, dict_values))
return value_dict
def deploy(self, instance):
"""Output result of the deploy."""
json.dump(instance.to_dict(), sys.stdout)
def undeploy(self, node):
"""Output result of undeploy."""
result = {
'node': node.to_dict()
}
json.dump(result, sys.stdout)
def show(self, instances):
"""Output instance statuses."""
if self.columns:
value = {
instance.hostname: {
col: self._get_value_dict(instance).get(col)
for col in self.columns}
for instance in instances}
else:
value = {instance.hostname: instance.to_dict()
for instance in instances}
json.dump(value, sys.stdout)
FORMATS = {
'default': DefaultFormat,
'json': JsonFormat,
'table': DefaultFormat,
'value': ValueFormat,
}
"""Available formatters."""
DEFAULT_FORMAT = 'table'
"""Default formatter."""
NULL_FORMAT = NullFormat()
"""Formatter outputting nothing."""


@@ -1,173 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import enum
import logging
from metalsmith import _utils
LOG = logging.getLogger(__name__)
_PROGRESS_STATES = frozenset(['deploying', 'wait call-back',
'deploy complete'])
_ACTIVE_STATES = frozenset(['active'])
_ERROR_STATES = frozenset(['error', 'deploy failed'])
_RESERVED_STATES = frozenset(['available'])
class InstanceState(enum.Enum):
"""A state of an instance."""
DEPLOYING = 'deploying'
"""Provisioning is in progress.
This includes the case when a node is still in the ``available`` state, but
already has an instance associated with it.
"""
ACTIVE = 'active'
"""The instance is provisioned."""
MAINTENANCE = 'maintenance'
"""The instance is provisioned but is in the maintenance mode."""
ERROR = 'error'
"""The instance has a failure."""
UNKNOWN = 'unknown'
"""The node is in an unexpected state.
It can be unprovisioned or modified by a third party.
"""
@property
def is_deployed(self):
"""Whether the state designates a finished deployment."""
return self in _DEPLOYED_STATES
@property
def is_healthy(self):
"""Whether the state is considered healthy."""
return self in _HEALTHY_STATES
_HEALTHY_STATES = frozenset([InstanceState.ACTIVE, InstanceState.DEPLOYING])
_DEPLOYED_STATES = frozenset([InstanceState.ACTIVE, InstanceState.MAINTENANCE])
class Instance(object):
"""Instance status in metalsmith."""
network_cache = dict()
def __init__(self, connection, node, allocation=None):
self._connection = connection
self._uuid = node.id
self._node = node
self._allocation = allocation
@property
def allocation(self):
"""Allocation object associated with the node (if any)."""
return self._allocation
@property
def hostname(self):
"""Node's hostname."""
return _utils.hostname_for(self._node, self._allocation)
def ip_addresses(self):
"""Returns IP addresses for this instance.
:return: dict mapping network name or ID to a list of IP addresses.
"""
result = {}
for nic in self.nics():
net = getattr(nic.network, 'name', None) or nic.network.id
result.setdefault(net, []).extend(
ip['ip_address'] for ip in nic.fixed_ips
if ip.get('ip_address')
)
return result
@property
def is_deployed(self):
"""Whether the node is deployed."""
return self.state.is_deployed
@property
def is_healthy(self):
"""Whether the instance is not at fault or maintenance."""
return self.state.is_healthy and not self._node.is_maintenance
def nics(self):
"""List NICs for this instance.
:return: List of `Port` objects with additional ``network`` fields
with full representations of their networks.
"""
result = []
ports_query = {'binding:host_id': self.node.id}
ports = self._connection.network.ports(**ports_query)
for port in ports:
if port.network_id not in Instance.network_cache:
Instance.network_cache[port.network_id] = (
self._connection.network.get_network(port.network_id))
port.network = Instance.network_cache[port.network_id]
result.append(port)
return result
@property
def node(self):
"""Underlying `Node` object."""
return self._node
@property
def state(self):
"""Instance state, one of :py:class:`InstanceState`."""
prov_state = self._node.provision_state
if prov_state in _PROGRESS_STATES:
return InstanceState.DEPLOYING
# NOTE(dtantsur): include available since there is a period of time
# between claiming the instance and starting the actual provisioning.
elif prov_state in _RESERVED_STATES and self._node.instance_id:
return InstanceState.DEPLOYING
elif prov_state in _ERROR_STATES:
return InstanceState.ERROR
elif prov_state in _ACTIVE_STATES:
if self._node.is_maintenance:
return InstanceState.MAINTENANCE
else:
return InstanceState.ACTIVE
else:
return InstanceState.UNKNOWN
def to_dict(self):
"""Convert instance to a dict."""
return {
'allocation': (self._allocation.to_dict()
if self._allocation is not None else None),
'hostname': self.hostname,
'ip_addresses': self.ip_addresses(),
'node': self._node.to_dict(),
'state': self.state.value,
'uuid': self._uuid,
}
@property
def uuid(self):
"""Instance UUID (the same as `Node` UUID for metalsmith)."""
return self._uuid


@@ -1,118 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import ipaddress
import logging
from openstack import exceptions as sdk_exc
from metalsmith import exceptions
LOG = logging.getLogger(__name__)
def create_network_metadata(connection, attached_ports, node_id):
network_data = {}
if not attached_ports:
return network_data
# NOTE(TheJulia): This method has no unit testing.... It should,
# but given the state of maintenance of Metalsmith, it doesn't make
# sense to add a bunch of tests here.
links = network_data.setdefault('links', [])
networks = network_data.setdefault('networks', [])
services = network_data.setdefault('services', [])
try:
# Gets a list of Ironic ports
ironic_ports = list(connection.baremetal.ports(node_id=node_id,
details=True))
except sdk_exc.SDKException as exc:
raise exceptions.NetworkResourceNotFound(
'Failed to retrieve a list of baremetal ports: %s' % exc)
for attached_port in attached_ports:
try:
port_mac = None
for p in ironic_ports:
if 'tenant_vif_port_id' in p.internal_info:
port_vif = p.internal_info.get('tenant_vif_port_id')
if port_vif == attached_port:
port_mac = p.address
if port_mac is None:
raise exceptions.NetworkResourceNotFound(
'Could not find the baremetal MAC address of '
'neutron port %s.' % attached_port)
port = connection.network.get_port(attached_port)
net = connection.network.get_network(port.network_id)
subnets = [connection.network.get_subnet(x['subnet_id'])
for x in port.fixed_ips]
subnets_by_id = {x.id: x for x in subnets}
except sdk_exc.SDKException as exc:
raise exceptions.NetworkResourceNotFound(
'Cannot find network resource: %s' % exc)
metadata_add_links(links, port, net, port_mac)
metadata_add_services(services, subnets)
for idx, fixed_ip in enumerate(port.fixed_ips):
subnet = subnets_by_id[fixed_ip['subnet_id']]
metadata_add_network(networks, idx, fixed_ip, port, net, subnet)
return network_data
def metadata_add_links(links, port, network, port_mac):
links.append({'id': port.id,
'type': 'phy',
'mtu': network.mtu,
'ethernet_mac_address': port_mac})
def metadata_add_services(services, subnets):
for subnet in subnets:
for dns_nameserver in subnet.dns_nameservers:
services.append({'type': 'dns',
'address': dns_nameserver})
def metadata_add_network(networks, idx, fixed_ip, port, network, subnet):
ip_net = ipaddress.ip_network(subnet.cidr)
net_data = {'id': network.name + str(idx),
'network_id': network.id,
'link': port.id,
'ip_address': fixed_ip['ip_address'],
'netmask': str(ip_net.netmask)}
if subnet.ip_version == 4:
net_data['type'] = 'ipv4_dhcp' if subnet.is_dhcp_enabled else 'ipv4'
elif subnet.ip_version == 6:
net_data['type'] = ('ipv6_{}'.format(subnet.ipv6_address_mode)
if subnet.ipv6_address_mode else 'ipv6')
net_routes = net_data.setdefault('routes', [])
for route in subnet.host_routes:
ip_net = ipaddress.ip_network(route['destination'])
net_routes.append({'network': str(ip_net.network_address),
'netmask': str(ip_net.netmask),
'gateway': route['nexthop']})
# Services go in both "network" and toplevel.
# Ref: https://docs.openstack.org/nova/latest/_downloads/9119ca7ac90aa2990e762c08baea3a36/network_data.json # noqa
net_services = net_data.setdefault('services', [])
metadata_add_services(net_services, [subnet])
networks.append(net_data)
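
For reference, the rough shape of the structure assembled above for a single
IPv4 DHCP port, following the nova ``network_data.json`` format (all values
invented)::

    network_data = {
        'links': [{
            'id': 'PORT_ID',
            'type': 'phy',
            'mtu': 1500,
            'ethernet_mac_address': '52:54:00:aa:bb:cc',
        }],
        'networks': [{
            'id': 'private0',
            'network_id': 'NET_ID',
            'link': 'PORT_ID',
            'ip_address': '10.0.0.5',
            'netmask': '255.255.255.0',
            'type': 'ipv4_dhcp',
            'routes': [],
            'services': [{'type': 'dns', 'address': '203.0.113.1'}],
        }],
        'services': [{'type': 'dns', 'address': '203.0.113.1'}],
    }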


@@ -1,232 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections.abc
import logging
from openstack import exceptions as sdk_exc
from metalsmith import _utils
from metalsmith import exceptions
LOG = logging.getLogger(__name__)
class NICs(object):
"""Requested NICs."""
def __init__(self, connection, node, nics, hostname=None):
if nics is None:
nics = []
if not isinstance(nics, collections.abc.Sequence):
raise TypeError("NICs must be a list of dicts")
for nic in nics:
if not isinstance(nic, collections.abc.Mapping):
raise TypeError("Each NIC must be a dict got %s" % nic)
self._node = node
self._connection = connection
self._nics = nics
self._validated = None
self._hostname = hostname
self.created_ports = []
self.attached_ports = []
def validate(self):
"""Validate provided NIC records."""
if self._validated is not None:
return
result = []
for nic in self._nics:
if 'port' in nic:
result.append(('port', self._get_port(nic)))
elif 'network' in nic:
result.append(('network', self._get_network(nic)))
elif 'subnet' in nic:
result.append(('subnet', self._get_subnet(nic)))
else:
raise exceptions.InvalidNIC(
'Unknown NIC record type, expected "port", "subnet" or '
'"network", got %s' % nic)
self._validated = result
def create_and_attach_ports(self):
"""Attach ports to the node, creating them if requested."""
self.validate()
for nic_type, nic in self._validated:
if nic_type != 'port':
# The 'binding:host_id' must be set to ensure IP allocation
# is not deferred.
# See: https://storyboard.openstack.org/#!/story/2009715
port = self._connection.network.create_port(
binding_host_id=self._node.id, **nic)
self.created_ports.append(port.id)
LOG.info('Created port %(port)s for node %(node)s with '
'%(nic)s', {'port': _utils.log_res(port),
'node': _utils.log_res(self._node),
'nic': nic})
else:
# The 'binding:host_id' must be set to ensure IP allocation
# is not deferred.
# See: https://storyboard.openstack.org/#!/story/2009715
self._connection.network.update_port(
nic, binding_host_id=self._node.id)
port = nic
self._connection.baremetal.attach_vif_to_node(self._node,
port.id)
LOG.info('Attached port %(port)s to node %(node)s',
{'port': _utils.log_res(port),
'node': _utils.log_res(self._node)})
self.attached_ports.append(port.id)
def detach_and_delete_ports(self):
"""Detach attached port and delete previously created ones."""
detach_and_delete_ports(self._connection, self._node,
self.created_ports, self.attached_ports)
def _get_port(self, nic):
"""Validate and get the NIC information for a port.
:param nic: NIC information in the form ``{"port": "<port ident>"}``.
:returns: `Port` object to use.
"""
unexpected = set(nic) - {'port'}
if unexpected:
raise exceptions.InvalidNIC(
'Unexpected fields for a port: %s' % ', '.join(unexpected))
try:
port = self._connection.network.find_port(
nic['port'], ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find port %(port)s: %(error)s' %
{'port': nic['port'], 'error': exc})
return port
def _get_network(self, nic):
"""Validate and get the NIC information for a network.
:param nic: NIC information in the form ``{"network": "<net ident>"}``
or ``{"network": "<net ident>", "fixed_ip": "<desired IP>"}``.
:returns: keyword arguments to use when creating a port.
"""
unexpected = set(nic) - {'network', 'fixed_ip', 'subnet'}
if unexpected:
raise exceptions.InvalidNIC(
'Unexpected fields for a network: %s' % ', '.join(unexpected))
try:
network = self._connection.network.find_network(
nic['network'], ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find network %(net)s: %(error)s' %
{'net': nic['network'], 'error': exc})
fixed_ip = {}
if nic.get('fixed_ip'):
fixed_ip['ip_address'] = nic['fixed_ip']
if nic.get('subnet'):
try:
subnet = self._connection.network.find_subnet(
nic['subnet'], network_id=network.id, ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find subnet %(subnet)s on network %(net)s: '
'%(error)s' %
{'net': nic['network'], 'subnet': nic['subnet'],
'error': exc})
fixed_ip['subnet_id'] = subnet.id
port_args = {'network_id': network.id}
if fixed_ip:
port_args['fixed_ips'] = [fixed_ip]
if self._hostname:
port_args['name'] = '%s-%s' % (self._hostname, network.name)
return port_args
def _get_subnet(self, nic):
"""Validate and get the NIC information for a subnet.
:param nic: NIC information in the form ``{"subnet": "<id or name>"}``.
:returns: keyword arguments to use when creating a port.
"""
unexpected = set(nic) - {'subnet'}
if unexpected:
raise exceptions.InvalidNIC(
'Unexpected fields for a subnet: %s' % ', '.join(unexpected))
try:
subnet = self._connection.network.find_subnet(
nic['subnet'], ignore_missing=False)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find subnet %(sub)s: %(error)s' %
{'sub': nic['subnet'], 'error': exc})
try:
network = self._connection.network.get_network(subnet.network_id)
except sdk_exc.SDKException as exc:
raise exceptions.InvalidNIC(
'Cannot find network %(net)s for subnet %(sub)s: %(error)s' %
{'net': subnet.network_id, 'sub': nic['subnet'], 'error': exc})
port_args = {'network_id': network.id,
'fixed_ips': [{'subnet_id': subnet.id}]}
if self._hostname:
port_args['name'] = '%s-%s' % (self._hostname, network.name)
return port_args
def detach_and_delete_ports(connection, node, created_ports, attached_ports):
"""Detach attached port and delete previously created ones.
:param connection: `openstacksdk.Connection` instance.
:param node: `Node` object to detach ports from.
:param created_ports: List of IDs of previously created ports.
    :param attached_ports: List of IDs of previously attached ports.
"""
for port_id in set(attached_ports + created_ports):
LOG.debug('Detaching port %(port)s from node %(node)s',
{'port': port_id, 'node': _utils.log_res(node)})
try:
connection.baremetal.detach_vif_from_node(node, port_id)
except Exception as exc:
LOG.debug('Failed to remove VIF %(vif)s from node %(node)s, '
'assuming already removed: %(exc)s',
{'vif': port_id, 'node': _utils.log_res(node),
'exc': exc})
for port_id in created_ports:
LOG.debug('Deleting port %s', port_id)
try:
connection.network.delete_port(port_id, ignore_missing=False)
except Exception as exc:
LOG.warning('Failed to delete neutron port %(port)s: %(exc)s',
{'port': port_id, 'exc': exc})
else:
LOG.info('Deleted port %(port)s for node %(node)s',
{'port': port_id, 'node': _utils.log_res(node)})
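# Illustrative usage sketch (not part of the original module); the port IDs
# below are placeholders:
#
#     detach_and_delete_ports(conn, node,
#                             created_ports=['port-uuid-1'],
#                             attached_ports=['port-uuid-1', 'port-uuid-2'])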

View File

@@ -1,737 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import warnings
from openstack import connection
from openstack import exceptions as os_exc
from metalsmith import _instance
from metalsmith import _network_metadata
from metalsmith import _nics
from metalsmith import _scheduler
from metalsmith import _utils
from metalsmith import exceptions
from metalsmith import instance_config
from metalsmith import sources
LOG = logging.getLogger(__name__)
_CREATED_PORTS = 'metalsmith_created_ports'
_ATTACHED_PORTS = 'metalsmith_attached_ports'
_PRESERVE_INSTANCE_INFO_KEYS = {'capabilities', 'traits'}
class Provisioner(object):
"""API to deploy/undeploy nodes with OpenStack.
:param session: `Session` object (from ``keystoneauth``) to use when
making API requests. Mutually exclusive with **cloud_region**.
:param cloud_region: cloud configuration object (from ``openstacksdk``)
to use when making API requests. Mutually exclusive with **session**.
:param dry_run: boolean value, set to ``True`` to prevent any API calls
from being actually made.
:ivar connection: `openstacksdk` `Connection` object used for accessing
OpenStack API during provisioning.
"""
allocations_cache = dict()
def __init__(self, session=None, cloud_region=None, dry_run=False):
if cloud_region is None:
if session is None:
raise TypeError('Either session or cloud_region must '
'be provided')
self.connection = connection.Connection(session=session)
elif session is not None:
raise TypeError('Either session or cloud_region must be provided, '
'but not both')
else:
self.connection = connection.Connection(config=cloud_region)
self._dry_run = dry_run
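    # Illustrative sketch (not original code): building a Provisioner from an
    # openstacksdk cloud region; the cloud name "mycloud" is a placeholder.
    #
    #     from openstack import config as os_config
    #     region = os_config.OpenStackConfig().get_one(cloud='mycloud')
    #     provisioner = Provisioner(cloud_region=region, dry_run=True)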
def reserve_node(self, resource_class, conductor_group=None,
capabilities=None, traits=None, candidates=None,
predicate=None, hostname=None):
"""Find and reserve a suitable node.
Example::
node = provisioner.reserve_node("compute",
capabilities={"boot_mode": "uefi"})
:param resource_class: Requested resource class.
:param conductor_group: Conductor group to pick the nodes from.
            Value ``None`` means any group; use an empty string "" for nodes
            from the default group.
:param capabilities: Requested capabilities as a dict.
:param traits: Requested traits as a list of strings.
:param candidates: List of nodes (UUIDs, names or `Node` objects)
to pick from. The filters (for resource class and capabilities)
are still applied to the provided list. The order in which
the nodes are considered is retained.
:param predicate: Custom predicate to run on nodes. A callable that
accepts a node and returns ``True`` if it should be included,
``False`` otherwise. Any exceptions are propagated to the caller.
:param hostname: Hostname to assign to the instance. Defaults to the
node's name or UUID.
:return: reserved `Node` object.
:raises: :py:class:`metalsmith.exceptions.ReservationFailed`
"""
capabilities = capabilities or {}
_utils.check_hostname(hostname)
if candidates or capabilities or conductor_group or predicate:
# Predicates, capabilities and conductor groups are not supported
# by the allocation API natively, so we need to use prefiltering.
candidates = self._prefilter_nodes(resource_class,
conductor_group=conductor_group,
capabilities=capabilities,
candidates=candidates,
predicate=predicate)
node = self._reserve_node(resource_class, hostname=hostname,
candidates=candidates, traits=traits,
capabilities=capabilities)[0]
return node
def _prefilter_nodes(self, resource_class, conductor_group, capabilities,
candidates, predicate):
"""Build a list of candidate nodes for allocation."""
if candidates:
try:
nodes = [self._get_node(node) for node in candidates]
except os_exc.ResourceNotFound as exc:
raise exceptions.InvalidNode(str(exc))
else:
nodes = list(self.connection.baremetal.nodes(
details=True,
associated=False,
provision_state='available',
maintenance=False,
resource_class=resource_class,
conductor_group=conductor_group))
if not nodes:
raise exceptions.NodesNotFound(resource_class, conductor_group)
filters = [
_scheduler.NodeTypeFilter(resource_class, conductor_group),
]
if capabilities:
filters.append(_scheduler.CapabilitiesFilter(capabilities))
if predicate is not None:
filters.append(_scheduler.CustomPredicateFilter(predicate))
return _scheduler.run_filters(filters, nodes)
def _reserve_node(self, resource_class, hostname=None, candidates=None,
traits=None, capabilities=None,
update_instance_info=True):
"""Create an allocation with given parameters."""
if candidates:
candidates = [
(node.id if not isinstance(node, str) else node)
for node in candidates
]
LOG.debug('Creating an allocation for resource class %(rsc)s '
'with traits %(traits)s and candidate nodes %(candidates)s',
{'rsc': resource_class, 'traits': traits,
'candidates': candidates})
try:
allocation = self.connection.baremetal.create_allocation(
name=hostname, candidate_nodes=candidates,
resource_class=resource_class, traits=traits)
except os_exc.SDKException as exc:
# Re-raise the expected exception class
raise exceptions.ReservationFailed(
'Failed to create an allocation: %s' % exc)
node = None
try:
try:
allocation = self.connection.baremetal.wait_for_allocation(
allocation)
except os_exc.SDKException as exc:
# Re-raise the expected exception class
raise exceptions.ReservationFailed(
'Failed to reserve a node: %s' % exc)
LOG.info('Successful allocation %(alloc)s for host %(host)s',
{'alloc': allocation, 'host': hostname})
node = self.connection.baremetal.get_node(allocation.node_id)
if update_instance_info:
node = self._patch_reserved_node(node, allocation, hostname,
capabilities)
except Exception as exc:
with _utils.reraise_os_exc(
exceptions.ReservationFailed,
'Failed to delete failed allocation') as expected:
LOG.error('Processing allocation %(alloc)s for node %(node)s '
'failed: %(exc)s; deleting allocation',
{'alloc': _utils.log_res(allocation),
'node': _utils.log_res(node), 'exc': exc},
exc_info=not expected)
self.connection.baremetal.delete_allocation(allocation)
LOG.debug('Reserved node: %s', node)
return node, allocation
def _patch_reserved_node(self, node, allocation, hostname, capabilities):
"""Make required updates on a newly reserved node."""
if capabilities:
patch = [{'path': '/instance_info/capabilities',
'op': 'add', 'value': capabilities}]
LOG.debug('Patching reserved node %(node)s with %(patch)s',
{'node': _utils.log_res(node), 'patch': patch})
return self.connection.baremetal.patch_node(node, patch)
else:
return node
def _check_node_for_deploy(self, node, hostname):
"""Check that node is ready and reserve it if needed.
These checks are done outside of the try..except block in
``provision_node``, so that we don't touch nodes that fail it at all.
Particularly, we don't want to try clean up nodes that were not
reserved by us or are in maintenance mode.
"""
if node.is_maintenance:
raise exceptions.InvalidNode('Refusing to deploy on node %(node)s '
'which is in maintenance mode due to '
'%(reason)s' %
{'node': _utils.log_res(node),
'reason': node.maintenance_reason})
allocation = None
# Make sure the hostname does not correspond to an existing allocation
# for another node.
if hostname is not None:
allocation = self._check_allocation_for_hostname(node, hostname)
if node.allocation_id:
if allocation is None:
# Previously created allocation, verify/update it
allocation = self._check_and_update_allocation_for_node(
node, hostname)
elif node.instance_id:
# Old-style reservations with instance_uuid==node.uuid
if node.instance_id != node.id:
raise exceptions.InvalidNode(
'Node %(node)s already reserved by instance %(inst)s '
'outside of metalsmith, cannot deploy on it' %
{'node': _utils.log_res(node), 'inst': node.instance_id})
elif hostname:
# We have no way to update hostname without allocations
raise exceptions.InvalidNode(
'Node %s does not use allocations, cannot update '
'hostname for it' % _utils.log_res(node))
else:
# Node is not reserved at all - reserve it
if not node.resource_class:
raise exceptions.InvalidNode(
'Cannot create an allocation for node %s that '
'does not have a resource class set'
% _utils.log_res(node))
if not self._dry_run:
if not hostname:
hostname = _utils.default_hostname(node)
LOG.debug('Node %(node)s is not reserved yet, reserving for '
'hostname %(host)s',
{'node': _utils.log_res(node),
'host': hostname})
# Not updating instance_info since it will be updated later
node, allocation = self._reserve_node(
node.resource_class,
hostname=hostname,
candidates=[node.id],
update_instance_info=False)
return node, allocation
def _check_allocation_for_hostname(self, node, hostname):
try:
allocation = self.connection.baremetal.get_allocation(
hostname)
except os_exc.ResourceNotFound:
return
if allocation.node_id and allocation.node_id != node.id:
raise ValueError("The following node already uses "
"hostname %(host)s: %(node)s" %
{'host': hostname,
'node': allocation.node_id})
else:
return allocation
def _check_and_update_allocation_for_node(self, node, hostname=None):
# No allocation with given hostname, find one corresponding to the
# node.
allocation = self.connection.baremetal.get_allocation(
node.allocation_id)
if allocation.name and hostname and allocation.name != hostname:
# Prevent updating of an existing hostname, since we don't
# understand the intention
raise exceptions.InvalidNode(
"Allocation %(alloc)s associated with node %(node)s "
"uses hostname %(old)s that does not match the expected "
"hostname %(new)s" %
{'alloc': _utils.log_res(allocation),
'node': _utils.log_res(node),
'old': allocation.name,
'new': hostname})
elif not allocation.name and not self._dry_run:
if not hostname:
hostname = _utils.default_hostname(node)
# Set the hostname that was not set in reserve_node.
LOG.debug('Updating allocation %(alloc)s for node '
'%(node)s with hostname %(host)s',
{'alloc': _utils.log_res(allocation),
'node': _utils.log_res(node),
'host': hostname})
allocation = self.connection.baremetal.update_allocation(
allocation, name=hostname)
return allocation
def provision_node(self, node, image, nics=None, root_size_gb=None,
swap_size_mb=None, config=None, hostname=None,
netboot=False, capabilities=None, traits=None,
wait=None, clean_up_on_failure=True):
"""Provision the node with the given image.
Example::
provisioner.provision_node("compute-1", "centos",
nics=[{"network": "private"},
{"network": "external"}],
root_size_gb=50,
wait=3600)
:param node: Node object, UUID or name. Will be reserved first, if
not reserved already. Must be in the "available" state with
maintenance mode off.
:param image: Image source - one of :mod:`~metalsmith.sources`,
`Image` name or UUID.
:param nics: List of virtual NICs to attach to physical ports.
Each item is a dict with a key describing the type of the NIC:
* ``{"port": "<port name or ID>"}`` to use the provided pre-created
port.
* ``{"network": "<network name or ID>"}`` to create a port on the
provided network. Optionally, a ``fixed_ip`` argument can be used
to specify an IP address.
* ``{"subnet": "<subnet name or ID>"}`` to create a port with an IP
address from the provided subnet. The network is determined from
the subnet.
:param root_size_gb: The size of the root partition. By default
the value of the local_gb property is used.
:param swap_size_mb: The size of the swap partition. It's an error
to specify it for a whole disk image.
:param config: configuration to pass to the instance, one of
objects from :py:mod:`metalsmith.instance_config`.
:param hostname: Hostname to assign to the instance. If provided,
overrides the ``hostname`` passed to ``reserve_node``.
        :param netboot: Whether to use network boot for final instances.
            Deprecated and does not work in Ironic Zed.
:param capabilities: Requested capabilities of the node. If present,
overwrites the capabilities set by :meth:`reserve_node`.
Note that the capabilities are not checked against the ones
provided by the node - use :meth:`reserve_node` for that.
:param traits: Requested traits of the node. If present, overwrites
the traits set by :meth:`reserve_node`. Note that the traits are
not checked against the ones provided by the node - use
:meth:`reserve_node` for that.
:param wait: How many seconds to wait for the deployment to finish,
None to return immediately.
:param clean_up_on_failure: If True, then on failure the node is
cleared of instance information, VIFs are detached, created ports
and allocations are deleted.
:return: :py:class:`metalsmith.Instance` object with the current
status of provisioning. If ``wait`` is not ``None``, provisioning
is already finished.
:raises: :py:class:`metalsmith.exceptions.Error`
"""
if netboot:
warnings.warn("Network boot is deprecated and does not work in "
"Ironic Zed", DeprecationWarning)
if config is None:
config = instance_config.GenericConfig()
if isinstance(image, str):
image = sources.GlanceImage(image)
_utils.check_hostname(hostname)
try:
node = self._get_node(node)
except Exception as exc:
raise exceptions.InvalidNode('Cannot find node %(node)s: %(exc)s' %
{'node': node, 'exc': exc})
node, allocation = self._check_node_for_deploy(node, hostname)
nics = _nics.NICs(self.connection, node, nics,
hostname=allocation and allocation.name or None)
try:
root_size_gb = _utils.get_root_disk(root_size_gb, node)
image._validate(self.connection, root_size_gb)
nics.validate()
if capabilities is None:
capabilities = node.instance_info.get('capabilities') or {}
if self._dry_run:
LOG.warning('Dry run, not provisioning node %s',
_utils.log_res(node))
return node
nics.create_and_attach_ports()
capabilities['boot_option'] = 'netboot' if netboot else 'local'
instance_info = self._clean_instance_info(node.instance_info)
if root_size_gb is not None:
instance_info['root_gb'] = root_size_gb
instance_info['capabilities'] = capabilities
if hostname:
instance_info['display_name'] = hostname
extra = node.extra.copy()
extra[_CREATED_PORTS] = nics.created_ports
extra[_ATTACHED_PORTS] = nics.attached_ports
instance_info.update(image._node_updates(self.connection))
if traits is not None:
instance_info['traits'] = traits
if swap_size_mb is not None:
instance_info['swap_mb'] = swap_size_mb
LOG.debug('Updating node %(node)s with instance info %(iinfo)s '
'and extras %(extra)s', {'node': _utils.log_res(node),
'iinfo': instance_info,
'extra': extra})
node = self.connection.baremetal.update_node(
node, instance_info=instance_info, extra=extra)
self.connection.baremetal.validate_node(node)
network_data = _network_metadata.create_network_metadata(
self.connection, node.extra.get(_ATTACHED_PORTS), node.id)
LOG.debug('Generating a configdrive for node %s',
_utils.log_res(node))
cd = config.generate(node, _utils.hostname_for(node, allocation),
network_data)
LOG.debug('Starting provisioning of node %s', _utils.log_res(node))
self.connection.baremetal.set_node_provision_state(
node, 'active', config_drive=cd)
except Exception:
with _utils.reraise_os_exc(
exceptions.DeploymentFailed) as expected:
if clean_up_on_failure:
LOG.error('Deploy attempt failed on node %s, cleaning up',
_utils.log_res(node), exc_info=not expected)
self._clean_up(node, nics=nics)
LOG.info('Provisioning started on node %s', _utils.log_res(node))
if wait is not None:
LOG.debug('Waiting for node %(node)s to reach state active '
'with timeout %(timeout)s',
{'node': _utils.log_res(node), 'timeout': wait})
instance = self.wait_for_provisioning([node], timeout=wait)[0]
LOG.info('Deploy succeeded on node %s', _utils.log_res(node))
else:
            # Update the node to return its latest state
node = self.connection.baremetal.get_node(node.id)
instance = _instance.Instance(self.connection, node, allocation)
return instance
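    # Illustrative sketch: provisioning with an HTTP image and a mix of NIC
    # types; all names and URLs below are placeholders.
    #
    #     instance = provisioner.provision_node(
    #         node,
    #         sources.HttpWholeDiskImage(
    #             'https://example.com/img.qcow2',
    #             checksum_url='https://example.com/SHA256SUMS'),
    #         nics=[{'port': 'uplink-port'},
    #               {'network': 'private', 'fixed_ip': '10.0.0.5'},
    #               {'subnet': 'storage-subnet'}],
    #         hostname='compute-1',
    #         wait=3600)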
def wait_for_provisioning(self, nodes, timeout=None):
"""Wait for nodes to be provisioned.
Loops until all nodes finish provisioning.
:param nodes: List of nodes (UUID, name, `Node` object or
:py:class:`metalsmith.Instance`).
:param timeout: How much time (in seconds) to wait for all nodes
to finish provisioning. If ``None`` (the default), wait forever
            (more precisely, until the operation times out on the server side).
:return: List of updated :py:class:`metalsmith.Instance` objects if
all succeeded.
:raises: :py:class:`metalsmith.exceptions.DeploymentFailed`
if deployment fails or times out.
:raises: :py:class:`metalsmith.exceptions.InstanceNotFound`
if requested nodes cannot be found.
"""
nodes = [self._find_node_and_allocation(n)[0] for n in nodes]
try:
nodes = self.connection.baremetal.wait_for_nodes_provision_state(
nodes, 'active', timeout=timeout)
except os_exc.ResourceTimeout as exc:
raise exceptions.DeploymentTimeout(str(exc))
except os_exc.SDKException as exc:
raise exceptions.DeploymentFailed(str(exc))
        # Using _get_instance in case the deployment was started by something
        # external that uses allocations.
return [self._get_instance(node) for node in nodes]
def _clean_instance_info(self, instance_info):
return {key: value
for key, value in instance_info.items()
if key in _PRESERVE_INSTANCE_INFO_KEYS}
def _clean_up(self, node, nics=None, remove_instance_info=True):
if nics is None:
created_ports = node.extra.get(_CREATED_PORTS, [])
attached_ports = node.extra.get(_ATTACHED_PORTS, [])
_nics.detach_and_delete_ports(self.connection, node,
created_ports, attached_ports)
else:
nics.detach_and_delete_ports()
extra = node.extra.copy()
for item in (_CREATED_PORTS, _ATTACHED_PORTS):
extra.pop(item, None)
kwargs = {}
if node.allocation_id and node.provision_state != 'active':
# Try to remove allocation (it will fail for active nodes)
LOG.debug('Trying to remove allocation %(alloc)s for node '
'%(node)s', {'alloc': node.allocation_id,
'node': _utils.log_res(node)})
try:
self.connection.baremetal.delete_allocation(node.allocation_id)
except Exception as exc:
LOG.debug('Failed to remove allocation %(alloc)s for %(node)s:'
' %(exc)s',
{'alloc': node.allocation_id,
'node': _utils.log_res(node), 'exc': exc})
elif not node.allocation_id:
# Old-style reservations have to be cleared explicitly
kwargs['instance_id'] = None
try:
if remove_instance_info:
LOG.debug('Updating node %(node)s with empty instance info '
'(was %(iinfo)s) and extras %(extra)s',
{'node': _utils.log_res(node),
'iinfo': node.instance_info,
'extra': extra})
self.connection.baremetal.update_node(
node, instance_info={}, extra=extra, **kwargs)
else:
LOG.debug('Updating node %(node)s with extras %(extra)s',
{'node': _utils.log_res(node), 'extra': extra})
self.connection.baremetal.update_node(
node, extra=extra, **kwargs)
except Exception as exc:
LOG.debug('Failed to clear node %(node)s extra: %(exc)s',
{'node': _utils.log_res(node), 'exc': exc})
def unprovision_node(self, node, wait=None):
"""Unprovision a previously provisioned node.
:param node: `Node` object, :py:class:`metalsmith.Instance`,
hostname, UUID or node name.
:param wait: How many seconds to wait for the process to finish,
None to return immediately.
:return: the latest `Node` object.
:raises: :py:class:`metalsmith.exceptions.DeploymentFailed`
if undeployment fails.
:raises: :py:class:`metalsmith.exceptions.DeploymentTimeout`
if undeployment times out.
:raises: :py:class:`metalsmith.exceptions.InstanceNotFound`
if requested node cannot be found.
"""
node = self._find_node_and_allocation(node)[0]
if self._dry_run:
LOG.warning("Dry run, not unprovisioning")
return
self._clean_up(node, remove_instance_info=False)
try:
node = self.connection.baremetal.set_node_provision_state(
node, 'deleted', wait=False)
LOG.info('Deleting started for node %s', _utils.log_res(node))
if wait is None:
return node
node = self.connection.baremetal.wait_for_nodes_provision_state(
[node], 'available', timeout=wait)[0]
except os_exc.ResourceTimeout as exc:
raise exceptions.DeploymentTimeout(str(exc))
except os_exc.SDKException as exc:
raise exceptions.DeploymentFailed(str(exc))
LOG.info('Node %s undeployed successfully', _utils.log_res(node))
return node
def show_instance(self, instance_id):
"""Show information about instance.
:param instance_id: hostname, UUID or node name.
:return: :py:class:`metalsmith.Instance` object.
:raises: :py:class:`metalsmith.exceptions.InstanceNotFound`
if the instance is not a valid instance.
"""
return self.show_instances([instance_id])[0]
def show_instances(self, instances):
"""Show information about instance.
More efficient than calling :meth:`show_instance` in a loop, because
it caches the node list.
:param instances: list of hostnames, UUIDs or node names.
:return: list of :py:class:`metalsmith.Instance` objects in the same
order as ``instances``.
:raises: :py:class:`metalsmith.exceptions.InstanceNotFound`
if one of the instances cannot be found or the found node is
not a valid instance.
"""
result = [self._get_instance(inst) for inst in instances]
# NOTE(dtantsur): do not accept node names as valid instances if they
# are not deployed or being deployed.
missing = [inst for (res, inst) in zip(result, instances)
if res.state == _instance.InstanceState.UNKNOWN]
if missing:
raise exceptions.InstanceNotFound(
"Node(s)/instance(s) %s are not valid instances"
% ', '.join(map(str, missing)))
return result
def list_instances(self):
"""List instances deployed by metalsmith.
:return: list of :py:class:`metalsmith.Instance` objects.
"""
nodes = self.connection.baremetal.nodes(associated=True, details=True)
Provisioner.allocations_cache = {
a.id: a for a in self.connection.baremetal.allocations()}
instances = [i for i in map(self._get_instance, nodes)
if i.state != _instance.InstanceState.UNKNOWN]
return instances
    def _get_node(self, node, refresh=False):
        """A helper to find and return a node."""
        if isinstance(node, str):
            return self.connection.baremetal.get_node(node)
        elif hasattr(node, 'node'):
            # Instance object
            node = node.node
        if refresh:
            return self.connection.baremetal.get_node(node.id)
        else:
            return node
def _find_node_and_allocation(self, node, refresh=False):
try:
if (not isinstance(node, str)
or not _utils.is_hostname_safe(node)):
return self._get_node(node, refresh=refresh), None
try:
allocation = self.connection.baremetal.get_allocation(node)
except os_exc.ResourceNotFound:
return self._get_node(node, refresh=refresh), None
except os_exc.ResourceNotFound as exc:
raise exceptions.InstanceNotFound(str(exc))
if allocation.node_id:
try:
return (self.connection.baremetal.get_node(allocation.node_id),
allocation)
except os_exc.ResourceNotFound:
raise exceptions.InstanceNotFound(
'Node %(node)s associated with allocation '
'%(alloc)s was not found' %
{'node': allocation.node_id,
'alloc': allocation.id})
else:
raise exceptions.InstanceNotFound(
'Allocation %s exists but is not associated '
'with a node' % node)
def _get_instance(self, ident):
allocation = None
if hasattr(ident, 'allocation_id'):
node = ident
try:
allocation = Provisioner.allocations_cache[
node.instance_id]
except KeyError:
# NOTE(TheJulia): The pattern here assumes we just haven't
# found the allocation entry, so we try to get it.
if node.allocation_id:
try:
allocation = self.connection.baremetal.get_allocation(
node.allocation_id)
except os_exc.ResourceNotFound as exc:
raise exceptions.InstanceNotFound(str(exc))
elif node.instance_id:
LOG.debug('Discovered node %s without an '
'allocation when we believe it should have '
'an allocation based on the Metalsmith use '
'model. Metalsmith is likely being used '
'in an unsupported case, either in concert '
'with another user of Ironic, OR in a case '
'where a migration was executed incorrectly.',
node.id)
# We have an instance_id, and to be here we have
# no allocation ID.
fix_cmd = ('openstack baremetal allocation create --uuid '
'%(node_instance_id)s --name %(node_name)s '
'--node %(node_id)s' %
{'node_instance_id': node.instance_id,
'node_name': node.name,
'node_id': node.id})
                msg = ('An error has been detected in the state of the '
'instance allocation records inside of Ironic '
'where a node has an assigned Instance ID, but '
'no allocation record. If only Metalsmith is '
'being used with Ironic, this can be safely '
'corrected with manual intervention. '
'To correct this, execute this command: '
'%(cmd)s' %
{'cmd': fix_cmd})
raise exceptions.InstanceNotFound(msg)
else:
node, allocation = self._find_node_and_allocation(ident)
if allocation is None and node.allocation_id:
try:
allocation = self.connection.baremetal.get_allocation(
node.allocation_id)
except os_exc.ResourceNotFound as exc:
raise exceptions.InstanceNotFound(str(exc))
return _instance.Instance(self.connection, node,
allocation=allocation)

View File

@@ -1,177 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import collections
import logging
from metalsmith import _utils
from metalsmith import exceptions
LOG = logging.getLogger(__name__)
class Filter(object, metaclass=abc.ABCMeta):
"""Base class for filters."""
@abc.abstractmethod
def __call__(self, node):
"""Validate this node.
:param node: Node object.
:return: True/False
"""
@abc.abstractmethod
def fail(self):
"""Fail scheduling because no nodes are left.
Must raise an exception.
"""
def run_filters(filters, nodes):
"""Filter the node list by provided filters.
:param filters: List of callable Filter objects to filter/validate nodes.
They are called in passes. If a pass yields no nodes, an error is
raised.
:param nodes: List of input nodes.
:return: The resulting nodes
"""
for f in filters:
f_name = f.__class__.__name__
LOG.debug('Running filter %(filter)s on %(count)d node(s)',
{'filter': f_name, 'count': len(nodes)})
nodes = list(filter(f, nodes))
if not nodes:
LOG.debug('Filter %s yielded no nodes', f_name)
f.fail()
assert False, "BUG: %s.fail did not raise" % f_name
LOG.debug('Filter %(filter)s yielded %(count)d node(s)',
{'filter': f_name, 'count': len(nodes)})
return nodes
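# Illustrative sketch (not original code): a custom filter that keeps only
# nodes with at least 32 GiB of RAM; the threshold is arbitrary.
#
#     class MemoryFilter(Filter):
#         def __call__(self, node):
#             return int(node.properties.get('memory_mb') or 0) >= 32768
#
#         def fail(self):
#             raise exceptions.CustomPredicateFailed(
#                 'No nodes with at least 32 GiB of RAM', [])
#
#     nodes = run_filters([MemoryFilter()], nodes)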
class NodeTypeFilter(Filter):
"""Filter that checks resource class and conductor group."""
def __init__(self, resource_class=None, conductor_group=None):
self.resource_class = resource_class
self.conductor_group = conductor_group
def __call__(self, node):
if node.instance_id:
LOG.debug('Node %s is already reserved', _utils.log_res(node))
return False
if node.is_maintenance:
LOG.debug('Node %s is in maintenance', _utils.log_res(node))
return False
if (self.resource_class is not None
and node.resource_class != self.resource_class):
LOG.debug('Resource class %(real)s does not match the expected '
'value of %(exp)s for node %(node)s',
{'node': _utils.log_res(node),
'exp': self.resource_class,
'real': node.resource_class})
return False
if (self.conductor_group is not None
and node.conductor_group != self.conductor_group):
LOG.debug('Conductor group %(real)s does not match the expected '
'value of %(exp)s for node %(node)s',
{'node': _utils.log_res(node),
'exp': self.conductor_group,
'real': node.conductor_group})
return False
return True
def fail(self):
raise exceptions.NodesNotFound(self.resource_class,
self.conductor_group)
class CapabilitiesFilter(Filter):
"""Filter that checks capabilities."""
def __init__(self, capabilities):
self._capabilities = capabilities
self._counter = collections.Counter()
def __call__(self, node):
if not self._capabilities:
return True
try:
caps = _utils.get_capabilities(node)
except Exception:
LOG.exception('Malformed capabilities on node %(node)s: %(caps)s',
{'node': _utils.log_res(node),
'caps': node.properties.get('capabilities')})
return False
LOG.debug('Capabilities for node %(node)s: %(caps)s',
{'node': _utils.log_res(node), 'caps': caps})
for key, value in self._capabilities.items():
try:
node_value = caps[key]
except KeyError:
LOG.debug('Node %(node)s does not have capability %(cap)s',
{'node': _utils.log_res(node), 'cap': key})
return False
else:
self._counter["%s=%s" % (key, node_value)] += 1
if value != node_value:
LOG.debug('Node %(node)s has capability %(cap)s of '
'value "%(node_val)s" instead of "%(expected)s"',
{'node': _utils.log_res(node), 'cap': key,
'node_val': node_value, 'expected': value})
return False
return True
def fail(self):
existing = ", ".join("%s (%d node(s))" % item
for item in self._counter.items())
requested = ', '.join("%s=%s" % item
for item in self._capabilities.items())
message = ("No available nodes found with capabilities %(req)s, "
"existing capabilities: %(exist)s" %
{'req': requested, 'exist': existing or 'none'})
raise exceptions.CapabilitiesNotFound(message, self._capabilities)
class CustomPredicateFilter(Filter):
def __init__(self, predicate):
self.predicate = predicate
self._failed_nodes = []
def __call__(self, node):
if not self.predicate(node):
self._failed_nodes.append(node)
return False
return True
def fail(self):
message = 'No nodes satisfied the custom predicate %s' % self.predicate
raise exceptions.CustomPredicateFailed(message, self._failed_nodes)

View File

@@ -1,146 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import logging
import re
import sys
from openstack import exceptions as os_exc
from metalsmith import exceptions
LOG = logging.getLogger(__name__)
def log_res(res):
if res is None:
return None
elif getattr(res, 'name', None):
return '%s (UUID %s)' % (res.name, res.id)
else:
return res.id
def get_capabilities(node):
caps = node.properties.get('capabilities') or {}
if not isinstance(caps, dict):
caps = dict(x.split(':', 1) for x in caps.split(',') if x)
return caps
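# Example: capabilities may be stored either as a dict or as a
# comma-separated string; both forms normalize to the same dict:
#
#     'boot_mode:uefi,boot_option:local'
#     -> {'boot_mode': 'uefi', 'boot_option': 'local'}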
def get_root_disk(root_size_gb, node):
"""Validate and calculate the root disk size."""
if root_size_gb is not None:
if not isinstance(root_size_gb, int):
raise TypeError("The root_size_gb argument must be "
"a positive integer, got %r" % root_size_gb)
elif root_size_gb <= 0:
raise ValueError("The root_size_gb argument must be "
"a positive integer, got %d" % root_size_gb)
else:
try:
assert int(node.properties['local_gb']) > 0
except KeyError:
LOG.debug('No local_gb for node %s and no root partition size '
'specified', log_res(node))
return
except (TypeError, ValueError, AssertionError):
raise exceptions.UnknownRootDiskSize(
'The local_gb for node %(node)s is invalid: '
'expected positive integer, got %(value)s' %
{'node': log_res(node),
'value': node.properties['local_gb']})
# allow for partitioning and config drive
root_size_gb = int(node.properties['local_gb']) - 1
return root_size_gb
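# Worked example: for a node with properties['local_gb'] == 50 and no
# explicit root_size_gb, the root partition defaults to 50 - 1 = 49 GiB,
# reserving space for partitioning overhead and the config drive.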
_HOSTNAME_RE = re.compile(r"""^
[a-z0-9][a-z0-9\-]{0,61}[a-z0-9] # host
(\.[a-z0-9][a-z0-9\-]{0,61}[a-z0-9])* # domain
$""", re.IGNORECASE | re.VERBOSE)
def is_hostname_safe(hostname):
"""Check for valid host name.
Nominally, checks that the supplied hostname conforms to:
* http://en.wikipedia.org/wiki/Hostname
* http://tools.ietf.org/html/rfc952
* http://tools.ietf.org/html/rfc1123
:param hostname: The hostname to be validated.
:returns: True if valid. False if not.
"""
if not isinstance(hostname, str) or len(hostname) > 255:
return False
return _HOSTNAME_RE.match(hostname) is not None
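# Examples:
#     is_hostname_safe('compute-1.example.com')  # True
#     is_hostname_safe('-leading-hyphen')        # False
#     is_hostname_safe('x' * 300)                # False (over 255 characters)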
def check_hostname(hostname):
"""Check the provided host name.
:raises: ValueError on inappropriate value of ``hostname``
"""
if hostname is not None and not is_hostname_safe(hostname):
raise ValueError("%s cannot be used as a hostname" % hostname)
def parse_checksums(checksums):
"""Parse standard checksums file."""
result = {}
for line in checksums.split('\n'):
if not line.strip():
continue
checksum, fname = line.strip().split(None, 1)
result[fname.strip().lstrip('*')] = checksum.strip()
return result
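# Example: a standard sha256sum file maps file names to checksums; a leading
# '*' (the binary-mode marker) is stripped from the name:
#
#     parse_checksums('abc123 *image.qcow2\n')
#     -> {'image.qcow2': 'abc123'}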
def default_hostname(node):
if node.name and is_hostname_safe(node.name):
return node.name
else:
return node.id
def hostname_for(node, allocation=None):
if allocation is not None and allocation.name:
return allocation.name
else:
return default_hostname(node)
@contextlib.contextmanager
def reraise_os_exc(reraise_as, failure_message='Clean up failed'):
exc_info = sys.exc_info()
is_expected = isinstance(exc_info[1], os_exc.SDKException)
try:
yield is_expected
except Exception:
LOG.exception(failure_message)
if is_expected:
raise reraise_as(str(exc_info[1]))
else:
raise exc_info[1]

View File

@@ -1,104 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class Error(Exception):
"""Base class for Metalsmith errors."""
class ReservationFailed(Error):
"""Failed to reserve a suitable node.
This is the base class for all reservation failures.
"""
class NodesNotFound(ReservationFailed):
"""Initial nodes lookup returned an empty list.
:ivar requested_resource_class: Requested resource class.
:ivar requested_conductor_group: Requested conductor group to pick nodes
from.
"""
def __init__(self, resource_class, conductor_group):
message = "No available nodes%(rc)s found%(cg)s" % {
'rc': ' with resource class %s' % resource_class
if resource_class else '',
'cg': ' in conductor group %s' % (conductor_group or '<default>')
if conductor_group is not None else ''
}
self.requested_resource_class = resource_class
self.requested_conductor_group = conductor_group
super(NodesNotFound, self).__init__(message)
class CustomPredicateFailed(ReservationFailed):
"""Custom predicate yielded no nodes.
:ivar nodes: List of nodes that were checked.
"""
def __init__(self, message, nodes):
self.nodes = nodes
super(CustomPredicateFailed, self).__init__(message)
class CapabilitiesNotFound(ReservationFailed):
"""Requested capabilities do not match any nodes.
:ivar requested_capabilities: Requested node's capabilities.
"""
def __init__(self, message, capabilities):
self.requested_capabilities = capabilities
super(CapabilitiesNotFound, self).__init__(message)
class InvalidImage(Error):
"""Requested image is invalid and cannot be used."""
class InvalidNIC(Error):
"""Requested NIC is invalid and cannot be used."""
class UnknownRootDiskSize(Error):
"""Cannot determine the root disk size."""
class InvalidNode(Error):
"""This node cannot be deployed onto."""
class DeploymentFailed(Error):
"""Deployment failed."""
class DeploymentTimeout(DeploymentFailed):
"""Timeout during deployment."""
class InstanceNotFound(Error):
"""Instance not found or node doesn't have an instance associated."""
class NetworkResourceNotFound(Error):
    """A network resource (port, network or subnet) was not found."""
# Deprecated aliases
DeploymentFailure = DeploymentFailed
InvalidInstance = InstanceNotFound
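# Illustrative sketch (not original code): the hierarchy lets callers handle
# all reservation failures in one place while still special-casing capability
# mismatches; "provisioner" is assumed to be a metalsmith Provisioner.
#
#     try:
#         node = provisioner.reserve_node('compute')
#     except CapabilitiesNotFound as exc:
#         print('capability mismatch: %s' % exc.requested_capabilities)
#     except ReservationFailed as exc:
#         print('no node reserved: %s' % exc)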

View File

@@ -1,162 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import json
import logging
from metalsmith import _utils
LOG = logging.getLogger(__name__)
class GenericConfig(object):
"""Configuration of the target instance.
The information attached to this object will be passed via a configdrive
to the instance's first boot script (e.g. cloud-init).
This class represents generic configuration compatible with most first-boot
implementations. Use :py:class:`CloudInitConfig` for features specific to
`cloud-init <https://cloudinit.readthedocs.io/>`_.
:ivar ssh_keys: List of SSH public keys.
:ivar user_data: User data as a string.
:ivar meta_data: Dict of data to add to the generated ``meta_data``
"""
def __init__(self, ssh_keys=None, user_data=None, meta_data=None):
self.ssh_keys = ssh_keys or []
self.user_data = user_data
if meta_data and not isinstance(meta_data, dict):
raise TypeError('Custom meta_data must be a dictionary, '
'got %r' % meta_data)
self.meta_data = meta_data or {}
def generate(self, node, hostname=None, network_data=None):
"""Generate the config drive information.
:param node: `Node` object.
:param hostname: Desired hostname (defaults to node's name or ID).
:param network_data: Network metadata as dictionary
:return: configdrive contents as a dictionary with keys:
``meta_data``
meta data dictionary
``network_data``
network data as dictionary
``user_data``
user data as a string
"""
if not hostname:
hostname = _utils.default_hostname(node)
# NOTE(dtantsur): CirrOS does not understand lists
if isinstance(self.ssh_keys, list):
ssh_keys = {str(i): v for i, v in enumerate(self.ssh_keys)}
else:
ssh_keys = self.ssh_keys
meta_data = self.meta_data.copy()
meta_data.update({
'public_keys': ssh_keys,
'uuid': node.id,
'name': node.name,
'hostname': hostname
})
meta_data.setdefault('launch_index', 0)
meta_data.setdefault('availability_zone', '')
meta_data.setdefault('files', [])
meta_data.setdefault('meta', {})
user_data = self.populate_user_data()
data = {'meta_data': meta_data, 'user_data': user_data}
if network_data:
data['network_data'] = network_data
return data
def populate_user_data(self):
"""Get user data for this configuration.
Can be overridden to provide additional features.
:return: user data as a string.
"""
return self.user_data
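# Illustrative sketch: a minimal config drive with one SSH key; the node
# object and the key below are placeholders.
#
#     config = GenericConfig(ssh_keys=['ssh-ed25519 AAAA... user@host'])
#     data = config.generate(node, hostname='compute-1')
#     # data['meta_data']['public_keys'] == {'0': 'ssh-ed25519 AAAA... user@host'}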
class CloudInitConfig(GenericConfig):
"""Configuration of the target instance using cloud-init.
Compared to :class:`GenericConfig`, this adds support for managing users.
:ivar ssh_keys: List of SSH public keys.
:ivar user_data: Cloud-init cloud-config data as a dictionary.
:ivar meta_data: Dict of data to add to the generated ``meta_data``
"""
def __init__(self, ssh_keys=None, user_data=None, meta_data=None):
if user_data is not None and not isinstance(user_data, dict):
raise TypeError('Custom user data must be a dictionary for '
'CloudInitConfig, got %r' % user_data)
super(CloudInitConfig, self).__init__(ssh_keys, user_data or {},
meta_data=meta_data)
self.users = []
def add_user(self, name, admin=True, password_hash=None, sudo=False,
**kwargs):
"""Add a user to be created on first boot.
:param name: user name.
:param admin: whether to add the user to the admin group (wheel).
:param password_hash: user password hash, if password authentication
is expected.
:param sudo: whether to allow the user sudo without password.
:param kwargs: other arguments to pass.
"""
kwargs['name'] = name
if admin:
kwargs.setdefault('groups', []).append('wheel')
if password_hash:
kwargs['passwd'] = password_hash
if sudo:
kwargs['sudo'] = 'ALL=(ALL) NOPASSWD:ALL'
if self.ssh_keys:
kwargs.setdefault('ssh_authorized_keys', self.ssh_keys)
self.users.append(kwargs)
def populate_user_data(self):
"""Get user data for this configuration.
Takes the custom user data and appends requested users to it.
:return: user data as a string.
"""
if not isinstance(self.user_data, dict):
raise TypeError('Custom user data must be a dictionary for '
'CloudInitConfig, got %r' % self.user_data)
if self.users:
user_data = copy.deepcopy(self.user_data)
user_data.setdefault('users', []).extend(self.users)
else:
user_data = self.user_data
if user_data:
return "#cloud-config\n" + json.dumps(user_data)

View File

@@ -1,356 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Image sources to use when provisioning nodes."""
import abc
import logging
import os
from urllib import parse as urlparse
import warnings
import openstack.exceptions
import requests
from metalsmith import _utils
from metalsmith import exceptions
LOG = logging.getLogger(__name__)
class _Source(object, metaclass=abc.ABCMeta):
def _validate(self, connection, root_size_gb):
"""Validate the source."""
@abc.abstractmethod
def _node_updates(self, connection):
"""Updates required for a node to use this source."""
class GlanceImage(_Source):
"""Image from the OpenStack Image service."""
def __init__(self, image):
"""Create a Glance source.
:param image: `Image` object, ID or name.
"""
self.image = image
self._image_obj = None
def _validate(self, connection, root_size_gb):
if self._image_obj is not None:
return
try:
self._image_obj = connection.image.find_image(self.image,
ignore_missing=False)
except openstack.exceptions.SDKException as exc:
raise exceptions.InvalidImage(
'Cannot find image %(image)s: %(error)s' %
{'image': self.image, 'error': exc})
if (root_size_gb is None
and any(getattr(self._image_obj, x, None) is not None
for x in ('kernel_id', 'ramdisk_id'))):
raise exceptions.UnknownRootDiskSize(
'Partition images require root partition size')
def _node_updates(self, connection):
LOG.debug('Image: %s', self._image_obj)
updates = {
'image_source': self._image_obj.id
}
for prop in ('kernel', 'ramdisk'):
value = getattr(self._image_obj, '%s_id' % prop, None)
if value:
updates[prop] = value
return updates
class HttpWholeDiskImage(_Source):
"""A whole-disk image from HTTP(s) location.
Some deployment methods require a checksum of the image. It has to be
provided via ``checksum`` or ``checksum_url``.
Only ``checksum_url`` (if provided) has to be accessible from the current
machine. Other URLs have to be accessible by the Bare Metal service (more
specifically, by **ironic-conductor** processes).
"""
def __init__(self, url, checksum=None, checksum_url=None,
disk_format=None):
"""Create an HTTP source.
:param url: URL of the image.
:param checksum: SHA256 checksum of the image. Mutually exclusive with
``checksum_url``.
:param checksum_url: URL of the checksum file for the image. Has to
be in the standard format of the ``sha256sum`` tool. Mutually
exclusive with ``checksum``.
:param disk_format: Optional value to set for ``instance_info``
``image_disk_format``.
"""
if (checksum and checksum_url) or (not checksum and not checksum_url):
raise TypeError('Exactly one of checksum and checksum_url has '
'to be specified')
self.url = url
self.checksum = checksum
self.checksum_url = checksum_url
self.disk_format = disk_format
def _validate(self, connection, root_size_gb):
# TODO(dtantsur): should we validate image URLs here? Ironic will do it
# as well, and images do not have to be accessible from where
# metalsmith is running.
if self.checksum:
return
try:
response = requests.get(self.checksum_url)
response.raise_for_status()
checksums = response.text
except requests.RequestException as exc:
raise exceptions.InvalidImage(
'Cannot download checksum file %(url)s: %(err)s' %
{'url': self.checksum_url, 'err': exc})
try:
checksums = _utils.parse_checksums(checksums)
except (ValueError, TypeError) as exc:
raise exceptions.InvalidImage(
'Invalid checksum file %(url)s: %(err)s' %
{'url': self.checksum_url, 'err': exc})
fname = os.path.basename(urlparse.urlparse(self.url).path)
try:
self.checksum = checksums[fname]
except KeyError:
raise exceptions.InvalidImage(
'There is no image checksum for %(fname)s in %(url)s' %
{'fname': fname, 'url': self.checksum_url})
def _node_updates(self, connection):
LOG.debug('Image: %(image)s, checksum %(checksum)s',
{'image': self.url, 'checksum': self.checksum})
updates = {
'image_source': self.url,
'image_checksum': self.checksum,
}
if self.disk_format:
updates['image_disk_format'] = self.disk_format
return updates
class HttpPartitionImage(HttpWholeDiskImage):
"""A partition image from an HTTP(s) location."""
def __init__(self, url, kernel_url, ramdisk_url, checksum=None,
checksum_url=None, disk_format=None):
"""Create an HTTP source.
:param url: URL of the root disk image.
:param kernel_url: URL of the kernel image.
:param ramdisk_url: URL of the initramfs image.
:param checksum: SHA256 checksum of the root disk image. Mutually
exclusive with ``checksum_url``.
:param checksum_url: URL of the checksum file for the root disk image.
Has to be in the standard format of the ``sha256sum`` tool.
Mutually exclusive with ``checksum``.
:param disk_format: Optional value to set for ``instance_info``
``image_disk_format``.
"""
super(HttpPartitionImage, self).__init__(url, checksum=checksum,
checksum_url=checksum_url,
disk_format=disk_format)
self.kernel_url = kernel_url
self.ramdisk_url = ramdisk_url
def _validate(self, connection, root_size_gb):
super(HttpPartitionImage, self)._validate(connection, root_size_gb)
if root_size_gb is None:
raise exceptions.UnknownRootDiskSize(
'Partition images require root partition size')
def _node_updates(self, connection):
updates = super(HttpPartitionImage, self)._node_updates(connection)
updates['kernel'] = self.kernel_url
updates['ramdisk'] = self.ramdisk_url
return updates
class FileWholeDiskImage(_Source):
"""A whole-disk image from a local file location.
.. warning::
The location must be local to the **ironic-conductor** process handling
the node, not to metalsmith itself! Since there is no easy way to
determine which conductor handles a node, the same file must be
available at the same location to all conductors in the same group.
"""
def __init__(self, location, checksum=None):
"""Create a local file source.
:param location: Location of the image, optionally starting with
``file://``.
:param checksum: SHA256 checksum of the image. DEPRECATED: checksums do
not actually work with file images.
"""
if not location.startswith('file://'):
location = 'file://' + location
self.location = location
self.checksum = checksum
if self.checksum:
warnings.warn("Checksums cannot be used with file images",
DeprecationWarning)
def _node_updates(self, connection):
LOG.debug('Image: %s', self.location)
return {
'image_source': self.location,
}
class FilePartitionImage(FileWholeDiskImage):
"""A partition image from a local file location.
.. warning::
The location must be local to the **ironic-conductor** process handling
the node, not to metalsmith itself! Since there is no easy way to
determine which conductor handles a node, the same file must be
available at the same location to all conductors in the same group.
"""
def __init__(self, location, kernel_location, ramdisk_location,
checksum=None):
"""Create a local file source.
:param location: Location of the image, optionally starting with
``file://``.
:param kernel_location: Location of the kernel of the image,
optionally starting with ``file://``.
:param ramdisk_location: Location of the ramdisk of the image,
optionally starting with ``file://``.
:param checksum: SHA256 checksum of the image. DEPRECATED: checksums do
not actually work with file images.
"""
super(FilePartitionImage, self).__init__(location, checksum)
if not kernel_location.startswith('file://'):
kernel_location = 'file://' + kernel_location
if not ramdisk_location.startswith('file://'):
ramdisk_location = 'file://' + ramdisk_location
self.kernel_location = kernel_location
self.ramdisk_location = ramdisk_location
def _validate(self, connection, root_size_gb):
super(FilePartitionImage, self)._validate(connection, root_size_gb)
if root_size_gb is None:
raise exceptions.UnknownRootDiskSize(
'Partition images require root partition size')
def _node_updates(self, connection):
updates = super(FilePartitionImage, self)._node_updates(connection)
updates['kernel'] = self.kernel_location
updates['ramdisk'] = self.ramdisk_location
return updates
def detect(image, kernel=None, ramdisk=None, checksum=None):
"""Try detecting the correct source type from the provided information.
.. note::
        Images without a scheme are assumed to be Glance images.
:param image: Location of the image: ``file://``, ``http://``, ``https://``
link or a Glance image name or UUID.
:param kernel: Location of the kernel (if present): ``file://``,
``http://``, ``https://`` link or a Glance image name or UUID.
:param ramdisk: Location of the ramdisk (if present): ``file://``,
``http://``, ``https://`` link or a Glance image name or UUID.
:param checksum: SHA256 checksum of the image: ``http://`` or ``https://``
link or a string.
:return: A valid source object.
:raises: ValueError if the given parameters do not correspond to any
valid source.
"""
image_type = _link_type(image)
checksum_type = _link_type(checksum)
if image_type == 'glance':
if kernel or ramdisk or checksum:
            raise ValueError('kernel, ramdisk and checksum cannot be '
                             'provided for Glance images')
else:
return GlanceImage(image)
kernel_type = _link_type(kernel)
ramdisk_type = _link_type(ramdisk)
if image_type == 'http' and not checksum:
raise ValueError('checksum is required for HTTP images')
if image_type == 'file':
if (kernel_type not in (None, 'file')
or ramdisk_type not in (None, 'file')):
raise ValueError('kernel and ramdisk can only be files '
'for file images')
if kernel or ramdisk:
return FilePartitionImage(image,
kernel_location=kernel,
ramdisk_location=ramdisk,
checksum=checksum)
else:
return FileWholeDiskImage(image, checksum=checksum)
else:
if (kernel_type not in (None, 'http')
or ramdisk_type not in (None, 'http')
or checksum_type == 'file'):
            raise ValueError('kernel, ramdisk and checksum can only be HTTP '
                             'links for HTTP images')
if checksum_type == 'http':
kwargs = {'checksum_url': checksum}
else:
kwargs = {'checksum': checksum}
# Assume raw image based on file extension
if image.endswith('.raw'):
kwargs['disk_format'] = 'raw'
if kernel or ramdisk:
return HttpPartitionImage(image,
kernel_url=kernel,
ramdisk_url=ramdisk,
**kwargs)
else:
return HttpWholeDiskImage(image, **kwargs)
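# Illustrative examples: the scheme of the location selects the source class;
# the URLs below are placeholders.
#
#     detect('centos9')                   # -> GlanceImage
#     detect('https://example.com/img.raw',
#            checksum='abc123')           # -> HttpWholeDiskImage, raw format
#     detect('file:///images/img.qcow2')  # -> FileWholeDiskImage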
def _link_type(link):
if link is None:
return None
elif link.startswith('http://') or link.startswith('https://'):
return 'http'
elif link.startswith('file://'):
return 'file'
else:
return 'glance'

View File

@@ -1,779 +0,0 @@
# Copyright 2015-2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import io
import json
import tempfile
import unittest
from unittest import mock
from metalsmith import _cmd
from metalsmith import _instance
from metalsmith import _provisioner
from metalsmith import instance_config
from metalsmith import sources
class Base(unittest.TestCase):
def setUp(self):
super(Base, self).setUp()
print_fixture = mock.patch(
'metalsmith._format._print', autospec=True)
self.mock_print = print_fixture.start()
self.addCleanup(print_fixture.stop)
@mock.patch.object(_provisioner, 'Provisioner', autospec=True)
class TestDeploy(Base):
def setUp(self):
super(TestDeploy, self).setUp()
os_conf_fixture = mock.patch.object(
_cmd.os_config, 'OpenStackConfig', autospec=True)
self.mock_os_conf = os_conf_fixture.start()
self.addCleanup(os_conf_fixture.stop)
self._init = False
def _check(self, mock_pr, args, reserve_args, provision_args,
dry_run=False, formatter='value'):
reserve_defaults = dict(resource_class='compute',
conductor_group=None,
capabilities={},
traits=[],
candidates=None,
hostname=None)
reserve_defaults.update(reserve_args)
provision_defaults = dict(image=mock.ANY,
nics=[{'network': 'mynet'}],
root_size_gb=None,
swap_size_mb=None,
config=mock.ANY,
netboot=False,
wait=1800,
clean_up_on_failure=True)
provision_defaults.update(provision_args)
if not self._init:
self._init_instance(mock_pr)
if '--format' not in args and formatter:
args = ['--format', formatter] + args
_cmd.main(args)
mock_pr.assert_called_once_with(
cloud_region=self.mock_os_conf.return_value.get_one.return_value,
dry_run=dry_run)
mock_pr.return_value.reserve_node.assert_called_once_with(
**reserve_defaults)
mock_pr.return_value.provision_node.assert_called_once_with(
mock_pr.return_value.reserve_node.return_value,
**provision_defaults)
def _init_instance(self, mock_pr):
instance = mock_pr.return_value.provision_node.return_value
instance.create_autospec(_instance.Instance)
instance.uuid = '123'
instance.node.name = None
instance.node.id = '123'
instance.allocation.id = '321'
instance.state = _instance.InstanceState.ACTIVE
instance.is_deployed = True
instance.ip_addresses.return_value = {'private': ['1.2.3.4']}
instance.hostname = None
self._init = True
return instance
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_ok(self, mock_log, mock_pr):
self._init_instance(mock_pr)
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
config = mock_pr.return_value.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
mock_log.basicConfig.assert_called_once_with(level=mock_log.WARNING,
format=mock.ANY)
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.GlanceImage)
self.assertEqual("myimg", source.image)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.WARNING).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.CRITICAL).call_list(),
mock_log.getLogger.mock_calls)
self.mock_print.assert_has_calls([
mock.call('123 321 ACTIVE private=1.2.3.4'),
])
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_default_format(self, mock_log, mock_pr):
self._init_instance(mock_pr)
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {}, formatter=None)
config = mock_pr.return_value.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
mock_log.basicConfig.assert_called_once_with(level=mock_log.WARNING,
format=mock.ANY)
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.GlanceImage)
self.assertEqual("myimg", source.image)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.WARNING).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.CRITICAL).call_list(),
mock_log.getLogger.mock_calls)
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_json_format(self, mock_log, mock_pr):
instance = self._init_instance(mock_pr)
instance.to_dict.return_value = {'node': 'dict'}
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
fake_io = io.StringIO()
with mock.patch('sys.stdout', fake_io):
self._check(mock_pr, args, {}, {}, formatter='json')
self.assertEqual(json.loads(fake_io.getvalue()),
{'node': 'dict'})
mock_log.basicConfig.assert_called_once_with(level=mock_log.WARNING,
format=mock.ANY)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.WARNING).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.CRITICAL).call_list(),
mock_log.getLogger.mock_calls)
def test_no_ips(self, mock_pr):
instance = self._init_instance(mock_pr)
instance.ip_addresses.return_value = {}
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
self.mock_print.assert_has_calls([
mock.call('123 321 ACTIVE '),
])
def test_not_deployed_no_ips(self, mock_pr):
instance = self._init_instance(mock_pr)
instance.is_deployed = False
instance.state = _instance.InstanceState.DEPLOYING
instance.ip_addresses.return_value = {}
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
self.mock_print.assert_has_calls([
mock.call('123 321 DEPLOYING '),
])
@mock.patch.object(_cmd.LOG, 'info', autospec=True)
def test_no_logs_not_deployed(self, mock_log, mock_pr):
instance = self._init_instance(mock_pr)
instance.is_deployed = False
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
self.assertFalse(mock_log.called)
self.assertFalse(instance.ip_addresses.called)
def test_args_dry_run(self, mock_pr):
args = ['--dry-run', 'deploy', '--network', 'mynet',
'--image', 'myimg', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {}, dry_run=True)
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_debug(self, mock_log, mock_pr):
args = ['--debug', 'deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
mock_log.basicConfig.assert_called_once_with(level=mock_log.DEBUG,
format=mock.ANY)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.DEBUG).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.INFO).call_list(),
mock_log.getLogger.mock_calls)
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_quiet(self, mock_log, mock_pr):
args = ['--quiet', 'deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
mock_log.basicConfig.assert_called_once_with(level=mock_log.CRITICAL,
format=mock.ANY)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.CRITICAL).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.CRITICAL).call_list(),
mock_log.getLogger.mock_calls)
self.assertFalse(self.mock_print.called)
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_verbose_1(self, mock_log, mock_pr):
args = ['-v', 'deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
mock_log.basicConfig.assert_called_once_with(level=mock_log.WARNING,
format=mock.ANY)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.INFO).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.CRITICAL).call_list(),
mock_log.getLogger.mock_calls)
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_verbose_2(self, mock_log, mock_pr):
args = ['-vv', 'deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
mock_log.basicConfig.assert_called_once_with(level=mock_log.INFO,
format=mock.ANY)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.DEBUG).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.CRITICAL).call_list(),
mock_log.getLogger.mock_calls)
@mock.patch.object(_cmd, 'logging', autospec=True)
def test_args_verbose_3(self, mock_log, mock_pr):
args = ['-vvv', 'deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
mock_log.basicConfig.assert_called_once_with(level=mock_log.DEBUG,
format=mock.ANY)
self.assertEqual(
mock.call('metalsmith').setLevel(mock_log.DEBUG).call_list()
+ mock.call(_cmd._URLLIB3_LOGGER).setLevel(
mock_log.INFO).call_list(),
mock_log.getLogger.mock_calls)
@mock.patch.object(_cmd.LOG, 'critical', autospec=True)
def test_reservation_failure(self, mock_log, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
failure = RuntimeError('boom')
mock_pr.return_value.reserve_node.side_effect = failure
self.assertRaises(SystemExit, _cmd.main, args)
mock_log.assert_called_once_with('%s', failure, exc_info=False)
@mock.patch.object(_cmd.LOG, 'critical', autospec=True)
def test_deploy_failure(self, mock_log, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute']
failure = RuntimeError('boom')
mock_pr.return_value.provision_node.side_effect = failure
self.assertRaises(SystemExit, _cmd.main, args)
mock_log.assert_called_once_with('%s', failure, exc_info=False)
@mock.patch.object(_cmd.LOG, 'critical', autospec=True)
def test_invalid_hostname(self, mock_log, mock_pr):
args = ['deploy', '--hostname', 'n_1', '--image', 'myimg',
'--resource-class', 'compute']
self.assertRaises(SystemExit, _cmd.main, args)
self.assertTrue(mock_log.called)
def test_args_capabilities(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--capability', 'foo=bar', '--capability', 'answer=42',
'--resource-class', 'compute']
self._check(mock_pr, args,
{'capabilities': {'foo': 'bar', 'answer': '42'}}, {})
def test_args_traits(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--trait', 'foo:bar', '--trait', 'answer:42',
'--resource-class', 'compute']
self._check(mock_pr, args,
{'traits': ['foo:bar', 'answer:42']}, {})
def test_args_configdrive(self, mock_pr):
with tempfile.NamedTemporaryFile() as fp:
fp.write(b'foo\n')
fp.flush()
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--ssh-public-key', fp.name, '--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
config = mock_pr.return_value.provision_node.call_args[1]['config']
self.assertEqual(['foo'], config.ssh_keys)
@mock.patch.object(instance_config.CloudInitConfig, 'add_user',
autospec=True)
def test_args_user_name(self, mock_add_user, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--user-name', 'banana', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {})
config = mock_pr.return_value.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
mock_add_user.assert_called_once_with(config, 'banana', sudo=False)
@mock.patch.object(instance_config.CloudInitConfig, 'add_user',
autospec=True)
def test_args_user_name_with_sudo(self, mock_add_user, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--user-name', 'banana', '--resource-class', 'compute',
'--passwordless-sudo']
self._check(mock_pr, args, {}, {})
config = mock_pr.return_value.provision_node.call_args[1]['config']
self.assertEqual([], config.ssh_keys)
mock_add_user.assert_called_once_with(config, 'banana', sudo=True)
def test_args_port(self, mock_pr):
args = ['deploy', '--port', 'myport', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {'nics': [{'port': 'myport'}]})
def test_args_no_nics(self, mock_pr):
args = ['deploy', '--image', 'myimg', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'nics': None})
def test_args_networks_and_ports(self, mock_pr):
args = ['deploy', '--network', 'net1', '--port', 'port1',
'--port', 'port2', '--network', 'net2',
'--image', 'myimg', '--resource-class', 'compute']
self._check(mock_pr, args, {},
{'nics': [{'network': 'net1'}, {'port': 'port1'},
{'port': 'port2'}, {'network': 'net2'}]})
def test_args_ips(self, mock_pr):
args = ['deploy', '--image', 'myimg', '--resource-class', 'compute',
'--ip', 'private:10.0.0.2', '--ip', 'public:8.0.8.0']
self._check(mock_pr, args, {},
{'nics': [{'network': 'private', 'fixed_ip': '10.0.0.2'},
{'network': 'public', 'fixed_ip': '8.0.8.0'}]})
def test_args_subnet(self, mock_pr):
args = ['deploy', '--subnet', 'mysubnet', '--image', 'myimg',
'--resource-class', 'compute']
self._check(mock_pr, args, {}, {'nics': [{'subnet': 'mysubnet'}]})
def test_args_bad_ip(self, mock_pr):
args = ['deploy', '--image', 'myimg', '--resource-class', 'compute',
'--ip', 'private:10.0.0.2', '--ip', 'public']
self.assertRaises(SystemExit, _cmd.main, args)
self.assertFalse(mock_pr.return_value.reserve_node.called)
self.assertFalse(mock_pr.return_value.provision_node.called)
def test_args_hostname(self, mock_pr):
instance = self._init_instance(mock_pr)
instance.hostname = 'host'
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--hostname', 'host', '--resource-class', 'compute']
self._check(mock_pr, args, {'hostname': 'host'}, {})
self.mock_print.assert_has_calls([
mock.call('123 321 host ACTIVE private=1.2.3.4'),
])
def test_args_with_candidates(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--candidate', 'node1', '--candidate', 'node2',
'--resource-class', 'compute']
self._check(mock_pr, args, {'candidates': ['node1', 'node2']}, {})
def test_args_conductor_group(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--conductor-group', 'loc1', '--resource-class', 'compute']
self._check(mock_pr, args, {'conductor_group': 'loc1'}, {})
def test_args_http_image_with_checksum(self, mock_pr):
args = ['deploy', '--image', 'https://example.com/image.img',
'--image-checksum', '95e750180c7921ea0d545c7165db66b8',
'--network', 'mynet', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'image': mock.ANY})
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.HttpWholeDiskImage)
self.assertEqual('https://example.com/image.img', source.url)
self.assertEqual('95e750180c7921ea0d545c7165db66b8', source.checksum)
def test_args_http_image_with_checksum_url(self, mock_pr):
args = ['deploy', '--image', 'http://example.com/image.img',
'--image-checksum', 'http://example.com/CHECKSUMS',
'--network', 'mynet', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'image': mock.ANY})
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.HttpWholeDiskImage)
self.assertEqual('http://example.com/image.img', source.url)
self.assertEqual('http://example.com/CHECKSUMS', source.checksum_url)
@mock.patch.object(_cmd.LOG, 'critical', autospec=True)
def test_args_http_image_without_checksum(self, mock_log, mock_pr):
args = ['deploy', '--image', 'http://example.com/image.img',
'--resource-class', 'compute']
self.assertRaises(SystemExit, _cmd.main, args)
self.assertTrue(mock_log.called)
self.assertFalse(mock_pr.return_value.reserve_node.called)
self.assertFalse(mock_pr.return_value.provision_node.called)
def test_args_http_partition_image(self, mock_pr):
args = ['deploy', '--image', 'https://example.com/image.img',
'--image-kernel', 'https://example.com/kernel',
'--image-ramdisk', 'https://example.com/ramdisk',
'--image-checksum', '95e750180c7921ea0d545c7165db66b8',
'--network', 'mynet', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'image': mock.ANY})
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.HttpPartitionImage)
self.assertEqual('https://example.com/image.img', source.url)
self.assertEqual('https://example.com/kernel', source.kernel_url)
self.assertEqual('https://example.com/ramdisk', source.ramdisk_url)
self.assertEqual('95e750180c7921ea0d545c7165db66b8', source.checksum)
def test_args_file_whole_disk_image(self, mock_pr):
args = ['deploy', '--image', 'file:///var/lib/ironic/image.img',
'--network', 'mynet', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'image': mock.ANY})
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.FileWholeDiskImage)
self.assertEqual('file:///var/lib/ironic/image.img', source.location)
def test_args_file_partition_disk_image(self, mock_pr):
args = ['deploy', '--image', 'file:///var/lib/ironic/image.img',
'--image-kernel', 'file:///var/lib/ironic/image.vmlinuz',
'--image-ramdisk', 'file:///var/lib/ironic/image.initrd',
'--network', 'mynet', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'image': mock.ANY})
source = mock_pr.return_value.provision_node.call_args[1]['image']
self.assertIsInstance(source, sources.FilePartitionImage)
self.assertEqual('file:///var/lib/ironic/image.img', source.location)
self.assertEqual('file:///var/lib/ironic/image.vmlinuz',
source.kernel_location)
self.assertEqual('file:///var/lib/ironic/image.initrd',
source.ramdisk_location)
@mock.patch.object(_cmd.LOG, 'critical', autospec=True)
def test_args_file_image_with_incorrect_kernel(self, mock_log, mock_pr):
args = ['deploy', '--image', 'file:///var/lib/ironic/image.img',
'--image-kernel', 'http://example.com/image.vmlinuz',
'--image-checksum', '95e750180c7921ea0d545c7165db66b8',
'--resource-class', 'compute']
self.assertRaises(SystemExit, _cmd.main, args)
self.assertTrue(mock_log.called)
self.assertFalse(mock_pr.return_value.reserve_node.called)
self.assertFalse(mock_pr.return_value.provision_node.called)
def test_args_custom_wait(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--wait', '3600', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'wait': 3600})
def test_args_no_wait(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--no-wait', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'wait': None})
def test_with_root_size(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--root-size', '100', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'root_size_gb': 100})
def test_with_swap_size(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--swap-size', '4096', '--resource-class', 'compute']
self._check(mock_pr, args, {}, {'swap_size_mb': 4096})
def test_no_clean_up(self, mock_pr):
args = ['deploy', '--network', 'mynet', '--image', 'myimg',
'--resource-class', 'compute', '--no-clean-up']
self._check(mock_pr, args, {}, {'clean_up_on_failure': False})
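
# Illustrative sketch, not part of the original test module: the
# flag-to-API mapping that _check() asserts above. A plain
# ``metalsmith deploy --network mynet --image myimg --resource-class
# compute`` translated, via the now-removed Provisioner API, into roughly:
#
#   prov = Provisioner(cloud_region=region, dry_run=False)
#   node = prov.reserve_node(resource_class='compute', capabilities={},
#                            traits=[], conductor_group=None,
#                            candidates=None, hostname=None)
#   prov.provision_node(node, image=sources.GlanceImage('myimg'),
#                       nics=[{'network': 'mynet'}], root_size_gb=None,
#                       swap_size_mb=None, netboot=False, wait=1800,
#                       clean_up_on_failure=True, config=...)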
@mock.patch.object(_provisioner, 'Provisioner', autospec=True)
@mock.patch.object(_cmd.os_config, 'OpenStackConfig', autospec=True)
class TestUndeploy(Base):
def test_ok(self, mock_os_conf, mock_pr):
node = mock_pr.return_value.unprovision_node.return_value
node.id = '123'
node.name = None
node.provision_state = 'cleaning'
args = ['undeploy', '123456']
_cmd.main(args)
mock_pr.assert_called_once_with(
cloud_region=mock_os_conf.return_value.get_one.return_value,
dry_run=False)
mock_pr.return_value.unprovision_node.assert_called_once_with(
'123456', wait=None
)
self.mock_print.assert_called_once_with(
'Unprovisioning started for node %(node)s',
node='123')
def test_custom_wait(self, mock_os_conf, mock_pr):
node = mock_pr.return_value.unprovision_node.return_value
node.id = '123'
node.name = None
node.provision_state = 'available'
args = ['undeploy', '--wait', '1800', '123456']
_cmd.main(args)
mock_pr.assert_called_once_with(
cloud_region=mock_os_conf.return_value.get_one.return_value,
dry_run=False)
mock_pr.return_value.unprovision_node.assert_called_once_with(
'123456', wait=1800
)
self.mock_print.assert_called_once_with(
'Successfully unprovisioned node %(node)s',
node='123')
def test_dry_run(self, mock_os_conf, mock_pr):
args = ['--dry-run', 'undeploy', '123456']
_cmd.main(args)
mock_pr.assert_called_once_with(
cloud_region=mock_os_conf.return_value.get_one.return_value,
dry_run=True)
mock_pr.return_value.unprovision_node.assert_called_once_with(
'123456', wait=None
)
def test_quiet(self, mock_os_conf, mock_pr):
args = ['--quiet', 'undeploy', '123456']
_cmd.main(args)
mock_pr.assert_called_once_with(
cloud_region=mock_os_conf.return_value.get_one.return_value,
dry_run=False)
mock_pr.return_value.unprovision_node.assert_called_once_with(
'123456', wait=None
)
self.assertFalse(self.mock_print.called)
def test_json(self, mock_os_conf, mock_pr):
node = mock_pr.return_value.unprovision_node.return_value
node.to_dict.return_value = {'node': 'dict'}
args = ['--format', 'json', 'undeploy', '123456']
fake_io = io.StringIO()
with mock.patch('sys.stdout', fake_io):
_cmd.main(args)
self.assertEqual(json.loads(fake_io.getvalue()),
{'node': {'node': 'dict'}})
mock_pr.assert_called_once_with(
cloud_region=mock_os_conf.return_value.get_one.return_value,
dry_run=False)
mock_pr.return_value.unprovision_node.assert_called_once_with(
'123456', wait=None
)
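
# Illustrative sketch, not from the original file: the two undeploy modes
# asserted above, assuming the removed Provisioner API.
#
#   prov.unprovision_node('123456', wait=None)   # start and return
#   prov.unprovision_node('123456', wait=1800)   # block until cleaned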
@mock.patch.object(_provisioner, 'Provisioner', autospec=True)
@mock.patch.object(_cmd.os_config, 'OpenStackConfig', autospec=True)
class TestShowWait(Base):
def setUp(self):
super(TestShowWait, self).setUp()
self.instances = [
mock.Mock(
spec=_instance.Instance,
hostname='hostname%d' % i,
uuid=str(i),
is_deployed=(i == 1),
state=_instance.InstanceState.ACTIVE
if i == 1 else _instance.InstanceState.DEPLOYING,
allocation=mock.Mock(spec=['id']) if i == 1 else None,
**{'ip_addresses.return_value': {'private': ['1.2.3.4']}}
)
for i in (1, 2)
]
for inst in self.instances:
inst.node.id = inst.uuid
inst.node.name = 'name-%s' % inst.uuid
if inst.allocation:
inst.allocation.id = '%s00' % inst.uuid
inst.to_dict.return_value = {inst.node.id: inst.node.name}
def test_show(self, mock_os_conf, mock_pr):
mock_pr.return_value.show_instances.return_value = self.instances
args = ['--format', 'value', 'show', 'uuid1', 'hostname2']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('1 name-1 100 hostname1 ACTIVE private=1.2.3.4'),
mock.call('2 name-2 hostname2 DEPLOYING '),
])
mock_pr.return_value.show_instances.assert_called_once_with(
['uuid1', 'hostname2'])
def test_list(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = self.instances
args = ['--format', 'value', 'list']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('1 name-1 100 hostname1 ACTIVE private=1.2.3.4'),
mock.call('2 name-2 hostname2 DEPLOYING '),
])
mock_pr.return_value.list_instances.assert_called_once_with()
def test_list_sort(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = self.instances
args = ['--format', 'value', '--sort-column', 'IP Addresses', 'list']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('2 name-2 hostname2 DEPLOYING '),
mock.call('1 name-1 100 hostname1 ACTIVE private=1.2.3.4'),
])
mock_pr.return_value.list_instances.assert_called_once_with()
def test_list_one_column(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = self.instances
args = ['--format', 'value', '--column', 'Node Name', 'list']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('name-1'),
mock.call('name-2'),
])
mock_pr.return_value.list_instances.assert_called_once_with()
def test_list_two_columns(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = self.instances
args = ['--format', 'value', '--column', 'Node Name',
'--column', 'Allocation UUID', 'list']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('name-1 100'),
mock.call('name-2 '),
])
mock_pr.return_value.list_instances.assert_called_once_with()
def test_list_empty(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = []
args = ['--format', 'value', 'list']
_cmd.main(args)
self.assertFalse(self.mock_print.called)
mock_pr.return_value.list_instances.assert_called_once_with()
def test_wait(self, mock_os_conf, mock_pr):
mock_pr.return_value.wait_for_provisioning.return_value = (
self.instances)
args = ['--format', 'value', 'wait', 'uuid1', 'hostname2']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('1 name-1 100 hostname1 ACTIVE private=1.2.3.4'),
mock.call('2 name-2 hostname2 DEPLOYING '),
])
mock_pr.return_value.wait_for_provisioning.assert_called_once_with(
['uuid1', 'hostname2'], timeout=None)
def test_wait_custom_timeout(self, mock_os_conf, mock_pr):
mock_pr.return_value.wait_for_provisioning.return_value = (
self.instances)
args = ['--format', 'value', 'wait', '--timeout', '42',
'uuid1', 'hostname2']
_cmd.main(args)
self.mock_print.assert_has_calls([
mock.call('1 name-1 100 hostname1 ACTIVE private=1.2.3.4'),
mock.call('2 name-2 hostname2 DEPLOYING '),
])
mock_pr.return_value.wait_for_provisioning.assert_called_once_with(
['uuid1', 'hostname2'], timeout=42)
def test_show_table(self, mock_os_conf, mock_pr):
mock_pr.return_value.show_instances.return_value = self.instances
args = ['show', 'uuid1', 'hostname2']
_cmd.main(args)
mock_pr.return_value.show_instances.assert_called_once_with(
['uuid1', 'hostname2'])
def test_show_json(self, mock_os_conf, mock_pr):
mock_pr.return_value.show_instances.return_value = self.instances
args = ['--format', 'json', 'show', 'uuid1', 'hostname2']
fake_io = io.StringIO()
with mock.patch('sys.stdout', fake_io):
_cmd.main(args)
self.assertEqual(json.loads(fake_io.getvalue()),
{'hostname1': {'1': 'name-1'},
'hostname2': {'2': 'name-2'}})
def test_list_table(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = self.instances
args = ['list']
_cmd.main(args)
mock_pr.return_value.list_instances.assert_called_once_with()
def test_list_table_empty(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = []
args = ['list']
_cmd.main(args)
self.mock_print.assert_called_once_with('')
mock_pr.return_value.list_instances.assert_called_once_with()
def test_list_json(self, mock_os_conf, mock_pr):
mock_pr.return_value.list_instances.return_value = self.instances
args = ['--format', 'json', 'list']
fake_io = io.StringIO()
with mock.patch('sys.stdout', fake_io):
_cmd.main(args)
self.assertEqual(json.loads(fake_io.getvalue()),
{'hostname1': {'1': 'name-1'},
'hostname2': {'2': 'name-2'}})
mock_pr.return_value.list_instances.assert_called_once_with()
def test_wait_json(self, mock_os_conf, mock_pr):
mock_pr.return_value.wait_for_provisioning.return_value = (
self.instances)
args = ['--format', 'json', 'wait', 'uuid1', 'hostname2']
fake_io = io.StringIO()
with mock.patch('sys.stdout', fake_io):
_cmd.main(args)
self.assertEqual(json.loads(fake_io.getvalue()),
{'hostname1': {'1': 'name-1'},
'hostname2': {'2': 'name-2'}})
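
For reference, a standalone sketch (not from the original suite) of the JSON shape the show, list, and wait tests above assert: a mapping of hostname to each instance's to_dict() payload, using the hypothetical fixture values.

import json

listing = {
    'hostname1': {'1': 'name-1'},
    'hostname2': {'2': 'name-2'},
}
print(json.dumps(listing, indent=2, sort_keys=True))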

View File

@@ -1,167 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from metalsmith import _instance
from metalsmith.test import test_provisioner
class TestInstanceIPAddresses(test_provisioner.Base):
def setUp(self):
super(TestInstanceIPAddresses, self).setUp()
self.instance = _instance.Instance(self.api, self.node)
self.ports = [
mock.Mock(spec=['network_id', 'fixed_ips', 'network'],
network_id=n, fixed_ips=[{'ip_address': ip}])
for n, ip in [('0', '192.168.0.1'), ('1', '10.0.0.2')]
]
self.api.network.ports.return_value = self.ports
self.nets = [
mock.Mock(spec=['id', 'name'], id=str(i)) for i in range(2)
]
for n in self.nets:
n.name = 'name-%s' % n.id
self.api.network.get_network.side_effect = self.nets
def test_ip_addresses(self):
ips = self.instance.ip_addresses()
self.assertEqual({'name-0': ['192.168.0.1'],
'name-1': ['10.0.0.2']},
ips)
def test_missing_ip(self):
self.ports[0].fixed_ips = []
ips = self.instance.ip_addresses()
self.assertEqual({'name-0': [],
'name-1': ['10.0.0.2']}, ips)
def test_missing_port(self):
self.ports = [
mock.Mock(spec=['network_id', 'fixed_ips', 'network'],
network_id='0',
fixed_ips=[{'ip_address': '192.168.0.1'}]),
]
self.api.network.ports.return_value = self.ports
ips = self.instance.ip_addresses()
self.assertEqual({'name-0': ['192.168.0.1']}, ips)
class TestInstanceStates(test_provisioner.Base):
def setUp(self):
super(TestInstanceStates, self).setUp()
self.instance = _instance.Instance(self.api, self.node)
def test_state_deploying(self):
self.node.provision_state = 'wait call-back'
self.assertEqual(_instance.InstanceState.DEPLOYING,
self.instance.state)
self.assertFalse(self.instance.is_deployed)
self.assertTrue(self.instance.is_healthy)
self.assertTrue(self.instance.state.is_healthy)
self.assertFalse(self.instance.state.is_deployed)
def test_state_deploying_when_available(self):
self.node.provision_state = 'available'
self.node.instance_id = 'abcd'
self.assertEqual(_instance.InstanceState.DEPLOYING,
self.instance.state)
self.assertFalse(self.instance.is_deployed)
self.assertTrue(self.instance.is_healthy)
def test_state_unknown_when_available(self):
self.node.provision_state = 'available'
self.node.instance_id = None
self.assertEqual(_instance.InstanceState.UNKNOWN, self.instance.state)
self.assertFalse(self.instance.is_deployed)
self.assertFalse(self.instance.is_healthy)
self.assertFalse(self.instance.state.is_healthy)
def test_state_deploying_maintenance(self):
self.node.is_maintenance = True
self.node.provision_state = 'wait call-back'
self.assertEqual(_instance.InstanceState.DEPLOYING,
self.instance.state)
self.assertFalse(self.instance.is_deployed)
self.assertFalse(self.instance.is_healthy)
# The state itself is considered healthy
self.assertTrue(self.instance.state.is_healthy)
def test_state_active(self):
self.node.provision_state = 'active'
self.assertEqual(_instance.InstanceState.ACTIVE, self.instance.state)
self.assertTrue(self.instance.is_deployed)
self.assertTrue(self.instance.is_healthy)
self.assertTrue(self.instance.state.is_deployed)
def test_state_maintenance(self):
self.node.is_maintenance = True
self.node.provision_state = 'active'
self.assertEqual(_instance.InstanceState.MAINTENANCE,
self.instance.state)
self.assertTrue(self.instance.is_deployed)
self.assertFalse(self.instance.is_healthy)
self.assertFalse(self.instance.state.is_healthy)
def test_state_error(self):
self.node.provision_state = 'deploy failed'
self.assertEqual(_instance.InstanceState.ERROR, self.instance.state)
self.assertFalse(self.instance.is_deployed)
self.assertFalse(self.instance.is_healthy)
self.assertFalse(self.instance.state.is_healthy)
def test_state_unknown(self):
self.node.provision_state = 'enroll'
self.assertEqual(_instance.InstanceState.UNKNOWN, self.instance.state)
self.assertFalse(self.instance.is_deployed)
self.assertFalse(self.instance.is_healthy)
self.assertFalse(self.instance.state.is_healthy)
@mock.patch.object(_instance.Instance, 'ip_addresses', autospec=True)
def test_to_dict(self, mock_ips):
self.node.provision_state = 'wait call-back'
self.node.to_dict.return_value = {'node': 'dict'}
mock_ips.return_value = {'private': ['1.2.3.4']}
to_dict = self.instance.to_dict()
self.assertEqual({'allocation': None,
'hostname': self.node.name,
'ip_addresses': {'private': ['1.2.3.4']},
'node': {'node': 'dict'},
'state': 'deploying',
'uuid': self.node.id},
to_dict)
# States are converted to strings
self.assertIsInstance(to_dict['state'], str)
@mock.patch.object(_instance.Instance, 'ip_addresses', autospec=True)
def test_to_dict_with_allocation(self, mock_ips):
self.node.provision_state = 'wait call-back'
self.node.to_dict.return_value = {'node': 'dict'}
mock_ips.return_value = {'private': ['1.2.3.4']}
self.instance._allocation = mock.Mock()
self.instance._allocation.name = 'host'
self.instance._allocation.to_dict.return_value = {'alloc': 'dict'}
to_dict = self.instance.to_dict()
self.assertEqual({'allocation': {'alloc': 'dict'},
'hostname': 'host',
'ip_addresses': {'private': ['1.2.3.4']},
'node': {'node': 'dict'},
'state': 'deploying',
'uuid': self.node.id},
to_dict)
# States are converted to strings
self.assertIsInstance(to_dict['state'], str)
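
A standalone sketch, using the hypothetical fixture values above, of the Instance.to_dict() payload these tests assert:

expected = {
    'allocation': None,
    'hostname': 'node name',
    'ip_addresses': {'private': ['1.2.3.4']},
    'node': {'node': 'dict'},
    'state': 'deploying',   # InstanceState values serialize to strings
    'uuid': '1234',
}
assert isinstance(expected['state'], str)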

View File

@@ -1,158 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import unittest
from unittest import mock
from metalsmith import instance_config
class TestGenericConfig(unittest.TestCase):
CLASS = instance_config.GenericConfig
def setUp(self):
super(TestGenericConfig, self).setUp()
self.node = mock.Mock(id='1234')
self.node.name = 'node name'
def _check(self, config, expected_metadata, expected_userdata=None,
cloud_init=True, hostname=None, network_data=None,
expected_network_data=None):
expected_m = {'public_keys': {},
'uuid': self.node.id,
'name': self.node.name,
'hostname': self.node.id,
'launch_index': 0,
'availability_zone': '',
'files': [],
'meta': {}}
expected_m.update(expected_metadata)
result = config.generate(self.node, hostname, network_data)
self.assertEqual(expected_m, result['meta_data'])
user_data = result['user_data']
if expected_userdata:
self.assertIsNotNone(user_data)
if cloud_init:
header, user_data = user_data.split('\n', 1)
self.assertEqual('#cloud-config', header)
user_data = json.loads(user_data)
self.assertEqual(expected_userdata, user_data)
network_data = result.get('network_data')
if expected_network_data:
self.assertIsNotNone(network_data)
self.assertEqual(expected_network_data, network_data)
def test_default(self):
config = self.CLASS()
self._check(config, {})
def test_name_as_hostname(self):
self.node.name = 'example.com'
config = self.CLASS()
self._check(config, {'hostname': 'example.com'})
def test_explicit_hostname(self):
config = self.CLASS()
self._check(config, {'hostname': 'example.com'},
hostname='example.com')
def test_ssh_keys(self):
config = self.CLASS(ssh_keys=['abc', 'def'])
self._check(config, {'public_keys': {'0': 'abc', '1': 'def'}})
def test_ssh_keys_as_dict(self):
config = self.CLASS(ssh_keys={'default': 'abc'})
self._check(config, {'public_keys': {'default': 'abc'}})
def test_custom_user_data(self):
config = self.CLASS(user_data='{"answer": 42}')
self._check(config, {}, {"answer": 42}, cloud_init=False)
def test_custom_metadata(self):
config = self.CLASS(meta_data={"foo": "bar"})
self._check(config, {"foo": "bar"}, cloud_init=False)
def test_custom_metadata_not_dict(self):
self.assertRaises(TypeError, self.CLASS, meta_data="foobar")
def test_custom_network_data(self):
config = self.CLASS()
data = {'net': 'data'}
self._check(config, {}, network_data=data, expected_network_data=data)
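
# Illustrative note, not in the original file: GenericConfig.generate()
# returns a config-drive dict shaped roughly like
#
#   {'meta_data': {'uuid': ..., 'name': ..., 'hostname': ...,
#                  'public_keys': {...}, 'launch_index': 0, ...},
#    'user_data': '#cloud-config\n{...}' or None,
#    'network_data': {...} or None}
#
# which is exactly what _check() above unpacks and compares.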
class TestCloudInitConfig(TestGenericConfig):
CLASS = instance_config.CloudInitConfig
def test_add_user(self):
config = self.CLASS()
config.add_user('admin')
self._check(config, {},
{'users': [{'name': 'admin',
'groups': ['wheel']}]})
def test_add_user_admin(self):
config = self.CLASS()
config.add_user('admin', admin=False)
self._check(config, {},
{'users': [{'name': 'admin'}]})
def test_add_user_sudo(self):
config = self.CLASS()
config.add_user('admin', sudo=True)
self._check(config, {},
{'users': [{'name': 'admin',
'groups': ['wheel'],
'sudo': 'ALL=(ALL) NOPASSWD:ALL'}]})
def test_add_user_passwd(self):
config = self.CLASS()
config.add_user('admin', password_hash='123')
self._check(config, {},
{'users': [{'name': 'admin',
'groups': ['wheel'],
'passwd': '123'}]})
def test_add_user_with_keys(self):
config = self.CLASS(ssh_keys=['abc', 'def'])
config.add_user('admin')
self._check(config, {'public_keys': {'0': 'abc', '1': 'def'}},
{'users': [{'name': 'admin',
'groups': ['wheel'],
'ssh_authorized_keys': ['abc', 'def']}]})
# Overriding tests since CloudInitConfig does not support plain strings
# for user_data, only dictionaries.
def test_custom_user_data(self):
config = self.CLASS(user_data={'answer': 42})
self._check(config, {}, {'answer': 42})
def test_custom_user_data_with_users(self):
config = self.CLASS(user_data={'answer': 42})
config.add_user('admin')
self._check(config, {},
{'users': [{'name': 'admin',
'groups': ['wheel']}],
'answer': 42})
def test_user_data_not_dict(self):
self.assertRaises(TypeError, self.CLASS, user_data="string")
config = self.CLASS()
config.user_data = "string"
self.assertRaises(TypeError, config.populate_user_data)
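
A minimal standalone sketch of the #cloud-config payload that add_user() assembles in the sudo case above, assuming the same wheel-group default shown in the expected data:

import json

user = {'name': 'admin',
        'groups': ['wheel'],
        'sudo': 'ALL=(ALL) NOPASSWD:ALL'}
user_data = '#cloud-config\n' + json.dumps({'users': [user]})
print(user_data)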

View File

@@ -1,308 +0,0 @@
# Copyright 2019 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
from unittest import mock
from metalsmith_ansible.ansible_plugins.modules \
import metalsmith_instances as mi
from openstack import exceptions as os_exc
from metalsmith import exceptions as exc
class TestMetalsmithInstances(unittest.TestCase):
@mock.patch('metalsmith.sources.detect', autospec=True)
def test_get_source(self, mock_detect):
mi._get_source({
'image': {'href': 'overcloud-full'}
})
mi._get_source({
'image': {
'href': 'file://overcloud-full.qcow2',
'checksum': 'asdf',
'kernel': 'file://overcloud-full.vmlinuz',
'ramdisk': 'file://overcloud-full.initrd'
}
})
mock_detect.assert_has_calls([
mock.call(
image='overcloud-full',
checksum=None,
kernel=None,
ramdisk=None
),
mock.call(
image='file://overcloud-full.qcow2',
checksum='asdf',
kernel='file://overcloud-full.vmlinuz',
ramdisk='file://overcloud-full.initrd'
)
])
def test_reserve(self):
provisioner = mock.Mock()
instances = [{
'hostname': 'node-1',
'name': 'node',
'candidates': ['other_node'],
'resource_class': 'boxen',
'capabilities': {'foo': 'bar'},
'traits': ['this', 'that'],
'conductor_group': 'group'
}, {}]
reserved = [
mock.Mock(id=1),
mock.Mock(id=2),
]
# test reserve success
provisioner.reserve_node.side_effect = reserved
result = mi.reserve(provisioner, instances, True)
provisioner.reserve_node.assert_has_calls([
mock.call(
hostname='node-1',
candidates=['other_node', 'node'],
capabilities={'foo': 'bar'},
conductor_group='group',
resource_class='boxen',
traits=['this', 'that']
),
mock.call(
hostname=None,
candidates=None,
capabilities=None,
conductor_group=None,
resource_class='baremetal',
traits=None
)
])
self.assertTrue(result[0])
self.assertEqual(reserved, result[1])
# test reserve failure with cleanup
instances = [{}, {}, {}]
reserved = [
mock.Mock(id=1),
mock.Mock(id=2),
exc.ReservationFailed('ouch')
]
provisioner.reserve_node.side_effect = reserved
self.assertRaises(exc.ReservationFailed, mi.reserve,
provisioner, instances, True)
provisioner.unprovision_node.assert_has_calls([
mock.call(1),
mock.call(2)
])
@mock.patch('metalsmith.sources.detect', autospec=True)
@mock.patch('metalsmith.instance_config.CloudInitConfig', autospec=True)
def test_provision(self, mock_config, mock_detect):
config = mock_config.return_value
image = mock_detect.return_value
provisioner = mock.Mock()
instances = [{
'name': 'node-1',
'hostname': 'overcloud-controller-1',
'image': {'href': 'overcloud-full'}
}, {
'name': 'node-2',
'hostname': 'overcloud-controller-2',
'image': {'href': 'overcloud-full'},
'nics': {'network': 'ctlplane'},
'root_size_gb': 200,
'swap_size_mb': 16,
'netboot': True,
'ssh_public_keys': 'abcd',
'user_name': 'centos',
'passwordless_sudo': False,
'config_drive': {
'meta_data': {'foo': 'bar'},
'cloud_config': {'bootcmd': ['echo henlo world']}
}
}, {
'name': 'node-3',
'hostname': 'overcloud-controller-3',
'image': {'href': 'overcloud-full'}
}, {
'name': 'node-4',
'hostname': 'overcloud-compute-0',
'image': {'href': 'overcloud-full'}
}]
provisioned = [
mock.Mock(uuid=1),
mock.Mock(uuid=2),
mock.Mock(uuid=3),
mock.Mock(uuid=4),
]
# test provision success
provisioner.provision_node.side_effect = provisioned
# provision 4 nodes with concurrency of 2
result = mi.provision(provisioner, instances, 3600, 2, True, True)
provisioner.provision_node.assert_has_calls([
mock.call(
'node-1',
config=config,
hostname='overcloud-controller-1',
image=image,
netboot=False,
nics=None,
root_size_gb=None,
swap_size_mb=None
),
mock.call(
'node-2',
config=config,
hostname='overcloud-controller-2',
image=image,
netboot=True,
nics={'network': 'ctlplane'},
root_size_gb=200,
swap_size_mb=16
),
mock.call(
'node-3',
config=config,
hostname='overcloud-controller-3',
image=image,
netboot=False,
nics=None,
root_size_gb=None,
swap_size_mb=None
),
mock.call(
'node-4',
config=config,
hostname='overcloud-compute-0',
image=image,
netboot=False,
nics=None,
root_size_gb=None,
swap_size_mb=None
),
], any_order=True)
mock_config.assert_has_calls([
mock.call(ssh_keys=None, user_data=None, meta_data=None),
mock.call(ssh_keys='abcd',
user_data={'bootcmd': ['echo henlo world']},
meta_data={'foo': 'bar'})
])
config.add_user.assert_called_once_with(
'centos', admin=True, sudo=False)
mock_detect.assert_has_calls([
mock.call(
image='overcloud-full',
checksum=None,
kernel=None,
ramdisk=None
),
mock.call(
image='overcloud-full',
checksum=None,
kernel=None,
ramdisk=None
),
mock.call(
image='overcloud-full',
checksum=None,
kernel=None,
ramdisk=None
),
mock.call(
image='overcloud-full',
checksum=None,
kernel=None,
ramdisk=None
),
])
self.assertTrue(result[0])
self.assertEqual(provisioned, result[1])
# test provision failure with cleanup
instances = [{
'name': 'node-1',
'hostname': 'overcloud-controller-1',
'image': {'href': 'overcloud-full'}
}, {
'name': 'node-2',
'hostname': 'overcloud-controller-2',
'image': {'href': 'overcloud-full'},
}, {
'name': 'node-3',
'hostname': 'overcloud-controller-3',
'image': {'href': 'overcloud-full'},
}]
provisioned = [
mock.Mock(uuid=1),
mock.Mock(uuid=2),
exc.Error('ouch')
]
provisioner.provision_node.side_effect = provisioned
self.assertRaises(exc.Error, mi.provision,
provisioner, instances, 3600, 20, True, True)
provisioner.unprovision_node.assert_has_calls([
mock.call(1),
mock.call(2)
])
@mock.patch('metalsmith.sources.detect', autospec=True)
@mock.patch('metalsmith.instance_config.CloudInitConfig', autospec=True)
def test_unprovision(self, mock_config, mock_detect):
mock_node1 = mock.Mock(name='node-1')
mock_node2 = mock.Mock(name='node-2')
mock_allocation1 = mock.Mock(name='overcloud-controller-1',
node_id='aaaa')
connection = mock.Mock()
provisioner = mock.Mock(connection=connection)
connection.baremetal.get_allocation.side_effect = [
mock_allocation1, os_exc.ResourceNotFound()]
connection.baremetal.get_node.side_effect = [
mock_node1, mock_node2, os_exc.ResourceNotFound()]
instances = [{
'name': 'node-1',
'hostname': 'overcloud-controller-1',
'image': {'href': 'overcloud-full'},
'state': 'absent'
}, {
'name': 'node-2',
'image': {'href': 'overcloud-full'},
'state': 'absent'
}, {
'name': 'node-3',
'hostname': 'overcloud-controller-3',
'image': {'href': 'overcloud-full'},
'state': 'absent'
}]
self.assertTrue(mi.unprovision(provisioner, instances))
provisioner.unprovision_node.assert_has_calls([
mock.call(mock_node1),
mock.call(mock_node2)
])
connection.baremetal.get_allocation.assert_has_calls([
mock.call('overcloud-controller-1'),
mock.call('overcloud-controller-3')
])
connection.baremetal.get_node.assert_has_calls([
mock.call('aaaa'),
mock.call('node-2'),
mock.call('node-3')
])
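
For reference, a standalone sketch of one entry in the instances list these module tests feed to reserve() and provision(); every key is exercised above, the values are the hypothetical fixtures, and a 'state' of 'absent' (see test_unprovision) routes the entry to unprovision() instead:

instance = {
    'name': 'node-2',
    'hostname': 'overcloud-controller-2',
    'image': {'href': 'overcloud-full'},
    'nics': {'network': 'ctlplane'},
    'root_size_gb': 200,
    'swap_size_mb': 16,
    'netboot': True,
    'ssh_public_keys': 'abcd',
    'user_name': 'centos',
    'passwordless_sudo': False,
    'config_drive': {'meta_data': {'foo': 'bar'},
                     'cloud_config': {'bootcmd': ['echo henlo world']}},
}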

View File

@@ -1,131 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from metalsmith import _network_metadata
class TestMetadataAdd(unittest.TestCase):
def test_metadata_add_links(self):
port = mock.Mock()
network = mock.Mock()
port.id = 'port_id'
port.mac_address = 'aa:bb:cc:dd:ee:ff'
network.mtu = 1500
links = []
ir_mac = '11:22:33:44:55:66'
expected = [{'id': 'port_id',
'type': 'phy',
'mtu': 1500,
'ethernet_mac_address': ir_mac}]
_network_metadata.metadata_add_links(links, port, network, ir_mac)
self.assertEqual(expected, links)
def test_metadata_add_services(self):
subnet_a = mock.Mock()
subnet_b = mock.Mock()
subnet_a.dns_nameservers = ['192.0.2.1', '192.0.2.2']
subnet_b.dns_nameservers = ['192.0.2.11', '192.0.2.22']
subnets = [subnet_a, subnet_b]
services = []
expected = [{'address': '192.0.2.1', 'type': 'dns'},
{'address': '192.0.2.2', 'type': 'dns'},
{'address': '192.0.2.11', 'type': 'dns'},
{'address': '192.0.2.22', 'type': 'dns'}]
_network_metadata.metadata_add_services(services, subnets)
self.assertEqual(expected, services)
def test_metadata_add_network_ipv4_dhcp(self):
idx = 1
fixed_ip = {'ip_address': '192.0.2.100', 'subnet_id': 'subnet_id'}
port = mock.Mock()
port.id = 'port_id'
subnet = mock.Mock()
subnet.cidr = '192.0.2.0/26'
subnet.ip_version = 4
subnet.is_dhcp_enabled = True
subnet.host_routes = [
{'destination': '192.0.2.64/26', 'nexthop': '192.0.2.1'},
{'destination': '192.0.2.128/26', 'nexthop': '192.0.2.1'}
]
subnet.dns_nameservers = ['192.0.2.11', '192.0.2.22']
network = mock.Mock()
network.id = 'network_id'
network.name = 'net_name'
networks = []
expected = [{'id': 'net_name1',
'ip_address': '192.0.2.100',
'link': 'port_id',
'netmask': '255.255.255.192',
'network_id': 'network_id',
'routes': [{'gateway': '192.0.2.1',
'netmask': '255.255.255.192',
'network': '192.0.2.64'},
{'gateway': '192.0.2.1',
'netmask': '255.255.255.192',
'network': '192.0.2.128'}],
'services': [{'address': '192.0.2.11', 'type': 'dns'},
{'address': '192.0.2.22', 'type': 'dns'}],
'type': 'ipv4_dhcp'}]
_network_metadata.metadata_add_network(networks, idx, fixed_ip, port,
network, subnet)
self.assertEqual(expected, networks)
def test_metadata_add_network_ipv6_stateful(self):
idx = 1
fixed_ip = {'ip_address': '2001:db8:1::10', 'subnet_id': 'subnet_id'}
port = mock.Mock()
port.id = 'port_id'
subnet = mock.Mock()
subnet.cidr = '2001:db8:1::/64'
subnet.ip_version = 6
subnet.ipv6_address_mode = 'dhcpv6-stateful'
subnet.host_routes = [
{'destination': '2001:db8:2::/64', 'nexthop': '2001:db8:1::1'},
{'destination': '2001:db8:3::/64', 'nexthop': '2001:db8:1::1'}
]
subnet.dns_nameservers = ['2001:db8:1::ee', '2001:db8:2::ff']
network = mock.Mock()
network.id = 'network_id'
network.name = 'net_name'
networks = []
expected = [
{'id': 'net_name1',
'ip_address': '2001:db8:1::10',
'link': 'port_id',
'netmask': 'ffff:ffff:ffff:ffff::',
'network_id': 'network_id',
'routes': [{'gateway': '2001:db8:1::1',
'netmask': 'ffff:ffff:ffff:ffff::',
'network': '2001:db8:2::'},
{'gateway': '2001:db8:1::1',
'netmask': 'ffff:ffff:ffff:ffff::',
'network': '2001:db8:3::'}],
'services': [{'address': '2001:db8:1::ee', 'type': 'dns'},
{'address': '2001:db8:2::ff', 'type': 'dns'}],
'type': 'ipv6_dhcpv6-stateful'}]
_network_metadata.metadata_add_network(networks, idx, fixed_ip, port,
network, subnet)
self.assertEqual(expected, networks)
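
The dotted-quad and IPv6 netmasks in the expected metadata above follow directly from the subnet CIDRs; a standalone check with the standard library:

import ipaddress

assert str(ipaddress.ip_network('192.0.2.0/26').netmask) == '255.255.255.192'
assert str(ipaddress.ip_network('2001:db8:1::/64').netmask) == \
    'ffff:ffff:ffff:ffff::'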

View File

@@ -1,303 +0,0 @@
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from openstack import exceptions as sdk_exc
from metalsmith import _nics
from metalsmith import exceptions
from metalsmith.test import test_provisioner
class TestNICs(unittest.TestCase):
def setUp(self):
super(TestNICs, self).setUp()
self.connection = mock.Mock(spec=['network', 'baremetal'])
self.node = mock.Mock(spec=test_provisioner.NODE_FIELDS + ['to_dict'],
id='000', instance_id=None,
properties={'local_gb': 100},
instance_info={},
is_maintenance=False, extra={},
allocation_id=None)
def test_init(self):
nic_info = [{'network': 'uuid',
'fixed_ip': '1.1.1.1'}]
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.assertEqual(nics._node, self.node)
self.assertEqual(nics._connection, self.connection)
self.assertEqual(nics._nics, nic_info)
self.assertIsNone(nics._validated)
self.assertEqual(nics._hostname, 'test-host')
self.assertEqual(nics.created_ports, [])
self.assertEqual(nics.attached_ports, [])
def test_init_wrong_type(self):
nic_info = {'wrong': 'type'}
self.assertRaisesRegex(
TypeError, 'NICs must be a list of dicts',
_nics.NICs,
self.connection, self.node, nic_info, hostname='test-host')
nic_info = [['wrong', 'type']]
self.assertRaisesRegex(
TypeError, 'Each NIC must be a dict',
_nics.NICs,
self.connection, self.node, nic_info, hostname='test-host')
def test_get_port(self):
nic_info = [{'port': 'port_uuid'}]
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
fake_port = mock.Mock()
self.connection.network.find_port.return_value = fake_port
return_value = nics._get_port(nic_info[0])
self.connection.network.find_port.assert_called_once_with(
nic_info[0]['port'], ignore_missing=False)
self.assertEqual(fake_port, return_value)
def test_get_port_unexpected_fields(self):
nic_info = [{'port': 'port_uuid', 'unexpected': 'field'}]
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.assertRaisesRegex(exceptions.InvalidNIC,
'Unexpected fields for a port: unexpected',
nics._get_port, nic_info[0])
def test_get_port_resource_not_found(self):
nic_info = [{'port': 'aaaa-bbbb-cccc'}]
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.connection.network.find_port.side_effect = (
sdk_exc.SDKException('SDK_ERROR'))
self.assertRaisesRegex(exceptions.InvalidNIC,
'Cannot find port aaaa-bbbb-cccc: SDK_ERROR',
nics._get_port, nic_info[0])
def test_get_network(self):
nic_info = [{'network': 'net-name'}]
hostname = 'test-host'
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname=hostname)
fake_net = mock.Mock(id='fake_net_id', name='fake_net_name')
self.connection.network.find_network.return_value = fake_net
return_value = nics._get_network(nic_info[0])
self.connection.network.find_network.assert_called_once_with(
nic_info[0]['network'], ignore_missing=False)
self.assertEqual({'network_id': fake_net.id,
'name': '%s-%s' % (hostname, fake_net.name)},
return_value)
def test_get_network_and_subnet(self):
nic_info = [{'network': 'net-name', 'subnet': 'subnet-name'}]
hostname = 'test-host'
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname=hostname)
fake_net = mock.Mock(id='fake_net_id', name='fake_net_name')
fake_subnet = mock.Mock(id='fake_subnet_id', name='fake_subnet_name')
self.connection.network.find_network.return_value = fake_net
self.connection.network.find_subnet.return_value = fake_subnet
return_value = nics._get_network(nic_info[0])
self.connection.network.find_network.assert_called_once_with(
nic_info[0]['network'], ignore_missing=False)
self.connection.network.find_subnet.assert_called_once_with(
nic_info[0]['subnet'], network_id=fake_net.id,
ignore_missing=False)
self.assertEqual({'network_id': fake_net.id,
'name': '%s-%s' % (hostname, fake_net.name),
'fixed_ips': [{'subnet_id': fake_subnet.id}]},
return_value)
def test_get_network_and_subnet_not_found(self):
nic_info = [{'network': 'net-name', 'subnet': 'subnet-name'}]
hostname = 'test-host'
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname=hostname)
fake_net = mock.Mock(id='fake_net_id', name='fake_net_name')
self.connection.network.find_network.return_value = fake_net
self.connection.network.find_subnet.side_effect = (
sdk_exc.SDKException('SDK_ERROR'))
self.assertRaisesRegex(exceptions.InvalidNIC,
('Cannot find subnet subnet-name on network '
'net-name: SDK_ERROR'),
nics._get_network, nic_info[0])
self.connection.network.find_network.assert_called_once_with(
nic_info[0]['network'], ignore_missing=False)
self.connection.network.find_subnet.assert_called_once_with(
nic_info[0]['subnet'], network_id=fake_net.id,
ignore_missing=False)
def test_get_network_fixed_ip(self):
nic_info = [{'network': 'net-name', 'fixed_ip': '1.1.1.1'}]
hostname = 'test-host'
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname=hostname)
fake_net = mock.Mock(id='fake_net_id', name='fake_net_name')
self.connection.network.find_network.return_value = fake_net
return_value = nics._get_network(nic_info[0])
self.connection.network.find_network.assert_called_once_with(
nic_info[0]['network'], ignore_missing=False)
self.assertEqual({'network_id': fake_net.id,
'name': '%s-%s' % (hostname, fake_net.name),
'fixed_ips': [{'ip_address': '1.1.1.1'}]},
return_value)
def test_get_network_unexpected_fields(self):
nic_info = [{'network': 'uuid',
'subnet': 'subnet_name',
'fixed_ip': '1.1.1.1',
'unexpected': 'field'}]
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.assertRaisesRegex(exceptions.InvalidNIC,
'Unexpected fields for a network: unexpected',
nics._get_network, nic_info[0])
def test_get_network_resource_not_found(self):
nic_info = [{'network': 'aaaa-bbbb-cccc', 'fixed_ip': '1.1.1.1'}]
self.connection.network.find_network.side_effect = (
sdk_exc.SDKException('SDK_ERROR'))
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.assertRaisesRegex(exceptions.InvalidNIC,
'Cannot find network aaaa-bbbb-cccc: SDK_ERROR',
nics._get_network, nic_info[0])
def test_get_subnet(self):
nic_info = [{'subnet': 'net-name'}]
hostname = 'test-host'
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname=hostname)
fake_net = mock.Mock(id='fake_net_id', name='fake_net_name')
fake_subnet = mock.Mock(id='fake_subnet_id',
name='fake_subnet_name',
network_id=fake_net.id)
self.connection.network.find_subnet.return_value = fake_subnet
self.connection.network.get_network.return_value = fake_net
return_value = nics._get_subnet(nic_info[0])
self.connection.network.find_subnet.assert_called_once_with(
nic_info[0]['subnet'], ignore_missing=False)
self.connection.network.get_network.assert_called_once_with(
fake_subnet.network_id)
self.assertEqual({'network_id': fake_net.id,
'name': '%s-%s' % (hostname, fake_net.name),
'fixed_ips': [{'subnet_id': fake_subnet.id}]},
return_value)
def test_get_subnet_unexpected_fields(self):
nic_info = [{'subnet': 'uuid', 'unexpected': 'field'}]
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.assertRaisesRegex(exceptions.InvalidNIC,
'Unexpected fields for a subnet: unexpected',
nics._get_subnet, nic_info[0])
def test_get_subnet_resource_not_found(self):
nic_info = [{'subnet': 'uuid'}]
self.connection.network.find_subnet.side_effect = (
sdk_exc.SDKException('SDK_ERROR'))
nics = _nics.NICs(self.connection, self.node, nic_info,
hostname='test-host')
self.assertRaisesRegex(exceptions.InvalidNIC,
'Cannot find subnet uuid: SDK_ERROR',
nics._get_subnet, nic_info[0])
@mock.patch.object(_nics.NICs, '_get_port', autospec=True)
@mock.patch.object(_nics.NICs, '_get_subnet', autospec=True)
@mock.patch.object(_nics.NICs, '_get_network', autospec=True)
def test_validate(self, mock_network, mock_subnet, mock_port):
nic_info = [{'network': 'network'},
{'subnet': 'subnet'},
{'port': 'port'}]
nics = _nics.NICs(self.connection, self.node, nic_info)
mock_network.return_value = {'network_id': 'net_id'}
mock_subnet.return_value = {'network_id': 'net_id',
'fixed_ips': [
{'subnet_id': 'subnet_id'}]}
mock_port.return_value = port_mock = mock.Mock(id='port_id')
nics.validate()
mock_network.assert_called_once_with(nics, nic_info[0])
mock_subnet.assert_called_once_with(nics, nic_info[1])
mock_port.assert_called_once_with(nics, nic_info[2])
self.assertEqual(('network', {'network_id': 'net_id'}),
nics._validated[0])
self.assertEqual(('subnet', {'network_id': 'net_id',
'fixed_ips': [
{'subnet_id': 'subnet_id'}]}),
nics._validated[1])
self.assertEqual(('port', port_mock), nics._validated[2])
@mock.patch.object(_nics.NICs, '_get_port', autospec=True)
@mock.patch.object(_nics.NICs, '_get_subnet', autospec=True)
@mock.patch.object(_nics.NICs, '_get_network', autospec=True)
def test_create_and_attach_ports(self, mock_network, mock_subnet,
mock_port):
nic_info = [{'network': 'network'},
{'subnet': 'subnet'},
{'port': 'port'}]
nics = _nics.NICs(self.connection, self.node, nic_info)
mock_network.return_value = {'network_id': 'net_id'}
mock_subnet.return_value = {'network_id': 'net_id',
'fixed_ips': [
{'subnet_id': 'subnet_id'}]}
port_a_mock = mock.Mock(id='port_a_id')
port_b_mock = mock.Mock(id='port_b_id')
port_c_mock = mock.Mock(id='port_c_id')
self.connection.network.create_port.side_effect = [port_a_mock,
port_b_mock]
mock_port.return_value = port_c_mock
nics.create_and_attach_ports()
self.connection.network.create_port.assert_has_calls(
[mock.call(binding_host_id=nics._node.id,
**{'network_id': 'net_id'}),
mock.call(binding_host_id=nics._node.id,
**{'network_id': 'net_id',
'fixed_ips': [{'subnet_id': 'subnet_id'}]})])
self.connection.network.update_port.assert_has_calls(
[mock.call(port_c_mock, binding_host_id=nics._node.id)])
self.connection.baremetal.attach_vif_to_node.assert_has_calls(
[mock.call(nics._node, port_a_mock.id),
mock.call(nics._node, port_b_mock.id),
mock.call(nics._node, port_c_mock.id)])
self.assertEqual([port_a_mock.id, port_b_mock.id],
nics.created_ports)
self.assertEqual([port_a_mock.id, port_b_mock.id, port_c_mock.id],
nics.attached_ports)
@mock.patch.object(_nics, 'detach_and_delete_ports', autospec=True)
def test_detach_and_delete_ports(self, mock_detach_delete):
nics = _nics.NICs(self.connection, self.node, [])
nics.created_ports = ['port_a_id']
nics.attached_ports = ['port_a_id', 'port_b_id']
nics.detach_and_delete_ports()
mock_detach_delete.assert_called_once_with(
self.connection, nics._node, nics.created_ports,
nics.attached_ports)
def test_nics_detach_and_delete_ports(self):
created_ports = ['port_a_id']
attached_ports = ['port_a_id', 'port_b_id']
_nics.detach_and_delete_ports(
self.connection, self.node, created_ports, attached_ports)
self.connection.baremetal.detach_vif_from_node.assert_any_call(
self.node, attached_ports[0])
self.connection.baremetal.detach_vif_from_node.assert_any_call(
self.node, attached_ports[1])
self.connection.network.delete_port.assert_called_once_with(
created_ports[0], ignore_missing=False)
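
In summary, a minimal sketch of the three NIC specification forms these tests validate; any extra key raises InvalidNIC:

nics = [
    {'network': 'mynet', 'fixed_ip': '1.1.1.1'},   # new port on a network
    {'subnet': 'mysubnet'},                        # new port on a subnet
    {'port': 'port_uuid'},                         # attach an existing port
]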

File diff suppressed because it is too large

View File

@@ -1,127 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from metalsmith import _scheduler
from metalsmith import exceptions
class TestRunFilters(unittest.TestCase):
def setUp(self):
super(TestRunFilters, self).setUp()
self.nodes = [mock.Mock(spec=['id', 'name']) for _ in range(2)]
def _filter(self, side_effect, fail=AssertionError('called fail')):
fltr = mock.Mock(spec=_scheduler.Filter)
fltr.side_effect = side_effect
fltr.fail.side_effect = fail
return fltr
def test_no_filters(self):
result = _scheduler.run_filters([], self.nodes)
self.assertEqual(result, self.nodes)
def test_all_filters_pass(self):
filters = [self._filter([True, True]) for _ in range(3)]
result = _scheduler.run_filters(filters, self.nodes)
self.assertEqual(result, self.nodes)
for fltr in filters:
self.assertEqual([mock.call(n) for n in self.nodes],
fltr.call_args_list)
self.assertFalse(fltr.fail.called)
def test_one_node_filtered(self):
filters = [self._filter([True, True]),
self._filter([False, True]),
self._filter([True])]
result = _scheduler.run_filters(filters, self.nodes)
self.assertEqual(result, self.nodes[1:2])
for fltr in filters:
self.assertFalse(fltr.fail.called)
for fltr in filters[:2]:
self.assertEqual([mock.call(n) for n in self.nodes],
fltr.call_args_list)
filters[2].assert_called_once_with(self.nodes[1])
def test_all_nodes_filtered(self):
filters = [self._filter([True, True]),
self._filter([False, True]),
self._filter([False], fail=RuntimeError('failed'))]
self.assertRaisesRegex(RuntimeError, 'failed',
_scheduler.run_filters,
filters, self.nodes)
for fltr in filters[:2]:
self.assertEqual([mock.call(n) for n in self.nodes],
fltr.call_args_list)
self.assertFalse(fltr.fail.called)
filters[2].assert_called_once_with(self.nodes[1])
filters[2].fail.assert_called_once_with()
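# A minimal sketch of run_filters as pinned down by the assertions in
# this class (hypothetical reconstruction): each filter only sees the
# nodes that survived the previous filters, and a filter that rejects
# every remaining node has fail() invoked to raise a descriptive error.
def run_filters_sketch(filters, nodes):
    for fltr in filters:
        nodes = [node for node in nodes if fltr(node)]
        if not nodes:
            fltr.fail()
    return nodes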
class TestCapabilitiesFilter(unittest.TestCase):
def test_fail_no_capabilities(self):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
'No available nodes found with capabilities '
'profile=compute, existing capabilities: none',
fltr.fail)
def test_nothing_requested_nothing_found(self):
fltr = _scheduler.CapabilitiesFilter({})
node = mock.Mock(properties={}, spec=['properties', 'name', 'id'])
self.assertTrue(fltr(node))
def test_matching_node(self):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute',
'foo': 'bar'})
node = mock.Mock(
properties={'capabilities': 'foo:bar,profile:compute,answer:42'},
spec=['properties', 'name', 'id'])
self.assertTrue(fltr(node))
def test_not_matching_node(self):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute',
'foo': 'bar'})
node = mock.Mock(
properties={'capabilities': 'foo:bar,answer:42'},
spec=['properties', 'name', 'id'])
self.assertFalse(fltr(node))
def test_fail_message(self):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
node = mock.Mock(
properties={'capabilities': 'profile:control'},
spec=['properties', 'name', 'id'])
self.assertFalse(fltr(node))
self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
'No available nodes found with capabilities '
'profile=compute, existing capabilities: '
r'profile=control \(1 node\(s\)\)',
fltr.fail)
def test_malformed_capabilities(self):
fltr = _scheduler.CapabilitiesFilter({'profile': 'compute'})
for cap in ['foo,profile:control', 42, 'a:b:c']:
node = mock.Mock(properties={'capabilities': cap},
spec=['properties', 'name', 'id'])
self.assertFalse(fltr(node))
self.assertRaisesRegex(exceptions.CapabilitiesNotFound,
'No available nodes found with capabilities '
'profile=compute, existing capabilities: none',
fltr.fail)
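# A hedged sketch of the capability parsing these tests pin down
# (hypothetical, not the original filter): capabilities live in
# node.properties['capabilities'] as a "key1:value1,key2:value2"
# string, and a value containing any malformed entry is treated as
# having no capabilities at all (hence "existing capabilities: none"
# in test_malformed_capabilities).
def parse_capabilities_sketch(caps):
    if not isinstance(caps, str):
        return {}
    result = {}
    for pair in caps.split(','):
        parts = pair.split(':')
        if len(parts) != 2:
            return {}
        result[parts[0]] = parts[1]
    return result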

View File

@@ -1,222 +0,0 @@
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from unittest import mock
from metalsmith import exceptions
from metalsmith import sources
class TestDetect(unittest.TestCase):
def test_glance_whole_disk(self):
source = sources.detect('foobar')
self.assertIsInstance(source, sources.GlanceImage)
self.assertEqual(source.image, 'foobar')
conn = mock.Mock(spec=['image'])
conn.image.find_image.return_value = mock.Mock(
id=42, kernel_id=None, ramdisk_id=None)
source._validate(conn, None)
self.assertEqual({'image_source': 42}, source._node_updates(conn))
def test_glance_partition(self):
source = sources.detect('foobar')
self.assertIsInstance(source, sources.GlanceImage)
self.assertEqual(source.image, 'foobar')
conn = mock.Mock(spec=['image'])
conn.image.find_image.return_value = mock.Mock(
id=42, kernel_id=1, ramdisk_id=2)
source._validate(conn, 9)
self.assertEqual({'image_source': 42, 'kernel': 1, 'ramdisk': 2},
source._node_updates(conn))
def test_glance_partition_missing_root(self):
source = sources.detect('foobar')
self.assertIsInstance(source, sources.GlanceImage)
self.assertEqual(source.image, 'foobar')
conn = mock.Mock(spec=['image'])
conn.image.find_image.return_value = mock.Mock(
id=42, kernel_id=1, ramdisk_id=2)
self.assertRaises(exceptions.UnknownRootDiskSize,
source._validate, conn, None)
def test_glance_invalid_arguments(self):
for kwargs in [{'kernel': 'foo'},
{'ramdisk': 'foo'},
{'checksum': 'foo'}]:
self.assertRaisesRegex(ValueError, 'cannot be provided',
sources.detect, 'foobar', **kwargs)
def test_checksum_required(self):
for tp in ('http', 'https'):
self.assertRaisesRegex(ValueError, 'checksum is required',
sources.detect, '%s://foo' % tp)
def test_file_whole_disk(self):
source = sources.detect('file:///image')
self.assertIs(source.__class__, sources.FileWholeDiskImage)
self.assertEqual(source.location, 'file:///image')
self.assertIsNone(source.checksum)
source._validate(mock.Mock(), None)
def test_file_partition_disk(self):
source = sources.detect('file:///image',
kernel='file:///kernel',
ramdisk='file:///ramdisk')
self.assertIs(source.__class__, sources.FilePartitionImage)
self.assertEqual(source.location, 'file:///image')
self.assertIsNone(source.checksum)
self.assertEqual(source.kernel_location, 'file:///kernel')
self.assertEqual(source.ramdisk_location, 'file:///ramdisk')
source._validate(mock.Mock(), 9)
def test_file_partition_disk_missing_root(self):
source = sources.detect('file:///image', checksum='abcd',
kernel='file:///kernel',
ramdisk='file:///ramdisk')
self.assertRaises(exceptions.UnknownRootDiskSize,
source._validate, mock.Mock(), None)
def test_file_partition_inconsistency(self):
for kwargs in [{'kernel': 'foo'},
{'ramdisk': 'foo'},
{'kernel': 'http://foo'},
{'ramdisk': 'http://foo'}]:
kwargs.setdefault('checksum', 'abcd')
self.assertRaisesRegex(ValueError, 'can only be files',
sources.detect, 'file:///image', **kwargs)
def test_http_whole_disk(self):
source = sources.detect('http:///image', checksum='abcd')
self.assertIs(source.__class__, sources.HttpWholeDiskImage)
self.assertEqual(source.url, 'http:///image')
self.assertEqual(source.checksum, 'abcd')
source._validate(mock.Mock(), None)
self.assertEqual({
'image_checksum': 'abcd',
'image_source': 'http:///image'
}, source._node_updates(None))
def test_http_whole_disk_raw(self):
source = sources.detect('http:///image.raw', checksum='abcd')
self.assertIs(source.__class__, sources.HttpWholeDiskImage)
self.assertEqual(source.url, 'http:///image.raw')
self.assertEqual(source.checksum, 'abcd')
source._validate(mock.Mock(), None)
self.assertEqual({
'image_checksum': 'abcd',
'image_source': 'http:///image.raw',
'image_disk_format': 'raw'
}, source._node_updates(None))
def test_https_whole_disk(self):
source = sources.detect('https:///image', checksum='abcd')
self.assertIs(source.__class__, sources.HttpWholeDiskImage)
self.assertEqual(source.url, 'https:///image')
self.assertEqual(source.checksum, 'abcd')
source._validate(mock.Mock(), None)
def test_https_whole_disk_checksum(self):
source = sources.detect('https:///image',
checksum='https://checksum')
self.assertIs(source.__class__, sources.HttpWholeDiskImage)
self.assertEqual(source.url, 'https:///image')
self.assertEqual(source.checksum_url, 'https://checksum')
def test_http_partition_disk(self):
source = sources.detect('http:///image', checksum='abcd',
kernel='http:///kernel',
ramdisk='http:///ramdisk')
self.assertIs(source.__class__, sources.HttpPartitionImage)
self.assertEqual(source.url, 'http:///image')
self.assertEqual(source.checksum, 'abcd')
self.assertEqual(source.kernel_url, 'http:///kernel')
self.assertEqual(source.ramdisk_url, 'http:///ramdisk')
source._validate(mock.Mock(), 9)
self.assertEqual({
'image_checksum': 'abcd',
'image_source': 'http:///image',
'kernel': 'http:///kernel',
'ramdisk': 'http:///ramdisk'
}, source._node_updates(None))
def test_http_partition_disk_raw(self):
source = sources.detect('http:///image.raw', checksum='abcd',
kernel='http:///kernel',
ramdisk='http:///ramdisk')
self.assertIs(source.__class__, sources.HttpPartitionImage)
self.assertEqual(source.url, 'http:///image.raw')
self.assertEqual(source.checksum, 'abcd')
self.assertEqual(source.kernel_url, 'http:///kernel')
self.assertEqual(source.ramdisk_url, 'http:///ramdisk')
source._validate(mock.Mock(), 9)
self.assertEqual({
'image_checksum': 'abcd',
'image_source': 'http:///image.raw',
'kernel': 'http:///kernel',
'ramdisk': 'http:///ramdisk',
'image_disk_format': 'raw'
}, source._node_updates(None))
def test_http_partition_disk_missing_root(self):
source = sources.detect('http:///image', checksum='abcd',
kernel='http:///kernel',
ramdisk='http:///ramdisk')
self.assertRaises(exceptions.UnknownRootDiskSize,
source._validate, mock.Mock(), None)
def test_https_partition_disk(self):
source = sources.detect('https:///image', checksum='abcd',
# Can mix HTTP and HTTPs
kernel='http:///kernel',
ramdisk='https:///ramdisk')
self.assertIs(source.__class__, sources.HttpPartitionImage)
self.assertEqual(source.url, 'https:///image')
self.assertEqual(source.checksum, 'abcd')
self.assertEqual(source.kernel_url, 'http:///kernel')
self.assertEqual(source.ramdisk_url, 'https:///ramdisk')
def test_https_partition_disk_checksum(self):
source = sources.detect('https:///image',
# Can mix HTTP and HTTPs
checksum='http://checksum',
kernel='http:///kernel',
ramdisk='https:///ramdisk')
self.assertIs(source.__class__, sources.HttpPartitionImage)
self.assertEqual(source.url, 'https:///image')
self.assertEqual(source.checksum_url, 'http://checksum')
self.assertEqual(source.kernel_url, 'http:///kernel')
self.assertEqual(source.ramdisk_url, 'https:///ramdisk')
def test_http_partition_inconsistency(self):
for kwargs in [{'kernel': 'foo'},
{'ramdisk': 'foo'},
{'kernel': 'file://foo'},
{'ramdisk': 'file://foo'},
{'checksum': 'file://foo'}]:
kwargs.setdefault('checksum', 'abcd')
self.assertRaisesRegex(ValueError, 'can only be HTTP',
sources.detect, 'http:///image', **kwargs)
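from metalsmith import sources

# A rough sketch of the dispatch rules the tests above encode
# (hypothetical reconstruction, not the library's actual code): the
# URL scheme selects the source class, the presence of kernel/ramdisk
# selects the partition-image variant, and HTTP(S) sources must carry
# a checksum. Returns the class detect() would pick.
def detect_class_sketch(image, kernel=None, ramdisk=None, checksum=None):
    if image.startswith('file://'):
        return (sources.FilePartitionImage if kernel or ramdisk
                else sources.FileWholeDiskImage)
    if image.startswith(('http://', 'https://')):
        if not checksum:
            raise ValueError('checksum is required for HTTP(S) images')
        return (sources.HttpPartitionImage if kernel or ramdisk
                else sources.HttpWholeDiskImage)
    if kernel or ramdisk or checksum:
        raise ValueError('kernel, ramdisk and checksum cannot be '
                         'provided for Glance images')
    return sources.GlanceImage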

View File

@@ -1,68 +0,0 @@
# Copyright 2018 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from metalsmith import _utils
class TestIsHostnameSafe(unittest.TestCase):
def test_valid(self):
self.assertTrue(_utils.is_hostname_safe('spam'))
self.assertTrue(_utils.is_hostname_safe('spAm'))
self.assertTrue(_utils.is_hostname_safe('SPAM'))
self.assertTrue(_utils.is_hostname_safe('spam-eggs'))
self.assertTrue(_utils.is_hostname_safe('spam.eggs'))
self.assertTrue(_utils.is_hostname_safe('9spam'))
self.assertTrue(_utils.is_hostname_safe('spam7'))
self.assertTrue(_utils.is_hostname_safe('br34kf4st'))
self.assertTrue(_utils.is_hostname_safe('s' * 63))
self.assertTrue(_utils.is_hostname_safe('www.example.com'))
long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
self.assertTrue(_utils.is_hostname_safe(long_str))
def test_invalid(self):
self.assertFalse(_utils.is_hostname_safe('-spam'))
self.assertFalse(_utils.is_hostname_safe('spam-'))
self.assertFalse(_utils.is_hostname_safe('spam_eggs'))
self.assertFalse(_utils.is_hostname_safe('spam eggs'))
self.assertFalse(_utils.is_hostname_safe('$pam'))
self.assertFalse(_utils.is_hostname_safe('egg$'))
self.assertFalse(_utils.is_hostname_safe('spam#eggs'))
self.assertFalse(_utils.is_hostname_safe(' eggs'))
self.assertFalse(_utils.is_hostname_safe('spam '))
self.assertFalse(_utils.is_hostname_safe('s' * 64))
self.assertFalse(_utils.is_hostname_safe(''))
self.assertFalse(_utils.is_hostname_safe(None))
self.assertFalse(_utils.is_hostname_safe('www.nothere.com_'))
self.assertFalse(_utils.is_hostname_safe('www.nothere_.com'))
self.assertFalse(_utils.is_hostname_safe('www..nothere.com'))
self.assertFalse(_utils.is_hostname_safe('www.-nothere.com'))
long_str = 'a' * 63 + '.' + 'b' * 63 + '.' + 'c' * 63 + '.' + 'd' * 63
self.assertFalse(_utils.is_hostname_safe(long_str + '.'))
self.assertFalse(_utils.is_hostname_safe('a' * 255))
# These are valid domain names, but not hostnames (RFC 1123)
self.assertFalse(_utils.is_hostname_safe('www.example.com.'))
self.assertFalse(_utils.is_hostname_safe('http._sctp.www.example.com'))
self.assertFalse(_utils.is_hostname_safe('mail.pets_r_us.net'))
self.assertFalse(_utils.is_hostname_safe('mail-server-15.my_host.org'))
# RFC 952 forbids single-character hostnames
self.assertFalse(_utils.is_hostname_safe('s'))
def test_not_none(self):
# Need to ensure a binary response for success or fail
self.assertIsNotNone(_utils.is_hostname_safe('spam'))
self.assertIsNotNone(_utils.is_hostname_safe('-spam'))
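import re

# A hedged reconstruction of the contract these tests describe
# (hypothetical, not the shipped implementation): RFC 1123 labels of
# 1-63 alphanumeric/hyphen characters with alphanumeric endpoints, no
# trailing dot, at most 255 characters overall, and, per RFC 952,
# at least two characters.
_LABEL = r'[a-zA-Z0-9]([a-zA-Z0-9\-]{0,61}[a-zA-Z0-9])?'
_HOSTNAME_RE = re.compile(r'^%(l)s(\.%(l)s)*$' % {'l': _LABEL})

def is_hostname_safe_sketch(hostname):
    if not isinstance(hostname, str):
        return False
    if not 2 <= len(hostname) <= 255:
        return False
    return _HOSTNAME_RE.match(hostname) is not None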

View File

@@ -1,19 +0,0 @@
# Copyright 2018 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import pbr.version
version_info = pbr.version.VersionInfo('metalsmith')
"""Package version reported by pbr."""

View File

@@ -1,138 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from ansible.module_utils.basic import AnsibleModule
import yaml
DOCUMENTATION = '''
---
module: metalsmith_deployment_defaults
short_description: Transform instances list data for metalsmith_instances
author: "Steve Baker (@stevebaker)"
description:
- Takes a list of instances from the metalsmith_deployment role
and a dict of defaults, and transforms them into the format
required by the metalsmith_instances module.
options:
instances:
description:
- List of node description dicts to perform operations on (in the
metalsmith_deployment instances format)
type: list
default: []
elements: dict
defaults:
description:
- Dict of defaults to use for missing values. Keys correspond to the
metalsmith_deployment instances format.
type: dict
'''
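# A usage sketch, since the module ships no EXAMPLES block; it mirrors
# how the metalsmith_deployment role invokes this module in its
# tasks/main.yaml (abridged to a few representative defaults).
EXAMPLES = '''
- name: Build instance defaults
  metalsmith_deployment_defaults:
    instances: "{{ metalsmith_instances }}"
    defaults:
      image: "{{ metalsmith_image }}"
      nics: "{{ metalsmith_nics }}"
      resource_class: "{{ metalsmith_resource_class }}"
      ssh_public_keys: "{{ metalsmith_ssh_public_keys }}"
  register: instances
'''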
def transform(module, instances, defaults):
mi = []
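# Copy src[key] into dest (optionally under a different key), falling
# back to the role-level default when the instance does not set the
# key; falsy values are treated as unset and skipped.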
def value(src, key, dest, to_key=None):
if not to_key:
to_key = key
value = src.get(key, defaults.get(key))
if value:
dest[to_key] = value
for src in instances:
dest = {'image': {}}
value(src, 'hostname', dest)
value(src, 'candidates', dest)
value(src, 'nics', dest)
value(src, 'netboot', dest)
value(src, 'root_size', dest, 'root_size_gb')
value(src, 'swap_size', dest, 'swap_size_mb')
value(src, 'capabilities', dest)
value(src, 'traits', dest)
value(src, 'resource_class', dest)
value(src, 'conductor_group', dest)
value(src, 'user_name', dest)
image = dest['image']
value(src, 'image', image, 'href')
value(src, 'image_checksum', image, 'checksum')
value(src, 'image_kernel', image, 'kernel')
value(src, 'image_ramdisk', image, 'ramdisk')
value(src, 'config_drive', dest)
# keys in metalsmith_instances not currently in metalsmith_deployment:
# passwordless_sudo
# keys in metalsmith_deployment not currently in metalsmith_instances:
# extra_args (CLI args cannot translate to the python lib,
# but they are mainly for auth and output formatting apart
# from --dry-run)
if 'extra_args' in src:
module.fail_json(
changed=False,
msg="extra_args is no longer supported"
)
# state (metalsmith_instances has a single state attribute for every
# instance)
if 'state' in src:
module.fail_json(
changed=False,
msg="Per-instance state is no longer supported, "
"use variable metalsmith_state"
)
# source keys could be a string or a list of strings
# and the strings could be a path to a public key or the key contents.
# Normalize this to a list of key contents
keys = []
source_keys = src.get('ssh_public_keys')
if source_keys:
if isinstance(source_keys, str):
source_keys = [source_keys]
for source_key in source_keys:
if os.path.isfile(source_key):
with open(source_key) as f:
source_key = f.read()
keys.append(source_key)
if keys:
dest['ssh_public_keys'] = keys
mi.append(dest)
module.exit_json(
changed=False,
msg="{} instances transformed".format(len(mi)),
instances=mi
)
return mi
def main():
module = AnsibleModule(
argument_spec=yaml.safe_load(DOCUMENTATION)['options'],
supports_check_mode=False,
)
instances = module.params['instances']
defaults = module.params['defaults']
transform(module, instances, defaults)
if __name__ == '__main__':
main()

View File

@@ -1,498 +0,0 @@
# Copyright 2020 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from concurrent import futures
import io
import logging
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.openstack import openstack_cloud_from_module
from ansible.module_utils.openstack import openstack_full_argument_spec
from ansible.module_utils.openstack import openstack_module_kwargs
except ImportError:
openstack_cloud_from_module = None
openstack_full_argument_spec = None
openstack_module_kwargs = None
import metalsmith
from metalsmith import instance_config
from metalsmith import sources
from openstack import exceptions as os_exc
import yaml
DOCUMENTATION = '''
---
module: metalsmith_instances
short_description: Manage baremetal instances with metalsmith
author: "Steve Baker (@stevebaker)"
description:
- Provision and unprovision ironic baremetal instances using metalsmith,
which is a simple tool to provision bare metal machines using
OpenStack Bare Metal Service (ironic) and, optionally, OpenStack
Image Service (glance) and OpenStack Networking Service (neutron).
options:
instances:
description:
- List of node description dicts to perform operations on
type: list
default: []
elements: dict
suboptions:
hostname:
description:
- Host name to use, defaults to Node's name or UUID
type: str
name:
description:
- The name of an existing node to provision, this name is appended
to the candidates list
type: str
candidates:
description:
- List of nodes (UUIDs or names) to be considered for deployment
type: list
elements: str
image:
description:
- Details of the image you want to provision onto the node
type: dict
required: True
suboptions:
href:
description:
- Image to use (name, UUID or URL)
type: str
required: True
checksum:
description:
- Image SHA256 or SHA512 checksum, or a URL with checksums. MD5 is deprecated.
type: str
kernel:
description:
- URL of the image's kernel
type: str
ramdisk:
description:
- URL of the image's ramdisk
type: str
nics:
description:
- List of requested NICs
type: list
elements: dict
suboptions:
network:
description:
- Network to create a port on (name or UUID)
subnet:
description:
- Subnet to create a port on (name or UUID)
port:
description:
- Port to attach (name or UUID)
fixed_ip:
description:
- Attach IP from the network
netboot:
description:
- Boot from network instead of local disk
default: no
type: bool
root_size_gb:
description:
- Root partition size (in GiB), defaults to (local_gb - 1)
type: int
swap_size_mb:
description:
- Swap partition size (in MiB), defaults to no swap
type: int
capabilities:
description:
- Selection criteria to match the node capabilities
type: dict
traits:
description:
- Traits the node should have
type: list
elements: str
ssh_public_keys:
description:
- SSH public keys to load
resource_class:
description:
- Node resource class to provision
type: str
default: baremetal
conductor_group:
description:
- Conductor group to pick the node from
type: str
user_name:
description:
- Name of the admin user to create
type: str
passwordless_sudo:
description:
- Allow password-less sudo for the user
default: yes
type: bool
config_drive:
description:
- Extra data to add to the config-drive generated for this instance
type: dict
suboptions:
cloud_config:
description:
- Dict of cloud-init cloud-config tasks to run on node
boot. The 'users' directive can be used to configure extra
users other than the 'user_name' admin user.
type: dict
meta_data:
description:
- Extra metadata to include with the config-drive metadata.
This will be added to the generated metadata
'public_keys', 'uuid', 'name', and 'hostname'.
type: dict
clean_up:
description:
- Clean up resources on failure
default: yes
type: bool
state:
description:
- Desired provision state, "present" to provision,
"absent" to unprovision, "reserved" to create an allocation
record without changing the node state
default: present
choices:
- present
- absent
- reserved
wait:
description:
- A boolean value instructing the module to wait for node provision
to complete before returning. A 'yes' is implied if the number of
instances is more than the concurrency.
type: bool
default: no
timeout:
description:
- An integer value representing the number of seconds to wait for the
node provision to complete.
type: int
default: 3600
concurrency:
description:
- Maximum number of instances to provision at once. Set to 0 to have no
concurrency limit
type: int
default: 0
log_level:
description:
- Set the logging level for the log which is available in the
returned 'logging' result.
default: info
choices:
- debug
- info
- warning
- error
'''
EXAMPLES = '''
- name: Provision instances
metalsmith_instances:
instances:
- name: node-0
hostname: compute-0
image: overcloud-full
state: present
wait: true
clean_up: false
timeout: 1200
concurrency: 20
log_level: info
register: baremetal_provisioned
- name: Metalsmith log for provision instances
debug:
var: baremetal_provisioned.logging
'''
METALSMITH_LOG_MAP = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR
}
BASE_LOG_MAP = {
'debug': logging.INFO,
'info': logging.WARNING,
'warning': logging.WARNING,
'error': logging.ERROR
}
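# The base map deliberately keeps third-party loggers one notch
# quieter than the metalsmith logger itself: e.g. log_level=debug
# yields DEBUG from metalsmith but only INFO from the root logger
# (urllib3 is silenced separately in _configure_logging).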
def _get_source(instance):
image = instance.get('image')
return sources.detect(image=image.get('href'),
kernel=image.get('kernel'),
ramdisk=image.get('ramdisk'),
checksum=image.get('checksum'))
def reserve(provisioner, instances, clean_up):
nodes = []
for instance in instances:
candidates = instance.get('candidates', [])
if instance.get('name') is not None:
candidates.append(instance['name'])
if not candidates:
candidates = None
try:
node = provisioner.reserve_node(
hostname=instance.get('hostname'),
resource_class=instance.get('resource_class', 'baremetal'),
capabilities=instance.get('capabilities'),
candidates=candidates,
traits=instance.get('traits'),
conductor_group=instance.get('conductor_group'))
nodes.append(node)
# side-effect of populating the instance name, which is passed to
# a later provision step
instance['name'] = node.id
except Exception as exc:
if clean_up:
# Remove all reservations on failure
_release_nodes(provisioner, [i.id for i in nodes])
raise exc
return len(nodes) > 0, nodes
def _release_nodes(provisioner, node_ids):
for node in node_ids:
try:
provisioner.unprovision_node(node)
except Exception:
pass
def provision(provisioner, instances, timeout, concurrency, clean_up, wait):
if not instances:
return False, []
# first, ensure all instances are reserved
reserve(provisioner, [i for i in instances if not i.get('name')], clean_up)
nodes = []
# no limit on concurrency, create a worker for every instance
if concurrency < 1:
concurrency = len(instances)
# if concurrency is less than instances, need to wait for
# instance completion
if concurrency < len(instances):
wait = True
provision_jobs = []
exceptions = []
with futures.ThreadPoolExecutor(max_workers=concurrency) as p:
for i in instances:
provision_jobs.append(p.submit(
_provision_instance, provisioner, i, nodes, timeout, wait
))
for job in futures.as_completed(provision_jobs):
e = job.exception()
if e:
exceptions.append(e)
if clean_up:
# first, cancel all jobs
for job in provision_jobs:
job.cancel()
# Unprovision all provisioned so far.
# This is best-effort as some provision calls may have
# started but not yet appended to nodes.
_release_nodes(provisioner, [i.uuid for i in nodes])
nodes = []
if exceptions:
# TODO(sbaker) future enhancement to tolerate a proportion of failures
# so that provisioning and deployment can continue
raise exceptions[0]
return len(nodes) > 0, nodes
def _provision_instance(provisioner, instance, nodes, timeout, wait):
name = instance.get('name')
image = _get_source(instance)
ssh_keys = instance.get('ssh_public_keys')
config_drive = instance.get('config_drive', {})
cloud_config = config_drive.get('cloud_config')
meta_data = config_drive.get('meta_data')
config = instance_config.CloudInitConfig(ssh_keys=ssh_keys,
user_data=cloud_config,
meta_data=meta_data)
if instance.get('user_name'):
config.add_user(instance.get('user_name'), admin=True,
sudo=instance.get('passwordless_sudo', True))
node = provisioner.provision_node(
name,
config=config,
hostname=instance.get('hostname'),
image=image,
nics=instance.get('nics'),
root_size_gb=instance.get('root_size_gb'),
swap_size_mb=instance.get('swap_size_mb'),
netboot=instance.get('netboot', False)
)
nodes.append(node)
if wait:
provisioner.wait_for_provisioning(
[node.uuid], timeout=timeout)
def unprovision(provisioner, instances):
connection = provisioner.connection
for instance in instances:
hostname = instance.get('hostname')
node = None
if hostname:
try:
allocation = connection.baremetal.get_allocation(hostname)
node = connection.baremetal.get_node(allocation.node_id)
except os_exc.ResourceNotFound:
# Allocation for this hostname doesn't exist, so attempt
# to lookup by node name
pass
name = instance.get('name')
if not node and name:
try:
node = connection.baremetal.get_node(name)
except os_exc.ResourceNotFound:
# Node with this name doesn't exist, so there is no
# node to unprovision
pass
if node:
provisioner.unprovision_node(node)
return True
def _configure_logging(log_level):
log_fmt = ('%(asctime)s %(levelname)s %(name)s: %(message)s')
urllib_level = logging.CRITICAL
base_level = BASE_LOG_MAP[log_level]
metalsmith_level = METALSMITH_LOG_MAP[log_level]
logging.basicConfig(level=base_level, format=log_fmt)
logging.getLogger('urllib3.connectionpool').setLevel(urllib_level)
logger = logging.getLogger('metalsmith')
logger.setLevel(metalsmith_level)
log_stream = io.StringIO()
logger.addHandler(logging.StreamHandler(log_stream))
return log_stream
def main():
if not openstack_full_argument_spec:
raise RuntimeError(
'This module requires ansible-collections-openstack')
# Modules in Ansible OpenStack Collection prior to 2.0.0 are not compatible
# with openstacksdk >=0.99.0, but the functions used here ARE compatible
# and will most likely not be removed in collection release 2.0.0, so we
# can safely remove the MAXIMUM_SDK_VERSION and thus use this module with
# newer releases of openstacksdk.
# TODO: Remove once ansible-collections-openstack 2.0.0 has been released
from ansible.module_utils import openstack as aoc
aoc.MAXIMUM_SDK_VERSION = None
argument_spec = openstack_full_argument_spec(
**yaml.safe_load(DOCUMENTATION)['options']
)
module_kwargs = openstack_module_kwargs()
module = AnsibleModule(
argument_spec=argument_spec,
supports_check_mode=False,
**module_kwargs
)
log_stream = _configure_logging(module.params['log_level'])
try:
sdk, cloud = openstack_cloud_from_module(module)
provisioner = metalsmith.Provisioner(cloud_region=cloud.config)
instances = module.params['instances']
state = module.params['state']
concurrency = module.params['concurrency']
timeout = module.params['timeout']
wait = module.params['wait']
clean_up = module.params['clean_up']
if state == 'present':
changed, nodes = provision(provisioner, instances,
timeout, concurrency, clean_up,
wait)
instances = [{
'name': i.node.name or i.uuid,
'hostname': i.hostname,
'id': i.uuid,
} for i in nodes]
module.exit_json(
changed=changed,
msg="{} instances provisioned".format(len(nodes)),
instances=instances,
logging=log_stream.getvalue()
)
if state == 'reserved':
changed, nodes = reserve(provisioner, instances, clean_up)
module.exit_json(
changed=changed,
msg="{} instances reserved".format(len(nodes)),
ids=[node.id for node in nodes],
instances=instances,
logging=log_stream.getvalue()
)
if state == 'absent':
changed = unprovision(provisioner, instances)
module.exit_json(
changed=changed,
msg="{} nodes unprovisioned".format(len(instances)),
logging=log_stream.getvalue()
)
except Exception as e:
module.fail_json(
msg=str(e),
logging=log_stream.getvalue()
)
if __name__ == '__main__':
main()

View File

@@ -1,210 +0,0 @@
Role - metalsmith_deployment
============================
This role deploys instances using the **metalsmith** CLI.
Variables
---------
The only required variable is:
``metalsmith_instances``
list of instances to provision, see Instance_ for instance description.
The following optional variables provide the defaults for Instance_ attributes:
``metalsmith_candidates``
the default for ``candidates``.
``metalsmith_capabilities``
the default for ``capabilities``.
``metalsmith_conductor_group``
the default for ``conductor_group``.
``metalsmith_debug``
Show extra debug information, defaults to ``false``.
``metalsmith_extra_args``
the default for ``extra_args``.
``metalsmith_image``
the default for ``image``.
``metalsmith_image_checksum``
the default for ``image_checksum``.
``metalsmith_image_kernel``
the default for ``image_kernel``.
``metalsmith_image_ramdisk``
the default for ``image_ramdisk``.
``metalsmith_netboot``
the default for ``netboot``.
``metalsmith_nics``
the default for ``nics``.
``metalsmith_resource_class``
the default for ``resource_class``.
``metalsmith_root_size``
the default for ``root_size``.
``metalsmith_ssh_public_keys``
the default for ``ssh_public_keys``.
``metalsmith_state``
the default state for instances, valid values are ``reserved``, ``absent``
or the default value ``present``.
``metalsmith_swap_size``
the default for ``swap_size``.
``metalsmith_traits``
the default for ``traits``.
``metalsmith_user_name``
the default for ``user_name``, the default value is ``metalsmith``.
Instance
--------
Each instance has the following attributes:
``candidates`` (defaults to ``metalsmith_candidates``)
list of nodes (UUIDs or names) to be considered for deployment.
``capabilities`` (defaults to ``metalsmith_capabilities``)
node capabilities to request when scheduling.
``config_drive``
extra data to add to the config-drive generated for this instance:
``cloud_config``
Dict of cloud-init cloud-config tasks to run on node
boot. The 'users' directive can be used to configure extra
users other than the 'user_name' admin user.
``meta_data``
Extra metadata to include with the config-drive metadata.
This will be added to the generated metadata
``public_keys``, ``uuid``, ``name``, and ``hostname``.
``conductor_group`` (defaults to ``metalsmith_conductor_group``)
conductor group to pick nodes from.
.. note:: Currently it's not possible to specify the default group.
``extra_args`` (defaults to ``metalsmith_extra_args``)
additional arguments to pass to the ``metalsmith`` CLI on all calls.
(No longer supported; an error is raised if it is used.)
``image`` (defaults to ``metalsmith_image``)
UUID, name or HTTP(s) URL of the image to use for deployment. Mandatory.
``image_checksum`` (defaults to ``metalsmith_image_checksum``)
SHA256 checksum or checksum file URL for an HTTP(s) image.
``image_kernel`` (defaults to ``metalsmith_image_kernel``)
URL of the kernel image if and only if the ``image`` is a URL of
a partition image.
``image_ramdisk`` (defaults to ``metalsmith_image_ramdisk``)
URL of the ramdisk image if and only if the ``image`` is a URL of
a partition image.
``netboot``
whether to boot the deployed instance from network (PXE, iPXE, etc).
The default is to use local boot (requires a bootloader on the image).
``nics`` (defaults to ``metalsmith_nics``)
list of virtual NICs to attach to node's physical NICs. Each is an object
with exactly one attribute:
``network``
creates a port on the given network, for example:
.. code-block:: yaml
nics:
- network: private
- network: ctlplane
can optionally take a fixed IP to assign:
.. code-block:: yaml
nics:
- network: private
fixed_ip: 10.0.0.2
- network: ctlplane
fixed_ip: 192.168.42.30
``port``
uses the provided pre-created port:
.. code-block:: yaml
nics:
- port: b2254316-7867-4615-9fb7-911b3f38ca2a
``subnet``
creates a port on the given subnet, for example:
.. code-block:: yaml
nics:
- subnet: private-subnet1
``resource_class`` (defaults to ``metalsmith_resource_class``)
requested node's resource class. Mandatory.
``root_size`` (defaults to ``metalsmith_root_size``)
size of the root partition (in GiB), if partition images are used.
.. note::
Also required for whole-disk images due to how the Bare Metal service
currently works.
``ssh_public_keys`` (defaults to ``metalsmith_ssh_public_keys``)
list of file names with SSH public keys to put to the node.
``swap_size`` (defaults to ``metalsmith_swap_size``)
size of the swap partition (in MiB), if partition images are used
(it's an error to set it for a whole disk image).
``traits``
list of traits the node should have.
``user_name`` (defaults to ``metalsmith_user_name``)
name of the user to create on the instance via configdrive. Requires
cloud-init_ on the image.
.. _cloud-init: https://cloudinit.readthedocs.io/
Example
-------
.. code-block:: yaml
---
- hosts: all
tasks:
- include_role:
name: metalsmith_deployment
vars:
metalsmith_image: centos7
metalsmith_nics:
- network: ctlplane
metalsmith_ssh_public_keys:
- /home/user/.ssh/id_rsa.pub
metalsmith_instances:
- hostname: compute-0
resource_class: compute
root_size: 100
swap_size: 4096
capabilities:
boot_mode: uefi
traits:
- CUSTOM_GPU
- hostname: compute-1
resource_class: compute
root_size: 100
swap_size: 4096
capabilities:
boot_mode: uefi
user_name: heat-admin
- hostname: compute-2
resource_class: compute
candidates:
- e63650f2-4e7d-40b2-8932-f5b0e54698c7
- f19d00dd-60e1-46c8-b83c-782b4d291d9e
- hostname: control-0
resource_class: control
capabilities:
boot_mode: uefi
nics:
- network: ctlplane
- port: 1899af15-149d-47dc-b0dc-a68614eeb5c4
- hostname: custom-partition-image
resource_class: custom
image: https://example.com/images/custom-1.0.root.img
image_kernel: https://example.com/images/custom-1.0.vmlinuz
image_ramdisk: https://example.com/images/custom-1.0.initrd
image_checksum: https://example.com/images/SHA256SUMS
- hostname: custom-whole-disk-image
resource_class: custom
image: https://example.com/images/custom-1.0.qcow2
image_checksum: https://example.com/images/SHA256SUMS

View File

@@ -1,21 +0,0 @@
# Optional parameters
metalsmith_candidates: []
metalsmith_capabilities: {}
metalsmith_conductor_group:
metalsmith_debug: false
metalsmith_extra_args:
metalsmith_image_checksum:
metalsmith_image_kernel:
metalsmith_image_ramdisk:
metalsmith_netboot: false
metalsmith_nics: []
metalsmith_resource_class:
metalsmith_root_size:
metalsmith_ssh_public_keys: []
metalsmith_state: present
metalsmith_swap_size:
metalsmith_traits: []
metalsmith_user_name: metalsmith
# Wait parameters
metalsmith_provisioning_timeout: 3600

View File

@@ -1 +0,0 @@
allow_duplicates: true

View File

@@ -1,42 +0,0 @@
---
- name: Build instance defaults
metalsmith_deployment_defaults:
instances: "{{ metalsmith_instances }}"
defaults:
candidates: "{{ metalsmith_candidates }}"
capabilities: "{{ metalsmith_capabilities }}"
conductor_group: "{{ metalsmith_conductor_group }}"
extra_args: "{{ metalsmith_extra_args }}"
image: "{{ metalsmith_image }}"
image_checksum: "{{ metalsmith_image_checksum }}"
image_kernel: "{{ metalsmith_image_kernel }}"
image_ramdisk: "{{ metalsmith_image_ramdisk }}"
netboot: "{{ metalsmith_netboot }}"
nics: "{{ metalsmith_nics }}"
resource_class: "{{ metalsmith_resource_class }}"
root_size: "{{ metalsmith_root_size }}"
ssh_public_keys: "{{ metalsmith_ssh_public_keys }}"
swap_size: "{{ metalsmith_swap_size }}"
traits: "{{ metalsmith_traits }}"
user_name: "{{ metalsmith_user_name }}"
register: instances
- name: Show instances data
debug:
msg: "{{ instances.instances | to_yaml }}"
when: metalsmith_debug|bool
- name: Provision instances
metalsmith_instances:
instances: "{{ instances.instances }}"
state: "{{ metalsmith_state }}"
wait: true
timeout: "{{ metalsmith_provisioning_timeout }}"
log_level: "{{ 'debug' if metalsmith_debug|bool else 'info' }}"
register: baremetal_reserved
- name: Metalsmith log for reserve instances
debug:
var: baremetal_reserved.logging
when: metalsmith_debug|bool

View File

@@ -1,149 +0,0 @@
---
- name: Set facts for centos image builds
set_fact:
centos_image_file: ~/centos-download.qcow2
centos_initramfs_file: ~/centos.initramfs
centos_kernel_file: ~/centos.kernel
centos_partition_file: ~/centos-root.qcow2
centos_image_url: https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2.xz
- name: Install guestfish
package:
name: libguestfs-tools
state: present
become: true
- name: Make kernel files readable (workaround for Ubuntu)
shell: chmod 0644 /boot/vmlinuz-*
become: true
- name: Download the CentOS image
get_url:
url: "{{ centos_image_url }}"
dest: "{{ centos_image_file }}.xz"
register: centos_image_result
until: centos_image_result is succeeded
retries: 3
delay: 10
- name: Unpack the CentOS image
command: xz -d {{ centos_image_file }}.xz
- name: Print filesystems from the image
command: virt-filesystems -a {{ centos_image_file }} -l --extra --block-devices
- name: Upload the CentOS whole-disk image
command: >
openstack image create --disk-format qcow2
--public --file {{ centos_image_file }}
{{ centos_glance_whole_disk_image }}
environment:
OS_CLOUD: devstack-admin
when: centos_glance_whole_disk_image is defined
- name: Create a temporary directory for extraction
tempfile:
state: directory
suffix: boot
register: temp_dir
- name: Extract kernel/ramdisk from the image
command: >
virt-get-kernel -a {{ centos_image_file }}
-o {{ temp_dir.path }} --unversioned-names
- name: Upload the CentOS kernel image
command: >
openstack image create --disk-format aki --container-format aki
--public --file {{ temp_dir.path }}/vmlinuz -f value -c id
{{ centos_glance_kernel_image }}
register: centos_kernel_id
failed_when: centos_kernel_id.stdout == ""
environment:
OS_CLOUD: devstack-admin
when: centos_glance_kernel_image is defined
- name: Upload the CentOS initramfs image
command: >
openstack image create --disk-format ari --container-format ari
--public --file {{ temp_dir.path }}/initramfs -f value -c id
{{ centos_glance_initramds_image }}
register: centos_initramfs_id
failed_when: centos_initramfs_id.stdout == ""
environment:
OS_CLOUD: devstack-admin
when: centos_glance_initramds_image is defined
- name: Delete the kernel and ramdisk image files
file:
state: absent
path: "{{ temp_dir.path }}/{{ item }}"
with_items:
- vmlinuz
- initramfs
- name: Extract the root file system
command: virt-tar-out -a {{ centos_image_file }} / {{ temp_dir.path }}/root.tar
- name: Delete the whole-disk image file
file:
state: absent
path: "{{ centos_image_file }}"
- name: Extract /etc/fstab and /etc/selinux/config
command: >
tar -f {{ temp_dir.path }}/root.tar
-C {{ temp_dir.path }} --extract {{ item }}
with_items:
- ./etc/fstab
- ./etc/selinux/config
- name: Remove /etc/fstab and /etc/selinux/config from the archive
command: tar -f {{ temp_dir.path }}/root.tar --delete {{ item }}
with_items:
- ./etc/fstab
- ./etc/selinux/config
- name: Edit /etc/fstab to replace UUID with LABEL
command: sed -i 's/UUID=[^ ]* /\/dev\/vda2 /' {{ temp_dir.path }}/etc/fstab
- name: Rewrite /etc/selinux/config to disable selinux
copy:
dest: "{{ temp_dir.path }}/etc/selinux/config"
content: "SELINUX=disabled"
- name: Add edited /etc/fstab and /etc/selinux/config back
command: >
tar -f {{ temp_dir.path }}/root.tar
-C {{ temp_dir.path }}
--append {{ item }} --owner root --group root
with_items:
- ./etc/fstab
- ./etc/selinux/config
- name: Pack the root file system into a partition image
command: virt-make-fs {{ temp_dir.path }}/root.tar {{ centos_partition_file }}
- name: Print filesystems from the image
command: virt-filesystems -a {{ centos_partition_file }} -l --extra --block-devices
- name: Remove the temporary directory
file:
state: absent
path: "{{ temp_dir.path }}"
- name: Upload the CentOS partition image
command: >
openstack image create --disk-format qcow2
--public --file {{ centos_partition_file }}
--property kernel_id={{ centos_kernel_id.stdout }}
--property ramdisk_id={{ centos_initramfs_id.stdout }}
{{ centos_glance_root_image }}
environment:
OS_CLOUD: devstack-admin
when: centos_glance_root_image is defined
- name: Remove the partition image file
file:
state: absent
path: "{{ centos_partition_file }}"

View File

@@ -1,83 +0,0 @@
---
- name: "Set centos image facts"
set_fact:
centos_image_file: ~/centos8-wholedisk.qcow2
centos_initramfs_file: ~/centos8-partition.initrd
centos_kernel_file: ~/centos8-partition.vmlinuz
centos_partition_file: ~/centos8-partition.qcow2
- name: Install kpartx
package:
name: kpartx
state: present
become: true
- name: Install DIB
pip:
name: "/home/zuul/src/opendev.org/openstack/diskimage-builder"
become: true
vars:
ansible_python_interpreter: /usr/bin/{{ metalsmith_python | default('python') }}
- name: Make kernel files readable (workaround for Ubuntu)
shell: chmod 0644 /boot/vmlinuz-*
become: true
- name: Detect the right block device element
set_fact:
centos_block_device: block-device-efi
when: metalsmith_boot_mode | default('uefi') != 'bios'
- name: Build a centos8 wholedisk image
command: >
disk-image-create centos bootloader vm {{ centos_block_device | default('') }}
-o centos8-wholedisk
environment:
DIB_RELEASE: 8-stream
- name: Build a centos8 partition image
command: disk-image-create centos bootloader baremetal -o centos8-partition
environment:
DIB_RELEASE: 8-stream
- name: Upload the CentOS whole-disk image
command: >
openstack image create --disk-format qcow2
--public --file {{ centos_image_file }}
{{ centos_glance_whole_disk_image }}
environment:
OS_CLOUD: devstack-admin
when: centos_glance_whole_disk_image is defined
- name: Upload the CentOS kernel image
command: >
openstack image create --disk-format aki --container-format aki
--public --file {{ centos_kernel_file }} -f value -c id
{{ centos_glance_kernel_image }}
register: centos_kernel_id
failed_when: centos_kernel_id.stdout == ""
environment:
OS_CLOUD: devstack-admin
when: centos_glance_kernel_image is defined
- name: Upload the CentOS initramfs image
command: >
openstack image create --disk-format ari --container-format ari
--public --file {{ centos_initramfs_file }} -f value -c id
{{ centos_glance_initramds_image }}
register: centos_initramfs_id
failed_when: centos_initramfs_id.stdout == ""
environment:
OS_CLOUD: devstack-admin
when: centos_glance_initramds_image is defined
- name: Upload the CentOS partition image
command: >
openstack image create --disk-format qcow2
--public --file {{ centos_partition_file }}
--property kernel_id={{ centos_kernel_id.stdout }}
--property ramdisk_id={{ centos_initramfs_id.stdout }}
{{ centos_glance_root_image }}
environment:
OS_CLOUD: devstack-admin
when: centos_glance_root_image is defined

View File

@@ -1,37 +0,0 @@
---
- name: Perform initial setup
import_playbook: initial-setup.yaml
- hosts: all
environment:
OS_CLOUD: devstack-admin
tasks:
- include_tasks: ssh-key.yaml
- include_tasks: centos8-image.yaml
when:
- metalsmith_whole_disk_image is defined
- metalsmith_partition_image is defined
- include_tasks: cirros-image.yaml
when:
- metalsmith_whole_disk_image is undefined
- metalsmith_partition_image is undefined
- name: Test a whole-disk image
include_tasks: exercise.yaml
vars:
metalsmith_image: "{{ metalsmith_whole_disk_image }}"
metalsmith_image_checksum: "{{ metalsmith_whole_disk_checksum | default('') }}"
metalsmith_root_size:
# NOTE(dtantsur): cannot specify swap with whole disk images
metalsmith_swap_size:
- name: Test a partition image
include_tasks: exercise.yaml
vars:
metalsmith_image: "{{ metalsmith_partition_image }}"
metalsmith_image_checksum: "{{ metalsmith_partition_checksum | default('') }}"
metalsmith_image_kernel: "{{ metalsmith_partition_kernel_image | default('') }}"
metalsmith_image_ramdisk: "{{ metalsmith_partition_ramdisk_image | default('') }}"

View File

@@ -1,83 +0,0 @@
---
- name: "Set centos image facts"
set_fact:
centos_image_file: ~/centos9-wholedisk.qcow2
centos_initramfs_file: ~/centos9-partition.initrd
centos_kernel_file: ~/centos9-partition.vmlinuz
centos_partition_file: ~/centos9-partition.qcow2
- name: Install kpartx
package:
name: kpartx
state: present
become: true
- name: Install DIB
pip:
name: "/home/zuul/src/opendev.org/openstack/diskimage-builder"
become: true
vars:
ansible_python_interpreter: /usr/bin/{{ metalsmith_python | default('python') }}
- name: Make kernel files readable (workaround for Ubuntu)
shell: chmod 0644 /boot/vmlinuz-*
become: true
- name: Detect the right block device element
set_fact:
centos_block_device: block-device-efi
when: metalsmith_boot_mode | default('uefi') != 'bios'
- name: Build a centos9 wholedisk image
command: >
disk-image-create -x centos bootloader vm {{ centos_block_device | default('') }}
-o centos9-wholedisk
environment:
DIB_RELEASE: 9-stream
- name: Build a centos9 partition image
command: disk-image-create -x centos bootloader baremetal -o centos9-partition
environment:
DIB_RELEASE: 9-stream
- name: Upload the CentOS whole-disk image
command: >
openstack image create --disk-format qcow2
--public --file {{ centos_image_file }}
{{ centos_glance_whole_disk_image }}
environment:
OS_CLOUD: devstack-admin
when: centos_glance_whole_disk_image is defined
- name: Upload the CentOS kernel image
command: >
openstack image create --disk-format aki --container-format aki
--public --file {{ centos_kernel_file }} -f value -c id
{{ centos_glance_kernel_image }}
register: centos_kernel_id
failed_when: centos_kernel_id.stdout == ""
environment:
OS_CLOUD: devstack-admin
when: centos_glance_kernel_image is defined
- name: Upload the CentOS initramfs image
command: >
openstack image create --disk-format ari --container-format ari
--public --file {{ centos_initramfs_file }} -f value -c id
{{ centos_glance_initramds_image }}
register: centos_initramfs_id
failed_when: centos_initramfs_id.stdout == ""
environment:
OS_CLOUD: devstack-admin
when: centos_glance_initramds_image is defined
- name: Upload the CentOS partition image
command: >
openstack image create --disk-format qcow2
--public --file {{ centos_partition_file }}
--property kernel_id={{ centos_kernel_id.stdout }}
--property ramdisk_id={{ centos_initramfs_id.stdout }}
{{ centos_glance_root_image }}
environment:
OS_CLOUD: devstack-admin
when: centos_glance_root_image is defined

View File

@@ -1,37 +0,0 @@
---
- name: Perform initial setup
import_playbook: initial-setup.yaml
- hosts: all
environment:
OS_CLOUD: devstack-admin
tasks:
- include_tasks: ssh-key.yaml
- include_tasks: centos9-image.yaml
when:
- metalsmith_whole_disk_image is defined
- metalsmith_partition_image is defined
- include_tasks: cirros-image.yaml
when:
- metalsmith_whole_disk_image is undefined
- metalsmith_partition_image is undefined
- name: Test a whole-disk image
include_tasks: exercise.yaml
vars:
metalsmith_image: "{{ metalsmith_whole_disk_image }}"
metalsmith_image_checksum: "{{ metalsmith_whole_disk_checksum | default('') }}"
metalsmith_root_size:
# NOTE(dtantsur): cannot specify swap with whole disk images
metalsmith_swap_size:
- name: Test a partition image
include_tasks: exercise.yaml
vars:
metalsmith_image: "{{ metalsmith_partition_image }}"
metalsmith_image_checksum: "{{ metalsmith_partition_checksum | default('') }}"
metalsmith_image_kernel: "{{ metalsmith_partition_kernel_image | default('') }}"
metalsmith_image_ramdisk: "{{ metalsmith_partition_ramdisk_image | default('') }}"

View File

@@ -1,88 +0,0 @@
---
- name: Find Cirros partition image
shell: openstack image list -f value -c Name | grep 'cirros-.*-partition$' | sort | tail -n1
register: cirros_partition_image_result
failed_when: cirros_partition_image_result.stdout == ""
- name: Find Cirros disk image
shell: openstack image list -f value -c Name | grep 'cirros-.*-disk$' | sort | tail -n1
register: cirros_disk_image_result
failed_when: cirros_disk_image_result.stdout == ""
- name: Set image facts for Glance image
set_fact:
metalsmith_whole_disk_image: "{{ cirros_disk_image_result.stdout }}"
metalsmith_partition_image: "{{ cirros_partition_image_result.stdout }}"
when: not (metalsmith_use_http | default(false))
- block:
- name: Find Cirros UEC image
shell: openstack image list -f value -c Name | grep 'cirros-.*-uec$' | sort | tail -n1
register: cirros_uec_image_result
failed_when: cirros_uec_image_result.stdout == ""
- name: Get baremetal HTTP endpoint
shell: |
source /opt/stack/devstack/openrc admin admin > /dev/null
iniget /etc/ironic/ironic.conf deploy http_url
args:
executable: /bin/bash
register: baremetal_endpoint_result
failed_when: baremetal_endpoint_result.stdout == ""
- name: Copy UEC images directory
command: >
cp -r /opt/stack/devstack/files/images/{{ cirros_uec_image_result.stdout }}
/opt/stack/data/ironic/httpboot/metalsmith
args:
creates: /opt/stack/data/ironic/httpboot/metalsmith
become: yes
- name: Copy whole disk image
command: >
cp /opt/stack/devstack/files/{{ cirros_disk_image_result.stdout }}.img
/opt/stack/data/ironic/httpboot/metalsmith/
args:
creates: /opt/stack/data/ironic/httpboot/metalsmith/{{ cirros_disk_image_result.stdout }}.img
become: yes
- name: Copy partition image
command: >
cp /opt/stack/data/ironic/{{ cirros_partition_image_result.stdout }}.img
/opt/stack/data/ironic/httpboot/metalsmith
args:
creates: /opt/stack/data/ironic/httpboot/metalsmith/{{ cirros_partition_image_result.stdout }}.img
become: yes
- name: Create SHA256 checksums file for images
shell: sha256sum cirros-* > CHECKSUMS
args:
chdir: /opt/stack/data/ironic/httpboot/metalsmith
become: yes
- name: Change ownership of image files
file:
path: /opt/stack/data/ironic/httpboot/metalsmith
state: directory
owner: "{{ ansible_user }}"
recurse: yes
mode: a+r
become: yes
- name: Calculate SHA256 checksum for HTTP disk image
shell: |
sha256sum /opt/stack/devstack/files/{{ cirros_disk_image_result.stdout }}.img \
| awk '{ print $1; }'
register: cirros_disk_image_checksum_result
failed_when: cirros_disk_image_checksum_result.stdout == ""
- name: Set facts for HTTP image
set_fact:
metalsmith_partition_image: "{{ baremetal_endpoint_result.stdout}}/metalsmith/{{ cirros_partition_image_result.stdout }}.img"
metalsmith_partition_kernel_image: "{{ baremetal_endpoint_result.stdout}}/metalsmith/{{ cirros_uec_image_result.stdout | replace('-uec', '-vmlinuz') }}"
metalsmith_partition_ramdisk_image: "{{ baremetal_endpoint_result.stdout}}/metalsmith/{{ cirros_uec_image_result.stdout | replace('-uec', '-initrd') }}"
metalsmith_partition_checksum: "{{ baremetal_endpoint_result.stdout}}/metalsmith/CHECKSUMS"
metalsmith_whole_disk_image: "{{ baremetal_endpoint_result.stdout}}/metalsmith/{{ cirros_disk_image_result.stdout }}.img"
metalsmith_whole_disk_checksum: "{{ cirros_disk_image_checksum_result.stdout }}"
when: metalsmith_use_http | default(false)

View File

@@ -1,121 +0,0 @@
---
- name: Create a port
command: openstack port create --network private test-port
when: metalsmith_precreate_port
- name: Set port argument
set_fact:
nic:
port: test-port
when: metalsmith_precreate_port
- name: Set network argument
set_fact:
nic:
network: private
when: not metalsmith_precreate_port
- name: Deploy a node
include_role:
name: metalsmith_deployment
vars:
metalsmith_debug: true
metalsmith_resource_class: baremetal
metalsmith_instances:
- hostname: test
nics:
- "{{ nic }}"
ssh_public_keys:
- "{{ ssh_key_file }}"
user_name: "{{ configure_instance_user | default('') }}"
# FIXME(dtantsur): openstacksdk issues a deprecation warning here, which
# somehow ends up in stdout, presumably because of Zuul quirks.
# Hack around it while we're solving the issue.
- name: Get instance info via CLI show
shell: |
errout="$(mktemp)"
if ! metalsmith --format=json show test 2> "$errout"; then
cat "$errout"
exit 1
fi
register: instance_info
- name: Register instance information
set_fact:
instance: "{{ (instance_info.stdout | from_json).test }}"
failed_when: instance.state != 'active' or instance.node.provision_state != 'active'
# FIXME(dtantsur): openstacksdk issues a deprecation warning here, which
# somehow ends up in stdout, presumably because of Zuul quirks.
# Hack around it while we're solving the issue.
- name: Get instance info via CLI list
shell: |
errout="$(mktemp)"
if ! metalsmith --format=json list 2> "$errout"; then
cat "$errout"
exit 1
fi
register: instance_info_via_list
- name: Verify that instance info via list is also correct
set_fact:
instance_via_list: "{{ (instance_info_via_list.stdout | from_json).test }}"
failed_when: instance_via_list.state != 'active' or instance_via_list.node.provision_state != 'active'
- name: Show active node information
command: openstack baremetal node show {{ instance.node.id }}
- name: Get IP address
set_fact:
instance_ip: "{{ instance.ip_addresses.values() | list | first | first }}"
failed_when: not instance_ip
- name: SSH into the instance
command: >
ssh -vvv -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o ConnectTimeout=10
{{ configure_instance_user | default('cirros') }}@{{ instance_ip }}
"cat /etc/hostname"
register: ssh_result
until: ssh_result is success
retries: 30
delay: 30
- name: Undeploy a node
command: metalsmith --debug undeploy --wait 900 test
- name: Get the current status of the deployed node
command: openstack baremetal node show {{ instance.node.id }} -f json
register: undeployed_node_result
- name: Parse node state
set_fact:
undeployed_node: "{{ undeployed_node_result.stdout | from_json }}"
- name: Check that the node was undeployed
fail:
msg: The node is in unexpected status {{ undeployed_node }}
when: undeployed_node.provision_state != "available"
- name: Check that the node extra was cleared
fail:
msg: The node still has extra {{ undeployed_node }}
when: undeployed_node.extra != {}
- name: Get attached VIFs for the node
command: openstack baremetal node vif list {{ instance.node.id }} -f value -c ID
register: vif_list_output
- name: Check that no VIFs are still attached
fail:
msg: Some VIFs are still attached
when: vif_list_output.stdout != ""
- name: Show remaining ports
command: openstack port list
- name: Delete created port
command: openstack port delete test-port
when: metalsmith_precreate_port
# FIXME(dtantsur): fails because of ironic mis-behavior
ignore_errors: true

View File

@@ -1,29 +0,0 @@
---
- hosts: all
roles:
- run-devstack
tasks:
- set_fact:
metalsmith_src_dir: '{{ ansible_user_dir }}/src/opendev.org/openstack'
# https://github.com/git/git/commit/8959555cee7ec045958f9b6dd62e541affb7e7d9
- name: Mark the metalsmith as safe for git
command: git config --global --add safe.directory "{{ metalsmith_src_dir }}/metalsmith"
become: true
- name: Install requirements for metalsmith
pip:
requirements: "{{ metalsmith_src_dir }}/metalsmith/requirements.txt"
extra_args: -c {{ metalsmith_src_dir }}/requirements/upper-constraints.txt
become: true
vars:
ansible_python_interpreter: /usr/bin/python3
- name: Install metalsmith
pip:
name: "{{ metalsmith_src_dir }}/metalsmith"
editable: true
become: true
vars:
ansible_python_interpreter: /usr/bin/python3

View File

@@ -1 +0,0 @@
../../metalsmith_ansible/ansible_plugins/modules

View File

@@ -1,15 +0,0 @@
---
- hosts: all
tasks:
- name: Check that VM logs exist
stat:
path: '{{ ironic_bm_logs }}'
register: bm_logs_result
- name: Copy VM logs
synchronize:
src: '{{ ironic_bm_logs }}'
dest: '{{ zuul.executor.log_root }}/{{ inventory_hostname }}'
mode: pull
become: true
when: bm_logs_result.stat.exists

View File

@@ -1,37 +0,0 @@
---
- name: Perform initial setup
import_playbook: initial-setup.yaml
- hosts: all
environment:
OS_CLOUD: devstack-admin
tasks:
- include_tasks: ssh-key.yaml
- include_tasks: centos-image.yaml
when:
- metalsmith_whole_disk_image is defined
- metalsmith_partition_image is defined
- include_tasks: cirros-image.yaml
when:
- metalsmith_whole_disk_image is undefined
- metalsmith_partition_image is undefined
- name: Test a whole-disk image
include_tasks: exercise.yaml
vars:
metalsmith_image: "{{ metalsmith_whole_disk_image }}"
metalsmith_image_checksum: "{{ metalsmith_whole_disk_checksum | default('') }}"
metalsmith_root_size:
# NOTE(dtantsur): cannot specify swap with whole disk images
metalsmith_swap_size:
- name: Test a partition image
include_tasks: exercise.yaml
vars:
metalsmith_image: "{{ metalsmith_partition_image }}"
metalsmith_image_checksum: "{{ metalsmith_partition_checksum | default('') }}"
metalsmith_image_kernel: "{{ metalsmith_partition_kernel_image | default('') }}"
metalsmith_image_ramdisk: "{{ metalsmith_partition_ramdisk_image | default('') }}"

View File

@@ -1,11 +0,0 @@
---
- name: Find a public SSH key file
find:
path: ~/.ssh/
pattern: id_*.pub
register: ssh_key_result
failed_when: ssh_key_result.matched < 1
- name: Set SSH public key fact
set_fact:
ssh_key_file: "{{ ssh_key_result.files[0].path }}"

View File

@@ -1,10 +0,0 @@
---
upgrade:
- |
The deprecated ``root_disk_size`` argument has been removed; use
``root_size_gb`` instead.
deprecations:
- |
Not providing ``resource_class`` to the ``reserve_node`` call is now
deprecated. This is not compatible with the in-progress bare metal
allocation API.

View File

@@ -1,11 +0,0 @@
---
upgrade:
- |
An allocation name is now used for the hostname instead of a custom
``extra`` field. Previously deployed instances will no longer be
recognized; use allocation backfilling to make them recognized again.
deprecations:
- |
The exception classes ``DeploymentFailure``, ``TraitsNotFound`` and
``NoNodesReserved`` are deprecated and no longer used after transitioning
to the allocation API.

View File

@@ -1,7 +0,0 @@
---
fixes:
- |
It is now possible to create a port on a specific subnet even when
multiple subnets with the same name exist on different networks. See bug:
`2009732 <https://storyboard.openstack.org/#!/story/2009732>`_.

View File

@@ -1,9 +0,0 @@
---
upgrade:
- |
Bare Metal API version 1.56 (Stein) or newer is now required. Use the 0.11
release series for older versions.
deprecations:
- |
``InstanceConfig.build_configdrive`` is deprecated; use ``generate`` with
openstacksdk's ``openstack.baremetal.configdrive.build`` instead.
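A hedged sketch of the replacement path; the metadata keys and user data
shown are illustrative only::

    from openstack.baremetal import configdrive

    # build() returns the config drive as a gzipped, base64-encoded blob
    blob = configdrive.build(
        metadata={'uuid': '<node uuid>', 'hostname': 'test'},
        user_data=b'#cloud-config\n{}',
    )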

View File

@@ -1,5 +0,0 @@
---
critical:
- |
Fixes a regression that caused deployed nodes to be picked for deployment
again.

View File

@@ -1,7 +0,0 @@
---
features:
- |
Adds a new ``metalsmith_instances`` option ``config_drive``.
It has sub-options ``cloud_config`` and ``meta_data`` and allows
customization of the data packaged in the config-drive to be consumed by
cloud-init.
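As a rough sketch only (written as a Python literal here; in practice this
is Ansible variable data, and the exact sub-option semantics are
assumptions)::

    metalsmith_instances = [{
        'hostname': 'test',
        'config_drive': {
            # assumed: cloud_config becomes cloud-init user data
            'cloud_config': {
                'ssh_authorized_keys': ['ssh-ed25519 AAAA... demo'],
            },
            # assumed: meta_data entries are merged into the metadata
            'meta_data': {'instance-role': 'test'},
        },
    }]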

View File

@@ -1,7 +0,0 @@
---
upgrade:
- |
The deprecated class ``InstanceConfig`` has been removed.
- |
The deprecated instance configuration method ``build_configdrive`` has
been removed.

View File

@@ -1,6 +0,0 @@
---
upgrade:
- |
Python 2.7 support has been dropped. The last release of metalsmith to
support Python 2.7 is OpenStack Train. The minimum version of Python now
supported by metalsmith is Python 3.6.

View File

@@ -1,8 +0,0 @@
---
upgrade:
- |
Changed to consistently use exceptions from ``metalsmith.exceptions``
rather than exposing openstacksdk exceptions.
deprecations:
- |
The exception ``InvalidInstance`` has been renamed to ``InstanceNotFound``.
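A brief usage sketch of the renamed exception, assuming a configured
``Provisioner`` named ``pro``::

    from metalsmith import exceptions

    try:
        instance = pro.show_instance('test')
    except exceptions.InstanceNotFound:
        # formerly InvalidInstance; sdk exceptions are no longer leaked
        instance = None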

View File

@@ -1,8 +0,0 @@
---
fixes:
- |
Checksums are no longer required (nor used) with file images.
deprecations:
- |
Providing checksums for file images is deprecated. No deploy implementation
actually supports them.

View File

@@ -1,10 +0,0 @@
---
fixes:
- |
Fixed an issue where instance metadata for network configuration was not
correctly written to the config-drive when using a neutron routed
provider network. The invalid metadata would in most cases cause the
instance network initialization to completely fail, leaving the instance
with no network connectivity. See bug: `2009715
<https://storyboard.openstack.org/#!/story/2009715>`_.

View File

@@ -1,4 +0,0 @@
---
features:
- |
Hostname is now displayed in the default format when displaying instances.

View File

@@ -1,7 +0,0 @@
---
fixes:
- |
Fixes stale ``instance_info`` remaining after deploy failures.
- |
Cleans up ``instance_info`` before updating it prior to deployment to make
sure no stale information is left behind.

View File

@@ -1,3 +0,0 @@
---
prelude: >
This is the first release where release notes were introduced.

View File

@@ -1,14 +0,0 @@
---
features:
- |
It is now possible to provide custom ``user_data`` into instance
configuration.
upgrade:
- |
The ``InstanceConfig`` class has been split into ``GenericConfig`` and
``CloudInitConfig`` for clarity on which features come from what.
deprecations:
- |
The ``metalsmith.InstanceConfig`` class is deprecated, use
``GenericConfig`` or ``CloudInitConfig`` from the new module
``metalsmith.instance_config``.
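A minimal migration sketch (constructor arguments assumed to match the old
``InstanceConfig``; ``pro``, ``node`` and ``image`` are placeholders)::

    from metalsmith import instance_config

    # CloudInitConfig covers the cloud-init features of the old class
    config = instance_config.CloudInitConfig(
        ssh_keys=['ssh-ed25519 AAAA... demo'])
    pro.provision_node(node, image, config=config)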

View File

@@ -1,5 +0,0 @@
---
fixes:
- |
No longer removes ``instance_info`` on normal unprovisioning, only on
failures during deployment.

View File

@@ -1,13 +0,0 @@
---
features:
- |
Network metadata is now created and written to the instance config in the
config-drive for deployed nodes.
fixes:
- |
Fixed an issue where deployed nodes did not become available over the
network. This happened when the first network interface was not connected
to a network with a DHCP service, i.e. a secondary network interface was
used. The addition of network metadata in the instance config solves this
problem. See bug:
`2009238 <https://storyboard.openstack.org/#!/story/2009238>`_.

View File

@@ -1,5 +0,0 @@
---
features:
- |
Allows disabling clean up on failure via the new ``clean_up_on_failure``
argument and ``--no-clean-up`` flag.
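A one-line sketch, assuming the new argument is accepted by
``provision_node`` (``pro``, ``node`` and ``image`` are placeholders)::

    # keep the failed node and any created ports around for debugging
    pro.provision_node(node, image, clean_up_on_failure=False)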

View File

@@ -1,13 +0,0 @@
---
upgrade:
- |
`openstacksdk <https://docs.openstack.org/openstacksdk/>`_ is now used
for bare metal operations. The ``Node`` objects returned from various calls
are now `openstacksdk Node objects
<https://docs.openstack.org/openstacksdk/latest/user/resources/baremetal/v1/node.html#openstack.baremetal.v1.node.Node>`_
instead of ones from **python-ironicclient**.
- |
The **genisoimage** utility is now required for building configdrives.
other:
- |
The dependency on **python-ironicclient** has been removed.

View File

@@ -1,13 +0,0 @@
---
fixes:
- |
Fixes erroneous behavior in metalsmith's generation of instance network
metadata, where it relied upon the pre-set Neutron MAC address. Metalsmith
creates port bindings in Neutron and then populates the VIF attachments in
Ironic; this broke when Ironic began resetting MAC addresses as part of
early VIF binding fixes. Metalsmith now reconciles against the tenant VIF
attachment record in Ironic, and uses that data source for the physical
MAC address to feed into network metadata generation. More information
regarding this issue can be found in
`bug 2107483 <https://bugs.launchpad.net/metalsmith/+bug/2107483>`_.

View File

@@ -1,7 +0,0 @@
---
features:
- |
The ``metalsmith`` CLI now uses table format similar to OpenStack CLI.
- |
The ``metalsmith`` CLI now supports the same ``-f``, ``-c`` and
``--sort-column`` arguments as other OpenStack CLI.

View File

@@ -1,9 +0,0 @@
---
fixes:
- |
Images sourced over HTTP would never have the node ``instance_info``
``image_disk_format`` set to ``raw``, because the image file is not
processed by ironic. This resulted in errors for large images, or in
ironic-python-agent never using streaming to copy the image to disk. To
work around this, ``image_disk_format`` is set to ``raw`` when the image
URL ends with a ``.raw`` file extension.
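For example (URLs illustrative; keyword names assumed from the metalsmith
sources API), an HTTP source ending in ``.raw`` now yields
``image_disk_format=raw``::

    from metalsmith import sources

    image = sources.HttpWholeDiskImage(
        'https://example.com/images/centos9.raw',
        checksum_url='https://example.com/images/CHECKSUMS')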

View File

@@ -1,5 +0,0 @@
---
upgrade:
- |
Support for Python 3.8 and older versions has been removed. The minimum
Python version now supported is 3.9.

View File

@@ -1,4 +0,0 @@
---
features:
- |
The ``reserve_node`` call now also accepts ``hostname``.
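A one-line sketch (values illustrative; ``pro`` is a configured
``Provisioner``)::

    # the hostname can now be requested at reservation time
    node = pro.reserve_node(resource_class='baremetal', hostname='web-1')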

View File

@@ -1,5 +0,0 @@
---
upgrade:
- |
The ``resource_class`` argument to ``reserve_node``, as well as the
``--resource-class`` CLI argument, are now required.

View File

@@ -1,5 +0,0 @@
---
features:
- |
Adds new function ``metalsmith.sources.detect`` to automate detection of
various sources from their location, kernel, image and checksum.
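A hedged sketch of the helper (argument names follow the note; values are
illustrative)::

    from metalsmith import sources

    # picks a suitable source class based on the location and on
    # whether kernel/ramdisk/checksum are supplied
    source = sources.detect('https://example.com/images/centos9.raw',
                            checksum='<sha256 digest>')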

View File

@@ -1,17 +0,0 @@
---
features:
- |
The ``Instance.state`` value is now a proper enumeration of type
``metalsmith.InstanceState``.
- |
The ``list_instances`` call now returns any valid instances, not only ones
created by metalsmith. This is consistent with the ``show_instance(s)``
behavior.
deprecations:
- |
Comparing an instance state with strings is deprecated, use enumeration
values instead.
fixes:
- |
Fixes the ``show_instance(s)`` calls to raise an exception for nodes that
are not valid instances (state == ``UNKNOWN``).
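A comparison sketch using the enumeration (member name assumed from the
note's ``active`` state; ``instance`` is a placeholder)::

    from metalsmith import InstanceState

    # comparing with plain strings is deprecated
    if instance.state == InstanceState.ACTIVE:
        print(instance.hostname)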

View File

@@ -1,8 +0,0 @@
---
upgrade:
- |
The deprecated ``delay`` argument to the ``wait_for_provisioning`` call
has been removed.
- |
Instance states (members of the ``InstanceState`` enumeration) can no
longer be compared to strings. This was deprecated in the Stein release.

View File

@@ -1,7 +0,0 @@
---
features:
- |
Allows specifying a subnet for the ``nics`` argument of the
``provision_node`` call as ``{"subnet": "<name or ID>"}``.
- |
Adds a new CLI argument ``--subnet`` to create a port on the given subnet.
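A minimal sketch of the Python form (names illustrative); the CLI
equivalent would pass ``--subnet`` to ``metalsmith deploy``::

    # request a port on a specific subnet rather than a network
    pro.provision_node(node, image, nics=[{'subnet': 'private-subnet'}])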

View File

@@ -1,8 +0,0 @@
---
upgrade:
- |
During the *Bobcat* development cycle, the Ironic community updated the
MD5 checksum logic in ``ironic-python-agent`` so that it is explicitly
disabled, and added support for conveying checksums as SHA256 or SHA512.
With this upgrade, we have updated our references and playbooks
accordingly.

View File

@@ -1,5 +0,0 @@
---
fixes:
- |
No longer requires root size for whole disk images. This requirement has
been removed from ironic.

View File

@@ -1,6 +0,0 @@
===========================
2023.1 Series Release Notes
===========================
.. release-notes::
:branch: unmaintained/2023.1

View File

@@ -1,6 +0,0 @@
===========================
2023.2 Series Release Notes
===========================
.. release-notes::
:branch: stable/2023.2

View File

@@ -1,6 +0,0 @@
===========================
2024.1 Series Release Notes
===========================
.. release-notes::
:branch: stable/2024.1

View File

@@ -1,6 +0,0 @@
===========================
2024.2 Series Release Notes
===========================
.. release-notes::
:branch: stable/2024.2

View File

@@ -1,6 +0,0 @@
===========================
2025.1 Series Release Notes
===========================
.. release-notes::
:branch: stable/2025.1

View File

@@ -1,6 +0,0 @@
===========================
2025.2 Series Release Notes
===========================
.. release-notes::
:branch: stable/2025.2

View File

@@ -1,198 +0,0 @@
# -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
# sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'reno.sphinxext',
]
try:
import openstackdocstheme
extensions.append('openstackdocstheme')
except ImportError:
openstackdocstheme = None
openstackdocs_repo_name = 'openstack/metalsmith'
openstackdocs_auto_name = False
openstackdocs_bug_project = 'metalsmith'
openstackdocs_bug_tag = ''
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
# source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'MetalSmith Release Notes'
copyright = '2018, MetalSmith Developers'
# Release notes are version independent.
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
# language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
# today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
# default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
# add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
# add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
# show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'native'
# A list of ignored prefixes for module index sorting.
# modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
# keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
if openstackdocstheme is not None:
html_theme = 'openstackdocs'
else:
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
# html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
# html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
# html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
# html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
# html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
# html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
# html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
# html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
# html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
# html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
# html_additional_pages = {}
# If false, no module index is generated.
# html_domain_indices = True
# If false, no index is generated.
# html_use_index = True
# If true, the index is split into individual pages for each letter.
# html_split_index = False
# If true, links to the reST sources are added to the pages.
# html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
# html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
# html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
# html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
# html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'MetalSmithReleaseNotesdoc'

View File

@@ -1,22 +0,0 @@
========================
MetalSmith Release Notes
========================
.. toctree::
:maxdepth: 1
unreleased
2025.2
2025.1
2024.2
2024.1
2023.2
2023.1
zed
yoga
xena
wallaby
victoria
ussuri
train
stein

View File

@@ -1,6 +0,0 @@
=============================================
Stein Series (0.11.0 - 0.11.x) Release Notes
=============================================
.. release-notes::
:branch: stable/stein

View File

@@ -1,6 +0,0 @@
=============================================
Train Series (0.12.0 - 0.15.x) Release Notes
=============================================
.. release-notes::
:branch: stable/train

View File

@@ -1,5 +0,0 @@
============================
Current Series Release Notes
============================
.. release-notes::

View File

@@ -1,6 +0,0 @@
===========================================
Ussuri Series (1.0.0 - 1.1.x) Release Notes
===========================================
.. release-notes::
:branch: stable/ussuri

View File

@@ -1,6 +0,0 @@
=============================================
Victoria Series (1.2.0 - 1.2.x) Release Notes
=============================================
.. release-notes::
:branch: unmaintained/victoria

Some files were not shown because too many files have changed in this diff.