fuel-web/fuelclient/fuel

2077 lines
61 KiB
Python
Executable File

#!/usr/bin/env python
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import argparse
import curses
from functools import partial
from functools import wraps
from itertools import chain
from itertools import groupby
import json
import math
from operator import itemgetter
import os
import pkg_resources
import shutil
import sys
from time import sleep
import urllib2
import yaml
# --- Client configuration -------------------------------------------------
# Server address/port come from /etc/fuel-client.yaml when present,
# otherwise from the process environment, falling back to the defaults.
path_to_config = "/etc/fuel-client.yaml"
defaults = {
    "LISTEN_ADDRESS": "127.0.0.1",
    "LISTEN_PORT": "8000"
}
if os.path.exists(path_to_config):
    with open(path_to_config, "r") as fh:
        # NOTE(review): yaml.load without SafeLoader can construct arbitrary
        # Python objects; the config file is trusted root-owned content here,
        # but yaml.safe_load would be the safer choice — confirm.
        config = yaml.load(fh.read())
    defaults.update(config)
else:
    defaults.update(os.environ)
# Base URL of the Nailgun server this client talks to.
ROOT = "http://{LISTEN_ADDRESS}:{LISTEN_PORT}".format(**defaults)
try:
    __version__ = pkg_resources.get_distribution("python-fuelclient").version
except pkg_resources.DistributionNotFound:
    # Running from a source checkout rather than an installed package.
    __version__ = ""
OSTF_ROOT = ROOT + "/ostf/"
API_ROOT = ROOT + "/api/v1/"
# Global output flags, toggled by the top-level CLI options.
DEBUG = False
JSON = False
YAML = False
DEFAULT_SERIALIZER = "yaml"
# "w" serializes Python data to text, "r" parses text back into data.
SERIALIZERS = {
    "json": {
        "w": lambda d: json.dumps(d, indent=4),
        "r": lambda d: json.loads(d)
    },
    "yaml": {
        "w": lambda d: yaml.safe_dump(d, default_flow_style=False),
        "r": lambda d: yaml.load(d)
    }
}
# Arguments that never count as "user supplied an action parameter"
# (see has_arguments below).
PASSIVE_ARGUMENTS = ["rel", "env", "action", "debug", "json", "net", "yaml"]
class DeployProgressError(Exception):
    """Raised when a polled deployment task reports status "error"."""
    pass
class FuelVersionAction(argparse._VersionAction):
    """Custom argparse._VersionAction subclass to compute fuel server version

    :returns: prints fuel server version
    """
    def __call__(self, parser, namespace, values, option_string=None):
        # get_fuel_version is defined elsewhere in this file; like the stock
        # --version action, this prints and exits the parser.
        parser.exit(message=get_fuel_version())
class NodeAction(argparse.Action):
    """Custom argparse.Action subclass to store node identity

    Accepts a mix of numeric node ids and (partial) MAC addresses;
    MACs are resolved to ids through the nodes API.

    :returns: list of ids
    """
    def __call__(self, _parser, namespace, values, option_string=None):
        if not values:
            return
        identities = set(chain(*values))
        # Anything containing a colon is treated as a (suffix of a) MAC.
        macs = set(token for token in identities if ":" in token)
        resolved = set()
        for candidate in identities - macs:
            try:
                resolved.add(int(candidate))
            except ValueError:
                print_error("'{0}' is not valid node id.".format(candidate))
        if macs:
            mac_to_id = dict(
                (n["mac"], n["id"])
                for n in json_api_get_request("nodes/")
            )
            for suffix in macs:
                matched = next(
                    (mac for mac in mac_to_id if mac.endswith(suffix)),
                    None
                )
                if matched is None:
                    print_error(
                        'Node with mac endfix "{0}" was not found.'
                        .format(suffix)
                    )
                else:
                    resolved.add(mac_to_id[matched])
        setattr(namespace, self.dest, list(resolved))
class SetAction(argparse.Action):
    """Custom argparse.Action subclass to store distinct values

    :returns: Set of arguments
    """
    def __call__(self, _parser, namespace, values, option_string=None):
        # First occurrence creates the set; repeats merge into it.
        try:
            existing = getattr(namespace, self.dest)
            existing.update(values)
        except AttributeError:
            setattr(namespace, self.dest, set(values))
def handle_exceptions(exc):
    """Translate transport-level errors into user-facing messages.

    HTTP errors print the status plus the response body; connection
    errors print a generic message; anything else is re-raised.
    """
    if isinstance(exc, urllib2.HTTPError):
        error_body = exc.read()
        print_error("{0} {1}".format(
            exc,
            "({0})".format(error_body or "")
        ))
    elif isinstance(exc, urllib2.URLError):
        print_error("Can't connect to Nailgun server!")
    else:
        # Not a transport problem — let it propagate to the caller.
        raise exc
def exceptions_decorator(func):
    """Decorator routing any exception raised by ``func`` through
    handle_exceptions instead of letting it escape raw.
    """
    @wraps(func)
    def guarded(*args, **kwargs):
        try:
            result = func(*args, **kwargs)
        except Exception as exc:
            handle_exceptions(exc)
        else:
            return result
    return guarded
def recur_get(multi_level_dict, key_chain):
    """Method accesses some field in nested dictionaries

    Follows key_chain one key at a time, stopping at the first value
    that is not itself a dict.

    :returns: value for last key in key_chain in last dictionary
    """
    current = multi_level_dict[key_chain[0]]
    if isinstance(current, dict):
        return recur_get(current, key_chain[1:])
    return current
def print_formatted(data):
    """Serialize *data* per the active output flag; silent when neither
    --json nor --yaml was requested.
    """
    if JSON:
        writer = SERIALIZERS["json"]["w"]
    elif YAML:
        writer = SERIALIZERS["yaml"]["w"]
    else:
        return
    print(writer(data))
def print_to_output(formatted_data, arg, print_method=print):
    """Emit either machine-readable data (under --json/--yaml) or the
    human-readable *arg* via *print_method*.
    """
    if not (JSON or YAML):
        print_method(arg)
    else:
        print_formatted(formatted_data)
def format_table(data, acceptable_keys=None, subdict_keys=None):
    """Format list of dicts to ascii table

    :acceptable_keys list(str): list of keys for which to create table
                                also specifies their order
    :subdict_keys list(tuple(str)): list of key chains (tuples of key strings)
                                    which are applied to dictionaries
                                    to extract values
    :returns: the rendered table as a string ("" for empty data with no
              explicit header)

    Fix: previously an empty *data* without acceptable_keys raised
    IndexError on ``data[0]``; now the empty case is handled explicitly.
    """
    if subdict_keys:
        # Flatten nested values into top-level cells before rendering.
        for key_chain in subdict_keys:
            for data_dict in data:
                data_dict[key_chain[0]] = recur_get(data_dict, key_chain)
    if acceptable_keys:
        rows = [tuple([value[key] for key in acceptable_keys])
                for value in data]
        header = tuple(acceptable_keys)
    elif data:
        rows = [tuple(x.values()) for x in data]
        header = tuple(data[0].keys())
    else:
        # Nothing to show and no explicit header to derive columns from.
        return ""
    number_of_columns = len(header)
    # Start from header widths, then widen to fit the longest cell.
    column_widths = dict(
        zip(
            range(number_of_columns),
            (len(str(x)) for x in header)
        )
    )
    for row in rows:
        column_widths.update(
            (index, max(column_widths[index], len(str(element))))
            for index, element in enumerate(row)
        )
    row_template = ' | '.join(
        '%%-%ss' % column_widths[i] for i in range(number_of_columns)
    )
    return '\n'.join(
        (row_template % header,
         '-|-'.join(column_widths[column_index] * '-'
                    for column_index in range(number_of_columns)),
         '\n'.join(row_template % x for x in rows))
    )
def json_api_delete_request(api):
    """Make DELETE request to specific API with some data

    :param api: path relative to API_ROOT, e.g. "clusters/1/"
    :returns: empty dict (the response body is discarded)
    """
    print_debug(
        "DELETE {0}".format(API_ROOT + api)
    )
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request(API_ROOT + api)
    # NOTE(review): the header value carries a leading space — presumably
    # harmless to the server, but looks unintended; confirm before changing.
    request.add_header('Content-Type', ' application/json')
    # urllib2.Request has no native DELETE support; override the verb.
    request.get_method = lambda: 'DELETE'
    opener.open(request)
    return {}
def json_api_put_request(api, data):
    """Make PUT request to specific API with some data

    :param api: path relative to API_ROOT
    :param data: JSON-serializable payload
    :returns: decoded JSON response
    """
    data_json = json.dumps(data)
    print_debug(
        "PUT {0} data={1}"
        .format(API_ROOT + api, data_json)
    )
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    request = urllib2.Request(API_ROOT + api, data=data_json)
    request.add_header('Content-Type', ' application/json')
    # urllib2 only issues GET/POST natively; force the verb to PUT.
    request.get_method = lambda: 'PUT'
    return json.loads(
        opener.open(request).read()
    )
def json_api_get_request(api, root=API_ROOT):
    """Make GET request to specific API

    :param api: path relative to *root*
    :param root: base URL (defaults to the Nailgun API root)
    :returns: decoded JSON response
    """
    url = root + api
    print_debug(
        "GET {0}"
        .format(url)
    )
    request = urllib2.urlopen(url)
    return json.loads(
        request.read()
    )
def json_ostf_get_request(api):
    """GET from the OSTF (health-check) service instead of Nailgun."""
    return json_api_get_request(api, root=OSTF_ROOT)
def json_ostf_post_request(api, data):
    """POST to the OSTF (health-check) service instead of Nailgun."""
    return json_api_post_request(api, data, root=OSTF_ROOT)
def json_api_post_request(api, data, root=API_ROOT):
    """Make POST request to specific API with some data

    :param api: path relative to *root*
    :param data: JSON-serializable payload
    :returns: decoded JSON response, or {} when the body is not JSON
    """
    url = root + api
    data_json = json.dumps(data)
    print_debug(
        "POST {0} data={1}"
        .format(url, data_json)
    )
    request = urllib2.Request(
        url=url,
        data=data_json,
        headers={
            'Content-Type': 'application/json'
        }
    )
    try:
        response = json.loads(
            urllib2.urlopen(request)
            .read()
        )
    except ValueError:
        # Empty or non-JSON response body.
        response = {}
    return response
def has_arguments(params):
    """Checks whether params has some initialized attributes

    Attributes listed in PASSIVE_ARGUMENTS (global flags and the action
    name itself) do not count.
    """
    relevant_values = (
        value for name, value in params.__dict__.items()
        if name not in PASSIVE_ARGUMENTS
    )
    return any(relevant_values)
def print_error(message):
    """Write *message* to stderr and terminate with exit status 1.

    Fix: uses sys.exit instead of the ``exit`` helper injected by the
    ``site`` module, which is not guaranteed to exist (e.g. under
    ``python -S`` or in frozen interpreters).
    """
    sys.stderr.write(message + "\n")
    sys.exit(1)
def print_debug(message):
    """Print *message* only when the global --debug flag set DEBUG."""
    if DEBUG:
        print(message)
def check_for_attributes(params, attributes):
    """Abort with a usage error listing every attribute in *attributes*
    that is unset (falsy) on *params*; no-op when all are present.
    """
    missing = [attr for attr in attributes if not getattr(params, attr)]
    if missing:
        flags = quote_and_join("--" + attr for attr in missing)
        print_error("{0} required!".format(flags))
def check_for_one_attribute(params, attributes):
    """Abort with a usage error unless at least one of *attributes* is
    set (truthy) on *params*.
    """
    if any(getattr(params, attr) for attr in attributes):
        return
    listing = ", ".join('"--{0}"'.format(attr) for attr in attributes)
    print_error("At least one of {0} is required!".format(listing))
def release(params):
    """List and modify currently available releases

    Without --config: prints the release table (a single release when
    --rel is given).  With --config: posts RedHat credentials for the
    release to the redhat/setup API.
    """
    acceptable_keys = ["id", "name", "state", "operating_system", "version"]
    if params.config:
        check_for_attributes(params, ["rel", "username", "password"])
        data = {
            "release_id": params.rel,
            "username": params.username,
            "password": params.password
        }
        # Satellite flags are all-or-nothing: both present -> RHN license,
        # both absent -> RHSM; a mix is a usage error.
        satellite_flags = [params.satellite_server_hostname,
                           params.activation_key]
        if not any(satellite_flags):
            data.update({
                "license_type": "rhsm",
                "satellite": "",
                "activation_key": ""
            })
        elif all(satellite_flags):
            data.update({
                "license_type": "rhn",
                "satellite": params.satellite_server_hostname,
                "activation_key": params.activation_key
            })
        else:
            print_error('RedHat satellite settings requires both a '
                        '"--satellite-server-hostname" and '
                        'a "--activation-key" flags.')
        release_response = json_api_post_request(
            "redhat/setup/",
            data
        )
        print_to_output(
            release_response,
            "Credentials for release with id={0}"
            " were modified."
            .format(params.rel)
        )
    else:
        if params.rel:
            # Wrap the single release in a list so the table rendering
            # below can treat both branches uniformly.
            data = [json_api_get_request(
                "releases/{0}/"
                .format(params.rel)
            )]
        else:
            data = json_api_get_request("releases/")
        print_to_output(
            data,
            format_table(
                data,
                acceptable_keys=acceptable_keys
            )
        )
def role(params):
    """List all roles for specific release

    :param params: argparse namespace; requires --rel
    """
    data = json_api_get_request(
        "releases/{0}/"
        .format(params.rel)
    )
    acceptable_keys = ["name", "conflicts"]
    roles = [
        {
            "name": role_name,
            # "-" marks roles that declare no conflicts at all.
            "conflicts": ", ".join(
                metadata.get("conflicts", ["-"])
            )
        } for role_name, metadata in data["roles_metadata"].iteritems()]
    print_to_output(
        roles,
        format_table(
            roles,
            acceptable_keys=acceptable_keys
        )
    )
def environment(params):
    """Create, list and modify currently existing environments(clusters)

    Branches on --create / --set / --delete; the default branch lists
    environments as a table (optionally filtered by --env).
    """
    if params.create:
        check_for_attributes(params, ["name", "rel"])
        data = {
            "nodes": [],
            "tasks": [],
            "name": params.name,
            "release": int(params.rel)
        }
        # NOTE(review): params.net may be None here, in which case .lower()
        # would raise — presumably argparse supplies a default; confirm.
        if params.net.lower() == "nova":
            data["net_provider"] = "nova_network"
        else:
            data["net_provider"] = "neutron"
            # Segmentation type is only meaningful for neutron.
            if params.net_segment_type:
                data["net_segment_type"] = params.net_segment_type
            else:
                print_error('"--net-segment-type" must be specified!')
        cluster_response = json_api_post_request("clusters/", data)
        cluster_id = cluster_response[u"id"]
        if params.mode:
            # "ha" maps to ha_compact; any other value means multinode.
            full_mode = "ha_compact" \
                if params.mode.lower() == "ha" else "multinode"
            data = {"mode": full_mode}
            cluster_response = json_api_put_request(
                "clusters/{0}/".format(cluster_id),
                data
            )
        print_to_output(
            cluster_response,
            "Environment '{name}' with id={id}, mode={mode}"
            " and network-mode={net_provider} was created!"
            .format(**cluster_response)
        )
    elif params.set:
        check_for_attributes(params, ["env"])
        check_for_one_attribute(params, ["name", "mode"])
        data = {}
        if params.mode:
            data["mode"] = "ha_compact" \
                if params.mode.lower() == "ha" else "multinode"
        if params.name:
            data["name"] = params.name
        put_response = json_api_put_request(
            "clusters/{0}/".format(params.env),
            data
        )
        # Build one message line per modified attribute.
        msg_templates = []
        if params.name:
            msg_templates.append(
                "Environment with id={id} was renamed to '{name}'."
            )
        if params.mode:
            msg_templates.append(
                "Mode of environment with id={id} was set to '{mode}'."
            )
        message = "\n".join(msg_templates).format(
            id=params.env,
            name=params.name,
            mode=params.mode
        )
        print_to_output(put_response, message)
    elif params.delete:
        check_for_attributes(params, ["env"])
        delete_response = json_api_delete_request(
            "clusters/{0}/".format(params.env)
        )
        print_to_output(
            delete_response,
            "Environment with id={0} was deleted."
            .format(params.env)
        )
    else:
        acceptable_keys = ["id", "status", "name", "mode",
                           "release_id", "changes"]
        data = json_api_get_request("clusters/")
        if params.env:
            data = filter(
                lambda x: x[u"id"] == int(params.env),
                data
            )
        print_to_output(
            data,
            format_table(
                data,
                acceptable_keys=acceptable_keys,
                subdict_keys=[("release_id", u"id")]
            )
        )
def node(params):
    """List and assign available nodes to environments

    Branches: --set assigns roles, --delete unassigns nodes,
    --network/--disk manage per-node configuration files,
    --deploy/--provision start those actions, default lists nodes.
    """
    node_ids = params.node
    if params.set:
        check_for_attributes(params, ["node", "role", "env"])
        roles = map(str.lower, params.role)
        data = [{'id': _node_id, 'roles': roles} for _node_id in node_ids]
        post_response = json_api_post_request(
            "clusters/{0}/assignment/".format(params.env),
            data
        )
        print_to_output(
            post_response,
            "Nodes {0} with roles {1} "
            "were added to environment {2}"
            .format(node_ids, roles, params.env)
        )
    elif params.delete:
        check_for_one_attribute(params, ["env", "node"])
        if not params.node and params.env:
            # Whole-environment removal requires an explicit --all.
            if params.all:
                node_ids = [
                    n["id"] for n in
                    json_api_get_request(
                        "nodes/?cluster_id={0}".format(params.env)
                    )
                ]
                if not node_ids:
                    print_error(
                        "Environment with id={0} doesn't have nodes to remove."
                        .format(params.env)
                    )
            else:
                print_error(
                    "You have to select which nodes to remove with --node-id."
                    " Try --all for removing all nodes."
                )
            json_api_post_request(
                "clusters/{0}/unassignment/".format(params.env),
                [{"id": n} for n in node_ids]
            )
            print_to_output({},
                            "Nodes with ids {0} were removed "
                            "from environment with id {1}."
                            .format(node_ids, params.env))
        else:
            if params.env:
                clusters_removable_nodes_map = [(params.env, node_ids)]
            else:
                # Without --env, look up each node's cluster and group by it.
                def get_cluster_id_by_node_id(n_id):
                    return json_api_get_request(
                        "nodes/{0}/".format(n_id)
                    )["cluster"]
                # NOTE(review): itertools.groupby expects input sorted by the
                # key; params.node is not sorted here, so nodes of one cluster
                # may land in several groups — confirm whether intended.
                clusters_removable_nodes_map = \
                    groupby(params.node, get_cluster_id_by_node_id)
            for cluster_id, nodes in clusters_removable_nodes_map:
                nodes = list(nodes)
                json_api_post_request(
                    "clusters/{0}/unassignment/".format(cluster_id),
                    [{"id": n} for n in nodes]
                )
                print_to_output({},
                                "Nodes with ids {0} were removed "
                                "from environment with id {1}."
                                .format(nodes, cluster_id))
    elif params.network or params.disk:
        check_for_one_attribute(params, ["default", "download", "upload"])
        check_for_attributes(params, ["node"])
        for node_id in node_ids:
            if params.network:
                get_node_attribute(
                    params,
                    node_id,
                    "interfaces",
                    [("interfaces", "default_assignment")]
                )
            elif params.disk:
                get_node_attribute(
                    params,
                    node_id,
                    "disks",
                    [("disks", "defaults")]
                )
    elif any((params.default, params.download, params.upload)):
        # These flags are modifiers of --disk/--network, not stand-alone
        # actions; tell the user how to use them.
        print('"--default", "--download" or "--upload" '
              'must appear after "--disk" or "--network" flags.')
    elif params.deploy or params.provision:
        check_for_attributes(params, ["node", "env"])
        node_ids = map(str, node_ids)
        mode = "deploy" if params.deploy else "provision"
        deploy_nodes_url = "clusters/{0}/{1}/?nodes={2}".format(
            params.env,
            mode,
            ','.join(node_ids)
        )
        data = json_api_put_request(
            deploy_nodes_url,
            {}
        )
        print_to_output(
            data,
            "Started {0}ing nodes [{1}]."
            .format(mode, ", ".join(node_ids))
        )
    else:
        acceptable_keys = ["id", "status", "name", "cluster", "ip",
                           "mac", "roles", "pending_roles", "online"]
        data = json_api_get_request("nodes/")
        if params.env:
            data = filter(
                lambda x: x[u"cluster"] == int(params.env),
                data
            )
        elif params.node:
            data = filter(
                lambda x: x[u"id"] in node_ids,
                data
            )
        print_to_output(
            data,
            format_table(data, acceptable_keys=acceptable_keys)
        )
def quote_and_join(words):
    """Join words as ``"a", "b" and "c"``; a single word is just quoted.

    :param words: iterable of strings (materialized internally)

    Fix: the original sliced the result of ``map()``, which is a list
    only on Python 2; a list comprehension over ``words[:-1]`` keeps the
    exact behaviour and stays portable.
    """
    words = list(words)
    if len(words) > 1:
        quoted = ['"{0}"'.format(word) for word in words[:-1]]
        return '{0} and "{1}"'.format(
            ", ".join(quoted),
            words[-1]
        )
    else:
        return '"{0}"'.format(words[0])
def get_node_attribute(params, node_id, upload_attribute, attributes):
    """Download, reset-to-default or upload one node's configuration.

    :param upload_attribute: API resource name used on upload
    :param attributes: list of (resource, default-url-tail) pairs used
                       on download / --default
    """
    default_url_template = "nodes/{0}/{1}/{2}".format(node_id, "{0}", "{1}")
    dir_path = os.path.join(
        os.path.abspath(params.dir or os.path.curdir),
        "node_{0}".format(node_id)
    )
    if params.upload:
        # Accept either the node_<id> folder itself or its parent.
        dir_path = folder_or_one_up(dir_path)
        if not os.path.exists(dir_path):
            print_error(
                "Folder {0} doesn't contain node folder '{1}'"
                .format(dir_path, "node_{0}".format(node_id))
            )
        upload_node_attribute(
            default_url_template.format(upload_attribute, ""),
            node_id,
            dir_path,
            upload_attribute
        )
    elif params.default or params.download:
        if not os.path.exists(dir_path):
            os.makedirs(dir_path)
        for attribute, default_tail in attributes:
            write_node_attribute(
                default_url_template.format(
                    attribute,
                    # --default hits the ".../defaults"-style endpoint.
                    default_tail if params.default else ""
                ),
                dir_path,
                attribute
            )
def upload_node_attribute(url, node_id, node_dir_path, attribute):
    """PUT one node attribute file from *node_dir_path* back to the API."""
    data = read_from_file(
        os.path.join(
            node_dir_path,
            attribute
        )
    )[0]
    if attribute == "interfaces":
        # Interfaces go through the bulk endpoint and must be wrapped
        # together with the node id.
        url = "nodes/interfaces"
        data = [{
            "interfaces": data,
            "id": node_id
        }]
    json_api_put_request(
        url,
        data
    )
    print("{0} configuration uploaded.".format(attribute))
def write_node_attribute(url, node_dir_path, attribute):
    """GET one node attribute and save it under *node_dir_path*."""
    attribute_path = os.path.join(
        node_dir_path,
        attribute
    )
    get_response = json_api_get_request(url)
    # Drop any stale copy so write_to_file starts clean.
    if os.path.exists(attribute_path):
        os.remove(attribute_path)
    attribute_path = write_to_file(
        attribute_path,
        get_response
    )
    print(
        "{0} configuration downloaded to {1}"
        .format(attribute, attribute_path)
    )
def network(params):
    """Show or modify network settings of specific environments

    Requires exactly one of --upload / --verify / --download.
    """
    # The endpoint depends on the cluster's network provider.
    cluster = json_api_get_request("clusters/{0}/".format(params.env))
    network_url = "clusters/{0}/network_configuration/{1}".format(
        params.env,
        cluster["net_provider"]
    )
    network_data = json_api_get_request(network_url)
    network_file_path = os.path.join(
        os.path.abspath(params.dir or os.path.curdir),
        "network_{0}".format(params.env)
    )
    check_for_one_attribute(params, ["upload", "verify", "download"])
    if params.upload:
        data, network_file_path = read_from_file(network_file_path)
        put_response = json_api_put_request(
            network_url,
            data
        )
        print_to_output(
            put_response,
            "Network configuration from {0} uploaded."
            .format(network_file_path)
        )
    elif params.verify:
        verify_url = network_url + "/verify"
        put_response = json_api_put_request(verify_url, network_data)
        print_to_output(
            put_response,
            "Verification status is '{status}'. message: {message}"
            .format(**put_response)
        )
    elif params.download:
        network_file_path = write_to_file(network_file_path,
                                          network_data)
        print(
            "Network configuration for environment with id={0}"
            " downloaded to {1}"
            .format(params.env, network_file_path)
        )
def settings(params):
    """Show or modify environment settings

    Requires exactly one of --default / --download / --upload.
    """
    check_for_one_attribute(params, ["default", "download", "upload"])
    settings_url = "clusters/{0}/attributes".format(params.env)
    settings_data = json_api_get_request(settings_url)
    settings_file_path = os.path.join(
        os.path.abspath(params.dir or os.path.curdir),
        "settings_{0}".format(params.env)
    )
    if params.upload:
        data, settings_file_path = read_from_file(settings_file_path)
        put_response = json_api_put_request(
            settings_url,
            data
        )
        print_to_output(
            put_response,
            "Settings configuration from {0} uploaded."
            .format(settings_file_path)
        )
    elif params.default:
        default_url = settings_url + "/defaults"
        get_response = json_api_get_request(default_url)
        settings_file_path = write_to_file(settings_file_path, get_response)
        print_to_output(
            get_response,
            "Default settings configuration downloaded to {0}."
            .format(settings_file_path)
        )
    elif params.download:
        settings_file_path = write_to_file(
            settings_file_path,
            settings_data
        )
        print_to_output(
            settings_data,
            "Settings configuration for environment with id={0}"
            " downloaded to {1}"
            .format(params.env, settings_file_path)
        )
def task(params):
    """Show tasks

    With --delete removes the given task ids (forced via --force);
    otherwise lists tasks, optionally filtered by --tid.
    """
    if params.delete:
        check_for_attributes(params, ["tid"])
        # --tid is collected as lists of lists (comma/space separated).
        task_ids = list(chain(*params.tid))
        delete_response = map(
            lambda tid: json_api_delete_request(
                "tasks/{0}/?force={1}".format(
                    tid,
                    int(params.force),
                )
            ),
            task_ids
        )
        print_to_output(
            delete_response,
            "Tasks with id's {0} deleted."
            .format(', '.join(map(str, task_ids)))
        )
    else:
        acceptable_keys = ["id", "status", "name",
                           "cluster", "progress", "uuid"]
        tasks = json_api_get_request("tasks/")
        if params.tid:
            task_ids = map(int, chain(*params.tid))
            tasks = [t for t in tasks if t["id"] in task_ids]
        print_to_output(
            tasks,
            format_table(tasks, acceptable_keys=acceptable_keys)
        )
def snapshot(params):
    """Generate and download snapshot.

    Starts a log-package task, polls it to completion, then streams the
    resulting archive with a progress bar.
    """
    dump_task = json_api_put_request(
        "logs/package",
        {}
    )
    task_id = dump_task["id"]
    print("Generating dump...")
    # Poll every half second until the dump task reports 100%.
    while dump_task["progress"] < 100:
        dump_task = json_api_get_request("tasks/{0}/".format(task_id))
        sleep(0.5)
    # The finished task's "message" carries the download path.
    download_snapshot_with_progress_bar(
        ROOT + dump_task["message"],
        params.dir
    )
def download_snapshot_with_progress_bar(url, directory):
    """Stream *url* into *directory*, drawing an ASCII progress bar.

    :param url: full URL of the generated snapshot archive
    :param directory: target folder (defaults to the current directory)
    """
    directory = directory or os.path.curdir
    if not os.path.exists(directory):
        print_error("Folder {0} doesn't exist.".format(directory))
    file_name = os.path.join(
        os.path.abspath(directory),
        url.split('/')[-1]
    )
    download_handle = urllib2.urlopen(url)
    with open(file_name, 'wb') as file_handle:
        meta = download_handle.info()
        file_size = int(meta.getheaders("Content-Length")[0])
        print("Downloading: {0} Bytes: {1}".format(url, file_size))
        file_size_dl = 0
        block_size = 8192
        bar = partial(get_bar_for_progress, 80)
        while True:
            data_buffer = download_handle.read(block_size)
            if not data_buffer:
                break
            file_size_dl += len(data_buffer)
            file_handle.write(data_buffer)
            progress = int(100 * float(file_size_dl) / file_size)
            sys.stdout.write("\r{0}".format(
                bar(progress)
            ))
            sys.stdout.flush()
            # FIX: was sleep(1 / 10) — integer division on Python 2, i.e.
            # sleep(0), so the intended redraw throttle never slept.
            sleep(0.1)
    print()
def deploy_changes(params):
    """Deploy changes to environments

    PUTs to clusters/<env>/changes, then renders deployment progress
    (curses UI when a terminal is available, plain text otherwise).
    """
    put_response = json_api_put_request(
        "clusters/{0}/changes".format(params.env),
        {}
    )
    print_to_output(put_response, put_response,
                    print_method=print_deploy_progress)
def get_bar_for_progress(full_width, progress):
    """Render an ASCII progress bar exactly *full_width* characters wide.

    Filled with '=' proportional to *progress* (0-100), with a '>' tip
    while unfinished, padded with spaces and wrapped in brackets.
    """
    filled = int(math.ceil(progress * float(full_width - 2) / 100))
    tip = "" if filled >= full_width - 2 else ">"
    padding = " " * (full_width - 3 - filled)
    return "[" + "=" * filled + tip + padding + "]"
class DeployProgressFactory:
    """Iterable poller tracking a deployment task and its nodes.

    Iterating yields ``self`` every half second (after refreshing state
    from the API) until both the task and every node reach 100%;
    raises DeployProgressError when the task reports status "error".
    """
    def __init__(self, deploy_task):
        self.env = deploy_task["cluster"]
        self.tid = deploy_task["id"]
        self.progress = 0
        self.nodes = []

    @property
    def is_not_finished(self):
        # Unfinished while the task itself or any node is below 100%.
        if self.progress != 100:
            return True
        return any(n["progress"] != 100 for n in self.nodes)

    @property
    def normalized_progress(self):
        # 0.0-1.0 fraction, suitable for "{:4.0%}" formatting.
        return self.progress / 100.0

    def update(self):
        """Refresh task and node progress from the API."""
        task_response = json_api_get_request(
            "tasks/{0}/".format(self.tid)
        )
        if task_response["status"] == "error":
            raise DeployProgressError(task_response["message"])
        self.progress = task_response["progress"]
        self.nodes = json_api_get_request(
            "nodes?cluster_id={0}".format(self.env)
        )
        self.nodes.sort(key=lambda n: n.get("id"))
        for _node in self.nodes:
            _node["norm_progress"] = _node["progress"] / 100.0

    def __iter__(self):
        return self

    def next(self):
        # Python 2 iterator protocol: poll, refresh and yield until done.
        if not self.is_not_finished:
            raise StopIteration
        sleep(0.5)
        self.update()
        return self
def print_deploy_progress(deploy_task):
    """Render deployment progress, preferring a curses UI and falling
    back to plain stdout when no terminal is available.
    """
    deploy_process = DeployProgressFactory(deploy_task)
    try:
        # initscr raises curses.error when stdout is not a terminal.
        terminal_screen = curses.initscr()
        print_deploy_progress_with_terminal(deploy_process, terminal_screen)
    except curses.error:
        print_deploy_progress_without_terminal(deploy_process)
def print_deploy_progress_without_terminal(deploy_process):
    """Plain-stdout renderer: rewrites a single status line per poll."""
    print("Deploying changes to environment with id={0}".format(
        deploy_process.env
    ))
    message_len = 0
    try:
        for state in deploy_process:
            # Carriage-return back over the previously printed line.
            sys.stdout.write("\r" * message_len)
            message_len = 0
            deployment_message = "[Deployment: {0:4.0%}]".format(
                state.normalized_progress
            )
            sys.stdout.write(deployment_message)
            message_len += len(deployment_message)
            for index, _node in enumerate(state.nodes):
                node_message = "[Node{id:2} {norm_progress:4.0%}]".format(
                    **_node
                )
                message_len += len(node_message)
                sys.stdout.write(node_message)
        print("\nFinished deployment!")
    except DeployProgressError as de:
        # Exception.message is a Python 2-only attribute.
        print(de.message)
def print_deploy_progress_with_terminal(deploy_process, terminal_screen):
    """Curses renderer: one bar for the whole task, one line per node."""
    scr_width = terminal_screen.getmaxyx()[1]
    curses.noecho()
    curses.cbreak()
    # Bar widths leave room for the fixed-width labels on each line.
    total_progress_bar = partial(get_bar_for_progress, scr_width - 17)
    node_bar = partial(get_bar_for_progress, scr_width - 28)
    try:
        for state in deploy_process:
            terminal_screen.refresh()
            terminal_screen.addstr(
                0, 0,
                "Deploying changes to environment with id={0}".format(
                    state.env
                )
            )
            terminal_screen.addstr(
                1, 0,
                "Deployment: {0} {1:4.0%}".format(
                    total_progress_bar(state.progress),
                    state.normalized_progress
                )
            )
            for index, _node in enumerate(state.nodes):
                terminal_screen.addstr(
                    index + 2, 0,
                    "Node{id:3} {status:13}: {bar} {norm_progress:4.0%}"
                    .format(bar=node_bar(_node["progress"]), **_node)
                )
    except DeployProgressError as de:
        # Restore the terminal before printing the error message.
        close_curses()
        print(de.message)
    finally:
        # NOTE(review): on the error path close_curses runs twice
        # (except + finally) — appears harmless, but confirm.
        close_curses()
def close_curses():
    """Undo initscr side effects and restore normal terminal modes."""
    curses.echo()
    curses.nocbreak()
    curses.endwin()
def stop(params):
    """Stop deployment process for specific environment

    :param params: argparse namespace; requires --env
    """
    intercept_deploy(params, "stop")
def reset(params):
    """Reset deployed process for specific environment

    :param params: argparse namespace; requires --env
    """
    intercept_deploy(params, "reset")
def intercept_deploy(params, method):
    """PUT to an environment's stop/reset endpoint and report the task.

    :param method: either "stop" or "reset"
    """
    url_template_map = {
        "stop": 'clusters/{0}/stop_deployment/',
        "reset": 'clusters/{0}/reset/',
    }
    # Doubles the final consonant: "stop" -> "stopping",
    # "reset" -> "resetting".
    inflect = lambda w: w + w[-1] + "ing"
    intercept_task = json_api_put_request(
        url_template_map[method].format(params.env),
        data={}
    )
    print_to_output(
        intercept_task,
        "{0} of environment with id={1} started. To check task status run"
        " 'fuel task -t {2}'.".format(
            inflect(method).title(),
            params.env,
            intercept_task["id"]
        )
    )
def provisioning(params):
    """Show computed provisioning facts for orchestrator

    Thin wrapper delegating to fact().
    """
    fact(params, "provisioning")
def deployment(params):
    """Show computed deployment facts for orchestrator

    Thin wrapper delegating to fact().
    """
    fact(params, "deployment")
def fact(params, info_type):
    """Download, upload or delete orchestrator facts.

    :param info_type: "provisioning" or "deployment"
    """
    dir_name = os.path.join(
        os.path.abspath(params.dir or os.path.curdir),
        "{0}_{1}".format(info_type, params.env)
    )
    facts_default_url = "clusters/{0}/orchestrator/{1}/defaults".format(
        params.env,
        info_type
    )
    if params.node:
        # Restrict default facts to the selected nodes.
        facts_default_url += "/?nodes=" + ",".join(map(str, params.node))
    facts_url = "clusters/{0}/orchestrator/{1}/".format(
        params.env,
        info_type
    )
    if params.default:
        facts = json_api_get_request(facts_default_url)
        write_facts_to_dir(facts, dir_name)
    elif params.upload:
        # Provisioning facts carry an extra "engine" section, hence the
        # dedicated reader.
        json_api_put_request(
            facts_url,
            read_deployment_info(dir_name)
            if info_type == "deployment" else
            read_provisioning_info(dir_name)
        )
        print("{0} facts uploaded from {1}.".format(info_type, dir_name))
    elif params.delete:
        json_api_delete_request(facts_url)
        print("{0} facts deleted.".format(info_type))
    else:
        facts = json_api_get_request(facts_url)
        if not facts:
            print(
                "Environment with id={0} has no {1} info."
                .format(params.env, info_type)
            )
            return
        if params.download:
            write_facts_to_dir(facts, dir_name)
def health_check(params):
    """Run health check on environment

    With --list only prints available test sets; otherwise starts test
    runs (all sets unless --check narrows them) and reports progress.
    """
    test_sets = json_ostf_get_request(
        'testsets/{0}'.format(params.env)
    )
    if params.list:
        print_to_output(test_sets, format_table(test_sets))
    else:
        cluster = json_api_get_request("clusters/{0}/".format(params.env))
        # Manually-customized facts may invalidate the checks.
        if cluster["is_customized"] and not params.force:
            print_error(
                "Environment deployment facts were updated. "
                "Health check is likely to fail because of "
                "that. Use --force flag to proceed anyway."
            )
        # Default to every available test set.
        params.check = params.check or set(ts["id"] for ts in test_sets)
        tests_data = map(
            lambda testset: {
                "testset": testset,
                "metadata": {
                    "config": {},
                    "cluster_id": params.env
                }
            },
            list(params.check)
        )
        json_ostf_post_request(
            "testruns",
            tests_data
        )
        tests_state = json_ostf_get_request(
            "testruns/last/{0}".format(params.env)
        )
        print_to_output(tests_state, params, print_method=print_health_check)
def print_health_check(params):
    """Poll OSTF test runs and print each test result as it finishes."""
    # Seed value guarantees at least one polling iteration.
    tests_states = [{"status": "not finished"}]
    finished_tests = set()
    test_counter, total_tests_count = 1, None
    while not all(map(
        lambda t: t["status"] == "finished",
        tests_states
    )):
        tests_states = json_ostf_get_request(
            "testruns/last/{0}".format(params.env)
        )
        # Flatten tests of every selected testset into one list.
        all_tests = list(chain(*map(
            itemgetter("tests"),
            filter(
                lambda x: x["testset"] in params.check,
                tests_states
            ))))
        if total_tests_count is None:
            total_tests_count = len(all_tests)
        all_finished_tests = filter(
            lambda t: "running" not in t["status"],
            all_tests
        )
        # Only report tests we have not printed in a previous iteration.
        new_finished_tests = filter(
            lambda t: t["name"] not in finished_tests,
            all_finished_tests
        )
        finished_tests.update(
            map(
                itemgetter("name"),
                new_finished_tests
            )
        )
        for test in new_finished_tests:
            print(
                "[{0:2} of {1}] [{status}] '{name}' "
                "({taken:.4} s) {message}".format(
                    test_counter,
                    total_tests_count,
                    **test
                )
            )
            test_counter += 1
        sleep(1)
def prepare_path(path):
    """Append the active serialization extension to *path*.

    :returns: tuple (path with extension, serializer with "w"/"r"
              callables)
    """
    if JSON:
        fmt = "json"
    elif YAML:
        fmt = "yaml"
    else:
        fmt = DEFAULT_SERIALIZER
    return "{0}.{1}".format(path, fmt), SERIALIZERS[fmt]
def write_to_file(path, data):
    """Serialize *data* to *path* plus the active extension.

    :returns: the full path actually written
    """
    full_path, serializer = prepare_path(path)
    with open(full_path, "w+") as file_to_write:
        file_to_write.write(serializer["w"](data))
    return full_path
def is_file_exists(path):
    """Check whether *path* (given without extension) names a file with
    some extension inside its parent directory.

    :returns: True when a sibling file "<name>.<ext>" exists

    Fixes: ``str.split(x, ".")`` was an unbound-method call that raises
    TypeError for unicode filenames on Python 2 (and fails on 3);
    ``x.split(".")`` is the equivalent, safe spelling.  The filtered
    name list is also materialized so the emptiness test stays
    meaningful outside Python 2.
    """
    dir_path_list = path.split(os.path.sep)
    name = dir_path_list.pop(-1)
    folder_path = os.path.sep.join(dir_path_list)
    # Keep only entries shaped like "<stem>.<ext>" with a non-empty stem
    # (this skips dot-files such as ".hidden").
    files_with_extensions = [
        parts
        for parts in (entry.split(".") for entry in os.listdir(folder_path))
        if len(parts) > 1 and parts[0] != ''
    ]
    if not files_with_extensions:
        return False
    return name in [parts[0] for parts in files_with_extensions]
def read_from_file(path):
    """Deserialize the file at *path* plus the active extension.

    :returns: tuple (parsed data, full path read); exits with an error
              when the file is missing
    """
    full_path, serializer = prepare_path(path)
    try:
        with open(full_path, "r") as file_to_read:
            return serializer["r"](file_to_read.read()), full_path
    except IOError:
        print_error(
            "File {0} doesn't exist. Try checking serialization format."
            .format(full_path)
        )
def folder_or_one_up(dir_path):
    """Return *dir_path* when it exists, otherwise its parent directory.

    Lets users point either at the artifact folder itself or at the
    directory that contains it.
    """
    if os.path.exists(dir_path):
        return dir_path
    # The original's [:-2] + [-2:-1] is simply "drop the last component".
    return os.sep.join(dir_path.split(os.sep)[:-1])
def listdir_without_extensions(dir_path):
    """List entries of *dir_path* with everything after the first '.'
    stripped; entries whose stem is empty (dot-files) are dropped.
    """
    return [
        entry.split(".")[0]
        for entry in os.listdir(dir_path)
        if entry.split(".")[0] != ""
    ]
def read_provisioning_info(dir_name):
    """Assemble provisioning facts from *dir_name*.

    :returns: {"engine": <engine facts>, "nodes": [<per-node facts>]}
    """
    dir_name = folder_or_one_up(dir_name)
    # The engine file is mandatory for provisioning facts.
    if not any(map(lambda f: "engine" in f, os.listdir(dir_name))):
        print_error(
            "engine file was not found in {0}"
            .format(dir_name)
        )
    try:
        node_facts = map(
            lambda f: read_from_file(f)[0],
            [os.path.join(dir_name, fact_file)
             for fact_file in listdir_without_extensions(dir_name)
             if "engine" != fact_file]
        )
        engine, _ = read_from_file(os.path.join(dir_name, "engine"))
        return {
            "engine": engine,
            "nodes": node_facts
        }
    except OSError:
        print_error(
            "Directory {0} doesn't exist."
            .format(dir_name)
        )
def read_deployment_info(dir_name):
    """Read every fact file in *dir_name* (one per node).

    :returns: list of parsed per-node fact structures
    """
    dir_name = folder_or_one_up(dir_name)
    try:
        return map(
            lambda f: read_from_file(f)[0],
            [os.path.join(dir_name, json_file)
             for json_file in listdir_without_extensions(dir_name)]
        )
    except OSError:
        print_error(
            "Directory {0} doesn't exist."
            .format(dir_name)
        )
def write_facts_to_dir(facts, dir_name):
    """Write facts into a freshly recreated *dir_name*.

    A dict payload means provisioning facts (an "engine" file plus one
    file per node); a list means deployment facts, named <role>_<uid>.
    """
    # Always start from an empty directory.
    if os.path.exists(dir_name):
        shutil.rmtree(dir_name)
        print("old directory {0} was removed".format(dir_name))
    os.makedirs(dir_name)
    print("directory {0} was created".format(dir_name))
    if isinstance(facts, dict):
        engine_file_path = os.path.join(dir_name, "engine")
        engine_file_path = write_to_file(engine_file_path, facts["engine"])
        print("Created {0}".format(engine_file_path))
        facts = facts["nodes"]
        name_template = "{name}"
    else:
        name_template = "{role}_{uid}"
    for _fact in facts:
        fact_path = os.path.join(
            dir_name,
            name_template.format(**_fact)
        )
        fact_path = write_to_file(fact_path, _fact)
        print("Created {0}".format(fact_path))
def parse_ids(x):
    """Parse a comma-separated id string, tolerating stray spaces.

    :returns: list of ints, or None when the string holds no ids
    """
    ids = [int(token) for token in x.split(",") if token.strip() != '']
    return ids if ids else None
def get_download_arg(help_msg):
    """Shared ``-d/--download`` flag definition for subcommand parsers."""
    options = {
        "dest": "download",
        "action": "store_true",
        "default": False,
        "help": help_msg,
    }
    return {"args": ["-d", "--download"], "params": options}
def get_list_arg(help_msg):
    """Shared ``-l/--list`` flag definition for subcommand parsers."""
    options = {
        "dest": "list",
        "action": "store_true",
        "default": False,
        "help": help_msg,
    }
    return {"args": ["-l", "--list"], "params": options}
def get_dir_arg(help_msg):
    """Shared ``--dir`` option definition for subcommand parsers."""
    options = {
        "dest": "dir",
        "action": "store",
        "default": None,
        "help": help_msg,
    }
    return {"args": ["--dir"], "params": options}
def get_verify_arg(help_msg):
    """Shared ``-v/--verify`` flag definition for subcommand parsers."""
    options = {
        "dest": "verify",
        "action": "store_true",
        "default": False,
        "help": help_msg,
    }
    return {"args": ["-v", "--verify"], "params": options}
def get_upload_arg(help_msg):
    """Shared ``-u/--upload`` flag definition for subcommand parsers."""
    options = {
        "dest": "upload",
        "action": "store_true",
        "default": False,
        "help": help_msg,
    }
    return {"args": ["-u", "--upload"], "params": options}
def get_default_arg(help_msg):
    """Shared ``--default`` flag definition for subcommand parsers."""
    options = {
        "dest": "default",
        "action": "store_true",
        "default": False,
        "help": help_msg,
    }
    return {"args": ["--default"], "params": options}
def get_set_arg(help_msg):
    """Argument spec for the ``-s/--set`` boolean flag."""
    return {
        "args": ["-s", "--set"],
        "params": dict(
            dest="set",
            action="store_true",
            help=help_msg,
            default=False,
        ),
    }
def get_delete_arg(help_msg):
    """Argument spec for the ``--delete`` boolean flag."""
    return {
        "args": ["--delete"],
        "params": dict(
            dest="delete",
            action="store_true",
            help=help_msg,
            default=False,
        ),
    }
def get_release_arg(help_msg, required=False):
    """Argument spec for the ``--rel/--release`` option holding a release id.

    ``required`` marks the option mandatory for the namespace using it.
    """
    params = dict(
        dest="rel",
        action="store",
        type=str,
        help=help_msg,
        default=None,
        required=required,
    )
    return {"args": ["--rel", "--release"], "params": params}
def get_node_arg(help_msg):
    """Argument spec for ``--node/--node-id`` (one or more node ids/macs)."""
    def split_on_commas(value):
        # Each argv token may itself hold a comma-separated list of ids.
        return value.split(",")
    params = dict(
        dest="node",
        action=NodeAction,
        nargs="+",
        type=split_on_commas,
        help=help_msg,
        default=None,
    )
    return {"args": ["--node", "--node-id"], "params": params}
def get_env_arg(required=False):
    """Argument spec for the ``--env/--env-id`` environment-id option."""
    params = dict(
        dest="env",
        action="store",
        type=str,
        help="environment id",
        default=None,
        required=required,
    )
    return {"args": ["--env", "--env-id"], "params": params}
def get_force_arg():
    """Argument spec for the ``-f/--force`` validation-bypass flag."""
    return {
        "args": ["-f", "--force"],
        "params": dict(
            dest="force",
            action="store_true",
            help="Bypassing parameter validation.",
            default=False,
        ),
    }
actions = {
"release": {
"action": release,
"args": [
get_list_arg("List all available releases."),
get_release_arg("Specify release id to configure"),
{
"args": ["-c", "--config"],
"params": {
"dest": "config",
"action": "store_true",
"help": "Configure release with --release",
"default": False
}
}, {
"args": ["-U", "--user", "--username"],
"params": {
"dest": "username",
"action": "store",
"type": str,
"help": "Username for release credentials",
"default": None
}
}, {
"args": ["-P", "--pass", "--password"],
"params": {
"dest": "password",
"action": "store",
"type": str,
"help": "Password for release credentials",
"default": None
}
}, {
"args": ["--satellite-server-hostname"],
"params": {
"dest": "satellite_server_hostname",
"action": "store",
"type": str,
"help": "Satellite server hostname",
"default": None
}
}, {
"args": ["--activation-key"],
"params": {
"dest": "activation_key",
"action": "store",
"type": str,
"help": "activation key",
"default": None
}
}],
"examples":
"""Examples:
Print all available releases:
fuel release --list
Print release with specific id=1:
fuel release --rel 1
To configure RedHat release:
fuel rel --rel <id of RedHat release> -c -U <username> -P <password>
To configure RedHat release with satellite server:
fuel rel --rel <...> -c -U <...> -P <...> """ +
"""--satellite-server-hostname <hostname> --activation-key <key>
"""
},
"role": {
"action": role,
"args": [
get_list_arg("List all roles for specific release"),
get_release_arg("Release id", required=True)
],
"examples":
"""Examples:
Print all available roles and their conflicts for some release with id=1:
fuel role --rel 1
"""
},
"environment": {
"action": environment,
"args": [
get_env_arg(),
get_list_arg("List all available environments."),
get_set_arg("Set environment parameters (e.g name, deployment mode)"),
get_delete_arg("Delete environment with specific env or name"),
get_release_arg("Release id"),
{
"args": ["-c", "--env-create", "--create"],
"params": {
"dest": "create",
"action": "store_true",
"help": "Create a new environment with specific "
"release id and name.",
"default": False
}
}, {
"args": ["--name", "--env-name"],
"params": {
"dest": "name",
"action": "store",
"type": str,
"help": "environment name",
"default": None
}
}, {
"args": ["-m", "--mode", "--deployment-mode"],
"params": {
"dest": "mode",
"action": "store",
"choices": ["multinode", "ha"],
"help": "Set deployment mode for specific environment.",
"default": False
}
}, {
"args": ["-n", "--net", "--network-mode"],
"params": {
"dest": "net",
"action": "store",
"choices": ["nova", "neutron"],
"help": "Set network mode for specific environment.",
"default": "nova"
}
}, {
"args": ["--nst", "--net-segment-type"],
"params": {
"dest": "net_segment_type",
"action": "store",
"choices": ["gre", "vlan"],
"help": "Set network segment type",
"default": False
}
}],
"examples":
"""Examples:
Print all available environments:
fuel env
To create an environment with name MyEnv and release id=1 run:
fuel env create --name MyEnv --rel 1
By default it creates environment in multinode mode, and nova network mode,
to specify other modes you can add optional arguments:
fuel env create --name MyEnv --rel 1 --mode ha --network-mode neutron
For changing environments name, mode or network mode exists set action:
fuel --env 1 env set --name NewEmvName --mode ha_compact
To delete the environment:
fuel --env 1 env delete
"""
},
"node": {
"action": node,
"args": [
get_env_arg(),
get_list_arg("List all nodes."),
get_set_arg("Set role for specific node."),
get_delete_arg("Delete specific node from environment."),
get_default_arg("Get default network configuration of some node"),
get_download_arg("Download configuration of specific node"),
get_upload_arg("Upload configuration to specific node"),
get_dir_arg("Select directory to which download node attributes"),
get_node_arg("Node id."),
get_force_arg(),
{
"args": ["--all"],
"params": {
"dest": "all",
"action": "store_true",
"help": "Select all nodes.",
"default": False
}
}, {
"args": ["-r", "--role"],
"params": {
"dest": "role",
"type": lambda v: v.split(','),
"action": SetAction,
"help": "Role to assign for node.",
"default": None
}
}, {
"args": ["--net", "--network"],
"params": {
"dest": "network",
"action": "store_true",
"help": "Node network configuration.",
"default": False
}
}, {
"args": ["--disk"],
"params": {
"dest": "disk",
"action": "store_true",
"help": "Node disk configuration.",
"default": False
}
}, {
"args": ["--deploy"],
"params": {
"dest": "deploy",
"action": "store_true",
"help": "Deploy specific nodes.",
"default": False
}
}, {
"args": ["--provision"],
"params": {
"dest": "provision",
"action": "store_true",
"help": "Provision specific nodes.",
"default": False
}
}],
"examples":
"""Examples:
To list all available nodes:
fuel node
To filter them by environment:
fuel --env-id 1 node
Assign some nodes to environment with with specific roles:
fuel --env 1 node set --node 1 --role controller
fuel --env 1 node set --node 2,3,4 --role compute,cinder
Remove some nodes from environment:
fuel --env 1 node remove --node 2,3
Remove nodes no matter to which environment they were assigned:
fuel node remove --node 2,3,6,7
Remove all nodes from some environment:
fuel --env 1 node remove --all
Download current or default disk, network, configuration for some node:
fuel node --node-id 2 --disk --default
fuel node --node-id 2 --network --download --dir path/to/directory
Upload disk, network, configuration for some node:
fuel node --node-id 2 --network --upload
fuel node --node-id 2 --disk --upload --dir path/to/directory
Deploy/Provision some node:
fuel node --node-id 2 --provision
fuel node --node-id 2 --deploy
It's Possible to manipulate nodes with their short mac addresses:
fuel node --node-id 80:ac
fuel node remove --node-id 80:ac,5d:a2
"""
},
"network": {
"action": network,
"args": [
get_env_arg(required=True),
get_download_arg("Download current network configuration."),
get_dir_arg("Directory with network data."),
get_verify_arg("Verify current network configuration."),
get_upload_arg("Upload changed network configuration.")
],
"examples":
"""Examples:
To download network configuration in this directory for some environment:
fuel --env 1 network --download
To upload network configuration from some directory for some environment:
fuel --env 1 network --upload --dir path/to/derectory
To verify network configuration from some directory for some environment:
fuel --env 1 network --verify --dir path/to/derectory
"""
},
"settings": {
"action": settings,
"args": [
get_env_arg(required=True),
get_download_arg("Modify current configuration."),
get_default_arg("Open default configuration."),
get_upload_arg("Save current changes in configuration."),
get_dir_arg("Directory with configuration data.")
],
"examples":
"""Examples:
To download settings for some environment in this directory:
fuel --env 1 settings --download
To download default settings for some environment in some directory:
fuel --env 1 settings --default --dir path/to/derectory
To upload settings for some environment from some directory:
fuel --env 1 settings --upload --dir path/to/derectory
"""
},
"task": {
"action": task,
"args": [
get_force_arg(),
{
"args": ["-d", "--delete"],
"params": {
"dest": "delete",
"action": "store_true",
"help": "Delete task with some task-id.",
"default": False
}
}, {
"args": ["-t", "--task-id"],
"params": {
"dest": "tid",
"action": "store",
"nargs": '+',
"type": parse_ids,
"help": "Task id.",
"default": None
}
}],
"examples":
"""Examples:
To display all tasks:
fuel task
To display tasks with some ids:
fuel task -t 1,2,3
To delete some tasks:
fuel task delete -t 1,2,3
To delete some tasks forcefully (without considering their state):
fuel task delete -f -t 1,6
"""
},
"snapshot": {
"action": snapshot,
"args": [
get_dir_arg("Directory to which download snapshot.")
],
"examples":
"""Examples:
To download diagnostic snapshot:
fuel snapshot
To download diagnostic snapshot to specific directory:
fuel snapshot --dir path/to/directory
"""
},
"deploy-changes": {
"action": deploy_changes,
"args": [
get_env_arg(required=True),
],
"examples":
"""Examples:
To deploy all applied changes to some environment:
fuel --env 1 deploy-changes
"""
},
"health": {
"action": health_check,
"args": [
get_env_arg(required=True),
get_list_arg("List all available checks"),
get_force_arg(),
{
"args": ["--check"],
"params": {
"dest": "check",
"type": lambda v: v.split(','),
"action": SetAction,
"help": "Run check for some testset.",
"default": None
}
}
],
"examples":
"""Examples:
To list all health check test sets:
fuel health
or:
fuel --env 1 health --list
To run some health checks:
fuel --env 1 health --check smoke,sanity
"""
}
}
def get_args_for_facts(fact_type):
    """Build the argument specs shared by the facts namespaces.

    ``fact_type`` is "deployment" or "provisioning"; it is interpolated
    into the help texts of the common delete/download/upload/default/dir
    flags.
    """
    arg_specs = [get_env_arg()]
    for builder, template in (
            (get_delete_arg, "Delete current {0} data."),
            (get_download_arg, "Download current {0} data."),
            (get_upload_arg, "Upload current {0} data."),
            (get_default_arg, "Download default {0} data."),
            (get_dir_arg, "Directory with {0} data."),
    ):
        arg_specs.append(builder(template.format(fact_type)))
    arg_specs.append(get_node_arg("Node ids."))
    return arg_specs
# Shorthand -> canonical rewrites applied to raw sys.argv by prepare_args()
# before argparse runs.  Keys without dashes let users type bare words
# ("fuel env", "fuel node remove"); values with dashes turn those words
# into the real flags/namespaces argparse expects.
substitutions = {
    # replace from: to
    "env": "environment",
    "nodes": "node",
    "net": "network",
    "rel": "release",
    "list": "--list",
    "set": "--set",
    "delete": "--delete",
    "download": "--download",
    "upload": "--upload",
    "default": "--default",
    "create": "--create",
    "remove": "--delete",
    "config": "--config",
    "--roles": "--role"
}
@exceptions_decorator
def get_fuel_version():
    """Fetch the Fuel server version over the API and render it as YAML."""
    version_info = json_api_get_request("version")
    return yaml.safe_dump(version_info, default_flow_style=False)
def prepare_args():
    """Massage ``sys.argv`` in place before argparse parses it.

    Three rewrites are performed:
      * shorthand tokens are expanded via the ``substitutions`` table;
      * the global ``--json``/``--debug``/``--yaml`` flags are hoisted to
        position 1 so they precede any subcommand name;
      * a ``--env*`` flag (and its value) is moved to the end of argv so
        it may appear anywhere on the command line.

    NOTE(review): ``sys.argv = map(...)`` relies on Python 2 ``map``
    returning a list; under Python 3 argv would become an iterator.
    """
    # replace some args from dict substitutions
    sys.argv = map(
        lambda x: substitutions.get(x, x),
        sys.argv
    )
    # move --json and --debug flags before any action
    for flag in ["--json", "--debug", "--yaml"]:
        if flag in sys.argv:
            sys.argv.remove(flag)
            sys.argv.insert(1, flag)
    # Relocate the first --env occurrence (with its value) to the end.
    for arg in sys.argv:
        if "--env" in arg:
            # if declaration with '=' sign (e.g. --env-id=1)
            if "=" in arg:
                index_of_env = sys.argv.index(arg)
                env = sys.argv.pop(index_of_env)
                sys.argv.append(env)
            else:
                # Flag and value are separate tokens: pop both, re-append.
                try:
                    index_of_env = sys.argv.index(arg)
                    sys.argv.pop(index_of_env)
                    # IndexError here means the flag was the last token.
                    env = sys.argv.pop(index_of_env)
                    sys.argv.append(arg)
                    sys.argv.append(env)
                except IndexError:
                    print_error(
                        'Environment id must follow "{0}" flag'
                        .format(arg)
                    )
                # NOTE(review): only this branch breaks out of the loop; the
                # "=" branch keeps iterating the mutated list and can revisit
                # the re-appended token once more -- confirm this asymmetry
                # is intentional.
                break
# Register the two facts namespaces; both share the same argument set and
# epilog template and differ only in the handler they dispatch to.
_FACTS_EXAMPLES = """Examples:
To download {func} information for some environment:
    fuel --env 1 {func} --download
To get default {func} information for some environment:
    fuel --env 1 {func} --default
To upload {func} information for some environment:
    fuel --env 1 {func} --upload
It's possible to get default {func} information just for some nodes:
    fuel --env 1 {func} --default --node 1,2,3
Also {func} information can be left or taken from specific directory:
    fuel --env 1 {func} --upload --dir path/to/some/directory
"""
for fact_type in (deployment, provisioning):
    fact_name = fact_type.__name__
    actions[fact_name] = {
        "action": fact_type,
        "args": get_args_for_facts(fact_name),
        "examples": _FACTS_EXAMPLES.format(func=fact_name),
    }
# Register the stop/reset namespaces: each takes only a required --env.
_ENV_ONLY_EXAMPLES = """Examples:
To {func} some environment:
    fuel --env 1 {func}
"""
for func in (stop, reset):
    action_name = func.__name__
    actions[action_name] = {
        "action": func,
        "args": [get_env_arg(required=True)],
        "examples": _ENV_ONLY_EXAMPLES.format(func=action_name),
    }
if __name__ == '__main__':
    # Rewrite sys.argv (shorthand expansion, flag hoisting) before parsing.
    prepare_args()
    parser = argparse.ArgumentParser(
        usage="fuel [optional args] <namespace> [action] [flags]"
    )
    parser.add_argument("-v", "--version",
                        action="version",
                        version=__version__)
    # Custom action: queries the server and prints its version (see
    # FuelVersionAction at the top of the file).
    parser.add_argument("--fuel-version", action=FuelVersionAction)
    parser.add_argument("--json",
                        dest="json",
                        action="store_true",
                        help="prints to only json to stdout",
                        default=False)
    parser.add_argument("--yaml",
                        dest="yaml",
                        action="store_true",
                        help="prints to only yaml to stdout",
                        default=False)
    parser.add_argument("--debug",
                        dest="debug",
                        action="store_true",
                        help="prints details of all HTTP request",
                        default=False)
    # One subparser per registered namespace in the actions dict.
    subparsers = parser.add_subparsers(
        title="Namespaces",
        metavar="",
        dest="action",
        help='actions'
    )
    # iteritems(): Python 2 dict iteration, consistent with the rest of
    # this file (urllib2, list-returning map).
    for action, parameters in actions.iteritems():
        action_parser = subparsers.add_parser(
            action,
            prog="fuel {0}".format(action),
            help=parameters["action"].__doc__,
            formatter_class=argparse.RawTextHelpFormatter,
            epilog=parameters.get("examples", "")
        )
        for argument in parameters.get("args", []):
            action_parser.add_argument(
                *argument["args"],
                **argument["params"]
            )
    # parse_known_args tolerates extra tokens; other_params is discarded.
    parsed_params, other_params = parser.parse_known_args()
    # Publish the global output/debug switches read by the helpers above.
    DEBUG = parsed_params.debug
    JSON = parsed_params.json
    YAML = parsed_params.yaml
    if parsed_params.action not in actions:
        parser.print_help()
        sys.exit(0)
    # Dispatch to the namespace handler, wrapped for uniform error output.
    exceptions_decorator(
        actions[parsed_params.action]["action"]
    )(parsed_params)