commit 4c91d20a9151d681b6da398211fa37d000a63868
Author: Alexandr Notchenko
Date:   Thu Nov 21 17:58:57 2013 +0400

    moved and renamed fuel-cli
    added tests, modified run_tests.sh
    fixed some mode redundancy
    fixed run_tests.sh
    removed old code

    Change-Id: I7660e550d40a4c77df2179a947b87a492984c682

diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000..141ca6a
--- /dev/null
+++ b/__init__.py
@@ -0,0 +1,13 @@
+# Copyright 2013 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
diff --git a/fuel b/fuel
new file mode 100755
index 0000000..ce1cc7f
--- /dev/null
+++ b/fuel
@@ -0,0 +1,1723 @@
+#!/usr/bin/env python
+# Copyright 2013 Mirantis, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+from __future__ import print_function + +import argparse +import curses +from functools import partial +from itertools import chain +import json +import math +from operator import itemgetter +import os +import shutil +import sys +from time import sleep +import urllib2 +import yaml + +path_to_config = "/etc/fuel-client.yaml" +defaults = { + "LISTEN_ADDRESS": "127.0.0.1", + "LISTEN_PORT": "8000" +} +if os.path.exists(path_to_config): + with open(path_to_config, "r") as fh: + config = yaml.load(fh.read()) + defaults.update(config) +else: + defaults.update(os.environ) +ROOT = "http://{LISTEN_ADDRESS}:{LISTEN_PORT}".format(**defaults) + +OSTF_ROOT = ROOT + "/ostf/" +API_ROOT = ROOT + "/api/v1/" +DEBUG = False +JSON = False +YAML = False +DEFAULT_SERIALIZER = "yaml" +SERIALIZERS = { + "json": { + "w": lambda d: json.dumps(d, indent=4), + "r": lambda d: json.loads(d) + }, + "yaml": { + "w": lambda d: yaml.safe_dump(d, default_flow_style=False), + "r": lambda d: yaml.load(d) + } +} +PASSIVE_ARGUMENTS = ["rel", "env", "action", "debug", "json", "net", "yaml"] + + +class DeployProgressError(Exception): + pass + + +class SetAction(argparse.Action): + """Custom argparse.Action subclass to store distinct values + + :returns: Set of arguments + """ + def __call__(self, _parser, namespace, values, option_string=None): + try: + getattr(namespace, self.dest).update(values) + except AttributeError: + setattr(namespace, self.dest, set(values)) + + +def handle_api_exceptions(e): + if isinstance(e, ValueError): + return {} + elif isinstance(e, urllib2.HTTPError): + print_error(str(e)) + elif isinstance(e, urllib2.URLError): + print_error("Can't connect to Nailgun server!") + else: + raise e + + +def recur_get(multi_level_dict, key_chain): + """Method accesses some field in nested dictionaries + + :returns: value for last key in key_chain in last dictionary + """ + if not isinstance(multi_level_dict[key_chain[0]], dict): + return multi_level_dict[key_chain[0]] + else: + return 
recur_get(multi_level_dict[key_chain[0]], key_chain[1:]) + + +def print_formatted(data): + if JSON: + print(SERIALIZERS["json"]["w"](data)) + elif YAML: + print(SERIALIZERS["yaml"]["w"](data)) + + +def print_to_output(formatted_data, arg, print_method=print): + if JSON or YAML: + print_formatted(formatted_data) + else: + print_method(arg) + + +def format_table(data, acceptable_keys=None, subdict_keys=None): + """Format list of dicts to ascii table + + :acceptable_keys list(str): list of keys for which to create table + also specifies their order + :subdict_keys list(tuple(str)): list of key chains (tuples of key strings) + which are applied to dictionaries + to extract values + """ + if subdict_keys: + for key_chain in subdict_keys: + for data_dict in data: + data_dict[key_chain[0]] = recur_get(data_dict, key_chain) + if acceptable_keys: + rows = [tuple([value[key] for key in acceptable_keys]) + for value in data] + header = tuple(acceptable_keys) + else: + rows = [tuple(x.values()) for x in data] + header = tuple(data[0].keys()) + number_of_columns = len(header) + column_widths = dict( + zip( + range(number_of_columns), + (len(str(x)) for x in header) + ) + ) + + for row in rows: + column_widths.update( + (index, max(column_widths[index], len(str(element)))) + for index, element in enumerate(row) + ) + row_template = ' | '.join( + '%%-%ss' % column_widths[i] for i in range(number_of_columns) + ) + + return '\n'.join( + (row_template % header, + '-|-'.join(column_widths[column_index] * '-' + for column_index in range(number_of_columns)), + '\n'.join(row_template % x for x in rows)) + ) + + +def json_api_delete_request(api): + """Make DELETE request to specific API with some data + """ + print_debug( + "DELETE {0}".format(API_ROOT + api) + ) + opener = urllib2.build_opener(urllib2.HTTPHandler) + request = urllib2.Request(API_ROOT + api) + request.add_header('Content-Type', ' application/json') + request.get_method = lambda: 'DELETE' + try: + return json.loads( + 
opener.open(request).read() + ) + except Exception as e: + return handle_api_exceptions(e) + + +def json_api_put_request(api, data): + """Make PUT request to specific API with some data + """ + data_json = json.dumps(data) + print_debug( + "PUT {0} data={1}" + .format(API_ROOT + api, data_json) + ) + opener = urllib2.build_opener(urllib2.HTTPHandler) + request = urllib2.Request(API_ROOT + api, data=data_json) + request.add_header('Content-Type', ' application/json') + request.get_method = lambda: 'PUT' + try: + return json.loads( + opener.open(request).read() + ) + except Exception as e: + return handle_api_exceptions(e) + + +def json_api_get_request(api, root=API_ROOT): + """Make GET request to specific API + """ + url = root + api + print_debug( + "GET {0}" + .format(url) + ) + try: + request = urllib2.urlopen(url) + return json.loads( + request.read() + ) + except Exception as e: + return handle_api_exceptions(e) + + +def json_ostf_get_request(api): + return json_api_get_request(api, root=OSTF_ROOT) + + +def json_ostf_post_request(api, data): + return json_api_post_request(api, data, root=OSTF_ROOT) + + +def json_api_post_request(api, data, root=API_ROOT): + """Make POST request to specific API with some data + """ + url = root + api + data_json = json.dumps(data) + print_debug( + "POST {0} data={1}" + .format(url, data_json) + ) + request = urllib2.Request( + url=url, + data=data_json, + headers={ + 'Content-Type': 'application/json' + } + ) + try: + return json.loads( + urllib2.urlopen(request) + .read() + ) + except Exception as e: + return handle_api_exceptions(e) + + +def has_arguments(params): + """Checks whether params has some initialized attributes + """ + current_arguments = [v for k, v in params.__dict__.iteritems() + if k not in PASSIVE_ARGUMENTS] + return any(current_arguments) + + +def print_error(message): + sys.stderr.write(message + "\n") + exit(1) + + +def print_debug(message): + if DEBUG: + print(message) + + +def check_for_attributes(params, 
attributes): + not_paseed_checks = [attribute for attribute in attributes + if not getattr(params, attribute)] + if len(not_paseed_checks): + print_error( + "{0} required!".format( + quote_and_join( + map( + lambda attr: "--" + attr, + not_paseed_checks + ) + ) + ) + ) + + +def check_for_one_attribute(params, attributes): + if not any(map( + lambda attr: getattr(params, attr), + attributes + )): + print_error( + "At least one of {0} is required!".format( + ", ".join( + map( + lambda attr: '"--{0}"'.format(attr), + attributes) + ) + ) + ) + + +def release(params): + """List and modify currently available releases + """ + acceptable_keys = ["id", "name", "state", "operating_system", "version"] + if not has_arguments(params) or params.list: + if params.rel: + data = [json_api_get_request( + "releases/{0}/" + .format(params.rel) + )] + else: + data = json_api_get_request("releases/") + print_to_output( + data, + format_table( + data, + acceptable_keys=acceptable_keys + ) + ) + elif params.config: + check_for_attributes(params, ["rel", "username", "password"]) + data = { + "release_id": params.rel, + "username": params.username, + "password": params.password + } + satellite_flags = [params.satellite_server_hostname, + params.activation_key] + if not any(satellite_flags): + data.update({ + "license_type": "rhsm", + "satellite": "", + "activation_key": "" + }) + elif all(satellite_flags): + data.update({ + "license_type": "rhn", + "satellite": params.satellite_server_hostname, + "activation_key": params.activation_key + }) + else: + print_error('RedHat satellite settings requires both a ' + '"--satellite-server-hostname" and ' + 'a "--activation-key" flags.') + release_response = json_api_post_request( + "redhat/setup/", + data + ) + print_to_output( + release_response, + "Credentials for release with id={0}" + " were modified." 
+ .format(params.rel) + ) + + +def role(params): + """List all roles for specific release + """ + check_for_attributes(params, ["rel"]) + data = json_api_get_request( + "releases/{0}/" + .format(params.rel) + ) + acceptable_keys = ["name", "conflicts"] + roles = [ + { + "name": role_name, + "conflicts": ", ".join( + metadata.get("conflicts", ["-"]) + ) + } for role_name, metadata in data["roles_metadata"].iteritems()] + print_to_output( + roles, + format_table( + roles, + acceptable_keys=acceptable_keys + ) + ) + + +def environment(params): + """Create, list and modify currently existing environments(clusters) + """ + if not has_arguments(params) or params.list: + acceptable_keys = ["id", "status", "name", "mode", + "release", "changes"] + data = json_api_get_request("clusters/") + if params.env: + data = filter( + lambda x: x[u"id"] == int(params.env), + data + ) + print_to_output( + data, + format_table( + data, + acceptable_keys=acceptable_keys, + subdict_keys=[("release", u"id")] + ) + ) + elif params.create: + check_for_attributes(params, ["name", "rel"]) + data = { + "nodes": [], + "tasks": [], + "name": params.name, + "release": int(params.rel) + } + if params.net.lower() == "nova": + data["net_provider"] = "nova_network" + else: + data["net_provider"] = "neutron" + if params.net_segment_type: + data["net_segment_type"] = params.net_segment_type + else: + print_error('"--net-segment-type" must be specified!') + cluster_response = json_api_post_request("clusters/", data) + cluster_id = cluster_response[u"id"] + if params.mode: + full_mode = "ha_compact" \ + if params.mode.lower() == "ha" else "multinode" + data = {"mode": full_mode} + cluster_response = json_api_put_request( + "clusters/{0}/".format(cluster_id), + data + ) + print_to_output( + cluster_response, + "Environment '{name}' with id={id}, mode={mode}" + " and network-mode={net_provider} was created!" 
+ .format(**cluster_response) + ) + elif params.set: + check_for_attributes(params, ["env"]) + check_for_one_attribute(params, ["name", "mode"]) + data = {} + if params.mode: + data["mode"] = "ha_compact" \ + if params.mode.lower() == "ha" else "multinode" + if params.name: + data["name"] = params.name + put_response = json_api_put_request( + "clusters/{0}/".format(params.env), + data + ) + msg_templates = [] + if params.name: + msg_templates.append( + "Environment with id={id} was renamed to '{name}'." + ) + if params.mode: + msg_templates.append( + "Mode of environment with id={id} was set to '{mode}'." + ) + + message = "\n".join(msg_templates).format( + id=params.env, + name=params.name, + mode=params.mode + ) + print_to_output(put_response, message) + elif params.delete: + check_for_attributes(params, ["env"]) + delete_response = json_api_delete_request( + "clusters/{0}/".format(params.env) + ) + print_to_output( + delete_response, + "Environment with id={0} was deleted." + .format(params.env) + ) + + +def node(params): + """List and assign available nodes to environments + """ + if params.set: + check_for_attributes(params, ["node", "role", "env"]) + node_ids = list(chain(*params.node)) + roles = map(str.lower, params.role) + if not params.force: + validate_roles(params.env, roles) + data = map( + lambda _node_id: { + "id": _node_id, + "cluster_id": params.env, + "pending_roles": roles, + "pending_addition": True, + "pending_deletion": False + }, + node_ids + ) + put_response = json_api_put_request("nodes/", data) + print_to_output( + put_response, + "Nodes {0} with roles {1} " + "were added to environment {2}" + .format(node_ids, roles, params.env) + ) + elif params.delete: + check_for_one_attribute(params, ["env", "node"]) + nodes_clusters = dict((_node["id"], _node["cluster"]) + for _node in json_api_get_request("nodes/")) + if not params.node and params.env: + node_ids = [k for k, v in nodes_clusters.iteritems() + if v == int(params.env)] + else: + 
node_ids = list(chain(*params.node)) + data = map( + lambda _node_id: { + "id": _node_id, + "cluster_id": None, + "pending_roles": [], + "pending_addition": False, + "pending_deletion": True + }, + node_ids + ) + put_response = json_api_put_request("nodes/", data) + print_to_output( + put_response, + "Nodes with ids {0} were removed from environment with id {1}." + .format( + node_ids, + nodes_clusters[node_ids[0]] + ) + ) + elif params.network or params.disk: + check_for_one_attribute(params, ["default", "download", "upload"]) + check_for_attributes(params, ["node"]) + node_ids = list(chain(*params.node)) + for node_id in node_ids: + if params.network: + get_node_attribute( + params, + node_id, + "interfaces", + [("interfaces", "default_assignment")] + ) + elif params.disk: + get_node_attribute( + params, + node_id, + "disks", + [("disks", "defaults")] + ) + elif any((params.default, params.download, params.upload)): + print('"--default", "--download" or "--upload" ' + 'must appear after "--disk" or "--network" flags.') + else: + acceptable_keys = ["id", "status", "name", "cluster", + "mac", "roles", "pending_roles", "online"] + data = json_api_get_request("nodes/") + if params.env: + data = filter( + lambda x: x[u"cluster"] == int(params.env), + data + ) + elif params.node: + node_ids = list(chain(*params.node)) + data = filter( + lambda x: x[u"id"] in node_ids, + data + ) + print_to_output( + data, + format_table(data, acceptable_keys=acceptable_keys) + ) + + +def quote_and_join(words): + words = list(words) + if len(words) > 1: + return '{0} and "{1}"'.format( + ", ".join( + map( + lambda x: '"{0}"'.format(x), + words + )[0:-1] + ), + words[-1] + ) + else: + return '"{0}"'.format(words[0]) + + +def validate_roles(cluster_id, roles): + roles = set(roles) + cluster = json_api_get_request("clusters/{0}/".format(cluster_id)) + roles_metadata = cluster["release"]["roles_metadata"] + not_valid_roles = roles - set(cluster["release"]["roles"]) + if not_valid_roles: + 
print_error( + "{0} are not valid roles for environment {1}" + .format(quote_and_join(not_valid_roles), cluster["name"]) + ) + for _role in roles: + if "conflicts" in roles_metadata[_role]: + conflicting_roles = set(roles_metadata[_role]["conflicts"]) + other_roles = roles - set(_role) + conflicting_roles &= other_roles + if conflicting_roles: + print_error( + 'Role "{0}" in conflict with role {1}' + .format(_role, quote_and_join(conflicting_roles)) + ) + + +def get_node_attribute(params, node_id, upload_attribute, attributes): + default_url_template = "nodes/{0}/{1}/{2}".format(node_id, "{0}", "{1}") + dir_path = os.path.join( + os.path.abspath(params.dir or os.path.curdir), + "node_{0}".format(node_id) + ) + if params.upload: + dir_path = folder_or_one_up(dir_path) + if not os.path.exists(dir_path): + print_error( + "Folder {0} doesn't contain node folder '{1}'" + .format(dir_path, "node_{0}".format(node_id)) + ) + upload_node_attribute( + default_url_template.format(upload_attribute, ""), + node_id, + dir_path, + upload_attribute + ) + elif params.default or params.download: + if not os.path.exists(dir_path): + os.makedirs(dir_path) + for attribute, default_tail in attributes: + write_node_attribute( + default_url_template.format( + attribute, + default_tail if params.default else "" + ), + dir_path, + attribute + ) + + +def upload_node_attribute(url, node_id, node_dir_path, attribute): + data = read_from_file( + os.path.join( + node_dir_path, + attribute + ) + )[0] + if attribute == "interfaces": + url = "nodes/interfaces" + data = [{ + "interfaces": data, + "id": node_id + }] + json_api_put_request( + url, + data + ) + print("{0} configuration uploaded.".format(attribute)) + + +def write_node_attribute(url, node_dir_path, attribute): + attribute_path = os.path.join( + node_dir_path, + attribute + ) + get_response = json_api_get_request(url) + if os.path.exists(attribute_path): + os.remove(attribute_path) + attribute_path = write_to_file( + attribute_path, + 
get_response + ) + print( + "{0} configuration downloaded to {1}" + .format(attribute, attribute_path) + ) + + +def network(params): + """Show or modify network settings of specific environments + """ + check_for_attributes(params, ["env"]) + cluster = json_api_get_request("clusters/{0}/".format(params.env)) + network_url = "clusters/{0}/network_configuration/{1}".format( + params.env, + cluster["net_provider"] + ) + network_data = json_api_get_request(network_url) + network_file_path = os.path.join( + os.path.abspath(params.dir or os.path.curdir), + "network_{0}".format(params.env) + ) + if params.upload: + data, network_file_path = read_from_file(network_file_path) + put_response = json_api_put_request( + network_url, + data + ) + print_to_output( + put_response, + "Network configuration from {0} uploaded." + .format(network_file_path) + ) + elif params.verify: + verify_url = network_url + "/verify" + put_response = json_api_put_request(verify_url, network_data) + print_to_output( + put_response, + "Verification status is '{status}'. 
message: {message}" + .format(**put_response) + ) + else: + if params.download: + network_file_path = write_to_file(network_file_path, + network_data) + print( + "Network configuration for environment with id={0}" + " downloaded to {1}" + .format(params.env, network_file_path) + ) + else: + print_to_output(network_data, None) + + +def settings(params): + """Show or modify environment settings + """ + check_for_attributes(params, ["env"]) + check_for_one_attribute(params, ["default", "download", "upload"]) + settings_url = "clusters/{0}/attributes".format(params.env) + settings_data = json_api_get_request(settings_url) + settings_file_path = os.path.join( + os.path.abspath(params.dir or os.path.curdir), + "settings_{0}".format(params.env) + ) + if params.upload: + data, settings_file_path = read_from_file(settings_file_path) + put_response = json_api_put_request( + settings_url, + data + ) + print_to_output( + put_response, + "Settings configuration from {0} uploaded." + .format(settings_file_path) + ) + elif params.default: + default_url = settings_url + "/defaults" + get_response = json_api_get_request(default_url) + settings_file_path = write_to_file(settings_file_path, get_response) + print_to_output( + get_response, + "Default settings configuration downloaded to {0}." + .format(settings_file_path) + ) + elif params.download: + settings_file_path = write_to_file( + settings_file_path, + settings_data + ) + print_to_output( + settings_data, + "Settings configuration for environment with id={0}" + " downloaded to {1}" + .format(params.env, settings_file_path) + ) + + +def task(params): + """Show tasks + """ + if params.delete: + check_for_attributes(params, ["tid"]) + task_ids = list(chain(*params.tid)) + delete_response = map( + lambda tid: json_api_delete_request("tasks/{0}/".format(tid)), + task_ids + ) + print_to_output( + delete_response, + "Tasks with id's {0} deleted." 
+ .format(','.join(map(str, task_ids))) + ) + else: + acceptable_keys = ["id", "status", "name", "cluster", "progress"] + tasks = json_api_get_request("tasks/") + print_to_output( + tasks, + format_table(tasks, acceptable_keys=acceptable_keys) + ) + + +def snapshot(params): + """Generate and download snapshot. + """ + dump_task = json_api_put_request( + "logs/package", + "{}" + ) + task_id = dump_task["id"] + print("Generating dump...") + while dump_task["progress"] < 100: + dump_task = json_api_get_request("tasks/{0}/".format(task_id)) + sleep(0.5) + download_snapshot_with_progress_bar( + ROOT + dump_task["message"], + params.dir + ) + + +def download_snapshot_with_progress_bar(url, directory): + directory = directory or os.path.curdir + if not os.path.exists(directory): + print_error("Folder {0} doesn't exist.".format(directory)) + file_name = os.path.join( + os.path.abspath(directory), + url.split('/')[-1] + ) + download_handle = urllib2.urlopen(url) + with open(file_name, 'wb') as file_handle: + meta = download_handle.info() + file_size = int(meta.getheaders("Content-Length")[0]) + print("Downloading: {0} Bytes: {1}".format(url, file_size)) + file_size_dl = 0 + block_size = 8192 + bar = partial(get_bar_for_progress, 80) + while True: + data_buffer = download_handle.read(block_size) + if not data_buffer: + break + file_size_dl += len(data_buffer) + file_handle.write(data_buffer) + progress = int(100 * float(file_size_dl) / file_size) + sys.stdout.write("\r{0}".format( + bar(progress) + )) + sys.stdout.flush() + sleep(1 / 10) + print() + + +def deploy(params): + """Deploy changes to environments + """ + check_for_attributes(params, ["env"]) + put_response = json_api_put_request( + "clusters/{0}/changes".format(params.env), + {} + ) + print_to_output(put_response, put_response, + print_method=print_deploy_progress) + + +def get_bar_for_progress(full_width, progress): + number_of_equal_signs = int( + math.ceil(progress * float(full_width - 2) / 100) + ) + return 
"[{0}{1}{2}]".format( + "=" * number_of_equal_signs, + ">" if number_of_equal_signs < full_width - 2 else "", + " " * (full_width - 3 - number_of_equal_signs) + ) + + +class DeployProgressFactory: + + def __init__(self, deploy_task): + self.env = deploy_task["cluster"] + self.tid = deploy_task["id"] + self.progress = 0 + self.nodes = [] + + @property + def is_not_finished(self): + return self.progress != 100 or any( + map(lambda n: n["progress"] != 100, self.nodes) + ) + + @property + def normalized_progress(self): + return self.progress / 100.0 + + def update(self): + task_response = json_api_get_request( + "tasks/{0}/".format(self.tid) + ) + if task_response["status"] == "error": + raise DeployProgressError(task_response["message"]) + self.progress = task_response["progress"] + self.nodes = json_api_get_request( + "nodes?cluster_id={0}".format(self.env) + ) + self.nodes.sort(key=lambda n: n.get("id")) + for _node in self.nodes: + _node["norm_progress"] = _node["progress"] / 100.0 + + +def print_deploy_progress(deploy_task): + deploy_process = DeployProgressFactory(deploy_task) + try: + terminal_screen = curses.initscr() + print_deploy_progress_with_terminal(deploy_process, terminal_screen) + except curses.error: + print_deploy_progress_without_terminal(deploy_process) + + +def print_deploy_progress_without_terminal(deploy_process): + print("Deploying changes to environment with id={0}".format( + deploy_process.env + )) + try: + while deploy_process.is_not_finished: + message_len = 0 + deploy_process.update() + deployment_message = "[Deployment: {0:4.0%}]".format( + deploy_process.normalized_progress + ) + sys.stdout.write(deployment_message) + message_len += len(deployment_message) + for index, _node in enumerate(deploy_process.nodes): + node_message = "[Node{id:2} {norm_progress:4.0%}]".format( + **_node + ) + message_len += len(node_message) + sys.stdout.write(node_message) + sleep(0.5) + sys.stdout.write("\r" * message_len) + print("Finished deployment!") + 
except DeployProgressError as e: + print(e.message) + + +def print_deploy_progress_with_terminal(deploy_process, terminal_screen): + scr_width = terminal_screen.getmaxyx()[1] + curses.noecho() + curses.cbreak() + total_progress_bar = partial(get_bar_for_progress, scr_width - 17) + node_bar = partial(get_bar_for_progress, scr_width - 28) + try: + while deploy_process.is_not_finished: + deploy_process.update() + terminal_screen.addstr( + 0, 0, + "Deploying changes to environment with id={0}".format( + deploy_process.env + ) + ) + terminal_screen.addstr( + 1, 0, + "Deployment: {0} {1:4.0%}".format( + total_progress_bar(deploy_process.progress), + deploy_process.normalized_progress + ) + ) + for index, _node in enumerate(deploy_process.nodes): + terminal_screen.addstr( + index + 2, 0, + "Node{id:3} {status:13}: {bar} {norm_progress:4.0%}" + .format(bar=node_bar(_node["progress"]), **_node) + ) + sleep(0.5) + terminal_screen.refresh() + except DeployProgressError as e: + close_curses() + print(e.message) + finally: + close_curses() + + +def close_curses(): + curses.echo() + curses.nocbreak() + curses.endwin() + + +def provisioning(params): + """Show computed provisioning facts for orchestrator + """ + fact(params, "provisioning") + + +def deployment(params): + """Show computed deployment facts for orchestrator + """ + fact(params, "deployment") + + +def fact(params, info_type): + check_for_attributes(params, ["env"]) + + dir_name = os.path.join( + os.path.abspath(params.dir or os.path.curdir), + "{0}_{1}".format(info_type, params.env) + ) + facts_default_url = "clusters/{0}/orchestrator/{1}/defaults".format( + params.env, + info_type + ) + facts_url = "clusters/{0}/orchestrator/{1}/".format( + params.env, + info_type + ) + if params.default: + facts = json_api_get_request(facts_default_url) + write_facts_to_dir(facts, dir_name) + elif params.upload: + json_api_put_request( + facts_url, + read_deployment_info(dir_name) + if info_type == "deployment" else + 
read_provisioning_info(dir_name) + ) + print("{0} facts uploaded from {1}.".format(info_type, dir_name)) + elif params.delete: + json_api_delete_request(facts_url) + print("{0} facts deleted.".format(info_type)) + else: + facts = json_api_get_request(facts_url) + if not facts: + print( + "Environment with id={0} has no {1} info." + .format(params.env, info_type) + ) + return + if params.download: + write_facts_to_dir(facts, dir_name) + + +def healthcheck(params): + """Run health check on environment + """ + check_for_attributes(params, ["env"]) + test_sets = json_ostf_get_request( + 'testsets/{0}'.format(params.env) + ) + if params.list: + print_to_output(test_sets, format_table(test_sets)) + else: + params.check = params.check or set(ts["id"] for ts in test_sets) + tests_data = map( + lambda testset: { + "testset": testset, + "metadata": { + "config": {}, + "cluster_id": params.env + } + }, + list(params.check) + ) + json_ostf_post_request( + "testruns", + tests_data + ) + tests_state = json_ostf_get_request( + "testruns/last/{0}".format(params.env) + ) + print_to_output(tests_state, params, print_method=print_health_check) + + +def print_health_check(params): + tests_states = [{"status": "not finished"}] + finished_tests = set() + test_counter, total_tests_count = 1, None + while not all(map( + lambda t: t["status"] == "finished", + tests_states + )): + tests_states = json_ostf_get_request( + "testruns/last/{0}".format(params.env) + ) + all_tests = list(chain(*map( + itemgetter("tests"), + filter( + lambda x: x["testset"] in params.check, + tests_states + )))) + if total_tests_count is None: + total_tests_count = len(all_tests) + all_finished_tests = filter( + lambda t: "running" not in t["status"], + all_tests + ) + new_finished_tests = filter( + lambda t: t["name"] not in finished_tests, + all_finished_tests + ) + finished_tests.update( + map( + itemgetter("name"), + new_finished_tests + ) + ) + for test in new_finished_tests: + print( + "[{0:2} of {1}] 
[{status}] '{name}' " + "({taken:.4} s) {message}".format( + test_counter, + total_tests_count, + **test + ) + ) + test_counter += 1 + sleep(1) + + +def prepare_path(path): + if JSON: + serialisation_format = "json" + elif YAML: + serialisation_format = "yaml" + else: + serialisation_format = DEFAULT_SERIALIZER + return "{0}.{1}".format( + path, serialisation_format + ), SERIALIZERS[serialisation_format] + + +def write_to_file(path, data): + full_path, serializer = prepare_path(path) + with open(full_path, "w+") as file_to_write: + file_to_write.write(serializer["w"](data)) + return full_path + + +def is_file_exists(path): + dir_path_list = path.split(os.path.sep) + name = dir_path_list.pop(-1) + folder_path = os.path.sep.join(dir_path_list) + files_with_extensions = filter( + lambda x: len(x) > 1 and x[0] != '', + map( + lambda x: str.split(x, "."), + os.listdir(folder_path) + ) + ) + if not files_with_extensions: + return False + else: + return name in map( + lambda x: x[0], + files_with_extensions + ) + + +def read_from_file(path): + full_path, serializer = prepare_path(path) + try: + with open(full_path, "r") as file_to_read: + return serializer["r"](file_to_read.read()), full_path + except IOError: + print_error( + "File {0} doesn't exist. Try checking serialization format." 
+ .format(full_path) + ) + + +def folder_or_one_up(dir_path): + if not os.path.exists(dir_path): + path_to_folder = dir_path.split(os.sep) + one_folder_up = path_to_folder[:-2] + path_to_folder[-2:-1] + dir_path = os.sep.join(one_folder_up) + return dir_path + + +def listdir_without_extensions(dir_path): + return filter( + lambda f: f != "", + map( + lambda f: f.split(".")[0], + os.listdir(dir_path) + ) + ) + + +def read_provisioning_info(dir_name): + dir_name = folder_or_one_up(dir_name) + if not any(map(lambda f: "engine" in f, os.listdir(dir_name))): + print_error( + "engine file was not found in {0}" + .format(dir_name) + ) + try: + node_facts = map( + lambda f: read_from_file(f)[0], + [os.path.join(dir_name, fact_file) + for fact_file in listdir_without_extensions(dir_name) + if "engine" != fact_file] + ) + engine, _ = read_from_file(os.path.join(dir_name, "engine")) + return { + "engine": engine, + "nodes": node_facts + } + except OSError: + print_error( + "Directory {0} doesn't exist." + .format(dir_name) + ) + + +def read_deployment_info(dir_name): + dir_name = folder_or_one_up(dir_name) + try: + return map( + lambda f: read_from_file(f)[0], + [os.path.join(dir_name, json_file) + for json_file in listdir_without_extensions(dir_name)] + ) + except OSError: + print_error( + "Directory {0} doesn't exist." 
+ .format(dir_name) + ) + + +def write_facts_to_dir(facts, dir_name): + if os.path.exists(dir_name): + shutil.rmtree(dir_name) + print("old directory {0} was removed".format(dir_name)) + os.makedirs(dir_name) + print("directory {0} was created".format(dir_name)) + if isinstance(facts, dict): + engine_file_path = os.path.join(dir_name, "engine") + engine_file_path = write_to_file(engine_file_path, facts["engine"]) + print("Created {0}".format(engine_file_path)) + facts = facts["nodes"] + name_template = "{name}" + else: + name_template = "{role}_{uid}" + for _fact in facts: + fact_path = os.path.join( + dir_name, + name_template.format(**_fact) + ) + fact_path = write_to_file(fact_path, _fact) + print("Created {0}".format(fact_path)) + + +def parse_ids(x): + """Parse arguments with commas and spaces + + :returns: list of lists with numbers + """ + filtered = [y for y in x.split(",") if y.strip() != ''] + if len(filtered) > 1: + return map(int, filtered) + elif len(filtered) == 1: + return [int(filtered[0])] + else: + return None + + +def get_download_arg(help_msg): + return { + "args": ["-d", "--download"], + "params": { + "dest": "download", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_list_arg(help_msg): + return { + "args": ["-l", "--list"], + "params": { + "dest": "list", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_dir_arg(help_msg): + return { + "args": ["--dir"], + "params": { + "dest": "dir", + "action": "store", + "help": help_msg, + "default": None + } + } + + +def get_verify_arg(help_msg): + return { + "args": ["-v", "--verify"], + "params": { + "dest": "verify", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_upload_arg(help_msg): + return { + "args": ["-u", "--upload"], + "params": { + "dest": "upload", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_default_arg(help_msg): + return { + "args": 
["--default"], + "params": { + "dest": "default", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_set_arg(help_msg): + return { + "args": ["-s", "--set"], + "params": { + "dest": "set", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_delete_arg(help_msg): + return { + "args": ["--delete"], + "params": { + "dest": "delete", + "action": "store_true", + "help": help_msg, + "default": False + } + } + + +def get_release_arg(help_msg): + return { + "args": ["--rel", "--release"], + "params": { + "dest": "rel", + "action": "store", + "type": str, + "help": help_msg, + "default": None + } + } + + +actions = { + "release": { + "action": release, + "args": [ + get_list_arg("List all available releases."), + get_release_arg("Specify release id to configure"), + { + "args": ["-c", "--config"], + "params": { + "dest": "config", + "action": "store_true", + "help": "Configure release with --release", + "default": False + } + }, { + "args": ["-U", "--user", "--username"], + "params": { + "dest": "username", + "action": "store", + "type": str, + "help": "Username for release credentials", + "default": None + } + }, { + "args": ["-P", "--pass", "--password"], + "params": { + "dest": "password", + "action": "store", + "type": str, + "help": "Password for release credentials", + "default": None + } + }, { + "args": ["--satellite-server-hostname"], + "params": { + "dest": "satellite_server_hostname", + "action": "store", + "type": str, + "help": "Satellite server hostname", + "default": None + } + }, { + "args": ["--activation-key"], + "params": { + "dest": "activation_key", + "action": "store", + "type": str, + "help": "activation key", + "default": None + } + }] + }, + "role": { + "action": role, + "args": [ + get_list_arg("List all roles for specific release"), + get_release_arg("Release id") + ] + }, + "environment": { + "action": environment, + "args": [ + get_list_arg("List all available environments."), 
+ get_set_arg("Set environment parameters (e.g name, deployment mode)"), + get_delete_arg("Delete environment with specific env or name"), + get_release_arg("Release id"), + { + "args": ["-c", "--env-create", "--create"], + "params": { + "dest": "create", + "action": "store_true", + "help": "Create a new environment with specific " + "release id and name.", + "default": False + } + }, { + "args": ["--name", "--env-name"], + "params": { + "dest": "name", + "action": "store", + "type": str, + "help": "environment name", + "default": None + } + }, { + "args": ["-m", "--mode", "--deployment-mode"], + "params": { + "dest": "mode", + "action": "store", + "choices": ["multinode", "ha"], + "help": "Set deployment mode for specific environment.", + "default": False + } + }, { + "args": ["-n", "--net", "--network-mode"], + "params": { + "dest": "net", + "action": "store", + "choices": ["nova", "neutron"], + "help": "Set network mode for specific environment.", + "default": "nova" + } + }, { + "args": ["--nst", "--net-segment-type"], + "params": { + "dest": "net_segment_type", + "action": "store", + "choices": ["gre", "vlan"], + "help": "Set network segment type", + "default": False + } + }] + }, + "node": { + "action": node, + "args": [ + get_list_arg("List all nodes."), + get_set_arg("Set role for specific node."), + get_delete_arg("Delete specific node from environment."), + get_default_arg("Get default network configuration of some node"), + get_download_arg("Download configuration of specific node"), + get_upload_arg("Upload configuration to specific node"), + get_dir_arg("Select directory to which download node attributes"), + { + "args": ["--node", "--node-id"], + "params": { + "dest": "node", + "action": "store", + "nargs": '+', + "type": parse_ids, + "help": "Node id.", + "default": None + } + }, { + "args": ["-r", "--role"], + "params": { + "dest": "role", + "type": lambda v: v.split(','), + "action": SetAction, + "help": "Role to assign for node.", + "default": 
None + } + }, { + "args": ["--net", "--network"], + "params": { + "dest": "network", + "action": "store_true", + "help": "Node network configuration.", + "default": False + } + }, { + "args": ["--disk"], + "params": { + "dest": "disk", + "action": "store_true", + "help": "Node disk configuration.", + "default": False + } + }, { + "args": ["-f", "--force"], + "params": { + "dest": "force", + "action": "store_true", + "help": "Bypassing parameter validation.", + "default": False + } + }] + }, + "network": { + "action": network, + "args": [ + get_download_arg("Download current network configuration."), + get_dir_arg("Directory with network data."), + get_verify_arg("Verify current network configuration."), + get_upload_arg("Upload changed network configuration.") + ] + }, + "settings": { + "action": settings, + "args": [ + get_download_arg("Modify current configuration."), + get_default_arg("Open default configuration."), + get_upload_arg("Save current changes in configuration."), + get_dir_arg("Directory with configuration data.") + ] + }, + "task": { + "action": task, + "args": [ + get_delete_arg("Delete task with some task-id."), + { + "args": ["--tid", "--task-id"], + "params": { + "dest": "tid", + "action": "store", + "nargs": '+', + "type": parse_ids, + "help": "Task id.", + "default": None + } + }] + }, + "snapshot": { + "action": snapshot, + "args": [ + get_dir_arg("Directory to which download snapshot.") + ] + }, + "deploy": { + "action": deploy, + "args": [] + }, + "health": { + "action": healthcheck, + "args": [ + get_list_arg("List all available checks"), + { + "args": ["--check"], + "params": { + "dest": "check", + "type": lambda v: v.split(','), + "action": SetAction, + "help": "Run check for some id.", + "default": None + } + } + ] + } +} + + +def get_args_for_facts(fact_type): + return [ + get_delete_arg("Delete current {0} data.".format(fact_type)), + get_download_arg("Download current {0} data.".format(fact_type)), + get_upload_arg("Upload current 
{0} data.".format(fact_type)), + get_default_arg("Download default {0} data.".format(fact_type)), + get_dir_arg("Directory with {0} data.".format(fact_type)) + ] + + +substitutions = { + #replace from: to + "env": "environment", + "nodes": "node", + "net": "network", + "rel": "release", + "list": "--list", + "set": "--set", + "delete": "--delete", + "download": "--download", + "upload": "--upload", + "default": "--default", + "create": "--create", + "remove": "--delete", + "config": "--config", + "--roles": "--role" +} + + +def prepare_args(): + # replace some args from dict substitutions + sys.argv = map( + lambda x: substitutions.get(x, x), + sys.argv + ) + # move --json and --debug flags before any action + for flag in ["--json", "--debug", "--yaml"]: + if flag in sys.argv: + sys.argv.remove(flag) + sys.argv.insert(1, flag) + + # move --env or --env-id flags to beginning if declared after action + for arg in sys.argv: + if "--env" in arg: + # if declaration with '=' sign (e.g. --env-id=1) + if "=" in arg: + index_of_env = sys.argv.index(arg) + env = sys.argv.pop(index_of_env) + sys.argv.insert(1, env) + else: + try: + index_of_env = sys.argv.index(arg) + sys.argv.pop(index_of_env) + env = sys.argv.pop(index_of_env) + sys.argv.insert(1, env) + sys.argv.insert(1, arg) + except IndexError: + print_error( + 'Environment id must follow "{0}" flag' + .format(arg) + ) + break + + for fact_type in [deployment, provisioning]: + actions[fact_type.__name__] = { + "action": fact_type, + "args": get_args_for_facts(fact_type.__name__) + } + + +if __name__ == '__main__': + prepare_args() + + parser = argparse.ArgumentParser( + usage="fuel [optional args] [action] [flags]", + ) + parser.add_argument("--env", "--env-id", + dest="env", + action="store", + type=str, + help="environment id", + default=None + ) + parser.add_argument("--json", + dest="json", + action="store_true", + help="prints to only json to stdout", + default=False + ) + parser.add_argument("--yaml", + 
dest="yaml", + action="store_true", + help="prints to only yaml to stdout", + default=False + ) + parser.add_argument("--debug", + dest="debug", + action="store_true", + help="prints details of all HTTP request", + default=False + ) + subparsers = parser.add_subparsers( + title="Namespaces", + metavar="", + dest="action", + help='actions' + ) + + for action, parameters in actions.iteritems(): + action_parser = subparsers.add_parser( + action, + prog="fuel [global optional args] {0}" + .format(action), + help=parameters["action"].__doc__ + ) + for argument in parameters.get("args", []): + action_parser.add_argument( + *argument["args"], + **argument["params"] + ) + + parsed_params, other_params = parser.parse_known_args() + sys.argv.pop(1) + DEBUG = parsed_params.debug + JSON = parsed_params.json + YAML = parsed_params.yaml + if parsed_params.action not in actions: + parser.print_help() + sys.exit(0) + current_action = getattr(parsed_params, "action") + delattr(parsed_params, "action") + actions[current_action]["action"](parsed_params) diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..5684eb8 --- /dev/null +++ b/setup.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +# Copyright 2013 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from setuptools import setup + +setup( + name='python-fuelclient', + version='0.1', + description='Command line interface for Nailgun', + long_description="""Command line interface for Nailgun""", + author='Mirantis Inc.', + author_email='product@mirantis.com', + url='http://mirantis.com', + install_requires=['PyYAML==3.10'], + scripts=['fuel'] +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..141ca6a --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2013 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. diff --git a/tests/base.py b/tests/base.py new file mode 100644 index 0000000..ab98be3 --- /dev/null +++ b/tests/base.py @@ -0,0 +1,133 @@ +# -*- coding: utf-8 -*- + +# Copyright 2013 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +try: + from unittest.case import TestCase +except ImportError: + # Runing unit-tests in production environment + from unittest2.case import TestCase + +import logging +import os +import subprocess +import sys + +logging.basicConfig(stream=sys.stderr) +logging.getLogger("SomeTest.testSomething").setLevel(logging.DEBUG) + + +class CliExectutionResult: + def __init__(self, process_handle): + self.return_code = process_handle.returncode + self.stdout = process_handle.stdout.read() + self.stderr = process_handle.stderr.read() + + @property + def has_errors(self): + return bool(len(self.stderr)) + + @property + def is_return_code_zero(self): + return self.return_code == 0 + + +class BaseTestCase(TestCase): + root_path = os.path.abspath( + os.path.join( + os.curdir, + os.path.pardir + ) + ) + clean_cmd = os.path.join( + root_path, + "run_tests.sh" + ) + " -c" + manage_path = os.path.join( + root_path, + "nailgun/manage.py" + ) + fuel_path = os.path.join( + root_path, + "fuelclient/fuel" + ) + + @classmethod + def setUpClass(cls): + cls.reload_nailgun_server() + + @classmethod + def reload_nailgun_server(cls): + for action in ("dropdb", "syncdb", "loaddefault"): + cls.run_command(cls.manage_path, action) + + @classmethod + def load_data_to_nailgun_server(cls): + cls.run_command(cls.manage_path, "loaddata {0}".format( + os.path.join( + cls.root_path, + "nailgun/nailgun/fixtures/sample_environment.json" + ) + )) + + @staticmethod + def run_command(*args): + handle = subprocess.Popen( + [" ".join(args + (">/dev/null", "2>&1"))], + shell=True + ) + print("Running " + " ".join(args)) + handle.wait() + + def run_cli_command(self, command_line=None, with_erros=False): + modified_env = os.environ.copy() + modified_env["LISTEN_PORT"] = "8003" + command_args = [" ".join((self.fuel_path, command_line))] + log = logging.getLogger("SomeTest.testSomething") + process_handle = subprocess.Popen( + command_args, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + shell=True, + 
env=modified_env + ) + process_handle.wait() + result = CliExectutionResult(process_handle) + log.debug("command_args: '%s',stdout: '%s', stderr: '%s'", + command_args[0], result.stdout, result.stderr) + if not with_erros: + if not result.is_return_code_zero or result.has_errors: + self.fail() + return result + + def check_if_required(self, command): + call = self.run_cli_command(command_line=command, with_erros=True) + #should not work without env id + self.assertIn("required", call.stderr) + + def check_for_stdout(self, command, msg): + call = self.run_cli_command(command_line=command) + self.assertEqual(call.stdout, msg) + + def check_all_in_msg(self, command, substrs): + output = self.run_cli_command(command_line=command) + for substr in substrs: + self.assertIn(substr, output.stdout) + + def check_for_rows_in_table(self, command): + output = self.run_cli_command(command_line=command) + message = output.stdout.split("\n") + #no env + self.assertEqual(message[2], '') diff --git a/tests/test_client.py b/tests/test_client.py new file mode 100644 index 0000000..e6c75a2 --- /dev/null +++ b/tests/test_client.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Copyright 2013 Mirantis, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from fuelclient.tests.base import BaseTestCase + + +class TestHandlers(BaseTestCase): + + def test_env_action(self): + #check env help + help_msgs = ["usage: fuel [global optional args]", + "environment [-h] [-l] [-s] [--delete]", + "optional arguments:", "--help", "--list", "--set", + "--delete", "--rel", "--release", "--env-create,", + "--create", "--name", "--env-name", "--mode", "--net", + "--network-mode", "--nst", "--net-segment-type", + "--deployment-mode"] + self.check_all_in_msg("env --help", help_msgs) + #no clusters + self.check_for_rows_in_table("env") + + for action in ("set", "create", "delete"): + self.check_if_required("env {0}".format(action)) + + #list of tuples (, ) + expected_stdout = \ + [( + "env create --name=TestEnv --release=1", + "Environment 'TestEnv' with id=1, mode=multinode and " + "network-mode=nova_network was created!\n" + ), ( + "--env-id=1 env set --name=NewEnv", + "Environment with id=1 was renamed to 'NewEnv'.\n" + ), ( + "--env-id=1 env set --mode=ha", + "Mode of environment with id=1 was set to 'ha'.\n" + )] + + for cmd, msg in expected_stdout: + self.check_for_stdout(cmd, msg) + + def test_node_action(self): + help_msg = ["usage: fuel [global optional args] node [-h] ", + "[-l] [-s] [--delete] [--default]", "-h", "--help", "-l", + "--list", "-s", "--set", "--delete", "--default", "-d", + "--download", "-u", "--upload", "--dir", "--node", + "--node-id", "-r", "--role", "--net", "--network", + "--disk", "-f", "--force"] + self.check_all_in_msg("node --help", help_msg) + + self.check_for_rows_in_table("node") + + for action in ("set", "remove", "--network", "--disk"): + self.check_if_required("node {0}".format(action))