e1f5bcf08c
We should define a set of CLUSTER_STATUS constants instead of using direct strings in code.

1. Add cluster.py in utils/
2. Add cluster status constants.
3. Move cluster operation related methods from general.py to cluster.py.

Change-Id: Id95d982a911ab5d0f789265e03bff2256cf75856
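As an illustrative sketch (the c_u alias and the exact call site are assumptions, not part of this change), a caller that previously passed the raw status string would switch to the new constants:

    from sahara.utils import cluster as c_u

    # before: change_cluster_status(cluster, "Spawning")
    # after:
    c_u.change_cluster_status(cluster, c_u.CLUSTER_STATUS_SPAWNING)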
139 lines
4.7 KiB
Python
# Copyright (c) 2015 Intel Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from oslo_log import log as logging

from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _LI
from sahara.utils.notification import sender


conductor = c.API
LOG = logging.getLogger(__name__)

# cluster status
CLUSTER_STATUS_VALIDATING = "Validating"
CLUSTER_STATUS_INFRAUPDATING = "InfraUpdating"
CLUSTER_STATUS_SPAWNING = "Spawning"
CLUSTER_STATUS_WAITING = "Waiting"
CLUSTER_STATUS_PREPARING = "Preparing"
CLUSTER_STATUS_CONFIGURING = "Configuring"
CLUSTER_STATUS_STARTING = "Starting"
CLUSTER_STATUS_ACTIVE = "Active"
CLUSTER_STATUS_DECOMMISSIONING = "Decommissioning"
CLUSTER_STATUS_ERROR = "Error"
CLUSTER_STATUS_DELETING = "Deleting"
CLUSTER_STATUS_AWAITINGTERMINATION = "AwaitingTermination"

# cluster status -- Instances
CLUSTER_STATUS_DELETING_INSTANCES = "Deleting Instances"
CLUSTER_STATUS_ADDING_INSTANCES = "Adding Instances"

# Scaling status
CLUSTER_STATUS_SCALING = "Scaling"
CLUSTER_STATUS_SCALING_SPAWNING = (CLUSTER_STATUS_SCALING +
                                   ": " + CLUSTER_STATUS_SPAWNING)
CLUSTER_STATUS_SCALING_WAITING = (CLUSTER_STATUS_SCALING +
                                  ": " + CLUSTER_STATUS_WAITING)
CLUSTER_STATUS_SCALING_PREPARING = (CLUSTER_STATUS_SCALING +
                                    ": " + CLUSTER_STATUS_PREPARING)

# Rollback status
CLUSTER_STATUS_ROLLBACK = "Rollback"
CLUSTER_STATUS_ROLLBACK_SPAWNING = (CLUSTER_STATUS_ROLLBACK +
                                    ": " + CLUSTER_STATUS_SPAWNING)
CLUSTER_STATUS_ROLLBACK_WAITING = (CLUSTER_STATUS_ROLLBACK +
                                   ": " + CLUSTER_STATUS_WAITING)
CLUSTER_STATUS_ROLLBACK_PREPARING = (CLUSTER_STATUS_ROLLBACK +
                                     ": " + CLUSTER_STATUS_PREPARING)


def change_cluster_status_description(cluster, status_description):
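    """Update the status description of a cluster.

    Returns the updated cluster, or None if the cluster no longer exists.
    """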
    try:
        ctx = context.ctx()
        return conductor.cluster_update(
            ctx, cluster, {'status_description': status_description})
    except e.NotFoundException:
        return None


def change_cluster_status(cluster, status, status_description=None):
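    """Set a new status on the cluster and send an "update" notification.

    The cluster is re-fetched first; if it no longer exists or is already
    being deleted, no status change is applied.
    """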
    ctx = context.ctx()

    # Update the cluster status. Race conditions with deletion are still
    # possible, but this at least reduces the probability.
    cluster = conductor.cluster_get(ctx, cluster) if cluster else None

    if status_description is not None:
        change_cluster_status_description(cluster, status_description)

    # 'Deleting' is final and can't be changed
    if cluster is None or cluster.status == CLUSTER_STATUS_DELETING:
        return cluster

    update_dict = {"status": status}
    cluster = conductor.cluster_update(ctx, cluster, update_dict)
    conductor.cluster_provision_progress_update(ctx, cluster.id)

    LOG.info(_LI("Cluster status has been changed. New status="
                 "{status}").format(status=cluster.status))

    sender.notify(ctx, cluster.id, cluster.name, cluster.status,
                  "update")

    return cluster


def count_instances(cluster):
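    """Return the total number of instances in all node groups."""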
    return sum(node_group.count for node_group in cluster.node_groups)


def check_cluster_exists(cluster):
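    """Return True if the cluster still exists, False otherwise."""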
    ctx = context.ctx()
    # check if cluster still exists (it might have been removed)
    cluster = conductor.cluster_get(ctx, cluster)
    return cluster is not None


def get_instances(cluster, instances_ids=None):
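    """Return the cluster's instances, optionally filtered by instance ids."""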
    inst_map = {}
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            inst_map[instance.id] = instance

    if instances_ids is not None:
        return [inst_map[instance_id] for instance_id in instances_ids]
    return list(inst_map.values())


def clean_cluster_from_empty_ng(cluster):
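    """Remove node groups with a zero instance count from the cluster."""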
    ctx = context.ctx()
    for ng in cluster.node_groups:
        if ng.count == 0:
            conductor.node_group_remove(ctx, ng)


def generate_etc_hosts(cluster):
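    """Generate /etc/hosts content for all instances of the cluster."""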
hosts = "127.0.0.1 localhost\n"
|
|
for node_group in cluster.node_groups:
|
|
for instance in node_group.instances:
|
|
hosts += "%s %s %s\n" % (instance.internal_ip,
|
|
instance.fqdn(),
|
|
instance.hostname())
|
|
|
|
return hosts
|