Cluster security and node placement control
Adding user key to the cluster. Each cluster will have its own private key for passwordless login. It is now possible to schedule data nodes on different hosts.

implements: blueprint node-placement-control and blueprint cluster-security
fixed: bug #1179815 and bug #1179821

Change-Id: I27183b9fcf1652b8f65a7018fe9cb880d5bed3b3
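The placement control relies on the Nova "different_host" scheduler hint, the same pattern used in _create_instances below. A minimal standalone sketch of that pattern, assuming an authenticated python-novaclient `client` plus `image_id` and `flavor_id` (all hypothetical names, not part of this change):

    # Each new server is asked to land on a different host than every
    # server created before it in the same anti-affinity group.
    created_ids = []
    for idx in range(1, 4):
        hints = {'different_host': list(created_ids)} if created_ids else None
        server = client.servers.create('node-%03d' % idx, image_id, flavor_id,
                                       scheduler_hints=hints)
        created_ids.append(server.id)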
savanna/service/instances.py
@@ -13,7 +13,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.

import time

from savanna.context import ctx
from savanna.db import models as m
from savanna.openstack.common import log as logging
from savanna.utils.crypto import private_key_to_public_key
import savanna.utils.openstack.nova as nova


LOG = logging.getLogger(__name__)

@@ -30,12 +36,95 @@ def create_cluster(cluster):

def _create_instances(cluster):
    """Create all instances using nova client and persist them into DB."""
    session = ctx().session
    aa_groups = _generate_anti_affinity_groups(cluster)
    for node_group in cluster.node_groups:
        files = _generate_instance_files(node_group)
        for idx in xrange(1, node_group.count + 1):
            name = '%s-%s-%03d' % (cluster.name, node_group.name, idx)
            aa_group = node_group.anti_affinity_group
            ids = aa_groups[aa_group]
            # Ask the scheduler to place this instance on a different host
            # than every instance already created in the same group.
            hints = {'different_host': list(ids)} if ids else None

            nova_instance = nova.novaclient().servers.create(
                name, node_group.image_id, node_group.flavor_id,
                scheduler_hints=hints, files=files)

            with session.begin():
                instance = m.Instance(node_group.id, nova_instance.id, name)
                node_group.instances.append(instance)
                session.add(instance)

            if aa_group:
                aa_groups[aa_group].append(nova_instance.id)


def _generate_instance_files(node_group):
    cluster = node_group.cluster
    user_key = cluster.user_keypair

    if node_group.username == "root":
        path_to_root = "/root"
    else:
        path_to_root = "/home/" + node_group.username

    # Authorize both the user's public key and the cluster's own key, and
    # install the cluster private key for passwordless logins between nodes.
    authorized_keys = user_key.public_key + '\n'
    authorized_keys += private_key_to_public_key(cluster.private_key)

    return {
        path_to_root + "/.ssh/authorized_keys": authorized_keys,
        path_to_root + "/.ssh/id_rsa": cluster.private_key
    }


def _generate_anti_affinity_groups(cluster):
    return dict((ng.anti_affinity_group, []) for ng in cluster.node_groups)

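# Illustration of how the hints accumulate within one anti-affinity group
# (hypothetical nova ids): the first create() call gets hints=None, the
# second {'different_host': ['1']}, the third {'different_host': ['1', '2']},
# so each new instance is scheduled away from every host the group already uses.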

def _await_instances(cluster):
    """Wait until all instances are in Active status and available."""
    all_up = False
    while not all_up:
        all_up = True
        for node_group in cluster.node_groups:
            for instance in node_group.instances:
                if not _check_if_up(instance):
                    all_up = False
        time.sleep(1)


def _check_if_up(instance):
    # Once an instance has been verified, remember that to avoid
    # re-checking it on subsequent polling iterations.
    if hasattr(instance, '_is_up'):
        return True

    server = instance.nova_info
    if server.status == 'ERROR':
        # TODO: replace with a specific error
        raise RuntimeError("node %s has error status" % server.name)

    if server.status != 'ACTIVE':
        return False

    if len(server.networks) == 0:
        return False

    if instance.management_ip is None:
        # TODO: support floating ips and different networks
        ip = server.networks.values()[0][1]
        if not ip:
            return False
        instance.management_ip = ip

    # The node counts as up only once an SSH command succeeds on it.
    try:
        exit_code, _ = instance.remote.execute_command("hostname")
        if exit_code:
            return False
    except Exception as ex:
        LOG.debug("Can't log in to node %s, reason: %s", server.name, ex)
        return False

    instance._is_up = True
    return True


def _configure_instances(cluster):
@@ -45,7 +134,21 @@ def _configure_instances(cluster):
    * setup passwordless login
    * etc.
    """
    hosts = _generate_etc_hosts(cluster)
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            # Stage the file in the user's home dir, then move it into
            # place with sudo (a direct write to /etc requires root).
            instance.remote.write_file_to('etc-hosts', hosts)
            instance.remote.execute_command('sudo mv etc-hosts /etc/hosts')
            instance.remote.execute_command('chmod 400 .ssh/id_rsa')


def _generate_etc_hosts(cluster):
    hosts = "127.0.0.1 localhost\n"
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            hosts += "%s %s\n" % (instance.management_ip, instance.hostname)

    return hosts

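# Sample output of _generate_etc_hosts for a two-instance cluster
# (hypothetical addresses and hostnames):
#     127.0.0.1 localhost
#     10.0.0.11 test_cluster-test_group-001
#     10.0.0.12 test_cluster-test_group-002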

def _rollback_cluster_creation(cluster, ex):
@@ -57,9 +160,11 @@ def _rollback_cluster_creation(cluster, ex):


def _shutdown_instances(cluster, quiet=False):
    """Shut down all instances related to the specified cluster."""
    for node_group in cluster.node_groups:
        for instance in node_group.instances:
            nova.novaclient().servers.delete(instance.instance_id)


def shutdown_cluster(cluster):
    """Shut down the specified cluster and all related resources."""
    _shutdown_instances(cluster)

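# Note: create_cluster and _rollback_cluster_creation appear only in the hunk
# headers above, so the presumed flow is _create_instances ->
# _await_instances -> _configure_instances, with _rollback_cluster_creation
# and _shutdown_instances cleaning up on failure.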
0  savanna/tests/unit/service/__init__.py  Normal file
175  savanna/tests/unit/service/test_instances.py  Normal file

savanna/tests/unit/service/test_instances.py
@@ -0,0 +1,175 @@
# Copyright (c) 2013 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import mock

import savanna.context as ctx
import savanna.db.models as m
from savanna.service.instances import _create_instances
from savanna.tests.unit.db.models.base import ModelTestCase
import savanna.utils.crypto as c

class NodePlacementTest(ModelTestCase):
    @mock.patch('savanna.utils.openstack.nova.novaclient')
    def test_one_node_groups_and_one_affinity_group(self, novaclient):
        node_groups = [m.NodeGroup("test_group",
                                   "test_flavor",
                                   "test_image",
                                   ["data node", "test tracker"],
                                   2,
                                   anti_affinity_group="1")]
        node_groups[0]._username = "root"

        cluster = _create_cluster_mock(node_groups)

        nova = _create_nova_mock(novaclient)

        _create_instances(cluster)
        files = _generate_files(cluster)

        nova.servers.create.assert_has_calls(
            [mock.call("test_cluster-test_group-001",
                       "test_image",
                       "test_flavor",
                       scheduler_hints=None,
                       files=files),
             mock.call("test_cluster-test_group-002",
                       "test_image",
                       "test_flavor",
                       scheduler_hints={'different_host': ["1"]},
                       files=files)],
            any_order=False)

        session = ctx.ctx().session
        with session.begin():
            self.assertEqual(session.query(m.Instance).count(), 2)

    @mock.patch('savanna.utils.openstack.nova.novaclient')
    def test_one_node_groups_and_no_affinity_group(self, novaclient):
        node_groups = [m.NodeGroup("test_group",
                                   "test_flavor",
                                   "test_image",
                                   ["data node", "test tracker"],
                                   2)]
        node_groups[0]._username = "root"

        cluster = _create_cluster_mock(node_groups)

        nova = _create_nova_mock(novaclient)

        _create_instances(cluster)

        files = _generate_files(cluster)
        nova.servers.create.assert_has_calls(
            [mock.call("test_cluster-test_group-001",
                       "test_image",
                       "test_flavor",
                       scheduler_hints=None,
                       files=files),
             mock.call("test_cluster-test_group-002",
                       "test_image",
                       "test_flavor",
                       scheduler_hints=None,
                       files=files)],
            any_order=False)

        session = ctx.ctx().session
        with session.begin():
            self.assertEqual(session.query(m.Instance).count(), 2)

    @mock.patch('savanna.utils.openstack.nova.novaclient')
    def test_two_node_groups_and_one_affinity_group(self, novaclient):
        node_groups = [m.NodeGroup("test_group_1",
                                   "test_flavor",
                                   "test_image",
                                   ["data node", "test tracker"],
                                   2,
                                   anti_affinity_group="1"),
                       m.NodeGroup("test_group_2",
                                   "test_flavor",
                                   "test_image",
                                   ["data node", "test tracker"],
                                   1,
                                   anti_affinity_group="1")]
        node_groups[0]._username = "root"
        node_groups[1]._username = "root"

        cluster = _create_cluster_mock(node_groups)
        nova = _create_nova_mock(novaclient)

        _create_instances(cluster)

        files = _generate_files(cluster)
        nova.servers.create.assert_has_calls(
            [mock.call("test_cluster-test_group_1-001",
                       "test_image",
                       "test_flavor",
                       scheduler_hints=None,
                       files=files),
             mock.call("test_cluster-test_group_1-002",
                       "test_image",
                       "test_flavor",
                       scheduler_hints={'different_host': ["1"]},
                       files=files),
             mock.call("test_cluster-test_group_2-001",
                       "test_image",
                       "test_flavor",
                       scheduler_hints={'different_host': ["1", "2"]},
                       files=files)],
            any_order=False)

        session = ctx.ctx().session
        with session.begin():
            self.assertEqual(session.query(m.Instance).count(), 3)

def _create_cluster_mock(node_groups):
    cluster = m.Cluster("test_cluster",
                        "tenant_id",
                        "mock_plugin",
                        "mock_version",
                        "initial")

    cluster._user_kp = mock.Mock()
    cluster._user_kp.public_key = "123"
    cluster.private_key = c.generate_private_key()

    cluster.node_groups = node_groups
    return cluster


def _mock_instance(id):
    instance1 = mock.Mock()
    instance1.id = id
    return instance1


def _mock_instances(count):
    return [_mock_instance(str(i)) for i in range(1, count + 1)]


def _generate_files(cluster):
    key = c.private_key_to_public_key(cluster.private_key)
    files = {"/root/.ssh/authorized_keys": "123\n" + key,
             '/root/.ssh/id_rsa': cluster.private_key}
    return files


def _create_nova_mock(novaclient):
    nova = mock.Mock()
    novaclient.return_value = nova
    nova.servers.create.side_effect = _mock_instances(3)
    return nova
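# Note on the mocks: _create_nova_mock wires servers.create() to return
# servers with ids "1", "2", "3" in order (via side_effect), which is why
# the expected scheduler hints grow as ['1'] and then ['1', '2'] in the
# assertions above.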