Fix hardcoded username (ec2-user) for heat-engine
heat-engine now passes the username associated with the image to the heat
template's "admin_user" field instead of the hardcoded "ec2-user".

Change-Id: I8d3b218de668d0003bf3277826d85857fc297bc3
Closes-Bug: 1336525
commit c63baf013c
parent b8edeaf2b6
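In short, the engines stop injecting the hardcoded cloud-init user "ec2-user" and instead forward the username registered with the node group's image, which the cluster template renders as the instance's "admin_user" property. A minimal standalone sketch of that idea follows; the FakeImage/FakeImageCatalog stand-ins are illustrative only and are not Sahara code:

# Minimal sketch (not Sahara code): the admin user for a heat-provisioned
# instance now comes from the image registry instead of a constant.


class FakeImage(object):
    """Stand-in for a registered image carrying a username property."""

    def __init__(self, image_id, username):
        self.id = image_id
        self.username = username


class FakeImageCatalog(object):
    """Stand-in for nova.client().images."""

    def __init__(self, images_by_id):
        self._images = images_by_id

    def get(self, image_id):
        return self._images[image_id]


def get_node_group_image_username(images, node_group):
    # Mirrors the shared Engine implementation added by this commit:
    # ask the image for its username instead of assuming 'ec2-user'.
    return images.get(node_group['image_id']).username


if __name__ == '__main__':
    images = FakeImageCatalog({'1': FakeImage('1', 'root')})
    node_group = {'image_id': '1'}
    print(get_node_group_image_username(images, node_group))  # prints: root

The diff below applies this across the heat template, the engines, the integration test configuration, and the unit test resources.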
@@ -4,6 +4,7 @@
         "name" : "%(instance_name)s",
         "flavor" : "%(flavor_id)s",
         "image" : "%(image_id)s",
+        "admin_user": "%(image_username)s",
         %(network_interfaces)s
         %(key_name)s
         %(scheduler_hints)s
@@ -35,10 +35,6 @@ LOG = logging.getLogger(__name__)
 
 
 class DirectEngine(e.Engine):
-    def get_node_group_image_username(self, node_group):
-        image_id = node_group.get_image_id()
-        return nova.client().images.get(image_id).username
-
     def create_cluster(self, cluster):
         ctx = context.ctx()
         try:
@@ -25,6 +25,7 @@ from sahara import context
 from sahara.openstack.common import log as logging
 from sahara.service import networks
 from sahara.utils import general as g
+from sahara.utils.openstack import nova
 from sahara.utils import remote
 
 
@@ -46,9 +47,9 @@ class Engine:
     def shutdown_cluster(self, cluster):
         pass
 
-    @abc.abstractmethod
     def get_node_group_image_username(self, node_group):
-        pass
+        image_id = node_group.get_image_id()
+        return nova.client().images.get(image_id).username
 
     def _await_networks(self, cluster, instances):
         if not instances:
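The username lookup that used to live only in DirectEngine is now the concrete default on the shared Engine base class, so HeatEngine (below) gets the same behaviour for free. A hypothetical unit-test-style sketch of exercising that lookup with a mocked nova client; the stand-in classes and the explicit nova_client argument are simplifications for illustration, not the real signature:

# Hypothetical test sketch: verify the username round-trips from the image
# object returned by the (mocked) nova client.
import unittest
from unittest import mock


class FakeNodeGroup(object):
    def get_image_id(self):
        return 'image-id-1'


class FakeEngine(object):
    # Same body as Engine.get_node_group_image_username, except the nova
    # client is passed in explicitly to keep this sketch self-contained.
    def get_node_group_image_username(self, node_group, nova_client):
        image_id = node_group.get_image_id()
        return nova_client.images.get(image_id).username


class ImageUsernameTest(unittest.TestCase):
    def test_username_comes_from_image(self):
        nova_client = mock.Mock()
        nova_client.images.get.return_value = mock.Mock(username='root')

        engine = FakeEngine()
        username = engine.get_node_group_image_username(FakeNodeGroup(),
                                                        nova_client)

        nova_client.images.get.assert_called_once_with('image-id-1')
        self.assertEqual('root', username)


if __name__ == '__main__':
    unittest.main()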
@@ -30,13 +30,8 @@ conductor = c.API
 CONF = cfg.CONF
 LOG = logging.getLogger(__name__)
 
-CLOUD_INIT_USERNAME = 'ec2-user'
-
 
 class HeatEngine(e.Engine):
-    def get_node_group_image_username(self, node_group):
-        return CLOUD_INIT_USERNAME
-
     def create_cluster(self, cluster):
         ctx = context.ctx()
 
@@ -168,8 +168,6 @@ VANILLA_CONFIG_OPTS = [
                     'image. If you do not specify image related parameters, '
                     'then image for cluster creation will be chosen by '
                     'tag "sahara_i_tests".'),
-    cfg.StrOpt('SSH_USERNAME',
-               help='Username to get cluster node with SSH.'),
     cfg.StrOpt('HADOOP_VERSION',
                default='1.2.1',
                help='Version of Hadoop.'),
@@ -242,8 +240,6 @@ VANILLA_TWO_CONFIG_OPTS = [
                     'image. If you do not specify image related parameters, '
                     'then image for cluster creation will be chosen by '
                     'tag "savanna_i_tests".'),
-    cfg.StrOpt('SSH_USERNAME',
-               help='Username to get cluster node with SSH.'),
     cfg.StrOpt('HADOOP_VERSION',
                default='2.3.0',
                help='Version of Hadoop.'),
@@ -313,8 +309,6 @@ HDP_CONFIG_OPTS = [
                     'image. If you do not specify image related parameters, '
                     'then image for cluster creation will be chosen by '
                     'tag "sahara_i_tests".'),
-    cfg.StrOpt('SSH_USERNAME',
-               help='Username to get cluster node with SSH.'),
     cfg.ListOpt('MASTER_NODE_PROCESSES',
                 default=['JOBTRACKER', 'NAMENODE', 'SECONDARY_NAMENODE',
                          'GANGLIA_SERVER', 'NAGIOS_SERVER',
@@ -406,9 +400,6 @@ HDP2_CONFIG_OPTS = [
                     'image. If you do not specify image related parameters, '
                     'then image for cluster creation will be chosen by '
                     'tag "sahara_i_tests".'),
-    cfg.StrOpt('SSH_USERNAME',
-               default=None,
-               help='Username to get cluster node with SSH.'),
     cfg.ListOpt('MASTER_NODE_PROCESSES',
                 default=['NAMENODE', 'SECONDARY_NAMENODE', 'ZOOKEEPER_SERVER',
                          'AMBARI_SERVER', 'HISTORYSERVER', 'RESOURCEMANAGER',
@@ -131,10 +131,6 @@
 #IMAGE_TAG = <None>
 
 
-# Username to get cluster node with SSH (string value)
-#SSH_USERNAME = <None>
-
-
 # Version of Hadoop (string value)
 #HADOOP_VERSION = '1.2.1'
 
@@ -414,11 +414,7 @@ class ITestCase(testcase.WithAttributes, testtools.TestCase):
 
         def try_get_image_id_and_ssh_username(parameter, value):
             try:
-                if not plugin_config.SSH_USERNAME:
-                    return image.id, image.metadata[imgs.PROP_USERNAME]
-
-                else:
-                    return image.id, plugin_config.SSH_USERNAME
+                return image.id, image.metadata[imgs.PROP_USERNAME]
 
             except KeyError:
                 with excutils.save_and_reraise_exception():
@@ -479,11 +475,7 @@ class ITestCase(testcase.WithAttributes, testtools.TestCase):
                 image.metadata.get(imgs.PROP_TAG + (
                     '%s' % plugin_config.PLUGIN_NAME))):
             try:
-                if not plugin_config.SSH_USERNAME:
-                    return image.id, image.metadata[imgs.PROP_USERNAME]
-
-                else:
-                    return image.id, plugin_config.SSH_USERNAME
+                return image.id, image.metadata[imgs.PROP_USERNAME]
 
             except KeyError:
                 with excutils.save_and_reraise_exception():
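With SSH_USERNAME removed from the integration-test configuration, the tests take the SSH user solely from the image's registered username property (imgs.PROP_USERNAME). A small illustrative sketch, assuming a stand-in image object and property key (the real constant lives in sahara.utils.openstack.images):

# Illustrative only: the integration tests now read the SSH user from the
# image metadata rather than from a separate SSH_USERNAME option.
PROP_USERNAME = '_sahara_username'  # assumed stand-in for imgs.PROP_USERNAME


class FakeImage(object):
    def __init__(self, image_id, metadata):
        self.id = image_id
        self.metadata = metadata


def get_image_id_and_ssh_username(image):
    # Mirrors the simplified test helper: no SSH_USERNAME fallback any more.
    return image.id, image.metadata[PROP_USERNAME]


image = FakeImage('1', {PROP_USERNAME: 'root'})
print(get_image_id_and_ssh_username(image))  # ('1', 'root')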
@@ -22,6 +22,7 @@
             "name" : "cluster-worker-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "networks" : [{ "port" : { "Ref" : "cluster-worker-001-port" }}],
             "key_name" : "user_key",
             "user_data": {
@@ -48,6 +49,7 @@
             "name" : "cluster-worker-002",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "networks" : [{ "port" : { "Ref" : "cluster-worker-002-port" }}],
             "key_name" : "user_key",
             "scheduler_hints" : {"different_host": [{"Ref": "cluster-worker-001"}]},
@@ -75,6 +77,7 @@
             "name" : "cluster-master-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "networks" : [{ "port" : { "Ref" : "cluster-master-001-port" }}],
             "key_name" : "user_key",
             "user_data": {
@@ -22,6 +22,7 @@
             "name" : "cluster-worker-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "networks" : [{ "port" : { "Ref" : "cluster-worker-001-port" }}],
             "key_name" : "user_key",
             "user_data": {
@@ -78,6 +79,7 @@
             "name" : "cluster-master-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "networks" : [{ "port" : { "Ref" : "cluster-master-001-port" }}],
             "key_name" : "user_key",
             "user_data": {
@@ -9,6 +9,7 @@
             "name" : "cluster-worker-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "key_name" : "user_key",
             "user_data": {
                 "Fn::Join" : ["\n", ["line2", "line3"]]
@@ -51,6 +52,7 @@
             "name" : "cluster-master-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "key_name" : "user_key",
             "user_data": {
                 "Fn::Join" : ["\n", ["line1", "line2"]]
@@ -22,6 +22,7 @@
             "name" : "cluster-worker-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "key_name" : "user_key",
             "user_data": {
                 "Fn::Join" : ["\n", ["line2", "line3"]]
@@ -77,6 +78,7 @@
             "name" : "cluster-master-001",
             "flavor" : "42",
             "image" : "1",
+            "admin_user": "root",
             "key_name" : "user_key",
             "user_data": {
                 "Fn::Join" : ["\n", ["line1", "line2"]]
@@ -78,10 +78,12 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):
     def _make_node_groups(self, floating_ip_pool=None):
         ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                               floating_ip_pool=floating_ip_pool, image_id=None,
-                              volumes_per_node=0, volumes_size=0, id=1)
+                              volumes_per_node=0, volumes_size=0, id=1,
+                              image_username='root')
         ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 1,
                               floating_ip_pool=floating_ip_pool, image_id=None,
-                              volumes_per_node=2, volumes_size=10, id=2)
+                              volumes_per_node=2, volumes_size=10, id=2,
+                              image_username='root')
         return ng1, ng2
 
     def _make_cluster(self, mng_network, ng1, ng2):
@@ -178,10 +180,12 @@ class TestClusterTemplate(base.SaharaWithDbTestCase):
 
         ng1 = tu.make_ng_dict('master', 42, ['namenode'], 1,
                               floating_ip_pool='floating', image_id=None,
-                              volumes_per_node=0, volumes_size=0, id=1)
+                              volumes_per_node=0, volumes_size=0, id=1,
+                              image_username='root')
         ng2 = tu.make_ng_dict('worker', 42, ['datanode'], 2,
                               floating_ip_pool='floating', image_id=None,
-                              volumes_per_node=0, volumes_size=0, id=2)
+                              volumes_per_node=0, volumes_size=0, id=2,
+                              image_username='root')
         cluster = tu.create_cluster("cluster", "tenant1", "general",
                                     "1.2.1", [ng1, ng2],
                                     user_keypair_id='user_key',
@@ -168,6 +168,7 @@ class ClusterTemplate(object):
         fields = {'instance_name': inst_name,
                   'flavor_id': ng.flavor_id,
                   'image_id': ng.get_image_id(),
+                  'image_username': ng.image_username,
                   'network_interfaces': nets,
                   'key_name': key_name,
                   'userdata': _prepare_userdata(userdata),
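The fields dict above is what gets %-substituted into the instance template shown at the top of this diff, so image_username ends up as the resource's "admin_user". A trimmed-down illustration of that substitution (only a few of the real fields are shown):

# Simplified %-substitution: the new 'image_username' field fills the new
# '"admin_user"' placeholder in the serialized heat resource.
instance_template = '''{
    "name" : "%(instance_name)s",
    "flavor" : "%(flavor_id)s",
    "image" : "%(image_id)s",
    "admin_user": "%(image_username)s"
}'''

fields = {
    'instance_name': 'cluster-worker-001',
    'flavor_id': '42',
    'image_id': '1',
    'image_username': 'root',  # taken from ng.image_username
}

print(instance_template % fields)
# ...
#     "admin_user": "root"
# ...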