Merge "Designate integration"

Jenkins 2016-08-10 15:23:12 +00:00 committed by Gerrit Code Review
commit b4dee1c7e4
28 changed files with 448 additions and 61 deletions

View File

@ -22,6 +22,12 @@
# with use_neutron=True)
#use_namespaces=false
# Use Designate for internal and external hostname resolution (boolean value)
#use_designate=false
# IP addresses of Designate nameservers. This is required if 'use_designate' is
# True
#nameservers =
# Maximum length of job binary data in kilobytes that may be
# stored or retrieved in a single operation (integer value)

View File

@ -0,0 +1,4 @@
---
features:
- Added integration with Designate for hostname resolution through
DNS servers.

View File

@ -64,6 +64,7 @@ class Cluster(object):
use_autoconfig
is_public
is_protected
domain_name
"""
def has_proxy_gateway(self):
@ -87,6 +88,9 @@ class Cluster(object):
extra = self.extra or {}
return extra.get('heat_stack_name', self.name)
def use_designate_feature(self):
return CONF.use_designate and self.domain_name
class NodeGroup(object):
"""An object representing Node Group.
@ -152,13 +156,23 @@ class Instance(object):
management_ip
volumes
storage_devices_number
dns_hostname
"""
def hostname(self):
return self.instance_name
def fqdn(self):
return self.instance_name + '.' + CONF.node_domain
if self._use_designate_feature():
return self.dns_hostname
else:
return self.instance_name + '.' + CONF.node_domain
def get_ip_or_dns_name(self):
if self._use_designate_feature():
return self.dns_hostname
else:
return self.management_ip
def remote(self):
return remote.get_remote(self)
@ -173,6 +187,9 @@ class Instance(object):
return mp
def _use_designate_feature(self):
return CONF.use_designate and self.dns_hostname
class ClusterTemplate(object):
"""An object representing Cluster Template.
@ -190,6 +207,7 @@ class ClusterTemplate(object):
node_groups - list of NodeGroup objects
is_public
is_protected
domain_name
"""

View File

@ -94,12 +94,24 @@ networking_opts = [
"use_rootwrap=True")
]
dns_opts = [
cfg.BoolOpt('use_designate',
default=False,
help='Use Designate for internal and external hostname '
'resolution'),
cfg.ListOpt('nameservers',
default=[],
help="IP addresses of Designate nameservers. "
"This is required if 'use_designate' is True")
]
CONF = cfg.CONF
CONF.register_cli_opts(cli_opts)
CONF.register_opts(networking_opts)
CONF.register_opts(edp_opts)
CONF.register_opts(db_opts)
CONF.register_opts(dns_opts)
log.register_options(CONF)
@ -149,6 +161,7 @@ def list_opts():
itertools.chain(cli_opts,
edp_opts,
networking_opts,
dns_opts,
db_opts,
plugins_base.opts,
topology_helper.opts,

View File

@ -0,0 +1,38 @@
# Copyright 2016 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""032_add_domain_name
Revision ID: 032
Revises: 031
Create Date: 2016-07-21 13:33:33.674853
"""
# revision identifiers, used by Alembic.
revision = '032'
down_revision = '031'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('cluster_templates', sa.Column(
'domain_name', sa.String(length=255), nullable=True))
op.add_column('clusters', sa.Column(
'domain_name', sa.String(length=255), nullable=True))
op.add_column('instances', sa.Column(
'dns_hostname', sa.String(length=255), nullable=True))
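Assuming a standard deployment, the new columns are applied with Sahara's usual Alembic wrapper (config path illustrative):

    $ sahara-db-manage --config-file /etc/sahara/sahara.conf upgrade head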

View File

@ -83,6 +83,7 @@ class Cluster(mb.SaharaBase):
shares = sa.Column(st.JsonListType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
domain_name = sa.Column(sa.String(255))
def to_dict(self, show_progress=False):
d = super(Cluster, self).to_dict()
@ -162,6 +163,7 @@ class Instance(mb.SaharaBase):
management_ip = sa.Column(sa.String(45))
volumes = sa.Column(st.JsonListType())
storage_devices_number = sa.Column(sa.Integer)
dns_hostname = sa.Column(sa.String(255))
# Template objects: ClusterTemplate, NodeGroupTemplate, TemplatesRelation
@ -192,6 +194,7 @@ class ClusterTemplate(mb.SaharaBase):
shares = sa.Column(st.JsonListType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
domain_name = sa.Column(sa.String(255))
def to_dict(self):
d = super(ClusterTemplate, self).to_dict()

View File

@ -98,7 +98,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
def _set_cluster_info(self, cluster):
ambari_ip = plugin_utils.get_instance(
cluster, p_common.AMBARI_SERVER).management_ip
cluster, p_common.AMBARI_SERVER).get_ip_or_dns_name()
ambari_port = "8080"
info = {
p_common.AMBARI_SERVER: {
@ -113,53 +113,54 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
for idx, namenode in enumerate(nns):
info[p_common.NAMENODE][
"Web UI %s" % (idx + 1)] = (
"http://%s:50070" % namenode.management_ip)
"http://%s:50070" % namenode.get_ip_or_dns_name())
rms = plugin_utils.get_instances(cluster, p_common.RESOURCEMANAGER)
info[p_common.RESOURCEMANAGER] = {}
for idx, resourcemanager in enumerate(rms):
info[p_common.RESOURCEMANAGER][
"Web UI %s" % (idx + 1)] = (
"http://%s:8088" % resourcemanager.management_ip)
"http://%s:8088" % resourcemanager.get_ip_or_dns_name())
historyserver = plugin_utils.get_instance(cluster,
p_common.HISTORYSERVER)
if historyserver:
info[p_common.HISTORYSERVER] = {
"Web UI": "http://%s:19888" % historyserver.management_ip
"Web UI": "http://%s:19888" %
historyserver.get_ip_or_dns_name()
}
atlserver = plugin_utils.get_instance(cluster,
p_common.APP_TIMELINE_SERVER)
if atlserver:
info[p_common.APP_TIMELINE_SERVER] = {
"Web UI": "http://%s:8188" % atlserver.management_ip
"Web UI": "http://%s:8188" % atlserver.get_ip_or_dns_name()
}
oozie = plugin_utils.get_instance(cluster, p_common.OOZIE_SERVER)
if oozie:
info[p_common.OOZIE_SERVER] = {
"Web UI": "http://%s:11000/oozie" % oozie.management_ip
"Web UI": "http://%s:11000/oozie" % oozie.get_ip_or_dns_name()
}
hbase_master = plugin_utils.get_instance(cluster,
p_common.HBASE_MASTER)
if hbase_master:
info[p_common.HBASE_MASTER] = {
"Web UI": "http://%s:60010" % hbase_master.management_ip
"Web UI": "http://%s:60010" % hbase_master.get_ip_or_dns_name()
}
falcon = plugin_utils.get_instance(cluster, p_common.FALCON_SERVER)
if falcon:
info[p_common.FALCON_SERVER] = {
"Web UI": "http://%s:15000" % falcon.management_ip
"Web UI": "http://%s:15000" % falcon.get_ip_or_dns_name()
}
storm_ui = plugin_utils.get_instance(cluster, p_common.STORM_UI_SERVER)
if storm_ui:
info[p_common.STORM_UI_SERVER] = {
"Web UI": "http://%s:8744" % storm_ui.management_ip
"Web UI": "http://%s:8744" % storm_ui.get_ip_or_dns_name()
}
ranger_admin = plugin_utils.get_instance(cluster,
p_common.RANGER_ADMIN)
if ranger_admin:
info[p_common.RANGER_ADMIN] = {
"Web UI": "http://%s:6080" % ranger_admin.management_ip,
"Web UI": "http://%s:6080" % ranger_admin.get_ip_or_dns_name(),
"Username": "admin",
"Password": "admin"
}
@ -167,7 +168,7 @@ class AmbariPluginProvider(p.ProvisioningPluginBase):
p_common.SPARK_JOBHISTORYSERVER)
if spark_hs:
info[p_common.SPARK_JOBHISTORYSERVER] = {
"Web UI": "http://%s:18080" % spark_hs.management_ip
"Web UI": "http://%s:18080" % spark_hs.get_ip_or_dns_name()
}
info.update(cluster.info.to_dict())
ctx = context.ctx()

View File

@ -125,7 +125,7 @@ class BaseVersionHandler(AbstractVersionHandler):
hue = self.cloudera_utils.pu.get_hue(cluster)
if hue:
info['Hue Dashboard'] = {
'Web UI': 'http://%s:8888' % hue.management_ip
'Web UI': 'http://%s:8888' % hue.get_ip_or_dns_name()
}
ctx = context.ctx()

View File

@ -326,7 +326,7 @@ class ClouderaUtils(object):
mng = self.pu.get_manager(cluster)
info = {
'Cloudera Manager': {
'Web UI': 'http://%s:7180' % mng.management_ip,
'Web UI': 'http://%s:7180' % mng.get_ip_or_dns_name(),
'Username': 'admin',
'Password': db_helper.get_cm_password(cluster)
}

View File

@ -251,7 +251,7 @@ class BaseConfigurer(ac.AbstractConfigurer):
display_name = display_name_template % args
data = ui_info.copy()
data[srvc.SERVICE_UI] = (data[srvc.SERVICE_UI] %
instance.management_ip)
instance.get_ip_or_dns_name())
info.update({display_name: data})
ctx = context.ctx()

View File

@ -398,7 +398,7 @@ class SparkProvider(p.ProvisioningPluginBase):
'HDFS', 'dfs.http.address', cluster)
port = address[address.rfind(':') + 1:]
info['HDFS'] = {
'Web UI': 'http://%s:%s' % (nn.management_ip, port)
'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), port)
}
info['HDFS']['NameNode'] = 'hdfs://%s:8020' % nn.hostname()
@ -407,7 +407,8 @@ class SparkProvider(p.ProvisioningPluginBase):
'Spark', 'Master webui port', cluster)
if port is not None:
info['Spark'] = {
'Web UI': 'http://%s:%s' % (sp_master.management_ip, port)
'Web UI': 'http://%s:%s' % (
sp_master.get_ip_or_dns_name(), port)
}
ctx = context.ctx()
conductor.cluster_update(ctx, cluster, {'info': info})

View File

@ -310,7 +310,8 @@ class StormProvider(p.ProvisioningPluginBase):
port = "8080"
info['Storm'] = {
'Web UI': 'http://%s:%s' % (st_master.management_ip, port)
'Web UI': 'http://%s:%s' % (
st_master.get_ip_or_dns_name(), port)
}
ctx = context.ctx()
conductor.cluster_update(ctx, cluster, {'info': info})

View File

@ -108,24 +108,25 @@ class VersionHandler(avm.AbstractVersionHandler):
if rm:
info['YARN'] = {
'Web UI': 'http://%s:%s' % (rm.management_ip, '8088'),
'ResourceManager': 'http://%s:%s' % (rm.management_ip, '8032')
'Web UI': 'http://%s:%s' % (rm.get_ip_or_dns_name(), '8088'),
'ResourceManager': 'http://%s:%s' % (
rm.get_ip_or_dns_name(), '8032')
}
if nn:
info['HDFS'] = {
'Web UI': 'http://%s:%s' % (nn.management_ip, '50070'),
'Web UI': 'http://%s:%s' % (nn.get_ip_or_dns_name(), '50070'),
'NameNode': 'hdfs://%s:%s' % (nn.hostname(), '9000')
}
if oo:
info['JobFlow'] = {
'Oozie': 'http://%s:%s' % (oo.management_ip, '11000')
'Oozie': 'http://%s:%s' % (oo.get_ip_or_dns_name(), '11000')
}
if hs:
info['MapReduce JobHistory Server'] = {
'Web UI': 'http://%s:%s' % (hs.management_ip, '19888')
'Web UI': 'http://%s:%s' % (hs.get_ip_or_dns_name(), '19888')
}
ctx = context.ctx()

View File

@ -142,10 +142,10 @@ class Engine(object):
"""Configure active instances.
* generate /etc/hosts
* change /etc/resolv.conf
* setup passwordless login
* etc.
"""
hosts_file = cluster_utils.generate_etc_hosts(cluster)
cpo.add_provisioning_step(
cluster.id, _("Configure instances"),
cluster_utils.count_instances(cluster))
@ -154,14 +154,20 @@ class Engine(object):
for node_group in cluster.node_groups:
for instance in node_group.instances:
with context.set_current_instance_id(instance.instance_id):
tg.spawn(
"configure-instance-%s" % instance.instance_name,
self._configure_instance, instance, hosts_file)
tg.spawn("configure-instance-{}".format(
instance.instance_name),
self._configure_instance, instance, cluster
)
@cpo.event_wrapper(mark_successful_on_exit=True)
def _configure_instance(self, instance, hosts_file):
LOG.debug('Configuring instance')
def _configure_instance(self, instance, cluster):
self._configure_instance_etc_hosts(instance, cluster)
if cluster.use_designate_feature():
self._configure_instance_resolve_conf(instance)
def _configure_instance_etc_hosts(self, instance, cluster):
LOG.debug('Configuring "/etc/hosts" of instance.')
hosts_file = cluster_utils.generate_etc_hosts(cluster)
with instance.remote() as r:
r.write_file_to('etc-hosts', hosts_file)
r.execute_command('sudo hostname %s' % instance.fqdn())
@ -169,6 +175,23 @@ class Engine(object):
r.execute_command('sudo usermod -s /bin/bash $USER')
def _configure_instance_resolve_conf(self, instance):
LOG.debug('Adding name servers from sahara.conf that are '
'missing from /etc/resolv.conf.')
with instance.remote() as r:
code, curr_resolv_conf = r.execute_command('cat /etc/resolv.conf')
diff = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf)
if diff.strip():
position = curr_resolv_conf.find('nameserver')
if position == -1:
position = 0
new_resolv_conf = "{}\n{}{}".format(
curr_resolv_conf[:position],
diff,
curr_resolv_conf[position:])
r.write_file_to('resolv-conf', new_resolv_conf)
r.execute_command('sudo mv resolv-conf /etc/resolv.conf')
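# For example (illustrative values), with CONF.nameservers = ['192.0.2.1']
# and a current /etc/resolv.conf of:
#
#     search openstacklocal
#     nameserver 8.8.8.8
#
# the diff is 'nameserver 192.0.2.1\n' and, spliced in at the first
# 'nameserver' line, the rewritten file reads:
#
#     search openstacklocal
#     nameserver 192.0.2.1
#     nameserver 8.8.8.8
#
# so the Designate servers are consulted before the pre-existing ones.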
def _generate_user_data_script(self, node_group, instance_name):
script = """#!/bin/bash
echo "${public_key}" >> ${user_home}/.ssh/authorized_keys\n

View File

@ -14,4 +14,4 @@
# limitations under the License.
HEAT_ENGINE_VERSION = 'heat.3.0'
HEAT_TEMPLATE_VERSION = '2013-05-23'
HEAT_TEMPLATE_VERSION = '2016-04-08'

View File

@ -153,11 +153,17 @@ class HeatEngine(e.Engine):
instances = stack.get_node_group_instances(node_group)
for instance in instances:
nova_id = instance['physical_id']
name = instance['name']
if nova_id not in old_ids:
instance_id = conductor.instance_add(
ctx, node_group, {"instance_id": nova_id,
"instance_name": name})
name = instance['name']
inst = {
"instance_id": nova_id,
"instance_name": name
}
if cluster.use_designate_feature():
inst.update(
{"dns_hostname":
name + '.' + cluster.domain_name[:-1]})
instance_id = conductor.instance_add(ctx, node_group, inst)
new_ids.append(instance_id)
return new_ids
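Note the domain_name[:-1] slice: Designate zone names are fully qualified and carry a trailing dot, which is stripped before the hostname is stored. For example (hypothetical names), with domain_name = 'example.org.' an instance named 'cluster-worker-1' is stored with dns_hostname = 'cluster-worker-1.example.org'.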
@ -234,6 +240,7 @@ class HeatEngine(e.Engine):
cluster = c_u.change_cluster_status(cluster, stages[2])
instances = c_u.get_instances(cluster, inst_ids)
volumes.mount_to_instances(instances)
self._configure_instances(cluster)

View File

@ -35,6 +35,10 @@ SSH_PORT = 22
INSTANCE_RESOURCE_NAME = "inst"
SERVER_GROUP_PARAM_NAME = "servgroup"
AUTO_SECURITY_GROUP_PARAM_NAME = "autosecgroup"
INTERNAL_DESIGNATE_REC = "internal_designate_record"
INTERNAL_DESIGNATE_REV_REC = "internal_designate_reverse_record"
EXTERNAL_DESIGNATE_REC = "external_designate_record"
EXTERNAL_DESIGNATE_REV_REC = "external_designate_reverse_record"
# TODO(vgridnev): Using insecure flag until a proper way to pass the
# certificate is implemented
@ -67,6 +71,14 @@ def _get_inst_name(ng):
}
def _get_inst_domain_name(domain):
return {
"list_join": [
'.',
[{"get_attr": [INSTANCE_RESOURCE_NAME, "name"]}, domain]]
}
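# For a cluster whose domain_name is 'example.org.' (hypothetical), Heat
# resolves the fragment above to '<instance name>.example.org.' at
# stack-create time.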
def _get_aa_group_name(cluster):
return g.generate_aa_group_name(cluster.name)
@ -175,7 +187,8 @@ class ClusterStack(object):
'disable_rollback': disable_rollback,
'parameters': {},
'template': main_tmpl,
'files': self.files}
'files': self.files
}
if CONF.heat_stack_tags:
kwargs['tags'] = ",".join(CONF.heat_stack_tags)
@ -346,6 +359,75 @@ class ClusterStack(object):
return int(configs.get(cfg_target,
{}).get(cfg_name, timeout_cfg.default_value))
def _serialize_designate_records(self):
if not self.cluster.use_designate_feature():
return {}
hostname = _get_inst_domain_name(self.cluster.domain_name)
return {
INTERNAL_DESIGNATE_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': hostname,
'type': 'A',
'data': {'get_attr': [
INSTANCE_RESOURCE_NAME, 'networks', 'private', 0]},
'domain': self.cluster.domain_name
}
},
EXTERNAL_DESIGNATE_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': hostname,
'type': 'A',
'data': {'get_attr': ['floating_ip', 'ip']},
'domain': self.cluster.domain_name
}
}
}
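# Rendered into HOT YAML, the internal record for a cluster with domain
# 'example.org.' (hypothetical) would come out roughly as:
#
#   internal_designate_record:
#     type: OS::Designate::Record
#     properties:
#       name: {list_join: ['.', [{get_attr: [inst, name]}, 'example.org.']]}
#       type: A
#       data: {get_attr: [inst, networks, private, 0]}
#       domain: 'example.org.'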
def _serialize_designate_reverse_records(self):
if not self.cluster.use_designate_feature():
return {}
def _generate_reversed_ip(ip):
return {
'list_join': [
'.',
[
{'str_split': ['.', ip, 3]},
{'str_split': ['.', ip, 2]},
{'str_split': ['.', ip, 1]},
{'str_split': ['.', ip, 0]},
'in-addr.arpa.'
]
]
}
hostname = _get_inst_domain_name(self.cluster.domain_name)
return {
INTERNAL_DESIGNATE_REV_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': _generate_reversed_ip({'get_attr': [
INSTANCE_RESOURCE_NAME, 'networks', 'private', 0]}),
'type': 'PTR',
'data': hostname,
'domain': 'in-addr.arpa.'
}
},
EXTERNAL_DESIGNATE_REV_REC: {
'type': 'OS::Designate::Record',
'properties': {
'name': _generate_reversed_ip(
{'get_attr': ['floating_ip', 'ip']}),
'type': 'PTR',
'data': hostname,
'domain': 'in-addr.arpa.'
}
}
}
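# What the str_split/list_join pipeline above computes, in plain Python
# (illustrative):
#
#   o = "10.0.0.3".split(".")
#   ".".join([o[3], o[2], o[1], o[0], "in-addr.arpa."])
#   # -> '3.0.0.10.in-addr.arpa.', the name DNS expects for a PTR record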
def _serialize_instance(self, ng):
resources = {}
properties = {}
@ -406,6 +488,8 @@ class ClusterStack(object):
}
})
resources.update(self._serialize_designate_records())
resources.update(self._serialize_designate_reverse_records())
resources.update(self._serialize_volume(ng))
resources.update(self._serialize_wait_condition(ng))
return resources

View File

@ -99,6 +99,9 @@ CLUSTER_TEMPLATE_SCHEMA = {
},
"is_protected": {
"type": ["boolean", "null"],
},
"domain_name": {
"type": ["string", "null"],
}
},
"additionalProperties": False,

View File

@ -81,7 +81,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
get_hue.return_value.management_ip = "1.2.3.4"
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}

View File

@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
get_hue.return_value.management_ip = "1.2.3.4"
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}

View File

@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
get_hue.return_value.management_ip = "1.2.3.4"
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}

View File

@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
get_hue.return_value.management_ip = "1.2.3.4"
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}

View File

@ -82,7 +82,9 @@ class VersionHandlerTestCase(base.SaharaTestCase):
@mock.patch(plugin_utils_path + "get_hue")
def test_set_cluster_info(self, get_hue, get_cloudera_manager_info,
ctx, cluster_update):
get_hue.return_value.management_ip = "1.2.3.4"
hue = mock.Mock()
hue.get_ip_or_dns_name.return_value = "1.2.3.4"
get_hue.return_value = hue
cluster = mock.Mock()
self.vh._set_cluster_info(cluster)
info = {'info': {'Hue Dashboard': {'Web UI': 'http://1.2.3.4:8888'}}}

View File

@ -45,13 +45,15 @@ class BaseTestClusterTemplate(base.SaharaWithDbTestCase):
auto_security_group=True)
return ng1, ng2
def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None):
def _make_cluster(self, mng_network, ng1, ng2, anti_affinity=None,
domain_name=None):
return tu.create_cluster("cluster", "tenant1", "general",
"2.6.0", [ng1, ng2],
user_keypair_id='user_key',
neutron_management_network=mng_network,
default_image_id='1', image_id=None,
anti_affinity=anti_affinity or [])
anti_affinity=anti_affinity or [],
domain_name=domain_name)
class TestClusterTemplate(BaseTestClusterTemplate):
@ -176,6 +178,113 @@ class TestClusterTemplate(BaseTestClusterTemplate):
actual = self._generate_auto_security_group_template(False)
self.assertEqual(expected, actual)
@mock.patch("sahara.conductor.objects.Cluster.use_designate_feature")
def test_serialize_designate_records(self, mock_use_designate):
ng1, ng2 = self._make_node_groups('floating')
cluster = self._make_cluster('private_net', ng1, ng2,
domain_name='domain.org.')
mock_use_designate.return_value = False
heat_template = self._make_heat_template(cluster, ng1, ng2)
expected = {}
actual = heat_template._serialize_designate_records()
self.assertEqual(expected, actual)
mock_use_designate.return_value = True
heat_template = self._make_heat_template(cluster, ng1, ng2)
expected = {
'internal_designate_record': {
'properties': {
'domain': 'domain.org.',
'name': {
'list_join': [
'.',
[{'get_attr': ['inst', 'name']}, 'domain.org.']]
},
'data': {'get_attr': ['inst', 'networks', 'private', 0]},
'type': 'A'
},
'type': 'OS::Designate::Record'
},
'external_designate_record': {
'properties': {
'domain': 'domain.org.',
'name': {
'list_join': [
'.',
[{'get_attr': ['inst', 'name']}, 'domain.org.']]
},
'data': {'get_attr': ['floating_ip', 'ip']},
'type': 'A'
},
'type': 'OS::Designate::Record'
}
}
actual = heat_template._serialize_designate_records()
self.assertEqual(expected, actual)
@mock.patch("sahara.conductor.objects.Cluster.use_designate_feature")
def test_serialize_designate_reversed_records(self, mock_use_designate):
def _generate_reversed_ip(ip):
return {
'list_join': [
'.',
[
{'str_split': ['.', ip, 3]},
{'str_split': ['.', ip, 2]},
{'str_split': ['.', ip, 1]},
{'str_split': ['.', ip, 0]},
'in-addr.arpa.'
]
]
}
ng1, ng2 = self._make_node_groups('floating')
cluster = self._make_cluster('private_net', ng1, ng2,
domain_name='domain.org.')
mock_use_designate.return_value = False
heat_template = self._make_heat_template(cluster, ng1, ng2)
expected = {}
actual = heat_template._serialize_designate_reverse_records()
self.assertEqual(expected, actual)
mock_use_designate.return_value = True
heat_template = self._make_heat_template(cluster, ng1, ng2)
expected = {
'internal_designate_reverse_record': {
'properties': {
'domain': 'in-addr.arpa.',
'name': _generate_reversed_ip(
{'get_attr': ['inst', 'networks', 'private', 0]}),
'data': {
'list_join': [
'.',
[{'get_attr': ['inst', 'name']}, 'domain.org.']]
},
'type': 'PTR'
},
'type': 'OS::Designate::Record'
},
'external_designate_reverse_record': {
'properties': {
'domain': 'in-addr.arpa.',
'name': _generate_reversed_ip(
{'get_attr': ['floating_ip', 'ip']}),
'data': {
'list_join': [
'.',
[{'get_attr': ['inst', 'name']}, 'domain.org.']]
},
'type': 'PTR'
},
'type': 'OS::Designate::Record'
}
}
actual = heat_template._serialize_designate_reverse_records()
self.assertEqual(expected, actual)
class TestClusterTemplateWaitCondition(BaseTestClusterTemplate):
def _make_heat_template(self, cluster, ng1, ng2):

View File

@ -156,7 +156,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
"name": "test",
'count': 3
}
]
],
'domain_name': 'domain.org.'
},
)
@ -226,7 +227,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
],
'anti_affinity': ['datanode'],
'description': 'my template',
'neutron_management_network': str(uuid.uuid4())
'neutron_management_network': str(uuid.uuid4()),
'domain_name': 'domain.org.'
})
@mock.patch("sahara.service.validations.base.check_network_exists")
@ -244,7 +246,8 @@ class TestClusterTemplateCreateValidation(u.ValidationTestCase):
'node_groups': None,
'anti_affinity': None,
'description': None,
'neutron_management_network': None
'neutron_management_network': None,
'domain_name': None
})
def test_cluster_template_create_v_plugin_name_exists(self):

View File

@ -421,7 +421,8 @@ class ValidationTestCase(base.SaharaTestCase):
'name': 'test-cluster',
'plugin_name': 'fake',
'hadoop_version': '0.1',
'default_image_id': '550e8400-e29b-41d4-a716-446655440000'
'default_image_id': '550e8400-e29b-41d4-a716-446655440000',
'domain_name': 'domain.org.'
}
self._assert_create_object_validation(data=data)
data = {

View File

@ -94,8 +94,13 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
cluster = self.api.cluster_get(ctx, cluster.id)
self.assertEqual(ng_len - 1, len(cluster.node_groups))
def test_generate_etc_hosts(self):
@mock.patch("sahara.conductor.objects.Cluster.use_designate_feature")
@mock.patch("socket.gethostbyname")
@mock.patch("sahara.utils.openstack.base.url_for")
def test_generate_etc_hosts(self, mock_url, mock_get_host,
mock_use_designate):
cluster = self._make_sample()
mock_use_designate.return_value = False
ctx = context.ctx()
idx = 0
for ng in cluster.node_groups:
@ -107,12 +112,13 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
'internal_ip': str(idx),
})
cluster = self.api.cluster_get(ctx, cluster)
with mock.patch("sahara.utils.openstack.base.url_for") as mock_url:
mock_url.side_effect = ["http://keystone.local:1234/v13",
"http://swift.local:5678/v42"]
with mock.patch("socket.gethostbyname") as mock_get_host:
mock_get_host.side_effect = ["1.2.3.4", "5.6.7.8"]
value = cluster_utils.generate_etc_hosts(cluster)
mock_url.side_effect = ["http://keystone.local:1234/v13",
"http://swift.local:5678/v42"]
mock_get_host.side_effect = ["1.2.3.4", "5.6.7.8"]
value = cluster_utils.generate_etc_hosts(cluster)
expected = ("127.0.0.1 localhost\n"
"1 1.novalocal 1\n"
"2 2.novalocal 2\n"
@ -121,3 +127,35 @@ class UtilsClusterTest(base.SaharaWithDbTestCase):
"1.2.3.4 keystone.local\n"
"5.6.7.8 swift.local\n")
self.assertEqual(expected, value)
@mock.patch("sahara.conductor.objects.Cluster.use_designate_feature")
@mock.patch("socket.gethostbyname")
@mock.patch("sahara.utils.openstack.base.url_for")
def test_generate_etc_hosts_with_designate(self, mock_url, mock_get_host,
mock_use_designate):
cluster = self._make_sample()
mock_use_designate.return_value = True
mock_url.side_effect = ["http://keystone.local:1234/v13",
"http://swift.local:5678/v42"]
mock_get_host.side_effect = ["1.2.3.4", "5.6.7.8"]
value = cluster_utils.generate_etc_hosts(cluster)
expected = ("127.0.0.1 localhost\n"
"1.2.3.4 keystone.local\n"
"5.6.7.8 swift.local\n")
self.assertEqual(expected, value)
def test_generate_resolv_conf_diff(self):
curr_resolv_conf = "search openstacklocal\nnameserver 8.8.8.8\n"
self.override_config("nameservers", ['1.1.1.1'])
value = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf)
expected = "nameserver 1.1.1.1\n"
self.assertEqual(expected, value)
self.override_config("nameservers", ['1.1.1.1', '8.8.8.8', '2.2.2.2'])
value = cluster_utils.generate_resolv_conf_diff(curr_resolv_conf)
expected = ("nameserver 1.1.1.1\n"
"nameserver 2.2.2.2\n")
self.assertEqual(expected, value)

View File

@ -16,6 +16,7 @@
import socket
from keystoneauth1 import exceptions as keystone_ex
from oslo_config import cfg
from oslo_log import log as logging
from six.moves.urllib import parse
@ -29,6 +30,8 @@ from sahara.utils.openstack import base as auth_base
conductor = c.API
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
# cluster status
CLUSTER_STATUS_VALIDATING = "Validating"
CLUSTER_STATUS_INFRAUPDATING = "InfraUpdating"
@ -132,13 +135,7 @@ def clean_cluster_from_empty_ng(cluster):
conductor.node_group_remove(ctx, ng)
def generate_etc_hosts(cluster):
hosts = "127.0.0.1 localhost\n"
for node_group in cluster.node_groups:
for instance in node_group.instances:
hosts += "%s %s %s\n" % (instance.internal_ip,
instance.fqdn(),
instance.hostname())
def _etc_hosts_for_services(hosts):
# add alias for keystone and swift
for service in ["identity", "object-store"]:
try:
@ -149,5 +146,31 @@ def generate_etc_hosts(cluster):
LOG.debug("Endpoint not found for service: \"%s\"", service)
continue
hosts += "%s %s\n" % (socket.gethostbyname(hostname), hostname)
return hosts
def _etc_hosts_for_instances(hosts, cluster):
for node_group in cluster.node_groups:
for instance in node_group.instances:
hosts += "%s %s %s\n" % (instance.internal_ip,
instance.fqdn(),
instance.hostname())
return hosts
def generate_etc_hosts(cluster):
hosts = "127.0.0.1 localhost\n"
if not cluster.use_designate_feature():
hosts = _etc_hosts_for_instances(hosts, cluster)
hosts = _etc_hosts_for_services(hosts)
return hosts
def generate_resolv_conf_diff(curr_resolv_conf):
# Returns a string with the nameservers from CONF.nameservers
# that are missing from 'curr_resolv_conf'.
resolv_conf = ""
for ns in CONF.nameservers:
if ns not in curr_resolv_conf:
resolv_conf += "nameserver {}\n".format(ns)
return resolv_conf
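A quick interactive check of the helper, mirroring the unit tests earlier in this change (values illustrative; CONF.nameservers normally comes from sahara.conf):

    >>> CONF.set_override('nameservers', ['192.0.2.1', '8.8.8.8'])
    >>> generate_resolv_conf_diff("nameserver 8.8.8.8\n")
    'nameserver 192.0.2.1\n'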