Add support for fuel-devops 3.0 to fuel-qa

Blueprint: template-based-testcases
Change-Id: Ie641be08bc192f051be755a519210749397df5b5
This commit is contained in:
Dmitry Tyzhnenko 2016-03-01 20:56:50 +02:00 committed by tatyana-leontovich
parent ff59b9cf51
commit 415044aa99
13 changed files with 1744 additions and 45 deletions
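
The gist of the change is a compatibility switch at the bottom of the FuelWebClient module: callers keep importing FuelWebClient, while the module binds that name to FuelWebClient29 (fuel-devops 2.9.x) or FuelWebClient30 (fuel-devops 3.0) depending on the installed devops package. A minimal, self-contained sketch of that selection logic follows; the two classes are stubbed here purely for illustration, the full implementations are in the diff below.

import distutils.version

import devops


class FuelWebClient29(object):
    """Stub for the client that talks to the fuel-devops 2.9.x API."""


class FuelWebClient30(FuelWebClient29):
    """Stub for the client that talks to the fuel-devops 3.0 template API."""


# Bind the public name to the implementation matching the installed devops.
if (distutils.version.LooseVersion(devops.__version__) <
        distutils.version.LooseVersion('3')):
    FuelWebClient = FuelWebClient29
else:
    FuelWebClient = FuelWebClient30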


@@ -258,7 +258,9 @@ def get_test_method_name():
def get_current_env(args):
if args[0].__class__.__name__ == "EnvironmentModel":
return args[0]
elif args[0].__class__.__name__ == "FuelWebClient":
elif args[0].__class__.__name__ in ("FuelWebClient",
"FuelWebClient29",
"FuelWebClient30"):
return args[0].environment
else:
try:


@@ -18,6 +18,8 @@ import re
import time
import traceback
import distutils.version
import devops
from devops.error import DevopsCalledProcessError
from devops.error import TimeoutError
from devops.helpers.helpers import _wait
@@ -92,7 +94,7 @@ from fuelweb_test.settings import VCENTER_PASSWORD
from fuelweb_test.settings import VCENTER_USERNAME
class FuelWebClient(object):
class FuelWebClient29(object):
"""FuelWebClient.""" # TODO documentation
def __init__(self, environment):
@@ -101,7 +103,7 @@ class FuelWebClient(object):
self.client = NailgunClient(self.ssh_manager.admin_ip)
self._environment = environment
self.security = SecurityChecks(self.client, self._environment)
super(FuelWebClient, self).__init__()
super(FuelWebClient29, self).__init__()
@property
def environment(self):
@@ -537,9 +539,8 @@ class FuelWebClient(object):
"it is required for VxLAN DVR "
"network configuration.")
public_gw = self.environment.d_env.router(router_name="public")
public_gw = self.get_public_gw()
remote = self.environment.d_env.get_admin_remote()
if help_data.FUEL_USE_LOCAL_NTPD\
and ('ntp_list' not in settings)\
and checkers.is_ntpd_active(
@@ -549,7 +550,6 @@ class FuelWebClient(object):
logger.info("Configuring cluster #{0}"
"to use NTP server {1}"
.format(cluster_id, public_gw))
remote.clear()
if help_data.FUEL_USE_LOCAL_DNS and ('dns_list' not in settings):
attributes['editable']['external_dns']['dns_list']['value'] =\
@@ -586,12 +586,7 @@ class FuelWebClient(object):
# may be created by new components like ironic
self.client.update_cluster_attributes(cluster_id, attributes)
if MULTIPLE_NETWORKS:
ng = {rack['name']: [] for rack in NODEGROUPS}
self.update_nodegroups(cluster_id=cluster_id,
node_groups=ng)
self.update_nodegroups_network_configuration(cluster_id,
NODEGROUPS)
self.nodegroups_configure(cluster_id)
logger.debug("Try to update cluster "
"with next attributes {0}".format(attributes))
@@ -611,6 +606,21 @@ class FuelWebClient(object):
return cluster_id
@logwrap
def get_public_gw(self):
return self.environment.d_env.router(router_name="public")
@logwrap
def nodegroups_configure(self, cluster_id):
"""Update nodegroups configuration
"""
if not MULTIPLE_NETWORKS:
return
ng = {rack['name']: [] for rack in NODEGROUPS}
self.update_nodegroups(cluster_id=cluster_id, node_groups=ng)
self.update_nodegroups_network_configuration(cluster_id, NODEGROUPS)
@logwrap
def ssl_configure(self, cluster_id):
attributes = self.client.get_cluster_attributes(cluster_id)
@@ -1207,6 +1217,19 @@ class FuelWebClient(object):
return self.client.get_task(task['id'])
# TODO(ddmitriev): this method will be replaced
# after switching to fuel-devops3.0
# pylint: disable=no-self-use
def get_node_group_and_role(self, node_name, nodes_dict):
if MULTIPLE_NETWORKS:
node_roles = nodes_dict[node_name][0]
node_group = nodes_dict[node_name][1]
else:
node_roles = nodes_dict[node_name]
node_group = 'default'
return node_group, node_roles
# pylint: enable=no-self-use
@logwrap
def update_nodes(self, cluster_id, nodes_dict,
pending_addition=True, pending_deletion=False,
@@ -1236,15 +1259,10 @@ class FuelWebClient(object):
nodes_groups = {}
updated_nodes = []
for node_name in nodes_dict:
if MULTIPLE_NETWORKS:
node_roles = nodes_dict[node_name][0]
node_group = nodes_dict[node_name][1]
else:
node_roles = nodes_dict[node_name]
node_group = 'default'
devops_node = self.environment.d_env.get_node(name=node_name)
node_group, node_roles = self.get_node_group_and_role(node_name,
nodes_dict)
wait(lambda:
self.get_nailgun_node_by_devops_node(devops_node)['online'],
timeout=60 * 2)
@@ -1439,7 +1457,7 @@ class FuelWebClient(object):
# TODO(apanchenko): remove this hack when network verification begins
# TODO(apanchenko): to work for environments with multiple net groups
if MULTIPLE_NETWORKS:
if len(self.client.get_nodegroups()) > 1:
logger.warning('Network verification is temporary disabled when '
'"multiple cluster networks" feature is used')
return
@@ -2696,18 +2714,277 @@ class FuelWebClient(object):
self.assert_task_success(latest_task, interval=interval,
timeout=timeout)
class FuelWebClient30(FuelWebClient29):
"""FuelWebClient that works with fuel-devops 3.0
"""
@logwrap
def get_vip_info(self, cluster_id, vip_name='public'):
vip_data = self.client.get_vip_info_by_name(cluster_id, vip_name)
assert_true(vip_data, "Vip with name {} wasn't found".format(vip_name))
logger.debug("vip data is {}".format(vip_data[0]))
return vip_data[0]
def get_default_node_group(self):
return self.environment.d_env.get_group(name='default')
@logwrap
def update_vip_ip(self, cluster_id, ip, vip_name='public'):
vip_data = self.get_vip_info(cluster_id, vip_name=vip_name)
vip_data['ip_addr'] = ip
vip_data['is_user_defined'] = True
vip_id = vip_data['id']
logger.debug("data to send {}".format(vip_data))
self.client.update_vip_ip(cluster_id, vip_id, vip_data)
def get_public_gw(self):
default_node_group = self.get_default_node_group()
pub_pool = default_node_group.get_network_pool(name='public')
return str(pub_pool.gateway)
@logwrap
def nodegroups_configure(self, cluster_id):
# Add node groups with networks
if len(self.environment.d_env.get_groups()) > 1:
ng = {rack.name: [] for rack in
self.environment.d_env.get_groups()}
ng_nets = []
for rack in self.environment.d_env.get_groups():
nets = {'name': rack.name}
nets['networks'] = {r.name: r.address_pool.name for
r in rack.get_network_pools(
name__in=['fuelweb_admin',
'public',
'management',
'storage',
'private'])}
ng_nets.append(nets)
self.update_nodegroups(cluster_id=cluster_id,
node_groups=ng)
self.update_nodegroups_network_configuration(cluster_id,
ng_nets)
def change_default_network_settings(self):
api_version = self.client.get_api_version()
if int(api_version["release"][0]) < 6:
return
def fetch_networks(networks):
"""Parse response from api/releases/1/networks and return dict with
networks' settings - need for avoiding hardcode"""
result = {}
for net in networks:
if (net['name'] == 'private' and
net.get('seg_type', '') == 'tun'):
result['private_tun'] = net
elif (net['name'] == 'private' and
net.get('seg_type', '') == 'gre'):
result['private_gre'] = net
elif net['name'] == 'public':
result['public'] = net
elif net['name'] == 'management':
result['management'] = net
elif net['name'] == 'storage':
result['storage'] = net
elif net['name'] == 'baremetal':
result['baremetal'] = net
return result
default_node_group = self.get_default_node_group()
logger.info("Default node group has {} name".format(
default_node_group.name))
logger.info("Applying default network settings")
for _release in self.client.get_releases():
logger.info(
'Applying changes for release: {}'.format(
_release['name']))
net_settings = \
self.client.get_release_default_net_settings(
_release['id'])
for net_provider in NETWORK_PROVIDERS:
if net_provider not in net_settings:
# TODO(ddmitriev): should show warning if NETWORK_PROVIDERS
# do not match the providers in net_settings.
continue
networks = fetch_networks(
net_settings[net_provider]['networks'])
pub_pool = default_node_group.get_network_pool(
name='public')
networks['public']['cidr'] = str(pub_pool.net)
networks['public']['gateway'] = str(pub_pool.gateway)
networks['public']['notation'] = 'ip_ranges'
networks['public']['vlan_start'] = \
pub_pool.vlan_start if pub_pool.vlan_start else None
networks['public']['ip_range'] = list(
pub_pool.ip_range(relative_start=2, relative_end=-16))
net_settings[net_provider]['config']['floating_ranges'] = [
list(pub_pool.ip_range('floating',
relative_start=-15,
relative_end=-2))]
if 'baremetal' in networks and \
default_node_group.get_network_pools(name='ironic'):
ironic_net_pool = default_node_group.get_network_pool(
name='ironic')
networks['baremetal']['cidr'] = ironic_net_pool.net
net_settings[net_provider]['config'][
'baremetal_gateway'] = ironic_net_pool.gateway
networks['baremetal']['ip_range'] = \
list(ironic_net_pool.ip_range())
net_settings[net_provider]['config']['baremetal_range'] = \
list(ironic_net_pool.ip_range('baremetal'))
for pool in default_node_group.get_network_pools(
name__in=['storage', 'management']):
networks[pool.name]['cidr'] = str(pool.net)
networks[pool.name]['ip_range'] = self.get_range(
pool.net)[0]
networks[pool.name]['notation'] = 'ip_ranges'
networks[pool.name]['vlan_start'] = pool.vlan_start
if net_provider == 'neutron':
private_net_pool = default_node_group.get_network_pool(
name='private')
networks['private_tun']['cidr'] = str(private_net_pool.net)
networks['private_gre']['cidr'] = str(private_net_pool.net)
networks['private_tun']['vlan_start'] = \
private_net_pool.vlan_start or None
networks['private_gre']['vlan_start'] = \
private_net_pool.vlan_start or None
net_settings[net_provider]['config']['internal_cidr'] = \
'192.168.0.0/24'
net_settings[net_provider]['config']['internal_gateway'] =\
'192.168.0.1'
elif net_provider == 'nova_network':
private_net_pool = default_node_group.get_network_pool(
name='private')
net_settings[net_provider]['config'][
'fixed_networks_cidr'] = \
str(private_net_pool.net) or None
net_settings[net_provider]['config'][
'fixed_networks_vlan_start'] = \
private_net_pool.vlan_start or None
self.client.put_release_default_net_settings(
_release['id'], net_settings)
def get_node_group_and_role(self, node_name, nodes_dict):
devops_node = self.environment.d_env.get_node(name=node_name)
node_group = devops_node.group.name
if isinstance(nodes_dict[node_name][0], list):
# Backwards compatibility
node_roles = nodes_dict[node_name][0]
else:
node_roles = nodes_dict[node_name]
return node_group, node_roles
@logwrap
def update_nodes_interfaces(self, cluster_id, nailgun_nodes=None):
assigned_networks = {}
nailgun_nodes = nailgun_nodes or []
if not nailgun_nodes:
nailgun_nodes = self.client.list_cluster_nodes(cluster_id)
for node in nailgun_nodes:
interfaces = self.client.get_node_interfaces(node['id'])
interfaces = {iface['mac']: iface for iface in interfaces}
d_node = self.get_devops_node_by_nailgun_node(node)
for net in d_node.network_configs:
if net.aggregation is None:  # the interface is not part of a bond
node_iface = d_node.interface_set.get(label=net.label)
assigned_networks[interfaces[
node_iface.mac_address]['name']] = net.networks
else:
assigned_networks[net.label] = net.networks
self.update_node_networks(node['id'], assigned_networks)
@logwrap
def update_node_networks(self, node_id, interfaces_dict,
raw_data=None,
override_ifaces_params=None):
interfaces = self.client.get_node_interfaces(node_id)
node = [n for n in self.client.list_nodes() if n['id'] == node_id][0]
d_node = self.get_devops_node_by_nailgun_node(node)
bonds = [n for n in d_node.network_configs
if n.aggregation is not None]
for bond in bonds:
macs = [i.mac_address.lower() for i in
d_node.interface_set.filter(label__in=bond.parents)]
parents = [{'name': iface['name']} for iface in interfaces
if iface['mac'].lower() in macs]
bond_config = {
'mac': None,
'mode': bond.aggregation,
'name': bond.label,
'slaves': parents,
'state': None,
'type': 'bond',
'assigned_networks': []
}
interfaces.append(bond_config)
if raw_data is not None:
interfaces.extend(raw_data)
def get_iface_by_name(ifaces, name):
iface = [i for i in ifaces if i['name'] == name]
assert_true(len(iface) > 0,
"Interface with name {} is not present on "
"node. Please check override params.".format(name))
return iface[0]
if override_ifaces_params is not None:
for interface in override_ifaces_params:
get_iface_by_name(interfaces, interface['name']).\
update(interface)
all_networks = dict()
for interface in interfaces:
all_networks.update(
{net['name']: net for net in interface['assigned_networks']})
for interface in interfaces:
name = interface["name"]
interface['assigned_networks'] = \
[all_networks[i] for i in interfaces_dict.get(name, []) if
i in all_networks.keys()]
self.client.put_node_interfaces(
[{'id': node_id, 'interfaces': interfaces}])
def update_nodegroup_net_settings(self, network_configuration, nodegroup,
cluster_id=None):
# seg_type = network_configuration.get('networking_parameters', {}) \
# .get('segmentation_type')
nodegroup_id = self.get_nodegroup(cluster_id, nodegroup['name'])['id']
for net in network_configuration.get('networks'):
if nodegroup['name'] == 'default' and \
net['name'] == 'fuelweb_admin':
continue
if net['group_id'] == nodegroup_id:
group = self.environment.d_env.get_group(
name=nodegroup['name'])
net_pool = group.networkpool_set.get(name=net['name'])
net['cidr'] = net_pool.net
# if net['meta']['use_gateway']:
# net['gateway'] = net_pool.gateway
# else:
# net['gateway'] = None
net['gateway'] = net_pool.gateway
if net['gateway']:
net['meta']['use_gateway'] = True
net['meta']['gateway'] = net['gateway']
else:
net['meta']['use_gateway'] = False
net['vlan_start'] = net_pool.vlan_start
net['meta']['notation'] = 'ip_ranges'
net['ip_ranges'] = [list(net_pool.ip_range())]
return network_configuration
# TODO(ddmitriev): this code will be removed after moving to fuel-devops3.0
# pylint: disable=no-member
if (distutils.version.LooseVersion(devops.__version__) <
distutils.version.LooseVersion('3')):
logger.info("Use FuelWebClient compatible to fuel-devops 2.9")
FuelWebClient = FuelWebClient29
else:
logger.info("Use FuelWebClient compatible to fuel-devops 3.0")
FuelWebClient = FuelWebClient30
# pylint: enable=no-member


@@ -75,11 +75,21 @@ class ChangeVipManually(TestBasic):
}
)
self.show_step(4)
# TODO(ddmitriev): remove this 'disable' after moving to fuel-devops3.0
# pylint: disable=no-member
ip = netaddr.IPAddress(
self.fuel_web.get_vip_info(cluster_id)['ip_addr'])
# pylint: enable=no-member
ip_to_set = str(ip + 1)
logger.debug('ip to be set is {}'.format(ip_to_set))
# TODO(ddmitriev): remove this 'disable' after moving to fuel-devops3.0
# pylint: disable=no-member
self.fuel_web.update_vip_ip(cluster_id, ip_to_set)
# pylint: enable=no-member
self.show_step(5)
self.fuel_web.verify_network(cluster_id)
self.show_step(6)
@@ -166,7 +176,12 @@ class ChangeVipManually(TestBasic):
self.show_step(5)
ip_to_set = str(floating_upper_range + 1)
logger.debug('ip to be set is {}'.format(ip_to_set))
# TODO(ddmitriev): remove this 'disable' after moving to fuel-devops3.0
# pylint: disable=no-member
self.fuel_web.update_vip_ip(cluster_id, ip_to_set)
# pylint: enable=no-member
self.show_step(6)
self.fuel_web.verify_network(cluster_id)
self.show_step(7)


@@ -61,6 +61,18 @@ def define_custom_groups():
systest_group="system_test.deploy_and_check_radosgw",
config_name="ceph_all_on_neutron_vlan")
add_group(group="system_test.ceph_ha_30",
systest_group="system_test.deploy_and_check_radosgw",
config_name="ceph_all_on_neutron_vlan_30")
add_group(group="system_test.ceph_ha_30_bond",
systest_group="system_test.deploy_and_check_radosgw",
config_name="ceph_all_on_neutron_vlan_30-bond")
add_group(group="system_test.ceph_ha_30_2groups",
systest_group="system_test.deploy_and_check_radosgw",
config_name="ceph_all_on_neutron_vlan_30-2groups")
add_group(group="filling_root",
systest_group="system_test.failover.filling_root",
config_name="ceph_all_on_neutron_vlan")


@@ -69,8 +69,7 @@ class DeployCheckRadosGW(ActionTest, BaseActions):
def check_rados_daemon(self):
"""Check the radosgw daemon is started"""
def radosgw_started(remote):
return len(remote.check_call(
'ps aux | grep "/usr/bin/radosgw -n '
'client.radosgw.gateway"')['stdout']) == 3
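# 'pkill -0 radosgw' sends signal 0, which only checks whether a matching
# process exists; an exit code of 0 means the radosgw daemon is running.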
return remote.check_call('pkill -0 radosgw')['exit_code'] == 0
with self.fuel_web.get_ssh_for_node('slave-01') as remote:
assert_true(radosgw_started(remote), 'radosgw daemon started')


@@ -0,0 +1,504 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default_interface_model:
- &interface_model !os_env INTERFACE_MODEL, e1000
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # admin IP range for 'default' nodegroup name
public-pool01:
net: *pool_default
params:
tag: 100
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, +127] # public IP range for 'default' nodegroup name
floating: [+128, -2]
storage-pool01:
net: *pool_default
params:
tag: 101
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # storage IP range for 'default' nodegroup name
management-pool01:
net: *pool_default
params:
tag: 102
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # management IP range for 'default' nodegroup name
private-pool01:
net: *pool_default
params:
tag: 960
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # private IP range for 'default' nodegroup name
public-pool02:
net: *pool_default
params:
tag: 100
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
virtual-rack-01: [+2, +127] # public IP range for 'virtual-rack-01' nodegroup name
floating: [+128, -2]
storage-pool02:
net: *pool_default
params:
tag: 101
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
virtual-rack-01: [+2, -2] # storage IP range for 'virtual-rack-01' nodegroup name
management-pool02:
net: *pool_default
params:
tag: 102
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
virtual-rack-01: [+2, -2] # management IP range for 'virtual-rack-01' nodegroup name
private-pool02:
net: *pool_default
params:
tag: 960
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
virtual-rack-01: [+2, -2] # private IP range for 'virtual-rack-01' nodegroup name
groups:
- name: default
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
vlan_ifaces:
- 100
# The public libvirt network is only used to connect the public
# network to the Internet.
# Actually, the public network with tag:100 uses the 'admin' l2_network_device
# (see 'network_config' in nodes)
public:
address_pool: public-pool01
dhcp: false
forward:
mode: nat
parent_iface:
l2_net_dev: admin
tag: 100
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' is set, the volume capacity is calculated from its size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: iface1
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
network_config:
iface1:
networks:
- fuelweb_admin
- name: slave-01 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 7c:14:7a:6c:9b:96
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 8c:04:7a:6c:9b:97
interface_model: *interface_model
l2_network_device: admin
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-02 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: fc:c4:7a:6c:9a:16
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: dc:c4:7a:6c:9a:17
interface_model: *interface_model
l2_network_device: admin
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-03 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: ac:c4:7a:6c:9b:40
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 9c:c4:7a:6c:9b:41
interface_model: *interface_model
l2_network_device: admin
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: virtual-rack-01
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool02
storage: storage-pool02
management: management-pool02
private: private-pool02
nodes:
- name: slave-04 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:d4:7a:6d:35:98
l2_network_device: admin
interface_model: *interface_model
- label: iface3
mac_address: 00:ab:21:8a:7b:18
l2_network_device: admin
interface_model: *interface_model
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface3:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-05 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 3c:c4:7a:6d:28:de
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 20:1b:21:89:47:90
interface_model: *interface_model
l2_network_device: admin
network_config:
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
- name: slave-06 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 1c:c4:7a:6d:28:de
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 10:1b:21:89:47:90
interface_model: *interface_model
l2_network_device: admin
network_config:
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
- name: slave-07 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:c4:7a:6d:18:de
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 00:1b:21:89:37:90
interface_model: *interface_model
l2_network_device: admin
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-08 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:c4:7a:5d:28:de
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 00:1b:21:79:47:90
interface_model: *interface_model
l2_network_device: admin
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-09 # Custom name for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:c4:6a:6d:28:de
interface_model: *interface_model
l2_network_device: admin
- label: iface2
mac_address: 00:1b:11:89:47:90
interface_model: *interface_model
l2_network_device: admin
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name


@@ -0,0 +1,320 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default_interface_model:
- &interface_model !os_env INTERFACE_MODEL, e1000
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # admin IP range for 'default' nodegroup name
public-pool01:
net: *pool_default
params:
tag: 100
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, +127] # public IP range for 'default' nodegroup name
floating: [+128, -2]
storage-pool01:
net: *pool_default
params:
tag: 101
management-pool01:
net: *pool_default
params:
tag: 102
private-pool01:
net: *pool_default
params:
tag: 960
groups:
- name: baremetal-rack-01
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin-net-01:
address_pool: fuelweb_admin-pool01
dhcp: false
vlan_ifaces:
- 0
openstack-net-01:
forward:
mode: Null
nodes:
- name: slave-01 # Custom name of baremetal for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:c4:7a:6c:9b:96
interface_model: *interface_model
l2_network_device: admin-net-01
- label: iface2
mac_address: 0c:c4:7a:6c:9b:97
interface_model: *interface_model
l2_network_device: openstack-net-01
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-02 # Custom name of baremetal for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:c4:7a:6c:9a:16
interface_model: *interface_model
l2_network_device: admin-net-01
- label: iface2
mac_address: 0c:c4:7a:6c:9a:17
interface_model: *interface_model
l2_network_device: openstack-net-01
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-03 # Custom name of baremetal for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 0c:c4:7a:6c:9b:40
interface_model: *interface_model
l2_network_device: admin-net-01
- label: iface2
mac_address: 0c:c4:7a:6c:9b:41
interface_model: *interface_model
l2_network_device: openstack-net-01
network_config:
iface1:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface2:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-04 # Custom name of baremetal for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 00:1b:21:8a:7b:18
l2_network_device: openstack-net-01
interface_model: *interface_model
- label: iface3
mac_address: 0c:c4:7a:6d:35:98
l2_network_device: admin-net-01
interface_model: *interface_model
network_config:
iface3:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface1:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: slave-05 # Custom name of baremetal for Fuel slave node
role: fuel_slave # Fixed role for Fuel slave node properties
params:
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 200
format: qcow2
# Interfaces can be attached to the same switch or to different switches.
interfaces:
- label: iface1
mac_address: 00:1b:21:89:47:90
interface_model: *interface_model
l2_network_device: openstack-net-01
- label: iface2
mac_address: 0c:c4:7a:6d:28:de
interface_model: *interface_model
l2_network_device: admin-net-01
network_config:
iface2:
networks:
- fuelweb_admin ## OpenStack network, NOT switch name
- public ## OpenStack network, NOT switch name
iface1:
networks:
- storage ## OpenStack network, NOT switch name
- management ## OpenStack network, NOT switch name
- private ## OpenStack network, NOT switch name
- name: default
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
parent_iface:
l2_net_dev: admin-net-01
tag: 0
vlan_ifaces:
- 100
public:
address_pool: public-pool01
dhcp: false
forward:
mode: nat
parent_iface:
l2_net_dev: admin
tag: 100
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' is set, the volume capacity is calculated from its size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: iface1
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
network_config:
iface1:
networks:
- fuelweb_admin


@@ -0,0 +1,229 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default_interface_model:
- &interface_model !os_env INTERFACE_MODEL, e1000
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # admin IP range for 'default' nodegroup name
public-pool01:
net: *pool_default
params:
tag: 100
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, +127] # public IP range for 'default' nodegroup name
floating: [+128, -2]
storage-pool01:
net: *pool_default
params:
tag: 101
management-pool01:
net: *pool_default
params:
tag: 102
private-pool01:
net: *pool_default
params:
tag: 103
groups:
- name: default
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
openstack_br:
vlan_ifaces:
- 100
- 101
- 102
public:
address_pool: public-pool01
dhcp: false
forward:
mode: nat
parent_iface:
l2_net_dev: openstack_br
tag: 100
storage:
address_pool: storage-pool01
dhcp: false
parent_iface:
l2_net_dev: openstack_br
tag: 101
management:
address_pool: management-pool01
dhcp: false
parent_iface:
l2_net_dev: openstack_br
tag: 102
private:
address_pool: private-pool01
dhcp: false
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' is set, the volume capacity is calculated from its size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: iface2
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
# - # not used
network_config:
iface2:
networks:
- fuelweb_admin
# Slave nodes
- name: slave-01
role: fuel_slave
params: &rack-01-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 50
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 50
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 50
format: qcow2
# List of node interfaces
interfaces:
- label: iface6
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
- label: iface2
l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
- label: iface3
l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
- label: iface4
l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
- label: iface5
l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
# How Nailgun/OpenStack networks should be assigned to interfaces
network_config:
iface6:
networks:
- fuelweb_admin # Nailgun/OpenStack network name
bond0:
networks:
- public
- storage
- management
- private
aggregation: active-backup # if 'aggregation' is present in the config, bonding is enabled for the interfaces listed in 'parents'
parents:
- iface2
- iface3
- iface4
- iface5
- name: slave-02
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-03
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-04
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-05
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-06
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-07
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-08
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-09
role: fuel_slave
params: *rack-01-slave-node-params


@@ -0,0 +1,208 @@
---
aliases:
dynamic_address_pool:
- &pool_default !os_env POOL_DEFAULT, 10.109.0.0/16:24
default_interface_model:
- &interface_model !os_env INTERFACE_MODEL, e1000
env_name: !os_env ENV_NAME
address_pools:
# Network pools used by the environment
fuelweb_admin-pool01:
net: *pool_default
params:
tag: 0
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, -2] # admin IP range for 'default' nodegroup name
public-pool01:
net: *pool_default
params:
tag: 100
ip_reserved:
gateway: +1
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
default: [+2, +127] # public IP range for 'default' nodegroup name
floating: [+128, -2]
storage-pool01:
net: *pool_default
params:
tag: 101
management-pool01:
net: *pool_default
params:
tag: 102
private-pool01:
net: *pool_default
params:
tag: 103
groups:
- name: default
driver:
name: devops.driver.libvirt.libvirt_driver
params:
connection_string: !os_env CONNECTION_STRING, qemu:///system
storage_pool_name: !os_env STORAGE_POOL_NAME, default
stp: True
hpet: False
use_host_cpu: !os_env DRIVER_USE_HOST_CPU, true
network_pools: # Address pools for OpenStack networks.
# Actual names should be used for keys
# (the same as in Nailgun, for example)
fuelweb_admin: fuelweb_admin-pool01
public: public-pool01
storage: storage-pool01
management: management-pool01
private: private-pool01
l2_network_devices: # Libvirt bridges. These are *NOT* Nailgun networks
admin:
address_pool: fuelweb_admin-pool01
dhcp: false
forward:
mode: nat
openstack_br:
vlan_ifaces:
- 100
- 101
- 102
public:
address_pool: public-pool01
dhcp: false
forward:
mode: nat
parent_iface:
l2_net_dev: openstack_br
tag: 100
storage:
address_pool: storage-pool01
dhcp: false
parent_iface:
l2_net_dev: openstack_br
tag: 101
management:
address_pool: management-pool01
dhcp: false
parent_iface:
l2_net_dev: openstack_br
tag: 102
private:
address_pool: private-pool01
dhcp: false
nodes:
- name: admin # Custom name of VM for Fuel admin node
role: fuel_master # Fixed role for Fuel master node properties
params:
vcpu: !os_env ADMIN_NODE_CPU, 2
memory: !os_env ADMIN_NODE_MEMORY, 3072
boot:
- hd
- cdrom # for boot from usb - without 'cdrom'
volumes:
- name: system
capacity: !os_env ADMIN_NODE_VOLUME_SIZE, 75
format: qcow2
- name: iso
source_image: !os_env ISO_PATH # if 'source_image' is set, the volume capacity is calculated from its size
format: raw
device: cdrom # for boot from usb - 'disk'
bus: ide # for boot from usb - 'usb'
interfaces:
- label: iface2
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
network_config:
iface2:
networks:
- fuelweb_admin
# Slave nodes
- name: slave-01
role: fuel_slave
params: &rack-01-slave-node-params
vcpu: !os_env SLAVE_NODE_CPU, 2
memory: !os_env SLAVE_NODE_MEMORY, 3072
boot:
- network
- hd
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 50
format: qcow2
- name: cinder
capacity: !os_env NODE_VOLUME_SIZE, 50
format: qcow2
- name: swift
capacity: !os_env NODE_VOLUME_SIZE, 50
format: qcow2
# List of node interfaces
interfaces:
- label: iface3
l2_network_device: admin # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
- label: iface2
l2_network_device: openstack_br # Libvirt bridge name. It is *NOT* a Nailgun network
interface_model: *interface_model
# How Nailgun/OpenStack networks should be assigned to interfaces
network_config:
iface2:
networks:
- public
- storage
- management
- private
iface3:
networks:
- fuelweb_admin # Nailgun/OpenStack network name
- name: slave-02
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-03
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-04
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-05
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-06
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-07
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-08
role: fuel_slave
params: *rack-01-slave-node-params
- name: slave-09
role: fuel_slave
params: *rack-01-slave-node-params


@@ -78,16 +78,16 @@ aliases:
rack-02-slave-network_config: &rack-02-slave-network_config
enp0s3:
networks:
- fuelweb_admin2
- fuelweb_admin
enp0s4:
networks:
- public2
- public
enp0s5:
networks:
- management2
- management
enp0s6:
networks:
- private2
- private
enp0s7:
networks:
- storage
@@ -95,16 +95,16 @@ aliases:
rack-03-slave-network_config: &rack-03-slave-network_config
enp0s3:
networks:
- fuelweb_admin3
- fuelweb_admin
enp0s4:
networks:
- public3
- public
enp0s5:
networks:
- management3
- management
enp0s6:
networks:
- private3
- private
enp0s7:
networks:
- storage
@@ -134,6 +134,7 @@ aliases:
boot:
- network
- hd
bootmenu_timeout: 3000
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
@@ -153,6 +154,7 @@ aliases:
boot:
- network
- hd
bootmenu_timeout: 3000
volumes:
- name: system
capacity: !os_env NODE_VOLUME_SIZE, 55
@@ -178,6 +180,9 @@ address_pools:
net: *pool_default
params:
tag: 0
ip_ranges:
default: [+2, +31] # public IP range for 'default' nodegroup name
floating: [+32, -2] # floating IP range for 'default' nodegroup name
storage-pool01:
net: *pool_default
params:
@@ -195,10 +200,17 @@ address_pools:
net: *pool_default
params:
tag: 0
ip_reserved:
gateway: +2 # gateway is Fuel master, not libvirt bridge
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
group-custom-1: [+3, -2] # admin IP range for 'group-custom-1' nodegroup name
public-pool02:
net: *pool_default
params:
tag: 0
ip_ranges:
group-custom-1: [+2, +31] # public IP range for 'group-custom-1' nodegroup name
management-pool02:
net: *pool_default
params:
@@ -212,10 +224,17 @@ address_pools:
net: *pool_default
params:
tag: 0
ip_reserved:
gateway: +2 # gateway is Fuel master, not libvirt bridge
l2_network_device: +1 # l2_network_device will get this IP address
ip_ranges:
group-custom-2: [+3, -2] # admin IP range for 'group-custom-2' nodegroup name
public-pool03:
net: *pool_default
params:
tag: 0
ip_ranges:
group-custom-2: [+2, +31] # public IP range for 'group-custom-2' nodegroup name
management-pool03:
net: *pool_default
params:
@@ -320,7 +339,7 @@ groups:
role: fuel_slave
params: *default-slave-node-params
- name: rack-02
- name: group-custom-1
driver:
name: devops.driver.libvirt.libvirt_driver
params:
@@ -376,7 +395,7 @@ groups:
role: fuel_slave
params: *rack-02-slave-node-params
- name: rack-03
- name: group-custom-2
driver:
name: devops.driver.libvirt.libvirt_driver
params:


@@ -0,0 +1,38 @@
---
network-config: &network-config
provider: neutron
segment-type: vlan
pubip-to-all: false
storages-config: &storages-config
volume-lvm: false
volume-ceph: true
image-ceph: true
rados-ceph: true
ephemeral-ceph: false
replica-ceph: 2
nodes: &nodes
- roles:
- controller
count: 3
- roles:
- compute
- ceph-osd
count: 3
template:
name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN
slaves: 6
devops_settings: !include devops_configs/default30-2groups.yaml
cluster_template: &environment-config
name: rados
release: ubuntu
settings:
components:
sahara: false
murano: false
ceilometer: false
storages: *storages-config
network: *network-config
nodes: *nodes


@@ -0,0 +1,38 @@
---
network-config: &network-config
provider: neutron
segment-type: vlan
pubip-to-all: false
storages-config: &storages-config
volume-lvm: false
volume-ceph: true
image-ceph: true
rados-ceph: true
ephemeral-ceph: false
replica-ceph: 2
nodes: &nodes
- roles:
- controller
count: 3
- roles:
- compute
- ceph-osd
count: 3
template:
name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN
slaves: 6
devops_settings: !include devops_configs/default30-bond.yaml
cluster_template: &environment-config
name: rados
release: ubuntu
settings:
components:
sahara: false
murano: false
ceilometer: false
storages: *storages-config
network: *network-config
nodes: *nodes


@@ -0,0 +1,38 @@
---
network-config: &network-config
provider: neutron
segment-type: vlan
pubip-to-all: false
storages-config: &storages-config
volume-lvm: false
volume-ceph: true
image-ceph: true
rados-ceph: true
ephemeral-ceph: false
replica-ceph: 2
nodes: &nodes
- roles:
- controller
count: 3
- roles:
- compute
- ceph-osd
count: 3
template:
name: 3 Controller, 3 Compute & Ceph on Neutron/VLAN
slaves: 6
devops_settings: !include devops_configs/default30.yaml
cluster_template: &environment-config
name: rados
release: ubuntu
settings:
components:
sahara: false
murano: false
ceilometer: false
storages: *storages-config
network: *network-config
nodes: *nodes