Add DevopsClient

- moved common logic from shell.py/helpers.py to new classes
  DevopsClient, DevopsEnvironment
- old helpers deprecated
- GroupNtpSync refactored
- added unit tests for Shell, DevopsClient and DevopsEnvironment
- Fixed imports in code

blueprint: fuel-devops-client-as-a-module
Change-Id: I57350aa4f803e75b01813dd8ffb747da04264e5b
Anton Studenov 2016-09-07 19:03:36 +03:00 committed by Alexey Stepanov
parent 4899d01df5
commit 8877bd8b6c
18 changed files with 1991 additions and 528 deletions
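A minimal usage sketch of the client API added by this change, assuming an already-created environment named 'fuel_env' (placeholder, not taken from this diff):

from devops.client import DevopsClient

client = DevopsClient()
env = client.get_env('fuel_env')         # DevopsEnvironment wrapper; 'fuel_env' is a placeholder
with env.get_admin_remote() as remote:   # replaces helpers.get_admin_remote(env)
    remote.check_call('uptime')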

devops/client/__init__.py Normal file

@ -0,0 +1,17 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.client.client import DevopsClient
__all__ = ['DevopsClient']

devops/client/client.py Normal file

@ -0,0 +1,108 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from devops.client import environment
from devops.helpers import templates
from devops import models
from devops import settings
class DevopsClient(object):
"""Client class
Provide methods to get/create environments
"""
@staticmethod
def get_env(env_name):
env = models.Environment.get(name=env_name)
return environment.DevopsEnvironment(env)
@staticmethod
def list_env_names():
return [env.name for env in models.Environment.list_all()]
@staticmethod
def synchronize_all():
models.Environment.synchronize_all()
@staticmethod
def create_env_from_config(config):
"""Creates env from template
:type config: str or dict
"""
if isinstance(config, str):
config = templates.get_devops_config(config)
env = models.Environment.create_environment(config)
return environment.DevopsEnvironment(env)
def create_env(self,
boot_from='cdrom',
env_name=None,
admin_iso_path=None,
admin_vcpu=None,
admin_memory=None,
admin_sysvolume_capacity=None,
nodes_count=None,
slave_vcpu=None,
slave_memory=None,
second_volume_capacity=None,
third_volume_capacity=None,
net_pool=None):
"""Backward compatibility for fuel-qa
Creates env from list of environment variables
"""
hw = settings.HARDWARE
config = templates.create_devops_config(
boot_from=boot_from,
env_name=env_name or settings.ENV_NAME,
admin_vcpu=admin_vcpu or hw['admin_node_cpu'],
admin_memory=admin_memory or hw['admin_node_memory'],
admin_sysvolume_capacity=(
admin_sysvolume_capacity or settings.ADMIN_NODE_VOLUME_SIZE),
admin_iso_path=admin_iso_path or settings.ISO_PATH,
nodes_count=nodes_count or settings.NODES_COUNT,
numa_nodes=hw['numa_nodes'],
slave_vcpu=slave_vcpu or hw['slave_node_cpu'],
slave_memory=slave_memory or hw["slave_node_memory"],
slave_volume_capacity=settings.NODE_VOLUME_SIZE,
second_volume_capacity=(
second_volume_capacity or settings.NODE_VOLUME_SIZE),
third_volume_capacity=(
third_volume_capacity or settings.NODE_VOLUME_SIZE),
use_all_disks=settings.USE_ALL_DISKS,
multipath_count=settings.SLAVE_MULTIPATH_DISKS_COUNT,
ironic_nodes_count=settings.IRONIC_NODES_COUNT,
networks_bonding=settings.BONDING,
networks_bondinginterfaces=settings.BONDING_INTERFACES,
networks_multiplenetworks=settings.MULTIPLE_NETWORKS,
networks_nodegroups=settings.NODEGROUPS,
networks_interfaceorder=settings.INTERFACE_ORDER,
networks_pools=dict(
admin=net_pool or settings.POOLS['admin'],
public=net_pool or settings.POOLS['public'],
management=net_pool or settings.POOLS['management'],
private=net_pool or settings.POOLS['private'],
storage=net_pool or settings.POOLS['storage'],
),
networks_forwarding=settings.FORWARDING,
networks_dhcp=settings.DHCP,
driver_enable_acpi=settings.DRIVER_PARAMETERS['enable_acpi'],
driver_enable_nwfilers=settings.ENABLE_LIBVIRT_NWFILTERS,
)
return self.create_env_from_config(config)
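A hedged sketch of how these factory methods are meant to be driven; the YAML path, environment name and overrides below are illustrative, not taken from this diff:

from devops.client import DevopsClient

client = DevopsClient()

# From a devops YAML template (path is a placeholder):
env = client.create_env_from_config('/path/to/devops-template.yaml')
env.define()

# Backward-compatible path built from settings/environment variables,
# with a couple of explicit overrides (values are placeholders):
env2 = client.create_env(env_name='sandbox', nodes_count=3)
env2.define()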

devops/client/environment.py Normal file

@ -0,0 +1,198 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import paramiko
# pylint: disable=redefined-builtin
# noinspection PyUnresolvedReferences
from six.moves import xrange
# pylint: enable=redefined-builtin
from devops.client import nailgun
from devops import error
from devops.helpers import helpers
from devops.helpers import ntp
from devops.helpers import ssh_client
from devops.helpers import templates
from devops import settings
class DevopsEnvironment(object):
"""DevopsEnvironment
Contains all methods to control the environment and its nodes
"""
def __init__(self, env):
self._env = env
def __getattr__(self, name):
return getattr(self._env, name)
def add_slaves(self,
nodes_count,
slave_vcpu=1,
slave_memory=1024,
second_volume_capacity=50,
third_volume_capacity=50,
force_define=True,
group_name='default',
):
group = self._env.get_group(name=group_name)
created_nodes = len(group.get_nodes())
new_nodes = []
for node_num in xrange(created_nodes, created_nodes + nodes_count):
node_name = "slave-{:02d}".format(node_num)
slave_conf = templates.create_slave_config(
slave_name=node_name,
slave_role='fuel_slave',
slave_vcpu=slave_vcpu,
slave_memory=slave_memory,
slave_volume_capacity=settings.NODE_VOLUME_SIZE,
second_volume_capacity=second_volume_capacity,
third_volume_capacity=third_volume_capacity,
interfaceorder=settings.INTERFACE_ORDER,
numa_nodes=settings.HARDWARE['numa_nodes'],
use_all_disks=True,
networks_multiplenetworks=settings.MULTIPLE_NETWORKS,
networks_nodegroups=settings.NODEGROUPS,
networks_bonding=settings.BONDING,
networks_bondinginterfaces=settings.BONDING_INTERFACES,
)
node = group.add_node(**slave_conf)
if force_define is True:
for volume in node.get_volumes():
volume.define()
node.define()
new_nodes.append(node)
return new_nodes
def get_default_gw(self, l2_network_device_name='admin'):
l2_net_dev = self._env.get_env_l2_network_device(
name=l2_network_device_name)
return l2_net_dev.address_pool.gateway
def has_admin(self):
return self._env.get_nodes(name='admin').exists()
def admin_setup(self, boot_from='cdrom', iface='enp0s3',
wait_for_external_config='no'):
admin_node = self.get_admin()
if admin_node.kernel_cmd is None:
admin_node.kernel_cmd = admin_node.ext.get_kernel_cmd(
boot_from=boot_from,
wait_for_external_config=wait_for_external_config,
iface=iface)
admin_node.ext.bootstrap_and_wait()
admin_node.ext.deploy_wait()
return admin_node
def get_active_nodes(self):
return [node for node in self._env.get_nodes() if node.is_active()]
def get_admin(self):
if self.has_admin():
return self._env.get_node(name='admin')
raise error.DevopsError(
'Environment {!r} has no admin node'.format(self._env.name))
def get_admin_login(self):
return settings.SSH_CREDENTIALS['login']
def get_admin_ip(self):
return self.get_admin().get_ip_address_by_network_name(
settings.SSH_CREDENTIALS['admin_network'])
def get_admin_remote(self, login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password']):
admin_ip = self.get_admin_ip()
admin_node = self.get_admin()
helpers.wait_tcp(
host=admin_ip, port=admin_node.ssh_port, timeout=180,
timeout_msg=("Admin node {ip} is not accessible by SSH."
"".format(ip=admin_ip)))
return ssh_client.SSHClient(
admin_ip,
auth=ssh_client.SSHAuth(username=login, password=password))
def get_private_keys(self):
ssh_keys = []
with self.get_admin_remote() as admin_remote:
for key_string in ['/root/.ssh/id_rsa',
'/root/.ssh/bootstrap.rsa']:
if admin_remote.isfile(key_string):
with admin_remote.open(key_string) as f:
ssh_keys.append(paramiko.RSAKey.from_private_key(f))
return ssh_keys
def get_node_ip(self, node_name):
node = self.get_node(name=node_name)
node_mac = node.interfaces[0].mac_address
nailgun_client = nailgun.NailgunClient(ip=self.get_admin_ip())
ip = nailgun_client.get_slave_ip_by_mac(node_mac)
return ip
def get_node_remote(self, node_name,
login=settings.SSH_SLAVE_CREDENTIALS['login'],
password=settings.SSH_SLAVE_CREDENTIALS['password']):
node = self.get_node(name=node_name)
ip = self.get_node_ip(node_name)
helpers.wait_tcp(
host=ip, port=node.ssh_port, timeout=180,
timeout_msg="Node {ip} is not accessible by SSH.".format(ip=ip))
return ssh_client.SSHClient(
ip,
auth=ssh_client.SSHAuth(
username=login,
password=password,
keys=self.get_private_keys()))
def sync_time(self, node_names=None, skip_sync=False):
"""Synchronize time on nodes
param: node_names - list of devops node names
param: skip_sync - only get the current time without sync
return: dict{node_name: node_time, ...}
"""
if node_names is None:
node_names = [node.name for node in self.get_active_nodes()]
group = ntp.GroupNtpSync()
for node_name in node_names:
if node_name == 'admin':
remote = self.get_admin_remote()
else:
remote = self.get_node_remote(node_name=node_name)
group.add_node(remote, node_name)
with group:
if not skip_sync:
group.sync_time('admin')
group.sync_time('pacemaker')
group.sync_time('other')
return group.get_curr_time()
def get_curr_time(self, node_names=None):
"""Get current time on nodes
param: node_names - list of devops node names
return: dict{node_name: node_time, ...}
"""
return self.sync_time(node_names=node_names, skip_sync=True)
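A minimal sketch of driving an existing environment through the wrapper above; environment and node names are placeholders:

from devops.client import DevopsClient

env = DevopsClient().get_env('fuel_env')          # placeholder name
new_nodes = env.add_slaves(nodes_count=2)         # defines slave-NN nodes in the 'default' group
with env.get_node_remote('slave-02') as remote:   # IP resolved through NailgunClient
    remote.check_call('hostname')
times = env.sync_time()                           # admin group first, then pacemaker/other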

devops/client/nailgun.py Normal file

@ -0,0 +1,59 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.utils import functional
from keystoneauth1.identity import V2Password
from keystoneauth1.session import Session as KeystoneSession
from devops import error
from devops import logger
from devops import settings
class NailgunClient(object):
def __init__(self, ip):
self.ip = ip
@functional.cached_property
def _keystone_session(self):
keystone_auth = V2Password(
auth_url="http://{}:5000/v2.0".format(self.ip),
username=settings.KEYSTONE_CREDS['username'],
password=settings.KEYSTONE_CREDS['password'],
tenant_name=settings.KEYSTONE_CREDS['tenant_name'])
return KeystoneSession(auth=keystone_auth, verify=False)
def get_slave_ip_by_mac(self, mac):
nodes = self.get_nodes_json()
def poor_mac(mac_addr):
return [m.lower() for m in mac_addr
if m.lower() in '01234546789abcdef']
for node in nodes:
for interface in node['meta']['interfaces']:
if poor_mac(interface['mac']) == poor_mac(mac):
logger.debug('For mac {0} found ip {1}'
.format(mac, node['ip']))
return node['ip']
raise error.DevopsError(
'There is no match between MAC {0} and Nailgun MACs'.format(mac))
def get_nodes_json(self):
nodes = self._keystone_session.get(
'/nodes',
endpoint_filter={'service_type': 'fuel'}
)
return nodes.json()
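A short sketch of using the Nailgun client on its own; the IP is a placeholder and the MAC is the sample value used in the unit tests below:

from devops.client import nailgun

ng = nailgun.NailgunClient(ip='10.109.0.2')         # Fuel admin node IP (placeholder)
nodes = ng.get_nodes_json()                         # raw /nodes listing from Nailgun
ip = ng.get_slave_ip_by_mac('64:52:dc:96:12:cc')    # raises DevopsError if the MAC is unknown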

devops/helpers/helpers.py

@ -18,12 +18,10 @@ import functools
import os
import socket
import time
import warnings
import xml.etree.ElementTree as ET
from dateutil import tz
from keystoneauth1.identity import V2Password
from keystoneauth1.session import Session as KeystoneSession
import paramiko
import six
# pylint: disable=import-error
# noinspection PyUnresolvedReferences
@ -177,31 +175,30 @@ def http(host='localhost', port=80, method='GET', url='/', waited_code=200):
def get_private_keys(env):
_ssh_keys = []
admin_remote = get_admin_remote(env)
for key_string in ['/root/.ssh/id_rsa',
'/root/.ssh/bootstrap.rsa']:
if admin_remote.isfile(key_string):
with admin_remote.open(key_string) as f:
_ssh_keys.append(paramiko.RSAKey.from_private_key(f))
return _ssh_keys
msg = (
'get_private_keys has been deprecated in favor of '
'DevopsEnvironment.get_private_keys')
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
from devops.client import DevopsClient
denv = DevopsClient().get_env(env.name)
return denv.get_private_keys()
def get_admin_remote(
env,
login=settings.SSH_CREDENTIALS['login'],
password=settings.SSH_CREDENTIALS['password']):
admin_ip = get_admin_ip(env)
wait(lambda: tcp_ping(admin_ip, 22),
timeout=180,
timeout_msg=("Admin node {ip} is not accessible by SSH."
.format(ip=admin_ip)))
return env.get_node(
name='admin'
).remote(
network_name=settings.SSH_CREDENTIALS['admin_network'],
login=login,
password=password)
msg = (
'get_admin_remote has been deprecated in favor of '
'DevopsEnvironment.get_admin_remote')
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
from devops.client import DevopsClient
denv = DevopsClient().get_env(env.name)
return denv.get_admin_remote(login=login, password=password)
def get_node_remote(
@ -209,41 +206,42 @@ def get_node_remote(
node_name,
login=settings.SSH_SLAVE_CREDENTIALS['login'],
password=settings.SSH_SLAVE_CREDENTIALS['password']):
ip = get_slave_ip(env, env.get_node(
name=node_name).interfaces[0].mac_address)
wait(lambda: tcp_ping(ip, 22), timeout=180,
timeout_msg="Node {ip} is not accessible by SSH.".format(ip=ip))
return ssh_client.SSHClient(
ip,
auth=ssh_client.SSHAuth(
username=login,
password=password,
keys=get_private_keys(env)))
msg = (
'get_node_remote has been deprecated in favor of '
'DevopsEnvironment.get_node_remote')
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
from devops.client import DevopsClient
denv = DevopsClient().get_env(env.name)
return denv.get_node_remote(
node_name=node_name, login=login, password=password)
def get_admin_ip(env):
return env.get_node(name='admin').get_ip_address_by_network_name('admin')
msg = (
'get_admin_ip has been deprecated in favor of '
'DevopsEnvironment.get_admin_ip')
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
def get_ip_from_json(js, mac):
def poor_mac(mac_addr):
return \
[m.lower() for m in mac_addr if m.lower() in '01234546789abcdef']
for node in js:
for interface in node['meta']['interfaces']:
if poor_mac(interface['mac']) == poor_mac(mac):
logger.debug("For mac {0} found ip {1}".format(
mac, node['ip']))
return node['ip']
raise error.DevopsError(
'There is no match between MAC {0} and Nailgun MACs'.format(mac))
from devops.client import DevopsClient
denv = DevopsClient().get_env(env.name)
return denv.get_admin_ip()
def get_slave_ip(env, node_mac_address):
admin_ip = get_admin_ip(env)
js = get_nodes(admin_ip)
return get_ip_from_json(js, node_mac_address)
msg = (
'get_slave_ip has been deprecated in favor of '
'DevopsEnvironment.get_node_ip')
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
from devops import client
from devops.client import nailgun
denv = client.DevopsClient().get_env(env.name)
ng_client = nailgun.NailgunClient(ip=denv.get_admin_ip())
return ng_client.get_slave_ip_by_mac(node_mac_address)
def xmlrpctoken(uri, login, password):
@ -324,17 +322,14 @@ def underscored(*args):
def get_nodes(admin_ip):
keystone_auth = V2Password(
auth_url="http://{}:5000/v2.0".format(admin_ip),
username=settings.KEYSTONE_CREDS['username'],
password=settings.KEYSTONE_CREDS['password'],
tenant_name=settings.KEYSTONE_CREDS['tenant_name'])
keystone_session = KeystoneSession(auth=keystone_auth, verify=False)
nodes = keystone_session.get(
'/nodes',
endpoint_filter={'service_type': 'fuel'}
)
return nodes.json()
msg = ('get_nodes has been deprecated in favor of '
'NailgunClient.get_nodes_json')
logger.warning(msg)
warnings.warn(msg, DeprecationWarning)
from devops.client import nailgun
ng_client = nailgun.NailgunClient(ip=admin_ip)
return ng_client.get_nodes_json()
def utc_to_local(t):
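The old helpers keep working but now warn and proxy to the new client; a sketch of surfacing those warnings during migration (environment name is a placeholder):

import warnings

from devops.helpers import helpers
from devops import models

env = models.Environment.get(name='fuel_env')        # placeholder name
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter('always', DeprecationWarning)
    remote = helpers.get_admin_remote(env)           # delegates to DevopsEnvironment.get_admin_remote
for warning in caught:
    print(warning.message)                           # deprecation message pointing to the new API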

devops/helpers/ntp.py

@ -1,4 +1,4 @@
# Copyright 2015 Mirantis, Inc.
# Copyright 2015 - 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -13,19 +13,19 @@
# under the License.
import abc
import collections
import warnings
import paramiko
from six import add_metaclass
import six
from devops.error import DevopsError
from devops.helpers.helpers import get_admin_remote
from devops.helpers.helpers import get_node_remote
from devops.helpers.helpers import wait
from devops.helpers.retry import retry
from devops import error
from devops.helpers import helpers
from devops.helpers import retry
from devops import logger
@retry(paramiko.SSHException, count=3, delay=60)
@retry.retry(paramiko.SSHException, count=3, delay=60)
def sync_time(env, node_names, skip_sync=False):
"""Synchronize time on nodes
@ -34,26 +34,17 @@ def sync_time(env, node_names, skip_sync=False):
param: skip_sync - only get the current time without sync
return: dict{node_name: node_time, ...}
"""
with GroupNtpSync(env, node_names) as g_ntp:
logger.warning('sync_time is deprecated. Use DevopsClient instead')
warnings.warn(
'sync_time is deprecated. Use DevopsClient.sync_time instead',
DeprecationWarning)
if not skip_sync:
if g_ntp.admin_ntps:
g_ntp.do_sync_time(g_ntp.admin_ntps)
if g_ntp.pacemaker_ntps:
g_ntp.do_sync_time(g_ntp.pacemaker_ntps)
if g_ntp.other_ntps:
g_ntp.do_sync_time(g_ntp.other_ntps)
all_ntps = g_ntp.admin_ntps + g_ntp.pacemaker_ntps + g_ntp.other_ntps
results = {ntp.node_name: ntp.date for ntp in all_ntps}
return results
from devops.client import DevopsClient
denv = DevopsClient().get_env(env.name)
return denv.sync_time(node_names=node_names, skip_sync=skip_sync)
@add_metaclass(abc.ABCMeta)
class AbstractNtp(object):
class AbstractNtp(six.with_metaclass(abc.ABCMeta, object)):
def __init__(self, remote, node_name):
self._remote = remote
@ -111,10 +102,11 @@ class BaseNtp(AbstractNtp):
# Waiting for parent server until it starts providing the time
set_date_cmd = "ntpdate -p 4 -t 0.2 -bu {0}".format(server)
wait(lambda: not self.remote.execute(set_date_cmd)['exit_code'],
timeout=timeout,
timeout_msg='Failed to set actual time on node {!r}'.format(
self._node_name))
helpers.wait(
lambda: not self.remote.execute(set_date_cmd)['exit_code'],
timeout=timeout,
timeout_msg='Failed to set actual time on node {!r}'.format(
self._node_name))
self.remote.check_call('hwclock -w')
@ -152,11 +144,12 @@ class BaseNtp(AbstractNtp):
return False
def wait_peer(self, interval=8, timeout=600):
wait(self._get_sync_complete,
interval=interval,
timeout=timeout,
timeout_msg='Failed to wait peer on node {!r}'.format(
self._node_name))
helpers.wait(
self._get_sync_complete,
interval=interval,
timeout=timeout,
timeout_msg='Failed to wait peer on node {!r}'.format(
self._node_name))
# pylint: enable=abstract-method
@ -234,9 +227,10 @@ class NtpChronyd(AbstractNtp):
self._remote.check_call('chronyc -a burst 3/5')
# wait burst complete
wait(self._get_burst_complete, timeout=timeout,
timeout_msg='Failed to set actual time on node {!r}'.format(
self._node_name))
helpers.wait(
self._get_burst_complete, timeout=timeout,
timeout_msg='Failed to set actual time on node {!r}'.format(
self._node_name))
# set system clock
self._remote.check_call('chronyc -a makestep')
@ -269,40 +263,12 @@ class GroupNtpSync(object):
elif len(remote.execute(initd_cmd)['stdout']):
return NtpInitscript(remote, node_name)
else:
raise DevopsError('No suitable NTP service found on node {!r}'
''.format(node_name))
raise error.DevopsError(
'No suitable NTP service found on node {!r}'
''.format(node_name))
def __init__(self, env, node_names):
"""Context manager for synchronize time on nodes
param: env - environment object
param: node_names - list of devops node names
"""
self.admin_ntps = []
self.pacemaker_ntps = []
self.other_ntps = []
for node_name in node_names:
if node_name == 'admin':
# 1. Add a 'Ntp' instance with connection to Fuel admin node
admin_remote = get_admin_remote(env)
admin_ntp = self.get_ntp(admin_remote, 'admin')
self.admin_ntps.append(admin_ntp)
logger.debug("Added node '{0}' to self.admin_ntps"
.format(node_name))
continue
remote = get_node_remote(env, node_name)
ntp = self.get_ntp(remote, node_name)
if isinstance(ntp, NtpPacemaker):
# 2. Create a list of 'Ntp' connections to the controller nodes
self.pacemaker_ntps.append(ntp)
logger.debug("Added node '{0}' to self.pacemaker_ntps"
.format(node_name))
else:
# 2. Create a list of 'Ntp' connections to the other nodes
self.other_ntps.append(ntp)
logger.debug("Added node '{0}' to self.other_ntps"
.format(node_name))
def __init__(self):
self.ntp_groups = collections.defaultdict(list)
def __enter__(self):
return self
@ -310,32 +276,53 @@ class GroupNtpSync(object):
def __exit__(self, exp_type, exp_value, traceback):
pass
@staticmethod
def report_node_names(ntps):
return [ntp.node_name for ntp in ntps]
def add_node(self, remote, node_name):
group = 'other'
if node_name == 'admin':
group = 'admin'
ntp = self.get_ntp(remote, 'admin')
else:
ntp = self.get_ntp(remote, node_name)
if isinstance(ntp, NtpPacemaker):
group = 'pacemaker'
def do_sync_time(self, ntps):
# 1. Stop NTPD service on nodes
logger.debug("Stop NTPD service on nodes {0}"
.format(self.report_node_names(ntps)))
self.ntp_groups[group].append(ntp)
def get_curr_time(self):
return {
ntp.node_name: ntp.date
for ntps in self.ntp_groups.values()
for ntp in ntps
}
def sync_time(self, group_name):
if group_name not in self.ntp_groups:
logger.debug("No ntp group: {0}".format(group_name))
return
ntps = self.ntp_groups[group_name]
if not ntps:
logger.debug("No nodes in ntp group: {0}".format(group_name))
return
node_names = [ntp.node_name for ntp in ntps]
logger.debug("Stop NTP service on nodes {0}".format(node_names))
for ntp in ntps:
ntp.stop()
# 2. Set actual time on all nodes via 'ntpdate'
logger.debug("Set actual time on all nodes via 'ntpdate' on nodes {0}"
.format(self.report_node_names(ntps)))
logger.debug("Set actual time on nodes {0}".format(node_names))
for ntp in ntps:
ntp.set_actual_time()
# 3. Start NTPD service on nodes
logger.debug("Start NTPD service on nodes {0}"
.format(self.report_node_names(ntps)))
logger.debug("Start NTP service on nodes {0}".format(node_names))
for ntp in ntps:
ntp.start()
# 4. Wait for established peers
logger.debug("Wait for established peers on nodes {0}"
.format(self.report_node_names(ntps)))
logger.debug("Wait for established peers on nodes {0}".format(
node_names))
for ntp in ntps:
ntp.wait_peer()
logger.debug("time sync completted on nodes {0}".format(node_names))

devops/models/base.py

@ -13,10 +13,8 @@
# under the License.
import abc
from datetime import datetime
# pylint: disable=redefined-builtin
from functools import reduce
# pylint: enable=redefined-builtin
import datetime
import functools
import operator
from django.db import models
@ -25,8 +23,8 @@ from django.db.models import query
import jsonfield
import six
from devops.error import DevopsError
from devops.helpers.helpers import deepgetattr
from devops import error
from devops.helpers import helpers
from devops.helpers import loader
@ -41,7 +39,7 @@ class BaseModel(models.Model):
class Meta(object):
abstract = True
created = models.DateTimeField(default=datetime.utcnow)
created = models.DateTimeField(default=datetime.datetime.utcnow)
class ParamedModelType(ModelBase):
@ -69,7 +67,8 @@ class ParamedModelType(ModelBase):
# if not ParamModel itself
if name != 'ParamedModel' and name != 'NewBase':
# pylint: disable=map-builtin-not-iterating
parents = reduce(operator.add, map(lambda a: a.__mro__, bases))
parents = functools.reduce(
operator.add, map(lambda a: a.__mro__, bases))
# pylint: enable=map-builtin-not-iterating
# if not a first subclass of ParamedModel
if ParamedModel not in bases and ParamedModel in parents:
@ -172,14 +171,14 @@ class ParamField(ParamFieldBase):
a.bar = 'c'
print(a.params) # prints {'foo': 5, 'bar': 'c'}
a.bar = 15 # throws DevopsError
a.bar = 15 # throws DevopsError
"""
def __init__(self, default=None, choices=None):
super(ParamField, self).__init__()
if choices and default not in choices:
raise DevopsError('Default value not in choices list')
raise error.DevopsError('Default value not in choices list')
self.default_value = default
self.choices = choices
@ -192,8 +191,8 @@ class ParamField(ParamFieldBase):
def __set__(self, instance, value):
if self.choices and value not in self.choices:
raise DevopsError('{}: Value not in choices list'
''.format(self.param_key))
raise error.DevopsError(
'{}: Value not in choices list'.format(self.param_key))
instance.params[self.param_key] = value
@ -225,14 +224,15 @@ class ParamMultiField(ParamFieldBase):
super(ParamMultiField, self).__init__()
if len(subfields) == 0:
raise DevopsError('subfields is empty')
raise error.DevopsError('subfields is empty')
self.subfields = []
for name, field in subfields.items():
if not isinstance(field, ParamFieldBase):
raise DevopsError('field "{}" has wrong type;'
' should be ParamFieldBase subclass instance'
''.format(name))
raise error.DevopsError(
'field "{}" has wrong type;'
' should be ParamFieldBase subclass instance'
''.format(name))
field.set_param_key(name)
self.subfields.append(field)
@ -259,11 +259,12 @@ class ParamMultiField(ParamFieldBase):
def __set__(self, instance, values):
if not isinstance(values, dict):
raise DevopsError('Can set only dict')
raise error.DevopsError('Can set only dict')
self._init_proxy_params(instance)
for field_name, field_value in values.items():
if field_name not in self.proxy_fields:
raise DevopsError('Unknown field "{}"'.format(field_name))
raise error.DevopsError(
'Unknown field "{}"'.format(field_name))
setattr(self._proxy, field_name, field_value)
@ -326,8 +327,8 @@ class ParamedModelQuerySet(query.QuerySet):
# skip other classes
continue
item_val = deepgetattr(item, key, splitter='__',
do_raise=True)
item_val = helpers.deepgetattr(
item, key, splitter='__', do_raise=True)
if item_val != value:
break
else:
@ -379,6 +380,7 @@ class ParamedModel(six.with_metaclass(ParamedModelType, models.Model)):
for basecls in cls.__mro__:
if not hasattr(basecls, '_param_field_names'):
continue
# noinspection PyProtectedMember
param_names += basecls._param_field_names
return param_names
@ -386,6 +388,7 @@ class ParamedModel(six.with_metaclass(ParamedModelType, models.Model)):
for basecls in self.__class__.__mro__:
if not hasattr(basecls, '_param_field_names'):
continue
# noinspection PyProtectedMember
for param in basecls._param_field_names:
basecls.__dict__[param].set_default_value(self)
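For clarity on the functools.reduce usage above, a standalone toy illustration of what that expression computes (the classes below are made up, not from this codebase):

import functools
import operator

class A(object):
    pass

class B(A):
    pass

bases = (B, A)
# Concatenate the MRO tuples of every base class, as ParamedModelType does:
parents = functools.reduce(operator.add, map(lambda cls: cls.__mro__, bases))
print(parents)  # (B, A, object, A, object)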

devops/models/environment.py

@ -22,14 +22,9 @@ from netaddr import IPNetwork
from paramiko import Agent
from paramiko import RSAKey
from devops.error import DevopsEnvironmentError
from devops.error import DevopsError
from devops.error import DevopsObjNotFound
from devops import error
from devops.helpers.network import IpNetworksPool
from devops.helpers.ssh_client import SSHAuth
from devops.helpers.ssh_client import SSHClient
from devops.helpers.templates import create_devops_config
from devops.helpers.templates import get_devops_config
from devops.helpers import ssh_client
from devops import logger
from devops.models.base import BaseModel
from devops.models.driver import Driver
@ -46,18 +41,28 @@ class Environment(BaseModel):
name = models.CharField(max_length=255, unique=True, null=False)
hostname = 'nailgun'
domain = 'test.domain.local'
nat_interface = '' # INTERFACES.get('admin')
# TODO(akostrikov) As we providing admin net names in fuel-qa/settings,
# we should create constant and use it in fuel-qa or
# pass admin net names to Environment from fuel-qa.
admin_net = 'admin'
admin_net2 = 'admin2'
def __repr__(self):
return 'Environment(name={name!r})'.format(name=self.name)
@property
def admin_net(self):
msg = (
'Environment.admin_net is deprecated. '
'Replace by string "admin".'
)
logger.warning(msg)
warn(msg, DeprecationWarning)
return 'admin'
@property
def nat_interface(self):
msg = (
'Environment.nat_interface is deprecated.'
)
logger.warning(msg)
warn(msg, DeprecationWarning)
return ''
def get_allocated_networks(self):
allocated_networks = []
for group in self.get_groups():
@ -68,7 +73,7 @@ class Environment(BaseModel):
try:
return self.addresspool_set.get(**kwargs)
except AddressPool.DoesNotExist:
raise DevopsObjNotFound(AddressPool, **kwargs)
raise error.DevopsObjNotFound(AddressPool, **kwargs)
def get_address_pools(self, **kwargs):
return self.addresspool_set.filter(**kwargs).order_by('id')
@ -77,7 +82,7 @@ class Environment(BaseModel):
try:
return self.group_set.get(**kwargs)
except Group.DoesNotExist:
raise DevopsObjNotFound(Group, **kwargs)
raise error.DevopsObjNotFound(Group, **kwargs)
def get_groups(self, **kwargs):
return self.group_set.filter(**kwargs).order_by('id')
@ -147,15 +152,17 @@ class Environment(BaseModel):
try:
return cls.objects.create(name=name)
except IntegrityError:
raise DevopsError('Environment with name {!r} already exists'
''.format(name))
raise error.DevopsError(
'Environment with name {!r} already exists. '
'Please, set another environment name.'
''.format(name))
@classmethod
def get(cls, *args, **kwargs):
try:
return cls.objects.get(*args, **kwargs)
except Environment.DoesNotExist:
raise DevopsObjNotFound(Environment, *args, **kwargs)
raise error.DevopsObjNotFound(Environment, *args, **kwargs)
@classmethod
def list_all(cls):
@ -202,6 +209,10 @@ class Environment(BaseModel):
def snapshot(self, name=None, description=None, force=False):
if name is None:
name = str(int(time.time()))
if self.has_snapshot(name):
raise error.DevopsError(
'Snapshot with name {0} already exists.'.format(name))
for node in self.get_nodes():
node.snapshot(name=name, description=description, force=force,
external=settings.SNAPSHOTS_EXTERNAL)
@ -217,6 +228,7 @@ class Environment(BaseModel):
for l2netdev in group.get_l2_network_devices():
l2netdev.unblock()
# NOTE: Does not work
# TO REWRITE FOR LIBVIRT DRIVER ONLY
@classmethod
def synchronize_all(cls):
@ -257,43 +269,17 @@ class Environment(BaseModel):
Please use self.create_environment() instead.
"""
warn(
'describe_environment is deprecated in favor of'
' create_environment', DeprecationWarning)
if settings.DEVOPS_SETTINGS_TEMPLATE:
config = get_devops_config(
settings.DEVOPS_SETTINGS_TEMPLATE)
else:
config = create_devops_config(
boot_from=boot_from,
env_name=settings.ENV_NAME,
admin_vcpu=settings.HARDWARE["admin_node_cpu"],
admin_memory=settings.HARDWARE["admin_node_memory"],
admin_sysvolume_capacity=settings.ADMIN_NODE_VOLUME_SIZE,
admin_iso_path=settings.ISO_PATH,
nodes_count=settings.NODES_COUNT,
numa_nodes=settings.HARDWARE['numa_nodes'],
slave_vcpu=settings.HARDWARE["slave_node_cpu"],
slave_memory=settings.HARDWARE["slave_node_memory"],
slave_volume_capacity=settings.NODE_VOLUME_SIZE,
second_volume_capacity=settings.NODE_VOLUME_SIZE,
third_volume_capacity=settings.NODE_VOLUME_SIZE,
use_all_disks=settings.USE_ALL_DISKS,
multipath_count=settings.SLAVE_MULTIPATH_DISKS_COUNT,
ironic_nodes_count=settings.IRONIC_NODES_COUNT,
networks_bonding=settings.BONDING,
networks_bondinginterfaces=settings.BONDING_INTERFACES,
networks_multiplenetworks=settings.MULTIPLE_NETWORKS,
networks_nodegroups=settings.NODEGROUPS,
networks_interfaceorder=settings.INTERFACE_ORDER,
networks_pools=settings.POOLS,
networks_forwarding=settings.FORWARDING,
networks_dhcp=settings.DHCP,
driver_enable_acpi=settings.DRIVER_PARAMETERS['enable_acpi'],
driver_enable_nwfilers=settings.ENABLE_LIBVIRT_NWFILTERS,
)
'describe_environment is deprecated in favor of '
'DevopsClient.create_env_from_config', DeprecationWarning)
environment = cls.create_environment(config)
return environment
from devops.client import DevopsClient
client = DevopsClient()
template = settings.DEVOPS_SETTINGS_TEMPLATE
if template:
return client.create_env_from_config(template)
else:
return client.create_env()
@classmethod
def create_environment(cls, full_config):
@ -349,13 +335,16 @@ class Environment(BaseModel):
if env.get_nodes().count() == 0:
env.erase()
# TO L2_NETWORK_device, LEGACY
# Rename it to default_gw and move to models.Network class
def router(self, router_name=None): # Alternative name: get_host_node_ip
router_name = router_name or self.admin_net
if router_name == self.admin_net2:
return str(self.get_network(name=router_name).ip[2])
return str(self.get_network(name=router_name).ip[1])
# LEGACY, TO REMOVE
def router(self, router_name='admin'):
msg = ('router has been deprecated in favor of '
'DevopsEnvironment.get_default_gw')
logger.warning(msg)
warn(msg, DeprecationWarning)
from devops.client import DevopsClient
env = DevopsClient().get_env(self.name)
return env.get_default_gw(l2_network_device_name=router_name)
# LEGACY, for fuel-qa compatibility
# @logwrap
@ -366,35 +355,32 @@ class Environment(BaseModel):
:rtype : SSHClient
"""
admin = sorted(
list(self.get_nodes(role__contains='master')),
key=lambda node: node.name
)[0]
return admin.remote(
self.admin_net, auth=SSHAuth(
username=login,
password=password))
msg = ('get_admin_remote has been deprecated in favor of '
'DevopsEnvironment.get_admin_remote')
logger.warning(msg)
warn(msg, DeprecationWarning)
from devops.client import DevopsClient
env = DevopsClient().get_env(self.name)
return env.get_admin_remote(login=login, password=password)
# LEGACY, for fuel-qa compatibility
# @logwrap
def get_ssh_to_remote(self, ip,
login=settings.SSH_SLAVE_CREDENTIALS['login'],
password=settings.SSH_SLAVE_CREDENTIALS['password']):
warn('LEGACY, for fuel-qa compatibility', DeprecationWarning)
keys = []
remote = self.get_admin_remote()
for key_string in ['/root/.ssh/id_rsa',
'/root/.ssh/bootstrap.rsa']:
if remote.isfile(key_string):
with remote.open(key_string) as f:
keys.append(RSAKey.from_private_key(f))
msg = ('get_ssh_to_remote has been deprecated in favor of '
'DevopsEnvironment.get_node_remote')
logger.warning(msg)
warn(msg, DeprecationWarning)
return SSHClient(
from devops.client import DevopsClient
env = DevopsClient().get_env(self.name)
return ssh_client.SSHClient(
ip,
auth=SSHAuth(
username=login,
password=password,
keys=keys))
auth=ssh_client.SSHAuth(
username=login, password=password,
keys=env.get_private_keys()))
# LEGACY, for fuel-qa compatibility
# @logwrap
@ -408,7 +394,9 @@ class Environment(BaseModel):
logger.warning('Loading of SSH key from file failed. Trying to use'
' SSH agent ...')
keys = Agent().get_keys()
return SSHClient(ip, auth=SSHAuth(keys=keys))
return ssh_client.SSHClient(
ip,
auth=ssh_client.SSHAuth(keys=keys))
# LEGACY, TO REMOVE (for fuel-qa compatibility)
def nodes(self): # migrated from EnvironmentModel.nodes()
@ -434,7 +422,7 @@ class Environment(BaseModel):
self.slaves = self.others
self.all = self.slaves + self.admins + self.ironics
if len(self.admins) == 0:
raise DevopsEnvironmentError(
raise error.DevopsEnvironmentError(
"No nodes with role 'fuel_master' found in the "
"environment {env_name}, please check environment "
"configuration".format(
@ -478,7 +466,7 @@ class Environment(BaseModel):
return L2NetworkDevice.objects.get(
group__environment=self, **kwargs)
except L2NetworkDevice.DoesNotExist:
raise DevopsObjNotFound(L2NetworkDevice, **kwargs)
raise error.DevopsObjNotFound(L2NetworkDevice, **kwargs)
def get_env_l2_network_devices(self, **kwargs):
return L2NetworkDevice.objects.filter(
@ -496,14 +484,12 @@ class Environment(BaseModel):
address_pool__isnull=False, **kwargs)
return [self._create_network_object(x) for x in l2_network_devices]
# LEGACY, for fuel-qa compatibility
def get_node(self, *args, **kwargs):
try:
return Node.objects.get(*args, group__environment=self, **kwargs)
except Node.DoesNotExist:
raise DevopsObjNotFound(Node, *args, **kwargs)
raise error.DevopsObjNotFound(Node, *args, **kwargs)
# LEGACY, for fuel-qa compatibility
def get_nodes(self, *args, **kwargs):
return Node.objects.filter(
*args, group__environment=self, **kwargs).order_by('id')
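A sketch of the legacy-versus-new call paths after this change; the legacy methods still work but emit DeprecationWarning and delegate (environment name is a placeholder):

from devops.client import DevopsClient
from devops.models import Environment

env = Environment.get(name='fuel_env')    # placeholder name
gw_legacy = env.router()                  # warns, then proxies to get_default_gw()
gw_new = DevopsClient().get_env('fuel_env').get_default_gw()
# Both return the gateway of the 'admin' address pool.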

devops/models/group.py

@ -16,7 +16,7 @@ from copy import deepcopy
from django.db import models
from devops.error import DevopsObjNotFound
from devops import error
from devops import logger
from devops.models.base import BaseModel
from devops.models.network import L2NetworkDevice
@ -40,7 +40,7 @@ class Group(BaseModel):
try:
return self.l2networkdevice_set.get(**kwargs)
except L2NetworkDevice.DoesNotExist:
raise DevopsObjNotFound(L2NetworkDevice, **kwargs)
raise error.DevopsObjNotFound(L2NetworkDevice, **kwargs)
def get_l2_network_devices(self, **kwargs):
return self.l2networkdevice_set.filter(**kwargs).order_by('id')
@ -49,7 +49,7 @@ class Group(BaseModel):
try:
return self.networkpool_set.get(**kwargs)
except NetworkPool.DoesNotExist:
raise DevopsObjNotFound(NetworkPool, **kwargs)
raise error.DevopsObjNotFound(NetworkPool, **kwargs)
def get_network_pools(self, **kwargs):
return self.networkpool_set.filter(**kwargs).order_by('id')
@ -58,7 +58,7 @@ class Group(BaseModel):
try:
return self.node_set.get(**kwargs)
except Node.DoesNotExist:
raise DevopsObjNotFound(Node, **kwargs)
raise error.DevopsObjNotFound(Node, **kwargs)
def get_nodes(self, **kwargs):
return self.node_set.filter(**kwargs).order_by('id')
@ -79,7 +79,7 @@ class Group(BaseModel):
try:
return cls.objects.get(**kwargs)
except Group.DoesNotExist:
raise DevopsObjNotFound(Group, **kwargs)
raise error.DevopsObjNotFound(Group, **kwargs)
@classmethod
def list_all(cls):
@ -107,7 +107,9 @@ class Group(BaseModel):
l2_network_device.start()
def start_nodes(self, nodes=None):
for node in nodes or self.get_nodes():
if nodes is None:
nodes = self.get_nodes()
for node in nodes:
node.start()
def destroy(self, **kwargs):
@ -187,7 +189,7 @@ class Group(BaseModel):
role=node_cfg['role'],
**node_cfg['params'])
def add_node(self, name, role='fuel_slave', **params):
def add_node(self, name, role=None, **params):
new_params = deepcopy(params)
interfaces = new_params.pop('interfaces', [])
network_configs = new_params.pop('network_config', {})
@ -241,7 +243,7 @@ class Group(BaseModel):
try:
return self.volume_set.get(**kwargs)
except Volume.DoesNotExist:
raise DevopsObjNotFound(Volume, **kwargs)
raise error.DevopsObjNotFound(Volume, **kwargs)
def get_volumes(self, **kwargs):
return self.volume_set.filter(**kwargs)
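A small sketch of the Group API touched above; environment and group names are placeholders, and the role filter assumes the 'fuel_slave' role used elsewhere in this diff:

from devops.client import DevopsClient

env = DevopsClient().get_env('fuel_env')                      # placeholder name
group = env.get_group(name='default')                         # matches add_slaves' default group
group.start_nodes()                                           # no argument: starts every node in the group
group.start_nodes(nodes=group.get_nodes(role='fuel_slave'))   # or an explicit subset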

devops/models/node.py

@ -18,7 +18,7 @@ from django.db import models
from django.utils.functional import cached_property
import six
from devops.error import DevopsObjNotFound
from devops import error
from devops.helpers.helpers import tcp_ping_
from devops.helpers.helpers import wait_pass
from devops.helpers import loader
@ -203,6 +203,9 @@ class Node(six.with_metaclass(ExtendableNodeType, ParamedModel, BaseModel)):
def resume(self, *args, **kwargs):
pass
def is_active(self):
return False
def snapshot(self, *args, **kwargs):
SSHClient.close_connections()
@ -279,20 +282,25 @@ class Node(six.with_metaclass(ExtendableNodeType, ParamedModel, BaseModel)):
return None
return self.interface_set.get(label=label)
# NOTE: this method works only for master node
def get_ip_address_by_network_name(self, name, interface=None):
interface = interface or self.interface_set.filter(
l2_network_device__name=name).order_by('id')[0]
return interface.address_set.get(interface=interface).ip_address
# NOTE: this method works only for master node
def get_ip_address_by_nailgun_network_name(self, name):
interface = self.get_interface_by_nailgun_network_name(name)
return interface.address_set.first().ip_address
# LEGACY
def remote(
self, network_name, login=None, password=None, private_keys=None,
auth=None):
"""Create SSH-connection to the network
NOTE: this method works only for master node
:rtype : SSHClient
"""
return SSHClient(
@ -300,6 +308,8 @@ class Node(six.with_metaclass(ExtendableNodeType, ParamedModel, BaseModel)):
username=login,
password=password, private_keys=private_keys, auth=auth)
# LEGACY
# NOTE: this method works only for master node
def await(self, network_name, timeout=120, by_port=22):
wait_pass(
lambda: tcp_ping_(
@ -414,7 +424,7 @@ class Node(six.with_metaclass(ExtendableNodeType, ParamedModel, BaseModel)):
try:
return self.volume_set.get(**kwargs)
except Volume.DoesNotExist:
raise DevopsObjNotFound(Volume, **kwargs)
raise error.DevopsObjNotFound(Volume, **kwargs)
# NEW
def get_volumes(self, **kwargs):

devops/shell.py

@ -19,68 +19,57 @@ import collections
import os
import sys
# pylint: disable=redefined-builtin
from six.moves import xrange
# pylint: enable=redefined-builtin
import tabulate
import devops
from devops.error import DevopsObjNotFound
from devops.helpers.helpers import utc_to_local
from devops.helpers.ntp import sync_time
from devops.helpers.templates import create_devops_config
from devops.helpers.templates import create_slave_config
from devops.helpers.templates import get_devops_config
from devops.models import Environment
from devops import settings
from devops import client
from devops import error
from devops.helpers import helpers
from devops import logger
class Shell(object):
def __init__(self, args):
self.args = args
self.params = self.get_params()
if getattr(self.params, 'snapshot-name', None):
self.snapshot_name = getattr(self.params, 'snapshot-name')
if (getattr(self.params, 'name', None) and
getattr(self.params, 'command', None) != 'create'):
try:
self.env = Environment.get(name=self.params.name)
except DevopsObjNotFound:
self.env = None
sys.exit("Enviroment with name {} doesn't exist."
"".format(self.params.name))
self.client = client.DevopsClient()
self.env = None
name = getattr(self.params, 'name', None)
command = getattr(self.params, 'command', None)
if name and command != 'create':
self.env = self.client.get_env(name)
def execute(self):
self.commands.get(self.params.command)(self)
command_name = 'do_{}'.format(self.params.command.replace('-', '_'))
command_method = getattr(self, command_name)
command_method()
def print_table(self, headers, columns):
if not columns:
return
print(tabulate.tabulate(columns, headers=headers,
tablefmt="simple"))
def do_list(self):
env_list = Environment.list_all().values('name', 'created')
env_names = self.client.list_env_names()
columns = []
for env in env_list:
column = collections.OrderedDict({'NAME': env['name']})
for env_name in sorted(env_names):
env = self.client.get_env(env_name)
column = collections.OrderedDict()
column['NAME'] = env.name
if self.params.list_ips:
cur_env = Environment.get(name=env['name'])
admin_ip = ''
if 'admin' in [node.name for node in cur_env.get_nodes()]:
admin_ip = (cur_env.get_node(name='admin').
get_ip_address_by_network_name('admin'))
column['ADMIN IP'] = admin_ip
if env.has_admin():
column['ADMIN IP'] = env.get_admin_ip()
else:
column['ADMIN IP'] = ''
if self.params.timestamps:
column['CREATED'] = utc_to_local(env['created']).strftime(
column['CREATED'] = helpers.utc_to_local(env.created).strftime(
'%Y-%m-%d_%H:%M:%S')
columns.append(column)
if columns:
self.print_table(headers="keys", columns=columns)
def node_dict(self, node):
return {'name': node.name,
'vnc': node.get_vnc_port()}
self.print_table(headers='keys', columns=columns)
def do_show(self):
nodes = sorted(self.env.get_nodes(), key=lambda node: node.name)
@ -105,17 +94,13 @@ class Shell(object):
self.env.resume()
def do_revert(self):
self.env.revert(self.snapshot_name, flag=False)
self.env.revert(self.params.snapshot_name, flag=False)
def do_snapshot(self):
if self.env.has_snapshot(self.snapshot_name):
sys.exit("Snapshot with name {0} already exists."
.format(self.snapshot_name))
else:
self.env.snapshot(self.snapshot_name)
self.env.snapshot(self.params.snapshot_name)
def do_synchronize(self):
Environment.synchronize_all()
def do_sync(self):
self.client.synchronize_all()
def do_snapshot_list(self):
snapshots = collections.OrderedDict()
@ -137,7 +122,8 @@ class Shell(object):
nodes.sort()
columns.append((
info.name,
utc_to_local(info.created).strftime('%Y-%m-%d %H:%M:%S'),
helpers.utc_to_local(
info.created).strftime('%Y-%m-%d %H:%M:%S'),
', '.join(nodes),
))
@ -146,184 +132,114 @@ class Shell(object):
def do_snapshot_delete(self):
for node in self.env.get_nodes():
snaps = [x.name for x in node.get_snapshots()]
if self.snapshot_name in snaps:
node.erase_snapshot(name=self.snapshot_name)
if self.params.snapshot_name in snaps:
node.erase_snapshot(name=self.params.snapshot_name)
def do_net_list(self):
headers = ("NETWORK NAME", "IP NET")
columns = [(net.name, net.ip_network)
for net in self.env.get_networks()]
for net in self.env.get_address_pools()]
self.print_table(headers=headers, columns=columns)
def do_timesync(self):
if not self.params.node_name:
nodes = [node.name for node in self.env.get_nodes()
if node.driver.node_active(node)]
else:
nodes = [self.params.node_name]
cur_time = sync_time(self.env, nodes, skip_sync=True)
def do_time_sync(self):
node_name = self.params.node_name
node_names = [node_name] if node_name else None
cur_time = self.env.get_curr_time(node_names)
for name in sorted(cur_time):
print("Current time on '{0}' = {1}".format(name, cur_time[name]))
print('Current time on {0!r} = {1}'.format(name, cur_time[name]))
print("Please wait for a few minutes while time is synchronized...")
print('Please wait for a few minutes while time is synchronized...')
new_time = sync_time(self.env, nodes, skip_sync=False)
new_time = self.env.sync_time(node_names)
for name in sorted(new_time):
print("New time on '{0}' = {1}".format(name, new_time[name]))
def do_revert_resume(self):
self.env.revert(self.snapshot_name, flag=False)
self.env.revert(self.params.snapshot_name, flag=False)
self.env.resume()
if not self.params.no_timesync:
print('Time synchronization is starting')
self.do_timesync()
self.do_time_sync()
def do_version(self):
print(devops.__version__)
def do_create(self):
config = create_devops_config(
boot_from='cdrom',
"""Create env using cli parameters."""
env = self.client.create_env(
env_name=self.params.name,
admin_iso_path=self.params.iso_path,
admin_vcpu=self.params.admin_vcpu_count,
admin_memory=self.params.admin_ram_size,
admin_sysvolume_capacity=self.params.admin_disk_size,
admin_iso_path=self.params.iso_path,
nodes_count=self.params.node_count,
numa_nodes=settings.HARDWARE['numa_nodes'],
slave_vcpu=self.params.vcpu_count,
slave_memory=self.params.ram_size,
slave_volume_capacity=settings.NODE_VOLUME_SIZE,
second_volume_capacity=self.params.second_disk_size,
third_volume_capacity=self.params.third_disk_size,
use_all_disks=settings.USE_ALL_DISKS,
multipath_count=settings.SLAVE_MULTIPATH_DISKS_COUNT,
ironic_nodes_count=settings.IRONIC_NODES_COUNT,
networks_bonding=settings.BONDING,
networks_bondinginterfaces=settings.BONDING_INTERFACES,
networks_multiplenetworks=settings.MULTIPLE_NETWORKS,
networks_nodegroups=settings.NODEGROUPS,
networks_interfaceorder=settings.INTERFACE_ORDER,
networks_pools=dict(admin=self.params.net_pool.split(':'),
public=self.params.net_pool.split(':'),
management=self.params.net_pool.split(':'),
private=self.params.net_pool.split(':'),
storage=self.params.net_pool.split(':')),
networks_forwarding=settings.FORWARDING,
networks_dhcp=settings.DHCP,
driver_enable_acpi=settings.DRIVER_PARAMETERS['enable_acpi'],
driver_enable_nwfilers=settings.ENABLE_LIBVIRT_NWFILTERS,
net_pool=self.params.net_pool.split(':'),
)
self._create_env_from_config(config)
env.define()
def do_create_env(self):
config = get_devops_config(self.params.env_config_name)
self._create_env_from_config(config)
"""Create env using config file."""
env = self.client.create_env_from_config(
self.params.env_config_name)
env.define()
def _create_env_from_config(self, config):
env_name = config['template']['devops_settings']['env_name']
for env in Environment.list_all():
if env.name == env_name:
print("Please, set another environment name")
raise SystemExit()
self.env = Environment.create_environment(config)
self.env.define()
# Start all l2 network devices
for group in self.env.get_groups():
for net in group.get_l2_network_devices():
net.start()
def do_slave_add(self, force_define=True):
try:
group = self.env.get_group(name=self.params.group_name)
except DevopsObjNotFound:
print('Group {!r} not found'.format(self.params.group_name))
raise SystemExit()
node_count = self.params.node_count
created_node_names = [n.name for n in group.get_nodes()]
def get_available_slave_name():
for i in xrange(1, 1000):
name = "slave-{:02d}".format(i)
if name in created_node_names:
continue
created_node_names.append(name)
return name
for node_num in range(node_count):
node_name = get_available_slave_name()
slave_conf = create_slave_config(
slave_name=node_name,
slave_role='fuel_slave',
slave_vcpu=self.params.vcpu_count,
slave_memory=self.params.ram_size,
slave_volume_capacity=settings.NODE_VOLUME_SIZE,
second_volume_capacity=self.params.second_disk_size,
third_volume_capacity=self.params.third_disk_size,
interfaceorder=settings.INTERFACE_ORDER,
numa_nodes=settings.HARDWARE['numa_nodes'],
use_all_disks=True,
networks_multiplenetworks=settings.MULTIPLE_NETWORKS,
networks_nodegroups=settings.NODEGROUPS,
networks_bonding=settings.BONDING,
networks_bondinginterfaces=settings.BONDING_INTERFACES,
)
node = group.add_node(**slave_conf)
if force_define is True:
for volume in node.get_volumes():
volume.define()
node.define()
print('Node {!r} created'.format(node.name))
def do_slave_add(self):
self.env.add_slaves(
nodes_count=self.params.node_count,
slave_vcpu=self.params.vcpu_count,
slave_memory=self.params.ram_size,
second_volume_capacity=self.params.second_disk_size,
third_volume_capacity=self.params.third_disk_size,
)
def do_slave_remove(self):
volumes = []
for drive in self.env.get_node(
name=self.params.node_name).disk_devices:
volumes.append(drive.volume)
self.env.get_node(name=self.params.node_name).remove()
for volume in volumes:
volume.erase()
# TODO(astudenov): add positional argument instead of option
node = self.env.get_node(name=self.params.node_name)
node.remove()
def do_slave_change(self):
node = self.env.get_node(name=self.params.node_name)
# TODO(astudenov): check if node is under libvirt control
node.set_vcpu(vcpu=self.params.vcpu_count)
node.set_memory(memory=self.params.ram_size)
def do_admin_change(self):
node = self.env.get_node(name="admin")
# TODO(astudenov): check if node is under libvirt control
node.set_vcpu(vcpu=self.params.admin_vcpu_count)
node.set_memory(memory=self.params.admin_ram_size)
def do_admin_setup(self):
admin_node = self.env.get_node(name='admin')
if admin_node.kernel_cmd is None:
admin_node.kernel_cmd = admin_node.ext.get_kernel_cmd(
boot_from=self.params.boot_from,
wait_for_external_config='no',
iface=self.params.iface)
admin_node.ext.bootstrap_and_wait()
admin_node.ext.deploy_wait()
# start networks first
for group in self.env.get_groups():
group.start_networks()
print("Setup complete.\n ssh {0}@{1}".format(
settings.SSH_CREDENTIALS['login'],
admin_node.get_ip_address_by_network_name(
settings.SSH_CREDENTIALS['admin_network'])))
self.env.admin_setup(
boot_from=self.params.boot_from,
iface=self.params.iface)
print('Setup complete.\n ssh {0}@{1}'.format(
self.env.get_admin_login(),
self.env.get_admin_ip()))
def do_node_start(self):
# TODO(astudenov): add positional argument instead of
# checking that option is present
self.check_param_show_help(self.params.node_name)
self.env.get_node(name=self.params.node_name).start()
def do_node_destroy(self):
# TODO(astudenov): add positional argument instead of
# checking that option is present
self.check_param_show_help(self.params.node_name)
self.env.get_node(name=self.params.node_name).destroy()
def do_node_reset(self):
# TODO(astudenov): add positional argument instead of
# checking that option is present
self.check_param_show_help(self.params.node_name)
self.env.get_node(name=self.params.node_name).reset()
@ -332,35 +248,6 @@ class Shell(object):
self.args.append('-h')
self.get_params()
commands = {
'list': do_list,
'show': do_show,
'erase': do_erase,
'start': do_start,
'destroy': do_destroy,
'suspend': do_suspend,
'resume': do_resume,
'revert': do_revert,
'snapshot': do_snapshot,
'sync': do_synchronize,
'snapshot-list': do_snapshot_list,
'snapshot-delete': do_snapshot_delete,
'net-list': do_net_list,
'time-sync': do_timesync,
'revert-resume': do_revert_resume,
'version': do_version,
'create': do_create,
'create-env': do_create_env,
'slave-add': do_slave_add,
'slave-change': do_slave_change,
'slave-remove': do_slave_remove,
'admin-setup': do_admin_setup,
'admin-change': do_admin_change,
'node-start': do_node_start,
'node-destroy': do_node_destroy,
'node-reset': do_node_reset
}
def get_params(self):
name_parser = argparse.ArgumentParser(add_help=False)
@ -378,7 +265,7 @@ class Shell(object):
'DEVOPS_SETTINGS_TEMPLATE'))
snapshot_name_parser = argparse.ArgumentParser(add_help=False)
snapshot_name_parser.add_argument('snapshot-name',
snapshot_name_parser.add_argument('snapshot_name',
help='snapshot name',
default=os.environ.get(
'SNAPSHOT_NAME'))
@ -627,4 +514,10 @@ class Shell(object):
def main(args=None):
if args is None:
args = sys.argv[1:]
Shell(args).execute()
try:
shell = Shell(args)
shell.execute()
except error.DevopsError as exc:
logger.debug(exc, exc_info=True)
sys.exit('Error: {}'.format(exc))
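The CLI entry point can also be exercised programmatically; a sketch using two of the commands wired up above (argument lists are illustrative):

from devops import shell

shell.main(['version'])    # prints the installed devops version
shell.main(['list'])       # environment listing; DevopsError now exits with a clean message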


@ -0,0 +1,13 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


@ -0,0 +1,171 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from devops.client import client
from devops.client import environment
from devops import error
from devops.tests.driver import driverless
class TestDevopsClient(driverless.DriverlessTestCase):
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
m = patcher.start()
self.addCleanup(patcher.stop)
return m
def setUp(self):
super(TestDevopsClient, self).setUp()
self.conf = {
'template': {
'devops_settings': {
'env_name': 'test2',
'address_pools': {
'pool1': {
'net': '10.109.0.0/16:24',
'params': {
'tag': 0,
'ip_reserved': {
'gateway': 1,
'l2_network_device': 1,
},
'ip_ranges': {
'default': [2, -2]
}
}
}
},
'groups': [
{
'name': 'rack-01',
'driver': {
'name': 'devops.driver.empty',
},
'network_pools': {
'fuelweb_admin': 'pool1'
},
'l2_network_devices': {
'admin': {
'address_pool': 'pool1',
}
}
}
]
}
}
}
self.cr_conf_mock = self.patch(
'devops.helpers.templates.create_devops_config')
self.cr_conf_mock.return_value = self.conf
self.get_conf_mock = self.patch(
'devops.helpers.templates.get_devops_config')
self.get_conf_mock.return_value = self.conf
self.c = client.DevopsClient()
def test_get_env(self):
test_env = self.c.get_env('test')
assert test_env.name == 'test'
assert isinstance(test_env, environment.DevopsEnvironment)
def test_get_env_error(self):
with self.assertRaises(error.DevopsError):
self.c.get_env('unknown')
def test_list_env_names(self):
assert self.c.list_env_names() == ['test']
test_env = self.c.get_env('test')
test_env.erase()
assert self.c.list_env_names() == []
def test_create_env_default(self):
env = self.c.create_env(env_name='test2')
assert env.name == 'test2'
self.cr_conf_mock.assert_called_once_with(
admin_iso_path=None,
admin_memory=3072,
admin_sysvolume_capacity=75,
admin_vcpu=2,
boot_from='cdrom',
driver_enable_acpi=False,
driver_enable_nwfilers=False,
env_name='test2',
ironic_nodes_count=0,
multipath_count=0,
networks_bonding=False,
networks_bondinginterfaces={
'admin': ['eth0', 'eth1'],
'public': ['eth2', 'eth3', 'eth4', 'eth5']},
networks_dhcp={
'admin': False,
'management': False,
'storage': False,
'public': False,
'private': False},
networks_forwarding={
'admin': 'nat',
'management': None,
'storage': None,
'public': 'nat',
'private': None},
networks_interfaceorder=[
'admin',
'public',
'management',
'private',
'storage'],
networks_multiplenetworks=False,
networks_nodegroups=(),
networks_pools={
'admin': ['10.109.0.0/16', '24'],
'management': ['10.109.0.0/16', '24'],
'storage': ['10.109.0.0/16', '24'],
'public': ['10.109.0.0/16', '24'],
'private': ['10.109.0.0/16', '24']},
nodes_count=10,
numa_nodes=0,
second_volume_capacity=50,
slave_memory=3027,
slave_vcpu=2,
slave_volume_capacity=50,
third_volume_capacity=50,
use_all_disks=True,
)
assert self.c.list_env_names() == ['test', 'test2']
def test_create_env_from_config(self):
env = self.c.create_env_from_config(self.conf)
assert env.name == 'test2'
assert env.get_address_pool(name='pool1') is not None
assert env.get_group(name='rack-01') is not None
def test_create_env_from_config_file(self):
env = self.c.create_env_from_config('/path/to/my-conf.yaml')
self.get_conf_mock.assert_called_once_with('/path/to/my-conf.yaml')
assert env.name == 'test2'
assert env.get_address_pool(name='pool1') is not None
assert env.get_group(name='rack-01') is not None
def test_synchronize_all(self):
sync_all_mock = self.patch(
'devops.models.environment.Environment.synchronize_all')
self.c.synchronize_all()
sync_all_mock.assert_called_once_with()
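For orientation, a rough usage sketch of the client calls these tests drive; the environment name and config path are placeholders, and the comments describe expected behaviour rather than a guaranteed API:

from devops.client import client

c = client.DevopsClient()
print(c.list_env_names())                    # e.g. ['test']
env = c.get_env('test')                      # wrapped DevopsEnvironment
env2 = c.create_env_from_config('/path/to/my-conf.yaml')  # path or dict
c.synchronize_all()                          # re-sync stored envs with the backend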

View File

@ -0,0 +1,366 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from devops.client import environment
from devops.client import nailgun
from devops import error
from devops.helpers import helpers
from devops.helpers import ntp
from devops.helpers import ssh_client
from devops.tests.driver import driverless
class TestDevopsEnvironment(driverless.DriverlessTestCase):
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
m = patcher.start()
self.addCleanup(patcher.stop)
return m
def setUp(self):
super(TestDevopsEnvironment, self).setUp()
self.paramiko_mock = self.patch('devops.client.environment.paramiko')
self.l2dev_start_mock = self.patch(
'devops.models.network.L2NetworkDevice.start')
self.vol_define_mock = self.patch(
'devops.models.volume.Volume.define')
self.wait_tcp_mock = self.patch(
'devops.helpers.helpers.wait_tcp', spec=helpers.wait_tcp)
self.ssh_mock = self.patch(
'devops.helpers.ssh_client.SSHClient', spec=ssh_client.SSHClient)
self.nc_mock = self.patch(
'devops.client.nailgun.NailgunClient', spec=nailgun.NailgunClient)
self.nc_mock_inst = self.nc_mock.return_value
self.mac_to_ip = {
'64:52:dc:96:12:cc': '10.109.0.100',
}
self.nc_mock_inst.get_slave_ip_by_mac.side_effect = self.mac_to_ip.get
self.ntpgroup_mock = self.patch(
'devops.helpers.ntp.GroupNtpSync', spec=ntp.GroupNtpSync)
self.ntpgroup_inst = self.ntpgroup_mock.return_value
self.slave_conf = {
'name': 'slave-00',
'role': 'fuel_slave',
'params': {},
'volumes': [
{
'name': 'system',
},
]
}
self.cr_sl_conf_mock = self.patch(
'devops.helpers.templates.create_slave_config')
self.cr_sl_conf_mock.return_value = self.slave_conf
self.ext_mock = self.patch(
'devops.models.node.Node.ext')
self.env.add_group(group_name='default',
driver_name='devops.driver.empty')
self.denv = environment.DevopsEnvironment(self.env)
def test_add_slaves(self):
nodes = self.denv.add_slaves(
nodes_count=1)
self.cr_sl_conf_mock.assert_called_once_with(
slave_name='slave-00',
slave_role='fuel_slave',
slave_vcpu=1,
slave_memory=1024,
slave_volume_capacity=50,
second_volume_capacity=50,
third_volume_capacity=50,
interfaceorder=[
'admin', 'public', 'management', 'private', 'storage'],
numa_nodes=0,
use_all_disks=True,
networks_multiplenetworks=False,
networks_nodegroups=(),
networks_bonding=False,
networks_bondinginterfaces={
'admin': ['eth0', 'eth1'],
'public': ['eth2', 'eth3', 'eth4', 'eth5']},
)
assert len(nodes) == 1
assert nodes[0].name == 'slave-00'
self.vol_define_mock.assert_called_once_with()
def test_admin_setup(self):
self.group.add_node(
name='admin',
            role='fuel_master')
admin = self.denv.admin_setup()
assert admin is not None
self.ext_mock.get_kernel_cmd.assert_called_once_with(
boot_from='cdrom',
wait_for_external_config='no',
iface='enp0s3')
        self.ext_mock.bootstrap_and_wait.assert_called_once_with()
        self.ext_mock.deploy_wait.assert_called_once_with()
def test_get_active_nodes(self):
assert self.denv.get_active_nodes() == []
self.group.add_node(
name='admin',
            role='fuel_master')
self.patch('devops.models.node.Node.is_active', return_value=True)
nodes = self.denv.get_active_nodes()
assert len(nodes) == 1
assert nodes[0].name == 'admin'
def test_get_admin(self):
with self.assertRaises(error.DevopsError):
self.denv.get_admin()
self.group.add_node(
name='admin',
            role='fuel_master')
node = self.denv.get_admin()
assert node is not None
assert node.name == 'admin'
def test_get_admin_ip(self):
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
ip = self.denv.get_admin_ip()
assert ip == '10.109.0.2'
def test_get_admin_remote(self):
ssh = self.ssh_mock.return_value
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
remote = self.denv.get_admin_remote()
assert remote is ssh
self.ssh_mock.assert_called_once_with(
'10.109.0.2',
auth=ssh_client.SSHAuth(username='root', password='r00tme'))
self.wait_tcp_mock.assert_called_once_with(
host='10.109.0.2', port=22, timeout=180,
timeout_msg='Admin node 10.109.0.2 is not accessible by SSH.')
def test_get_node_ip(self):
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
slave = self.group.add_node(
name='slave-00',
            role='fuel_slave',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
eth0 = slave.get_interface_by_network_name('admin')
eth0.mac_address = '64:52:dc:96:12:cc'
eth0.save()
ip = self.denv.get_node_ip('slave-00')
assert ip == '10.109.0.100'
def test_get_private_keys(self):
ssh = self.ssh_mock.return_value.__enter__.return_value
ssh.open = mock.mock_open()
key = self.paramiko_mock.RSAKey.from_private_key.return_value
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
keys = self.denv.get_private_keys()
assert len(keys) == 2
assert keys == [key, key]
self.ssh_mock.assert_called_once_with(
'10.109.0.2',
auth=ssh_client.SSHAuth(username='root', password='r00tme'))
assert ssh.isfile.call_count == 2
ssh.isfile.assert_any_call('/root/.ssh/id_rsa')
ssh.isfile.assert_any_call('/root/.ssh/bootstrap.rsa')
assert ssh.open.call_count == 2
ssh.open.assert_any_call('/root/.ssh/id_rsa')
ssh.open.assert_any_call('/root/.ssh/bootstrap.rsa')
assert self.paramiko_mock.RSAKey.from_private_key.call_count == 2
self.paramiko_mock.RSAKey.from_private_key.assert_called_with(
ssh.open.return_value)
def test_get_node_remote(self):
ssh = self.ssh_mock.return_value
ssh.open = mock.mock_open()
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
slave = self.group.add_node(
name='slave-00',
            role='fuel_slave',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
eth0 = slave.get_interface_by_network_name('admin')
eth0.mac_address = '64:52:dc:96:12:cc'
eth0.save()
key = self.paramiko_mock.RSAKey.from_private_key.return_value
keys = [key, key]
remote = self.denv.get_node_remote('slave-00')
assert remote is ssh
self.ssh_mock.assert_called_with(
'10.109.0.100',
auth=ssh_client.SSHAuth(
username='root',
password='r00tme',
keys=keys))
self.wait_tcp_mock.assert_called_with(
host='10.109.0.2', port=22, timeout=180,
timeout_msg='Admin node 10.109.0.2 is not accessible by SSH.')
def test_sync_time(self):
ssh = self.ssh_mock.return_value
self.patch('devops.models.node.Node.is_active', return_value=True)
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
slave = self.group.add_node(
name='slave-00',
            role='fuel_slave',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
eth0 = slave.get_interface_by_network_name('admin')
eth0.mac_address = '64:52:dc:96:12:cc'
eth0.save()
t = self.denv.sync_time()
assert t is self.ntpgroup_inst.get_curr_time.return_value
self.ntpgroup_mock.assert_called_once_with()
self.ntpgroup_inst.add_node.assert_has_calls((
mock.call(ssh, 'admin'),
mock.call(ssh, 'slave-00'),
))
assert self.ntpgroup_inst.sync_time.call_count == 3
self.ntpgroup_inst.sync_time.assert_any_call('admin')
self.ntpgroup_inst.sync_time.assert_any_call('pacemaker')
self.ntpgroup_inst.sync_time.assert_any_call('other')
self.ntpgroup_inst.get_curr_time.assert_called_once_with()
self.ntpgroup_inst.__enter__.assert_called_once_with()
self.ntpgroup_inst.__exit__.assert_called_once_with(None, None, None)
def test_get_curr_time(self):
ssh = self.ssh_mock.return_value
self.patch('devops.models.node.Node.is_active', return_value=True)
self.group.add_node(
name='admin',
            role='fuel_master',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
slave = self.group.add_node(
name='slave-00',
            role='fuel_slave',
interfaces=[dict(
label='eth0',
l2_network_device='admin',
interface_model='e1000',
)])
eth0 = slave.get_interface_by_network_name('admin')
eth0.mac_address = '64:52:dc:96:12:cc'
eth0.save()
t = self.denv.get_curr_time(node_names=['admin', 'slave-00'])
assert t is self.ntpgroup_inst.get_curr_time.return_value
self.ntpgroup_mock.assert_called_once_with()
self.ntpgroup_inst.add_node.assert_has_calls((
mock.call(ssh, 'admin'),
mock.call(ssh, 'slave-00'),
))
assert self.ntpgroup_inst.sync_time.call_count == 0
self.ntpgroup_inst.get_curr_time.assert_called_once_with()
self.ntpgroup_inst.__enter__.assert_called_once_with()
self.ntpgroup_inst.__exit__.assert_called_once_with(None, None, None)
def test_get_default_gw(self):
assert self.denv.get_default_gw() == '10.109.0.1'
assert self.denv.get_default_gw('public') == '10.109.1.1'
def test_get_admin_login(self):
assert self.denv.get_admin_login() == 'root'
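As a reading aid, here is what the assertions in test_get_admin_remote imply the environment wrapper does, reconstructed as a stand-alone sketch; the function name and default credentials are assumptions, not the commit's code:

from devops.helpers import helpers
from devops.helpers import ssh_client

def get_admin_remote_sketch(admin_ip, login='root', password='r00tme'):
    # Wait for SSH to come up, then return an authenticated client --
    # mirroring the wait_tcp/SSHClient calls asserted above.
    helpers.wait_tcp(
        host=admin_ip, port=22, timeout=180,
        timeout_msg='Admin node {} is not accessible by SSH.'.format(admin_ip))
    return ssh_client.SSHClient(
        admin_ip, auth=ssh_client.SSHAuth(username=login, password=password))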

View File

@ -0,0 +1,77 @@
# Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.test import TestCase
from keystoneauth1.identity import V2Password
from keystoneauth1.session import Session as KeystoneSession
import mock
from devops.client import nailgun
from devops import error
class TestNailgunClient(TestCase):
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
m = patcher.start()
self.addCleanup(patcher.stop)
return m
def setUp(self):
super(TestNailgunClient, self).setUp()
self.v2pass_mock = self.patch(
'devops.client.nailgun.V2Password', spec=V2Password)
self.v2pass_inst = self.v2pass_mock.return_value
self.ks_session_mock = self.patch(
'devops.client.nailgun.KeystoneSession', spec=KeystoneSession)
self.k2_session_inst = self.ks_session_mock.return_value
self.nodes_mock = self.k2_session_inst.get.return_value
self.nc = nailgun.NailgunClient('10.109.0.2')
def test_get_nodes_json(self):
data = self.nc.get_nodes_json()
assert data is self.nodes_mock.json.return_value
self.v2pass_mock.assert_called_once_with(
auth_url='http://10.109.0.2:5000/v2.0',
password='admin', tenant_name='admin', username='admin')
self.ks_session_mock.assert_called_once_with(
auth=self.v2pass_inst, verify=False)
self.k2_session_inst.get.assert_called_once_with(
'/nodes', endpoint_filter={'service_type': 'fuel'})
def test_get_slave_ip_by_mac(self):
self.nodes_mock.json.return_value = [
{
'ip': '10.109.0.100',
'meta': {
'interfaces': [
{'mac': '64.52.DC.96.12.CC'}
]
}
}
]
ip = self.nc.get_slave_ip_by_mac('64:52:dc:96:12:cc')
assert ip == '10.109.0.100'
ip = self.nc.get_slave_ip_by_mac('64.52.dc.96.12.cc')
assert ip == '10.109.0.100'
ip = self.nc.get_slave_ip_by_mac('6452dc9612cc')
assert ip == '10.109.0.100'
with self.assertRaises(error.DevopsError):
self.nc.get_slave_ip_by_mac('a1a1a1a1a1a1')
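test_get_slave_ip_by_mac above accepts colon-, dot- and bare-hex spellings of the same MAC; one way (not necessarily the commit's) to get that tolerance is to strip everything but hex digits before comparing:

import re

def normalize_mac(mac):
    # '64:52:dc:96:12:cc', '64.52.DC.96.12.CC' and '6452dc9612cc'
    # all collapse to the same 12-digit string.
    return re.sub(r'[^0-9a-f]', '', mac.lower())

assert (normalize_mac('64:52:dc:96:12:cc')
        == normalize_mac('64.52.DC.96.12.CC')
        == '6452dc9612cc')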

View File

@ -20,6 +20,7 @@ import unittest
import mock
from devops import error
from devops.helpers import ntp
from devops.helpers import ssh_client
@ -36,7 +37,7 @@ class NtpTestCase(unittest.TestCase):
self.remote_mock = mock.Mock(spec=ssh_client.SSHClient)
self.remote_mock.__repr__ = mock.Mock(return_value='<SSHClient()>')
self.wait_mock = self.patch('devops.helpers.ntp.wait')
self.wait_mock = self.patch('devops.helpers.helpers.wait')
@staticmethod
def make_exec_result(stdout, exit_code=0):
@ -302,3 +303,145 @@ class TestNtpChronyd(NtpTestCase):
ntp_chrony.wait_peer()
self.remote_mock.check_call.assert_called_once_with(
'chronyc -a waitsync 10 0.01')
class TestGroupNtpSync(NtpTestCase):
    def setUp(self):
        super(TestGroupNtpSync, self).setUp()
self.exec_results = {}
bad_result = self.make_exec_result('', -1)
self.remote_mock.execute.side_effect = \
lambda cmd: self.exec_results.get(cmd, bad_result)
def test_get_ntp_error(self):
with self.assertRaises(error.DevopsError):
ntp.GroupNtpSync.get_ntp(self.remote_mock, 'node1')
def test_get_ntp_pcs(self):
pcs_cmd = "ps -C pacemakerd && crm_resource --resource p_ntp --locate"
self.exec_results[pcs_cmd] = self.make_exec_result('')
pcs_ntp = ntp.GroupNtpSync.get_ntp(self.remote_mock, 'node1')
assert isinstance(pcs_ntp, ntp.NtpPacemaker)
assert pcs_ntp.remote is self.remote_mock
assert pcs_ntp.node_name == 'node1'
def test_get_ntp_systemd(self):
systemd_cmd = "systemctl list-unit-files| grep ntpd"
self.exec_results[systemd_cmd] = self.make_exec_result('')
systemd_ntp = ntp.GroupNtpSync.get_ntp(self.remote_mock, 'node1')
assert isinstance(systemd_ntp, ntp.NtpSystemd)
assert systemd_ntp.remote is self.remote_mock
assert systemd_ntp.node_name == 'node1'
def test_get_ntp_chronyd(self):
chronyd_cmd = "systemctl is-active chronyd"
self.exec_results[chronyd_cmd] = self.make_exec_result('')
chronyd_ntp = ntp.GroupNtpSync.get_ntp(self.remote_mock, 'node1')
assert isinstance(chronyd_ntp, ntp.NtpChronyd)
assert chronyd_ntp.remote is self.remote_mock
assert chronyd_ntp.node_name == 'node1'
def test_get_ntp_initd(self):
initd_cmd = "find /etc/init.d/ -regex '/etc/init.d/ntp.?' -executable"
self.exec_results[initd_cmd] = self.make_exec_result('/etc/init.d/ntp')
initd_ntp = ntp.GroupNtpSync.get_ntp(self.remote_mock, 'node1')
assert isinstance(initd_ntp, ntp.NtpInitscript)
assert initd_ntp.remote is self.remote_mock
assert initd_ntp.node_name == 'node1'
def test_get_curr_time(self):
pcs_cmd = "ps -C pacemakerd && crm_resource --resource p_ntp --locate"
self.exec_results[pcs_cmd] = self.make_exec_result('')
self.exec_results['date'] = self.make_exec_result(
'Fri Jul 22 12:45:42 MSK 2016')
group = ntp.GroupNtpSync()
group.add_node(self.remote_mock, 'node1')
assert len(group.ntp_groups['pacemaker']) == 1
assert group.get_curr_time() == {
'node1': 'Fri Jul 22 12:45:42 MSK 2016'}
def test_add_node(self):
pcs_cmd = "ps -C pacemakerd && crm_resource --resource p_ntp --locate"
self.exec_results[pcs_cmd] = self.make_exec_result('')
self.exec_results['date'] = self.make_exec_result(
'Fri Jul 22 12:45:42 MSK 2016')
group = ntp.GroupNtpSync()
group.add_node(self.remote_mock, 'node1')
assert len(group.ntp_groups['admin']) == 0
assert len(group.ntp_groups['pacemaker']) == 1
assert len(group.ntp_groups['other']) == 0
group.add_node(self.remote_mock, 'admin')
assert len(group.ntp_groups['admin']) == 1
assert len(group.ntp_groups['pacemaker']) == 1
assert len(group.ntp_groups['other']) == 0
chronyd_cmd = "systemctl is-active chronyd"
del self.exec_results[pcs_cmd]
self.exec_results[chronyd_cmd] = self.make_exec_result('')
group.add_node(self.remote_mock, 'node2')
assert len(group.ntp_groups['admin']) == 1
assert len(group.ntp_groups['pacemaker']) == 1
assert len(group.ntp_groups['other']) == 1
assert group.get_curr_time() == {
'admin': 'Fri Jul 22 12:45:42 MSK 2016',
'node1': 'Fri Jul 22 12:45:42 MSK 2016',
'node2': 'Fri Jul 22 12:45:42 MSK 2016'}
@mock.patch('devops.helpers.ntp.GroupNtpSync.get_ntp')
def test_sync_time(self, get_ntp_mock):
spec = mock.create_autospec(spec=ntp.NtpPacemaker, instance=True)
admin_ntp_mock = mock.Mock(spec=spec)
node1_ntp_mock = mock.Mock(spec=spec)
node2_ntp_mock = mock.Mock(spec=spec)
get_ntp_mock.side_effect = (
admin_ntp_mock, node1_ntp_mock, node2_ntp_mock)
group = ntp.GroupNtpSync()
group.sync_time('admin')
group.add_node(self.remote_mock, 'admin')
group.add_node(self.remote_mock, 'node1')
group.add_node(self.remote_mock, 'node2')
assert group.ntp_groups == {
'admin': [admin_ntp_mock],
'pacemaker': [node1_ntp_mock, node2_ntp_mock]
}
group.sync_time('admin')
admin_ntp_mock.assert_has_calls((
mock.call.stop(),
mock.call.set_actual_time(),
mock.call.start(),
mock.call.wait_peer()
), any_order=True)
node1_ntp_mock.stop.assert_not_called()
node1_ntp_mock.set_actual_time.assert_not_called()
node1_ntp_mock.start.assert_not_called()
node1_ntp_mock.wait_peer.assert_not_called()
node2_ntp_mock.stop.assert_not_called()
node2_ntp_mock.set_actual_time.assert_not_called()
node2_ntp_mock.start.assert_not_called()
node2_ntp_mock.wait_peer.assert_not_called()
group.sync_time('pacemaker')
node1_ntp_mock.assert_has_calls((
mock.call.stop(),
mock.call.set_actual_time(),
mock.call.start(),
mock.call.wait_peer()
))
node2_ntp_mock.assert_has_calls([
mock.call.stop(),
mock.call.set_actual_time(),
mock.call.start(),
mock.call.wait_peer()
])
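The get_ntp tests above each enable exactly one probe command, which suggests a detection sequence roughly like the sketch below; the ordering and the exit-code check are assumptions, and the real method may differ:

def detect_ntp_kind(remote):
    # Probe commands copied verbatim from the tests; each one maps to an
    # Ntp* implementation in devops.helpers.ntp.
    probes = (
        ("ps -C pacemakerd && crm_resource --resource p_ntp --locate",
         'NtpPacemaker'),
        ("systemctl list-unit-files| grep ntpd", 'NtpSystemd'),
        ("systemctl is-active chronyd", 'NtpChronyd'),
        ("find /etc/init.d/ -regex '/etc/init.d/ntp.?' -executable",
         'NtpInitscript'),
    )
    for cmd, kind in probes:
        if remote.execute(cmd).exit_code == 0:
            return kind
    raise RuntimeError('no supported NTP service found')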

View File

@ -16,7 +16,7 @@
from django.test import TestCase
from devops.error import DevopsError
from devops.error import DevopsException
from devops.models.base import ParamField
from devops.models.base import ParamMultiField
from devops.models import Driver
@ -120,20 +120,20 @@ class TestParamedModel(TestCase):
def test_unknown_field(self):
with self.assertRaises(TypeError):
MyModel(unknown=0)
with self.assertRaises(DevopsError):
with self.assertRaises(DevopsException):
MyModel(multi=dict(unknown='aaa'))
with self.assertRaises(DevopsError):
with self.assertRaises(DevopsException):
MyModel(multi='aaa')
def test_not_in_choices(self):
with self.assertRaises(DevopsError):
with self.assertRaises(DevopsException):
MyModel(number=0)
with self.assertRaises(DevopsError):
with self.assertRaises(DevopsException):
t = MyModel()
t.number = 0
with self.assertRaises(DevopsError):
with self.assertRaises(DevopsException):
MyModel(multi=dict(sub2='aaa'))
with self.assertRaises(DevopsError):
with self.assertRaises(DevopsException):
t = MyModel()
t.multi.sub2 = 'aaa'

View File

@ -1,6 +1,6 @@
# -*- coding: utf-8 -*-
# Copyright 2015 Mirantis, Inc.
# Copyright 2015 - 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@ -16,77 +16,512 @@
# pylint: disable=no-self-use
import datetime
from datetime import datetime as dt
import unittest
from dateutil import tz
import mock
from netaddr import IPNetwork
from devops.error import DevopsError
from devops import models
from devops import shell
from devops.shell import main
from devops.shell import Shell
class BaseShellTestCase(unittest.TestCase):
class TestMain(unittest.TestCase):
def execute(self, *args):
return shell.main(args)
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
m = patcher.start()
self.addCleanup(patcher.stop)
return m
def setUp(self):
super(TestMain, self).setUp()
self.sys_mock = self.patch('devops.shell.sys')
self.shell_mock = self.patch('devops.shell.Shell')
self.shell_inst = self.shell_mock.return_value
def test_main_sys_args(self):
self.sys_mock.argv = ['dos.py', 'list']
main()
self.shell_mock.assert_called_once_with(['list'])
self.shell_inst.execute.assert_called_once_with()
assert self.sys_mock.exit.called is False
def test_main(self):
main(['show'])
self.shell_mock.assert_called_once_with(['show'])
self.shell_inst.execute.assert_called_once_with()
assert self.sys_mock.exit.called is False
def test_main_devops_error(self):
error = DevopsError('my error')
self.shell_inst.execute.side_effect = error
main(['start'])
self.shell_mock.assert_called_once_with(['start'])
self.shell_inst.execute.assert_called_once_with()
self.sys_mock.exit.assert_called_once_with('Error: my error')
def test_main_exception(self):
error = ValueError('error')
self.shell_inst.execute.side_effect = error
with self.assertRaises(ValueError):
main(['start'])
class TestSnaphotList(BaseShellTestCase):
class TestShell(unittest.TestCase):
@mock.patch('devops.helpers.helpers.tz.tzlocal',
return_value=tz.gettz('Europe/Rome'))
@mock.patch.object(shell.Shell, 'print_table')
@mock.patch.object(models.Environment, 'get')
def test_snapshot_list_order(self, mock_get_env, mock_print, tzlocal_mock):
snaps = []
base_date = datetime.datetime(2015, 12, 1)
for i in range(4):
snap = mock.Mock()
snap.name = "snap_{0}".format(i)
snap.created = base_date - datetime.timedelta(days=i)
snaps.append(snap)
def patch(self, *args, **kwargs):
patcher = mock.patch(*args, **kwargs)
m = patcher.start()
self.addCleanup(patcher.stop)
return m
node = mock.Mock()
node.name = "node"
node.get_snapshots.return_value = snaps
def setUp(self):
super(TestShell, self).setUp()
env = mock_get_env.return_value
env.get_nodes.return_value = [node, node]
self.print_mock = self.patch('devops.shell.print')
self.tzlocal_mock = self.patch(
'devops.helpers.helpers.tz.tzlocal',
return_value=tz.gettz('Europe/Rome'))
self.execute('snapshot-list', 'some-env')
self.client_mock = self.patch('devops.client.DevopsClient',
autospec=True)
self.client_inst = self.client_mock.return_value
mock_print.assert_called_once_with(
columns=[
('snap_3', '2015-11-28 01:00:00', 'node, node'),
('snap_2', '2015-11-29 01:00:00', 'node, node'),
('snap_1', '2015-11-30 01:00:00', 'node, node'),
('snap_0', '2015-12-01 01:00:00', 'node, node')
],
headers=('SNAPSHOT', 'CREATED', 'NODES-NAMES')
def create_snap_mock(name, t):
m = mock.Mock()
m.name = name
m.created = dt(2016, 5, 12, 15, 12, t)
return m
def create_node_mock(name, vnc_port=5005, snapshots=None):
m = mock.Mock(spec=models.Node)
m.name = name
m.group.name = 'rack-01'
m.set_vcpu = mock.Mock(return_value=None)
m.set_memory = mock.Mock(return_value=None)
m.get_vnc_port = mock.Mock(return_value=vnc_port)
m.erase_snapshot = mock.Mock(return_value=None)
snap_mocks = []
if snapshots:
snap_mocks = [
create_snap_mock(s_name, t) for s_name, t in snapshots]
m.get_snapshots.return_value = snap_mocks
return m
self.nodes = {
'env1': {
'admin': create_node_mock('admin', snapshots=[('snap1', 15),
('snap2', 16)]),
'slave-00': create_node_mock('slave-00',
snapshots=[('snap1', 15)]),
'slave-01': create_node_mock('slave-01'),
}
}
def create_ap_mock(name, ip_network):
m = mock.Mock(spec=models.AddressPool)
m.name = name
m.ip_network = IPNetwork(ip_network)
return m
self.aps = {
'env1': [
create_ap_mock('fuelweb_admin-pool01', '109.10.0.0/24'),
create_ap_mock('public-pool01', '109.10.1.0/24'),
create_ap_mock('storage-pool01', '109.10.2.0/24'),
]
}
def create_env_mock(env_name, created, nodes, aps, admin_ip=None):
m = mock.Mock(created=created)
m.name = env_name
m.get_node.side_effect = lambda name: nodes.get(name)
m.get_nodes.side_effect = nodes.values
m.get_address_pools.return_value = aps
m.get_admin.side_effect = lambda: nodes['admin']
m.get_admin_ip.return_value = admin_ip
m.has_admin.side_effect = lambda: bool(admin_ip)
return m
self.env_mocks = {
'env1': create_env_mock(
env_name='env1', created=dt(2016, 5, 12, 15, 12, 10),
nodes=self.nodes['env1'], aps=self.aps['env1'],
admin_ip='109.10.0.2'),
'env2': create_env_mock(
env_name='env2', created=dt(2016, 5, 12, 15, 12, 11),
nodes={}, aps=[], admin_ip='109.10.1.2'),
'env3': create_env_mock(
env_name='env3', created=dt(2016, 5, 12, 15, 12, 12),
nodes={}, aps=[]),
}
self.client_inst.list_env_names.side_effect = self.env_mocks.keys
self.client_inst.get_env.side_effect = self.env_mocks.__getitem__
def test_shell(self):
shell = Shell(['list'])
assert shell.args == ['list']
self.client_mock.assert_called_once_with()
def test_shell_command_not_create(self):
shell = Shell(['show', 'env1'])
assert shell.args == ['show', 'env1']
self.client_inst.get_env.assert_called_once_with('env1')
def test_list(self):
shell = Shell(['list'])
shell.execute()
self.print_mock.assert_called_once_with(
'NAME\n'
'------\n'
'env1\n'
'env2\n'
'env3')
def test_list_ips(self):
shell = Shell(['list', '--ips'])
shell.execute()
self.print_mock.assert_called_once_with(
'NAME ADMIN IP\n'
'------ ----------\n'
'env1 109.10.0.2\n'
'env2 109.10.1.2\n'
'env3')
def test_list_ips_timestamps(self):
shell = Shell(['list', '--ips', '--timestamps'])
shell.execute()
self.print_mock.assert_called_once_with(
'NAME ADMIN IP CREATED\n'
'------ ---------- -------------------\n'
'env1 109.10.0.2 2016-05-12_17:12:10\n'
'env2 109.10.1.2 2016-05-12_17:12:11\n'
'env3 2016-05-12_17:12:12')
def test_list_none(self):
self.env_mocks.clear()
shell = Shell(['list'])
assert self.print_mock.called is False
shell.execute()
assert self.print_mock.called is False
def test_show(self):
shell = Shell(['show', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.print_mock.assert_called_once_with(
' VNC NODE-NAME GROUP-NAME\n'
'----- ----------- ------------\n'
' 5005 admin rack-01\n'
' 5005 slave-00 rack-01\n'
' 5005 slave-01 rack-01')
def test_show_none(self):
shell = Shell(['show', 'env2'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env2')
assert self.print_mock.called is False
def test_erase(self):
shell = Shell(['erase', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].erase.assert_called_once_with()
def test_start(self):
shell = Shell(['start', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].start.assert_called_once_with()
def test_destroy(self):
shell = Shell(['destroy', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].destroy.assert_called_once_with()
def test_suspend(self):
shell = Shell(['suspend', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].suspend.assert_called_once_with()
def test_resume(self):
shell = Shell(['resume', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].resume.assert_called_once_with()
def test_revert(self):
shell = Shell(['revert', 'env1', 'snap1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].revert.assert_called_once_with(
'snap1', flag=False)
def test_snapshot(self):
shell = Shell(['snapshot', 'env1', 'snap1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].snapshot.assert_called_once_with('snap1')
def test_sync(self):
shell = Shell(['sync'])
shell.execute()
self.client_inst.synchronize_all.assert_called_once_with()
def test_snapshot_list(self):
shell = Shell(['snapshot-list', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.print_mock.assert_called_once_with(
'SNAPSHOT CREATED NODES-NAMES\n'
'---------- ------------------- ---------------\n'
'snap1 2016-05-12 17:12:15 admin, slave-00\n'
'snap2 2016-05-12 17:12:16 admin')
def test_snapshot_list_none(self):
shell = Shell(['snapshot-list', 'env2'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env2')
assert self.print_mock.called is False
def test_snapshot_delete(self):
shell = Shell(['snapshot-delete', 'env1', 'snap1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
admin = self.nodes['env1']['admin']
admin.erase_snapshot.assert_called_once_with(name='snap1')
slave = self.nodes['env1']['slave-00']
slave.erase_snapshot.assert_called_once_with(name='snap1')
def test_net_list(self):
shell = Shell(['net-list', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.print_mock.assert_called_once_with(
'NETWORK NAME IP NET\n'
'-------------------- -------------\n'
'fuelweb_admin-pool01 109.10.0.0/24\n'
'public-pool01 109.10.1.0/24\n'
'storage-pool01 109.10.2.0/24')
def test_net_list_none(self):
shell = Shell(['net-list', 'env2'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env2')
assert self.print_mock.called is False
def test_time_sync(self):
self.env_mocks['env1'].get_curr_time.return_value = {
'node1': 'Thu May 12 18:26:34 MSK 2016',
'node2': 'Thu May 12 18:13:44 MSK 2016',
}
self.env_mocks['env1'].sync_time.return_value = {
'node1': 'Thu May 12 19:00:00 MSK 2016',
'node2': 'Thu May 12 19:00:00 MSK 2016',
}
shell = Shell(['time-sync', 'env1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].get_curr_time.assert_called_once_with(None)
self.env_mocks['env1'].sync_time.assert_called_once_with(None)
def test_time_sync_node(self):
self.env_mocks['env1'].get_curr_time.return_value = {
'node1': 'Thu May 12 18:26:34 MSK 2016',
}
self.env_mocks['env1'].sync_time.return_value = {
'node1': 'Thu May 12 19:00:00 MSK 2016',
}
shell = Shell(['time-sync', 'env1', '--node-name', 'node1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].get_curr_time.assert_called_once_with(['node1'])
self.env_mocks['env1'].sync_time.assert_called_once_with(['node1'])
def test_revert_resume(self):
self.env_mocks['env1'].get_curr_time.return_value = {
'node1': 'Thu May 12 18:26:34 MSK 2016',
'node2': 'Thu May 12 18:13:44 MSK 2016',
}
self.env_mocks['env1'].sync_time.return_value = {
'node1': 'Thu May 12 19:00:00 MSK 2016',
'node2': 'Thu May 12 19:00:00 MSK 2016',
}
shell = Shell(['revert-resume', 'env1', 'snap1'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].revert.assert_called_once_with(
'snap1', flag=False)
self.env_mocks['env1'].resume.assert_called_once_with()
self.env_mocks['env1'].get_curr_time.assert_called_once_with(None)
self.env_mocks['env1'].sync_time.assert_called_once_with(None)
def test_version(self):
shell = Shell(['version'])
shell.execute()
assert self.print_mock.called
def test_create(self):
shell = Shell(['create', 'test-env',
'--net-pool', '10.109.0.0/16:24',
'--iso-path', '/tmp/my.iso',
'--admin-vcpu', '4',
'--admin-ram', '2048',
'--admin-disk-size', '80',
'--vcpu', '2',
'--ram', '512',
'--node-count', '5',
'--second-disk-size', '35',
'--third-disk-size', '45',
])
shell.execute()
self.client_inst.create_env.assert_called_once_with(
env_name='test-env',
admin_iso_path='/tmp/my.iso',
admin_vcpu=4,
admin_memory=2048,
admin_sysvolume_capacity=80,
nodes_count=5,
slave_vcpu=2,
slave_memory=512,
second_volume_capacity=35,
third_volume_capacity=45,
net_pool=['10.109.0.0/16', '24'],
)
def test_create_env(self):
shell = Shell(['create-env', 'myenv.yaml'])
shell.execute()
class TestDoSnapshot(BaseShellTestCase):
@mock.patch('devops.models.environment.time.time')
@mock.patch.object(models.Environment, 'get_nodes')
@mock.patch.object(models.Environment, 'get')
@mock.patch.object(models.Environment, 'has_snapshot')
def test_create_snaphot_with_mandatory_snapshot_name(self,
mock_has_snapshot,
mock_get_env,
mock_get_nodes,
mock_time):
mock_has_snapshot.return_value = False
mock_get_env.return_value = models.Environment()
mock_time.return_value = 123456.789
self.client_inst.create_env_from_config.assert_called_once_with(
'myenv.yaml')
nodes = (mock.Mock(), mock.Mock())
mock_get_nodes.return_value = nodes
def test_slave_add(self):
shell = Shell(['slave-add', 'env1',
'--node-count', '5',
'--vcpu', '2',
'--ram', '512',
'--second-disk-size', '35',
'--third-disk-size', '45',
])
shell.execute()
self.execute('snapshot', 'some-env', 'test-snapshot-name')
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].add_slaves.assert_called_once_with(
nodes_count=5,
slave_vcpu=2,
slave_memory=512,
second_volume_capacity=35,
third_volume_capacity=45,
)
for node in nodes:
node.snapshot.assert_called_once_with(
force=mock.ANY, description=mock.ANY,
name="test-snapshot-name", external=False)
def test_slave_remove(self):
shell = Shell(['slave-remove', 'env1', '-N', 'slave-01'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.nodes['env1']['slave-01'].remove.assert_called_once_with()
def test_slave_change(self):
shell = Shell(['slave-change', 'env1',
'-N', 'slave-01',
'--vcpu', '4',
'--ram', '256',
])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.nodes['env1']['slave-01'].set_vcpu.assert_called_once_with(
vcpu=4)
self.nodes['env1']['slave-01'].set_memory.assert_called_once_with(
memory=256)
def test_admin_change(self):
shell = Shell(['admin-change', 'env1',
'--admin-vcpu', '8',
'--admin-ram', '768',
])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.nodes['env1']['admin'].set_vcpu.assert_called_once_with(
vcpu=8)
self.nodes['env1']['admin'].set_memory.assert_called_once_with(
memory=768)
def test_admin_setup(self):
group = mock.Mock(spec=models.Group)
self.env_mocks['env1'].get_groups.return_value = [group]
shell = Shell(['admin-setup', 'env1',
'--boot-from', 'cdrom',
'--iface', 'eth1',
])
shell.execute()
group.start_networks.assert_called_once_with()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].admin_setup.assert_called_once_with(
boot_from='cdrom',
iface='eth1')
def test_node_start(self):
shell = Shell(['node-start', 'env1', '-N', 'slave-01'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.nodes['env1']['slave-01'].start.assert_called_once_with()
def test_node_destroy(self):
shell = Shell(['node-destroy', 'env1', '-N', 'slave-01'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.env_mocks['env1'].get_node.assert_called_once_with(
name='slave-01')
self.nodes['env1']['slave-01'].destroy.assert_called_once_with()
def test_node_reset(self):
shell = Shell(['node-reset', 'env1', '-N', 'slave-01'])
shell.execute()
self.client_inst.get_env.assert_called_once_with('env1')
self.nodes['env1']['slave-01'].reset.assert_called_once_with()
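Finally, the timestamp strings expected by the list and snapshot tests come from converting naive UTC values into the mocked local zone (Europe/Rome, UTC+2 in May: 15:12 becomes 17:12). A small stand-alone sketch of that conversion, assuming the shell's helper behaves equivalently:

from datetime import datetime
from dateutil import tz

def utc_to_local(naive_utc):
    # Treat the stored value as UTC, then render it in the local zone.
    return naive_utc.replace(tzinfo=tz.tzutc()).astimezone(tz.tzlocal())

created = datetime(2016, 5, 12, 15, 12, 15)
print(utc_to_local(created).strftime('%Y-%m-%d %H:%M:%S'))
# -> '2016-05-12 17:12:15' when the local zone is Europe/Rome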