Refactor all CLI-related tests
* Use ssh_manager instead of a context manager and remote * Remove remote as a function parameter; all methods are executed on the master node, so it is not necessary * Changes were applied to the CLI tests, test_cli_base, and the create_backup_reset_restore_and_deploy_via_cli test Change-Id: Ib98243af7919f231818f5896244c8f37c70c54fb Closes-Bug: #1544229
This commit is contained in:
parent
02e0901893
commit
1b2fb68e4c
@ -30,7 +30,6 @@ from fuelweb_test.tests.base_test_case import TestBasic
|
||||
from fuelweb_test.tests.test_ha_one_controller_base\
|
||||
import HAOneControllerNeutronBase
|
||||
from fuelweb_test.tests.test_neutron_tun_base import NeutronTunHaBase
|
||||
from fuelweb_test.helpers.utils import run_on_remote
|
||||
from fuelweb_test.settings import DEPLOYMENT_MODE
|
||||
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
|
||||
from fuelweb_test.settings import OPENSTACK_RELEASE
|
||||
@ -332,42 +331,52 @@ class BackupRestoreHA(NeutronTunHaBase):
|
||||
self.env.d_env.nodes().slaves[slave_id])['id']
|
||||
for slave_id in range(3)]
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
# Create an environment
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
# Create an environment
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
|
||||
# Update network parameters
|
||||
cl.update_cli_network_configuration(cluster_id, remote)
|
||||
# Update network parameters
|
||||
cl.update_cli_network_configuration(cluster_id)
|
||||
|
||||
# Update SSL configuration
|
||||
cl.update_ssl_configuration(cluster_id, remote)
|
||||
# Update SSL configuration
|
||||
cl.update_ssl_configuration(cluster_id)
|
||||
|
||||
roles = {'controller': node_ids[0],
|
||||
'compute': node_ids[1],
|
||||
'ceph-osd': node_ids[2]}
|
||||
roles = {'controller': node_ids[0],
|
||||
'compute': node_ids[1],
|
||||
'ceph-osd': node_ids[2]}
|
||||
|
||||
for role in roles:
|
||||
cmd = ('fuel --env-id={0} node set --node {1} --role={2}'
|
||||
.format(cluster_id,
|
||||
roles[role],
|
||||
role))
|
||||
remote.execute(cmd)
|
||||
cmd = (
|
||||
'fuel --env-id={0} node --provision --node={1} --json'.format(
|
||||
cluster_id, ','.join(str(l) for l in node_ids))
|
||||
for role in roles:
|
||||
cmd = ('fuel --env-id={0} node set --node {1} --role={2}'
|
||||
.format(cluster_id,
|
||||
roles[role],
|
||||
role))
|
||||
self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
logger.info("Started provisioning via CLI")
|
||||
task = run_on_remote(remote, cmd, jsonify=True)
|
||||
cl.assert_cli_task_success(task, remote, timeout=30 * 60)
|
||||
logger.info("Finished provisioning via CLI")
|
||||
cmd = (
|
||||
'fuel --env-id={0} node --provision --node={1} --json'.format(
|
||||
cluster_id, ','.join(str(l) for l in node_ids))
|
||||
)
|
||||
logger.info("Started provisioning via CLI")
|
||||
task = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cl.assert_cli_task_success(task, timeout=30 * 60)
|
||||
logger.info("Finished provisioning via CLI")
|
||||
|
||||
|
||||
@test(groups=["backup_reinstall_restore"])
|
||||
|
@ -18,7 +18,6 @@ from proboscis.asserts import assert_equal
|
||||
|
||||
from devops.error import TimeoutError
|
||||
from devops.helpers.helpers import wait
|
||||
from fuelweb_test.helpers.utils import run_on_remote
|
||||
from fuelweb_test.helpers.ssl import change_cluster_ssl_config
|
||||
from fuelweb_test.tests.base_test_case import TestBasic
|
||||
from fuelweb_test import logwrap
|
||||
@ -31,43 +30,60 @@ class CommandLine(TestBasic):
|
||||
"""CommandLine.""" # TODO documentation
|
||||
|
||||
@logwrap
|
||||
def get_task(self, remote, task_id):
|
||||
tasks = run_on_remote(remote, 'fuel task --task-id {0} --json'
|
||||
.format(task_id), jsonify=True)
|
||||
def get_task(self, task_id):
|
||||
tasks = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd='fuel task --task-id {0} --json'.format(task_id),
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
return tasks[0]
|
||||
|
||||
@logwrap
|
||||
def get_network_filename(self, cluster_id, remote):
|
||||
def get_network_filename(self, cluster_id):
|
||||
cmd = ('fuel --env {0} network --download --dir /tmp --json'
|
||||
.format(cluster_id))
|
||||
net_download = ''.join(run_on_remote(remote, cmd))
|
||||
out = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)['stdout']
|
||||
net_download = ''.join(out)
|
||||
# net_download = 'Network ... downloaded to /tmp/network_1.json'
|
||||
return net_download.split()[-1]
|
||||
|
||||
@logwrap
|
||||
def get_networks(self, cluster_id, remote):
|
||||
net_file = self.get_network_filename(cluster_id, remote)
|
||||
return run_on_remote(remote, 'cat {0}'.format(net_file), jsonify=True)
|
||||
def get_networks(self, cluster_id):
|
||||
net_file = self.get_network_filename(cluster_id)
|
||||
out = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd='cat {0}'.format(net_file),
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
return out
|
||||
|
||||
@logwrap
|
||||
def update_network(self, cluster_id, remote, net_config):
|
||||
net_file = self.get_network_filename(cluster_id, remote)
|
||||
def update_network(self, cluster_id, net_config):
|
||||
net_file = self.get_network_filename(cluster_id)
|
||||
data = json.dumps(net_config)
|
||||
cmd = 'echo {data} > {net_file}'.format(data=json.dumps(data),
|
||||
net_file=net_file)
|
||||
run_on_remote(remote, cmd)
|
||||
self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
cmd = ('cd /tmp; fuel --env {0} network --upload --json'
|
||||
.format(cluster_id))
|
||||
run_on_remote(remote, cmd)
|
||||
self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
|
||||
def assert_cli_task_success(
|
||||
self, task, remote, timeout=70 * 60, interval=20):
|
||||
def assert_cli_task_success(self, task, timeout=70 * 60, interval=20):
|
||||
logger.info('Wait {timeout} seconds for task: {task}'
|
||||
.format(timeout=timeout, task=task))
|
||||
start = time.time()
|
||||
try:
|
||||
wait(
|
||||
lambda: (self.get_task(remote, task['id'])['status'] not in
|
||||
lambda: (self.get_task(task['id'])['status'] not in
|
||||
('pending', 'running')),
|
||||
interval=interval,
|
||||
timeout=timeout
|
||||
@ -77,7 +93,7 @@ class CommandLine(TestBasic):
|
||||
"Waiting timeout {timeout} sec was reached for task: {task}"
|
||||
.format(task=task["name"], timeout=timeout))
|
||||
took = time.time() - start
|
||||
task = self.get_task(remote, task['id'])
|
||||
task = self.get_task(task['id'])
|
||||
logger.info('Task finished in {took} seconds with the result: {task}'
|
||||
.format(took=took, task=task))
|
||||
assert_equal(
|
||||
@ -110,7 +126,7 @@ class CommandLine(TestBasic):
|
||||
return floating_ranges_json
|
||||
|
||||
@logwrap
|
||||
def get_floating_ranges(self, cluster_id, remote):
|
||||
def get_floating_ranges(self, cluster_id):
|
||||
"""
|
||||
|
||||
This method using for get floating ranges from master node before
|
||||
@ -120,101 +136,115 @@ class CommandLine(TestBasic):
|
||||
3. Save floating ranges from master node
|
||||
|
||||
"""
|
||||
net_config = self.get_networks(cluster_id, remote)
|
||||
net_config = self.get_networks(cluster_id)
|
||||
floating_ranges =\
|
||||
net_config[u'networking_parameters'][u'floating_ranges']
|
||||
return floating_ranges
|
||||
|
||||
@logwrap
|
||||
def change_floating_ranges(self, cluster_id, remote, floating_range):
|
||||
net_config = self.get_networks(cluster_id, remote)
|
||||
def change_floating_ranges(self, cluster_id, floating_range):
|
||||
net_config = self.get_networks(cluster_id)
|
||||
net_config[u'networking_parameters'][u'floating_ranges'] = \
|
||||
floating_range
|
||||
new_settings = net_config
|
||||
self.update_network(cluster_id, remote, new_settings)
|
||||
self.update_network(cluster_id, new_settings)
|
||||
|
||||
@logwrap
|
||||
def update_cli_network_configuration(self, cluster_id, remote):
|
||||
def update_cli_network_configuration(self, cluster_id):
|
||||
"""Update cluster network settings with custom configuration.
|
||||
Place here an additional config changes if needed (e.g. nodegroups'
|
||||
networking configuration.
|
||||
Also this method checks downloading/uploading networks via cli.
|
||||
"""
|
||||
net_config = self.get_networks(cluster_id, remote)
|
||||
net_config = self.get_networks(cluster_id)
|
||||
new_settings = net_config
|
||||
self.update_network(cluster_id, remote, new_settings)
|
||||
self.update_network(cluster_id, new_settings)
|
||||
|
||||
def get_public_vip(self, cluster_id, remote):
|
||||
networks = self.get_networks(cluster_id, remote)
|
||||
def get_public_vip(self, cluster_id):
|
||||
networks = self.get_networks(cluster_id)
|
||||
return networks['public_vip']
|
||||
|
||||
def download_settings(self, cluster_id, remote):
|
||||
def download_settings(self, cluster_id):
|
||||
cmd = ('fuel --env {0} settings --download --dir /tmp --json'.format(
|
||||
cluster_id))
|
||||
run_on_remote(remote, cmd)
|
||||
return run_on_remote(remote,
|
||||
'cd /tmp && cat settings_{0}.json'.format(
|
||||
cluster_id), jsonify=True)
|
||||
self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
out = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd='cd /tmp && cat settings_{0}.json'.format(cluster_id),
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
return out
|
||||
|
||||
def upload_settings(self, cluster_id, remote, settings):
|
||||
def upload_settings(self, cluster_id, settings):
|
||||
data = json.dumps(settings)
|
||||
cmd = 'cd /tmp && echo {data} > settings_{id}.json'.format(
|
||||
data=json.dumps(data),
|
||||
id=cluster_id)
|
||||
run_on_remote(remote, cmd)
|
||||
self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
cmd = ('fuel --env {0} settings --upload --dir /tmp --json'.format(
|
||||
cluster_id))
|
||||
run_on_remote(remote, cmd)
|
||||
self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
|
||||
@logwrap
|
||||
def update_ssl_configuration(self, cluster_id, remote):
|
||||
settings = self.download_settings(cluster_id, remote)
|
||||
def update_ssl_configuration(self, cluster_id):
|
||||
settings = self.download_settings(cluster_id)
|
||||
change_cluster_ssl_config(settings, SSL_CN)
|
||||
self.upload_settings(cluster_id, remote, settings)
|
||||
self.upload_settings(cluster_id, settings)
|
||||
|
||||
def add_nodes_to_cluster(
|
||||
self, remote, cluster_id, node_ids, roles):
|
||||
def add_nodes_to_cluster(self, cluster_id, node_ids, roles):
|
||||
if isinstance(node_ids, int):
|
||||
node_ids_str = str(node_ids)
|
||||
else:
|
||||
node_ids_str = ','.join(str(n) for n in node_ids)
|
||||
cmd = ('fuel --env-id={0} node set --node {1} --role={2}'.format(
|
||||
cluster_id, node_ids_str, ','.join(roles)))
|
||||
run_on_remote(remote, cmd)
|
||||
self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd
|
||||
)
|
||||
|
||||
@logwrap
|
||||
def use_ceph_for_volumes(self, cluster_id, remote):
|
||||
settings = self.download_settings(cluster_id, remote)
|
||||
def use_ceph_for_volumes(self, cluster_id):
|
||||
settings = self.download_settings(cluster_id)
|
||||
settings['editable']['storage']['volumes_lvm'][
|
||||
'value'] = False
|
||||
settings['editable']['storage']['volumes_ceph'][
|
||||
'value'] = True
|
||||
self.upload_settings(cluster_id, remote, settings)
|
||||
self.upload_settings(cluster_id, settings)
|
||||
|
||||
@logwrap
|
||||
def use_ceph_for_images(self, cluster_id, remote):
|
||||
settings = self.download_settings(cluster_id, remote)
|
||||
def use_ceph_for_images(self, cluster_id):
|
||||
settings = self.download_settings(cluster_id)
|
||||
settings['editable']['storage']['images_ceph'][
|
||||
'value'] = True
|
||||
self.upload_settings(cluster_id, remote, settings)
|
||||
self.upload_settings(cluster_id, settings)
|
||||
|
||||
@logwrap
|
||||
def use_ceph_for_ephemeral(self, cluster_id, remote):
|
||||
settings = self.download_settings(cluster_id, remote)
|
||||
def use_ceph_for_ephemeral(self, cluster_id):
|
||||
settings = self.download_settings(cluster_id)
|
||||
settings['editable']['storage']['ephemeral_ceph'][
|
||||
'value'] = True
|
||||
self.upload_settings(cluster_id, remote, settings)
|
||||
self.upload_settings(cluster_id, settings)
|
||||
|
||||
@logwrap
|
||||
def change_osd_pool_size(self, cluster_id, remote, replication_factor):
|
||||
settings = self.download_settings(cluster_id, remote)
|
||||
def change_osd_pool_size(self, cluster_id, replication_factor):
|
||||
settings = self.download_settings(cluster_id)
|
||||
settings['editable']['storage']['osd_pool_size'][
|
||||
'value'] = replication_factor
|
||||
self.upload_settings(cluster_id, remote, settings)
|
||||
self.upload_settings(cluster_id, settings)
|
||||
|
||||
@logwrap
|
||||
def use_radosgw_for_objects(self, cluster_id, remote):
|
||||
settings = self.download_settings(cluster_id, remote)
|
||||
def use_radosgw_for_objects(self, cluster_id):
|
||||
settings = self.download_settings(cluster_id)
|
||||
ceph_for_images = settings['editable']['storage']['images_ceph'][
|
||||
'value']
|
||||
if ceph_for_images:
|
||||
@ -225,4 +255,4 @@ class CommandLine(TestBasic):
|
||||
'value'] = True
|
||||
settings['editable']['storage']['objects_ceph'][
|
||||
'value'] = True
|
||||
self.upload_settings(cluster_id, remote, settings)
|
||||
self.upload_settings(cluster_id, settings)
|
||||
|
@ -15,7 +15,6 @@
|
||||
from proboscis import test
|
||||
|
||||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
from fuelweb_test.helpers.utils import run_on_remote
|
||||
from fuelweb_test.settings import OPENSTACK_RELEASE
|
||||
from fuelweb_test.tests.base_test_case import SetupEnvironment
|
||||
from fuelweb_test.tests import test_cli_base
|
||||
@ -50,40 +49,46 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
|
||||
release_id = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=OPENSTACK_RELEASE)[0]
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
self.show_step(1, initialize=True)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'--nst=tun --json'.format(self.__class__.__name__,
|
||||
release_id))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(1, initialize=True)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'--nst=tun --json'.format(self.__class__.__name__,
|
||||
release_id))
|
||||
|
||||
self.update_cli_network_configuration(cluster_id, remote)
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
|
||||
self.update_ssl_configuration(cluster_id, remote)
|
||||
self.show_step(2)
|
||||
self.show_step(3)
|
||||
self.show_step(4)
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[0],
|
||||
['controller'])
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[1],
|
||||
['compute'])
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[2],
|
||||
['cinder'])
|
||||
self.update_cli_network_configuration(cluster_id)
|
||||
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.show_step(5)
|
||||
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
|
||||
task = run_on_remote(remote, cmd, jsonify=True)
|
||||
self.assert_cli_task_success(task, remote, timeout=130 * 60)
|
||||
self.update_ssl_configuration(cluster_id)
|
||||
self.show_step(2)
|
||||
self.show_step(3)
|
||||
self.show_step(4)
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[0], ['controller'])
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[1], ['compute'])
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[2], ['cinder'])
|
||||
|
||||
self.show_step(6)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.show_step(5)
|
||||
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
|
||||
|
||||
self.show_step(7)
|
||||
self.fuel_web.run_ostf(
|
||||
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
|
||||
should_fail=1)
|
||||
task = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
self.assert_cli_task_success(task, timeout=130 * 60)
|
||||
|
||||
self.show_step(6)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
|
||||
self.show_step(7)
|
||||
self.fuel_web.run_ostf(
|
||||
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
|
||||
should_fail=1)
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
|
||||
groups=["cli_deploy_tasks"])
|
||||
@ -110,33 +115,48 @@ class CommandLineAcceptanceDeploymentTests(test_cli_base.CommandLine):
|
||||
release_id = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=OPENSTACK_RELEASE)[0]
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
self.show_step(1)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'--nst=vlan --json'.format(self.__class__.__name__,
|
||||
release_id))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(2)
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[0:3],
|
||||
['controller'])
|
||||
self.show_step(3)
|
||||
cmd = ('fuel node --node-id {0} --provision --env {1} --json'.
|
||||
format(','.join(str(n) for n in node_ids), cluster_id))
|
||||
task = run_on_remote(remote, cmd, jsonify=True)
|
||||
self.assert_cli_task_success(task, remote, timeout=20 * 60)
|
||||
self.show_step(4)
|
||||
cmd = ('fuel node --node {0} --end netconfig --env {1} --json'.
|
||||
format(node_ids[1], release_id))
|
||||
task = run_on_remote(remote, cmd, jsonify=True)
|
||||
self.assert_cli_task_success(task, remote, timeout=30 * 60)
|
||||
self.show_step(5)
|
||||
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
|
||||
task = run_on_remote(remote, cmd, jsonify=True)
|
||||
self.assert_cli_task_success(task, remote, timeout=130 * 60)
|
||||
self.show_step(6)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.show_step(7)
|
||||
self.fuel_web.run_ostf(
|
||||
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
|
||||
should_fail=1)
|
||||
self.show_step(1)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'--nst=vlan --json'.format(self.__class__.__name__,
|
||||
release_id))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(2)
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[0:3],
|
||||
['controller'])
|
||||
self.show_step(3)
|
||||
cmd = ('fuel node --node-id {0} --provision --env {1} --json'.
|
||||
format(','.join(str(n) for n in node_ids), cluster_id))
|
||||
task = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
self.assert_cli_task_success(task, timeout=20 * 60)
|
||||
self.show_step(4)
|
||||
cmd = ('fuel node --node {0} --end netconfig --env {1} --json'.
|
||||
format(node_ids[1], release_id))
|
||||
task = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
self.assert_cli_task_success(task, timeout=30 * 60)
|
||||
self.show_step(5)
|
||||
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
|
||||
task = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
self.assert_cli_task_success(task, timeout=130 * 60)
|
||||
self.show_step(6)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.show_step(7)
|
||||
self.fuel_web.run_ostf(
|
||||
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'],
|
||||
should_fail=1)
|
||||
|
@ -15,7 +15,6 @@
|
||||
from proboscis import test
|
||||
|
||||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
from fuelweb_test.helpers.utils import run_on_remote
|
||||
from fuelweb_test.settings import OPENSTACK_RELEASE
|
||||
from fuelweb_test.tests.base_test_case import SetupEnvironment
|
||||
from fuelweb_test.tests import test_cli_base
|
||||
@ -53,43 +52,51 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
|
||||
release_id = self.fuel_web.get_releases_list_for_os(
|
||||
release_name=OPENSTACK_RELEASE)[0]
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
self.show_step(1, initialize=True)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'--nst=tun --json'.format(self.__class__.__name__,
|
||||
release_id))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(1, initialize=True)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'--nst=tun --json'.format(self.__class__.__name__,
|
||||
release_id))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
|
||||
self.update_cli_network_configuration(cluster_id, remote)
|
||||
self.update_cli_network_configuration(cluster_id)
|
||||
|
||||
self.update_ssl_configuration(cluster_id, remote)
|
||||
self.update_ssl_configuration(cluster_id)
|
||||
|
||||
self.use_ceph_for_volumes(cluster_id, remote)
|
||||
self.use_ceph_for_images(cluster_id, remote)
|
||||
self.change_osd_pool_size(cluster_id, remote, '2')
|
||||
self.use_ceph_for_volumes(cluster_id)
|
||||
self.use_ceph_for_images(cluster_id)
|
||||
self.change_osd_pool_size(cluster_id, '2')
|
||||
|
||||
self.show_step(2)
|
||||
self.show_step(3)
|
||||
self.show_step(4)
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[0:3],
|
||||
['controller'])
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[3:5],
|
||||
['compute'])
|
||||
self.add_nodes_to_cluster(remote, cluster_id, node_ids[5:7],
|
||||
['ceph-osd'])
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.show_step(5)
|
||||
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
|
||||
task = run_on_remote(remote, cmd, jsonify=True)
|
||||
self.assert_cli_task_success(task, remote, timeout=130 * 60)
|
||||
self.show_step(2)
|
||||
self.show_step(3)
|
||||
self.show_step(4)
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[0:3],
|
||||
['controller'])
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[3:5],
|
||||
['compute'])
|
||||
self.add_nodes_to_cluster(cluster_id, node_ids[5:7],
|
||||
['ceph-osd'])
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
self.show_step(5)
|
||||
cmd = 'fuel --env-id={0} deploy-changes --json'.format(cluster_id)
|
||||
|
||||
self.show_step(6)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
task = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
self.assert_cli_task_success(task, timeout=130 * 60)
|
||||
|
||||
self.show_step(7)
|
||||
self.fuel_web.run_ostf(
|
||||
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
|
||||
self.show_step(6)
|
||||
self.fuel_web.verify_network(cluster_id)
|
||||
|
||||
self.show_step(7)
|
||||
self.fuel_web.run_ostf(
|
||||
cluster_id=cluster_id, test_sets=['ha', 'smoke', 'sanity'])
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_9],
|
||||
groups=["cli_deploy_ceph_neutron_vlan"])
|
||||
@ -132,9 +139,8 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
|
||||
)['stdout_json']
|
||||
|
||||
self.show_step(3)
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
self.use_ceph_for_volumes(cluster['id'], remote)
|
||||
self.use_ceph_for_images(cluster['id'], remote)
|
||||
self.use_ceph_for_volumes(cluster['id'])
|
||||
self.use_ceph_for_images(cluster['id'])
|
||||
|
||||
nodes = {
|
||||
'controller': node_ids[0:3],
|
||||
@ -162,8 +168,7 @@ class CommandLineAcceptanceCephDeploymentTests(test_cli_base.CommandLine):
|
||||
'deploy-changes --json'.format(cluster['id']),
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
self.assert_cli_task_success(task, remote, timeout=130 * 60)
|
||||
self.assert_cli_task_success(task, timeout=130 * 60)
|
||||
|
||||
self.show_step(7)
|
||||
self.fuel_web.verify_network(cluster['id'])
|
||||
|
@ -17,7 +17,6 @@ from proboscis import test
|
||||
from proboscis.asserts import assert_equal, assert_true
|
||||
|
||||
from fuelweb_test.helpers.decorators import log_snapshot_after_test
|
||||
from fuelweb_test.helpers.utils import run_on_remote
|
||||
from fuelweb_test.settings import NEUTRON_SEGMENT_TYPE
|
||||
from fuelweb_test.settings import OPENSTACK_RELEASE
|
||||
from fuelweb_test.tests.base_test_case import SetupEnvironment
|
||||
@ -69,27 +68,32 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
|
||||
cmd='fuel role --rel {} --update --file'
|
||||
' /tmp/controller.yaml'.format(release_id))
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(5)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(6)
|
||||
cmd = ('fuel --env-id={0} node set --node {1} --role=controller,'
|
||||
'compute'.format(cluster_id, node_ids[0]))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign controller and compute node"
|
||||
" to node id {}".format(node_ids[0]))
|
||||
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(5)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(6)
|
||||
cmd = ('fuel --env-id={0} node set --node {1} --role=controller,'
|
||||
'compute'.format(cluster_id, node_ids[0]))
|
||||
result = remote.execute(cmd)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign controller and compute node"
|
||||
" to node id {}".format(node_ids[0]))
|
||||
|
||||
self.env.make_snapshot("cli_update_role")
|
||||
self.env.make_snapshot("cli_update_role")
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
|
||||
groups=["cli_create_role"])
|
||||
@ -127,34 +131,42 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
|
||||
cmd='fuel role --rel {} --create --file'
|
||||
' /tmp/create_role.yaml'.format(release_id))
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(4)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(5)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=test-role'.format(cluster_id, node_ids[0]))
|
||||
result = remote.execute(cmd)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign controller and compute node"
|
||||
" to node id {}".format(node_ids[0]))
|
||||
self.show_step(6)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=test-role,controller,'
|
||||
'compute'.format(cluster_id, node_ids[1]))
|
||||
result = remote.execute(cmd)
|
||||
assert_equal(result['exit_code'], 1,
|
||||
"We shouldn't be able to assign controller and"
|
||||
" compute node to node id {}".format(node_ids[1]))
|
||||
self.env.make_snapshot("cli_create_role")
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(4)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(5)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=test-role'.format(cluster_id, node_ids[0]))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign controller and compute node"
|
||||
" to node id {}".format(node_ids[0]))
|
||||
self.show_step(6)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=test-role,controller,'
|
||||
'compute'.format(cluster_id, node_ids[1]))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
assert_equal(result['exit_code'], 1,
|
||||
"We shouldn't be able to assign controller and"
|
||||
" compute node to node id {}".format(node_ids[1]))
|
||||
self.env.make_snapshot("cli_create_role")
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
|
||||
groups=["cli_create_role_with_has_primary"])
|
||||
@ -191,27 +203,32 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
|
||||
cmd='fuel role --rel {} --create --file'
|
||||
' /tmp/create_primary_role.yaml'.format(release_id))
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(4)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(5)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=test-primary-role'.format(cluster_id,
|
||||
node_ids[0]))
|
||||
result = remote.execute(cmd)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign new role"
|
||||
" to node id {}".format(node_ids[0]))
|
||||
self.env.make_snapshot("cli_create_role_with_has_primary")
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(4)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(5)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=test-primary-role'.format(cluster_id,
|
||||
node_ids[0]))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign new role"
|
||||
" to node id {}".format(node_ids[0]))
|
||||
self.env.make_snapshot("cli_create_role_with_has_primary")
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
|
||||
groups=["cli_delete_role"])
|
||||
@ -258,46 +275,58 @@ class CommandLineRoleTests(test_cli_base.CommandLine):
|
||||
assert_true('test-role' in roles,
|
||||
"role is not in the list {}".format(roles))
|
||||
|
||||
with self.env.d_env.get_admin_remote() as remote:
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(5)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = run_on_remote(remote, cmd, jsonify=True)
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(6)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=controller'.format(cluster_id, node_ids[0]))
|
||||
result = remote.execute(cmd)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign controller and"
|
||||
" compute node to node id {}".format(node_ids[0]))
|
||||
if NEUTRON_SEGMENT_TYPE:
|
||||
nst = '--nst={0}'.format(NEUTRON_SEGMENT_TYPE)
|
||||
else:
|
||||
nst = ''
|
||||
self.show_step(5)
|
||||
cmd = ('fuel env create --name={0} --release={1} '
|
||||
'{2} --json'.format(self.__class__.__name__,
|
||||
release_id, nst))
|
||||
env_result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
jsonify=True
|
||||
)['stdout_json']
|
||||
cluster_id = env_result['id']
|
||||
self.show_step(6)
|
||||
cmd = ('fuel --env-id={0} node set --node {1}'
|
||||
' --role=controller'.format(cluster_id, node_ids[0]))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't assign controller and"
|
||||
" compute node to node id {}".format(node_ids[0]))
|
||||
|
||||
self.show_step(7)
|
||||
cmd = ('fuel role --rel {} --delete'
|
||||
' --role test-role'.format(release_id))
|
||||
result = remote.execute(cmd)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't delete role, result is {}".format(result))
|
||||
self.show_step(7)
|
||||
cmd = ('fuel role --rel {} --delete'
|
||||
' --role test-role'.format(release_id))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
assert_equal(result['exit_code'], 0,
|
||||
"Can't delete role, result is {}".format(result))
|
||||
|
||||
result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd='fuel role --rel {}'.format(release_id))['stdout']
|
||||
roles = [i.strip() for i in result]
|
||||
assert_true('test-role' not in roles,
|
||||
"role is not in the list {}".format(roles))
|
||||
cmd = ('fuel role --rel {} --delete'
|
||||
' --role controller'.format(release_id))
|
||||
result = remote.execute(cmd)
|
||||
self.show_step(8)
|
||||
assert_equal(result['exit_code'], 1,
|
||||
"Controller role shouldn't be able to be deleted")
|
||||
result = self.ssh_manager.execute_on_remote(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd='fuel role --rel {}'.format(release_id))['stdout']
|
||||
roles = [i.strip() for i in result]
|
||||
assert_true('test-role' not in roles,
|
||||
"role is not in the list {}".format(roles))
|
||||
cmd = ('fuel role --rel {} --delete'
|
||||
' --role controller'.format(release_id))
|
||||
result = self.ssh_manager.execute(
|
||||
ip=self.ssh_manager.admin_ip,
|
||||
cmd=cmd,
|
||||
)
|
||||
self.show_step(8)
|
||||
assert_equal(result['exit_code'], 1,
|
||||
"Controller role shouldn't be able to be deleted")
|
||||
|
||||
self.env.make_snapshot("cli_delete_role")
|
||||
self.env.make_snapshot("cli_delete_role")
|
||||
|
||||
@test(depends_on=[SetupEnvironment.prepare_slaves_3],
|
||||
groups=["cli_incorrect_update_role"])
|
||||
|
Loading…
Reference in New Issue
Block a user