Implemented a method that gets the image id from the image_name set in the config. Renamed image_ref to image_name everywhere.

Added a controller SSH key path parameter to the config, registered it in the config module, and added its usage in tests.
This commit is contained in:
anaboikina 2013-07-05 19:43:19 +03:00
parent c0917ce4a7
commit 26891f0e37
9 changed files with 214 additions and 33 deletions
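The lookup helper itself (referenced below as nmanager.get_image_from_name()) is not visible in the hunks on this page. A minimal sketch of what such a helper might look like, assuming a novaclient-style images manager; the explicit client and image_name parameters are illustrative, since the call site below passes no arguments and presumably reads both from config:

# Illustrative sketch only -- not the code from this commit.
def get_image_from_name(client, image_name):
    """Return the id of the first image whose name matches image_name."""
    for image in client.images.list():
        if image.name == image_name:
            return image.id
    raise ValueError("No image named %r found" % image_name)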

View File

@@ -38,8 +38,9 @@ class Client(object):
        self.username = username
        self.password = password
        if isinstance(pkey, basestring):
            pkey = paramiko.RSAKey.from_private_key(
                cStringIO.StringIO(str(pkey)))
            if pkey != "":
                pkey = paramiko.RSAKey.from_private_key(
                    cStringIO.StringIO(str(pkey)))
        self.pkey = pkey
        self.look_for_keys = look_for_keys
        self.key_filename = key_filename
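The added guard means an empty controller_node_ssh_key_path from the config no longer reaches paramiko's key parser; an empty pkey now falls through to password authentication. A hedged usage sketch, assuming the Client class above is importable as SSHClient (as in the sanity tests below); host and credential values are placeholders. Note that pkey expects key material, while key_filename takes a path:

from fuel_health.common.ssh import Client as SSHClient  # assumed import path

# Empty key from config: RSAKey parsing is skipped, password auth is used.
client = SSHClient('10.0.0.1', 'root', 'r00tme', pkey='')
# Key material: parsed into an RSAKey as before.
key_contents = open('/root/.ssh/id_rsa').read()  # placeholder path
client = SSHClient('10.0.0.1', 'root', 'r00tme', pkey=key_contents)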

View File

@@ -62,11 +62,6 @@ def build_url(host, port, api_version=None, path=None,
    return url

def parse_image_id(image_ref):
    """Return the image id from a given image ref."""
    return image_ref.rsplit('/')[-1]

def arbitrary_string(size=4, base_text=None):
    """
    Return size characters from base_text, repeating the base_text infinitely

View File

@@ -196,7 +196,10 @@ ComputeGroup = [
    cfg.StrOpt('controller_node_ssh_password',
               default='pass',
               help="ssh user pass of one of the controller nodes"),
    cfg.StrOpt('image_ref',
    cfg.StrOpt('controller_node_ssh_key_path',
               default='',
               help="path to ssh key"),
    cfg.StrOpt('image_name',
               default="{$IMAGE_ID}",
               help="Valid secondary image reference to be used in tests."),
    cfg.StrOpt('image_ref_alt',
@@ -381,7 +384,7 @@ OrchestrationGroup = [
               default='m1.micro',
               help="Instance type for tests. Needs to be big enough for a "
                    "full OS plus the test workload"),
    cfg.StrOpt('image_ref',
    cfg.StrOpt('image_name',
               default=None,
               help="Name of heat-cfntools enabled image to use when "
                    "launching test instances."),

View File

@@ -2,7 +2,7 @@
# This section contains configuration options that a variety of
# test clients use when authenticating with different user/tenant
# combinations
url = http://172.18.164.38/
url = http://172.18.164.70/
# The type of endpoint for an Identity service. Unless you have a
# custom Keystone service catalog implementation, you probably want to leave
# this value as "identity"
@@ -11,7 +11,7 @@ catalog_type = identity
# environments that have self-signed SSL certs.
disable_ssl_certificate_validation = False
# URL for where to find the OpenStack Identity API endpoint (Keystone)
uri = http://172.18.164.38:5000/v2.0/
uri = http://172.18.164.70:5000/v2.0/
# URL for where to find the OpenStack V3 Identity API endpoint (Keystone)
#uri_v3 = http://127.0.0.1:5000/v3/
# Should typically be left as keystone unless you have a non-Keystone
@@ -47,7 +47,7 @@ admin_tenant_name = admin
# against the OpenStack Compute API.
#One of the controller nodes
controller_node = 10.0.0.101
controller_node = 10.30.1.101
controller_node_name = fuel-controller-01.localdomain.
#Controller node user who is able to connect via ssh
@@ -55,9 +55,10 @@ controller_node_ssh_user = root
#Controller node ssh user's password
controller_node_ssh_password = r00tme
controller_node_ssh_key_path =
#The list of the services that should be enabled
enabled_services=nova-cert, nova-consoleauth, nova-scheduler, nova-conductor, nova-compute, nova-compute
enabled_services=nova-cert, nova-consoleauth, nova-scheduler, nova-conductor, nova-cert, nova-consoleauth, nova-scheduler, nova-conductor, nova-cert, nova-consoleauth, nova-scheduler, nova-conductor, nova-compute
# Allows test cases to create/destroy tenants and users. This option
# enables isolated test cases and better parallel execution,
@@ -73,8 +74,8 @@ allow_tenant_reuse = true
# Reference data for tests. The ref and ref_alt should be
# distinct images/flavors.
image_ref = e4fa4ef5-9f03-45d4-adbd-30b1abdcfb32
image_ref_alt = e4fa4ef5-9f03-45d4-adbd-30b1abdcfb32
image_name = cirros-0.3.0-x86_64
image_ref_alt = 53734a0d-60a8-4689-b7c8-3c14917a7197
flavor_ref = 1
flavor_ref_alt = 2
@@ -88,7 +89,7 @@ build_interval = 3
# Number of seconds to time out on waiting for an instance
# to build or reach an expected status
build_timeout = 100
build_timeout = 300
# Run additional tests that use SSH for instance validation?
# This requires the instances be routable from the host
@@ -108,7 +109,7 @@ network_for_ssh = private
ip_version_for_ssh = 4
# Number of seconds to wait to authenticate to an instance
ssh_timeout = 400
ssh_timeout = 300
# Number of seconds to wait for output from ssh channel
ssh_channel_timeout = 60
@@ -190,13 +191,13 @@ tenant_network_mask_bits = 28
tenant_networks_reachable = true
# Id of the public network that provides external connectivity.
public_network_id = eeb739fc-97a0-46b3-b8b9-12212b8247c4
public_network_id = cdb94175-2002-449f-be41-6b8afce8de13
# Id of a shared public router that provides external connectivity.
# A shared public router would commonly be used where IP namespaces
# were disabled. If namespaces are enabled, it would be preferable
# for each tenant to have their own router.
public_router_id = 30dc6a2e-d22e-48e5-8fa3-34a7d501d673
public_router_id = 2a6bf65b-01f7-4c91-840a-2b5f676e7016
# Whether or not quantum is expected to be available
quantum_available = false
@@ -214,7 +215,7 @@ catalog_type = volume
build_interval = 3
# Number of seconds to time out on waiting for a volume
# to be available or reach an expected status
build_timeout = 400
build_timeout = 300
# Runs Cinder multi-backend tests (requires 2 backends declared in cinder.conf)
# They must have different volume_backend_name (backend1_name and backend2_name
# have to be different)
@@ -258,8 +259,8 @@ allow_tenant_reuse = true
# Reference data for tests. The ref and ref_alt should be
# distinct images/flavors.
image_ref = e4fa4ef5-9f03-45d4-adbd-30b1abdcfb32
image_ref_alt = e4fa4ef5-9f03-45d4-adbd-30b1abdcfb32
image_name = cirros-0.3.0-x86_64
image_ref_alt = 53734a0d-60a8-4689-b7c8-3c14917a7197
flavor_ref = 1
flavor_ref_alt = 2
@@ -273,7 +274,7 @@ build_interval = 3
# Number of seconds to time out on waiting for an instance
# to build or reach an expected status
build_timeout = 20
build_timeout = 300
# Run additional tests that use SSH for instance validation?
# This requires the instances be routable from the host
@@ -293,7 +294,7 @@ network_for_ssh = net04
ip_version_for_ssh = 4
# Number of seconds to wait to authenticate to an instance
ssh_timeout = 400
ssh_timeout = 320
# Number of seconds to wait for output from ssh channel
ssh_channel_timeout = 60
@@ -328,4 +329,4 @@ block_migrate_supports_cinder_iscsi = false
# By default, rely on the status of the diskConfig extension to
# decide whether to execute disk config tests. When set to false, tests
# are forced to skip, regardless of the extension status
disk_config_enabled_override = true
disk_config_enabled_override = true

View File

@@ -1,3 +1,182 @@
                        "Timed out waiting for %s to become "
                        "reachable" % ip_address)
class NovaNetworkScenarioTest(OfficialClientTest):
    """
    Base class for nova network scenario tests
    """

    @classmethod
    def check_preconditions(cls):
        if cls.config.network.quantum_available:
            cls.enabled = False
            msg = "Nova Networking not available"
            raise cls.skipException(msg)
        else:
            cls.enabled = True
            # ensure the config says true
            try:
                cls.compute_client.networks.list()
            except exc.EndpointNotFound:
                cls.enabled = False
                raise

    @classmethod
    def setUpClass(cls):
        super(NovaNetworkScenarioTest, cls).setUpClass()
        cls.tenant_id = cls.manager._get_identity_client(
            cls.config.identity.username,
            cls.config.identity.password,
            cls.config.identity.tenant_name).tenant_id
        cls.network = []

    def _create_keypair(self, client, namestart='ost1_test-keypair-smoke-'):
        kp_name = rand_name(namestart)
        keypair = client.keypairs.create(kp_name)
        self.verify_response_body_content(keypair.id,
                                          kp_name,
                                          'Creation of keypair failed')
        self.set_resource(kp_name, keypair)
        return keypair

    def _create_security_group(self, client, namestart='ost1_test-secgroup-smoke-'):
        # Create security group
        sg_name = rand_name(namestart)
        sg_desc = sg_name + " description"
        secgroup = client.security_groups.create(sg_name, sg_desc)
        self.verify_response_body_content(secgroup.name,
                                          sg_name,
                                          "Security group creation failed")
        self.verify_response_body_content(secgroup.description,
                                          sg_desc,
                                          "Security group creation failed")
        self.set_resource(sg_name, secgroup)

        # Add rules to the security group
        # These rules are intended to permit inbound ssh and icmp
        # traffic from all sources, so no group_id is provided.
        # Setting a group_id would only permit traffic from ports
        # belonging to the same security group.
        rulesets = [
            {
                # ssh
                'ip_protocol': 'tcp',
                'from_port': 22,
                'to_port': 22,
                'cidr': '0.0.0.0/0',
            },
            {
                # ping
                'ip_protocol': 'icmp',
                'from_port': -1,
                'to_port': -1,
                'cidr': '0.0.0.0/0',
            }
        ]
        for ruleset in rulesets:
            try:
                client.security_group_rules.create(secgroup.id, **ruleset)
            except Exception:
                self.fail("Failed to create rule in security group.")
        return secgroup

    def _create_network(self, tenant_id, label='ost1_test-network-smoke-'):
        n_label = rand_name(label)
        cidr = self.config.network.tenant_network_cidr
        networks = self.compute_client.networks.create(label=n_label, cidr=cidr)
        self.network.append(networks)
        self.verify_response_body_content(networks.label,
                                          n_label,
                                          "Network creation failed")
        return networks

    @classmethod
    def _clear_networks(cls):
        for net in cls.network:
            try:
                cls.compute_client.networks.delete(net.id)
            except Exception:
                cls.fail("Can't delete network")

    def _list_networks(self):
        nets = self.compute_client.networks.list()
        return nets

    def _create_server(self, client, network, name, key_name, security_groups):
        flavor_id = self.config.compute.flavor_ref
        base_image_id = self.config.compute.image_ref
        create_kwargs = {
            'nics': [
                {'net-id': network.id},
            ],
            'key_name': key_name,
            'security_groups': security_groups,
        }
        server = client.servers.create(name, base_image_id, flavor_id,
                                       **create_kwargs)
        self.verify_response_body_content(server.name,
                                          name,
                                          "Instance creation failed")
        self.set_resource(name, server)
        self.status_timeout(client.servers, server.id, 'ACTIVE')
        # The instance retrieved on creation is missing network
        # details, necessitating retrieval after it becomes active to
        # ensure correct details.
        server = client.servers.get(server.id)
        self.set_resource(name, server)
        return server

    def _create_floating_ip(self, server):
        floating_ips_pool = self.compute_client.floating_ip_pools.list()
        if len(floating_ips_pool) != 0:
            floating_ip = self.compute_client.floating_ips.create(
                pool=floating_ips_pool[0].name)
            self.set_resource(rand_name('ost1_test-floatingip-'), floating_ip)
            return floating_ip
        else:
            self.fail('Incorrect OpenStack configuration: '
                      'there are no floating IP pools')

    def _ping_ip_address(self, ip_address):
        cmd = ['ping', '-c1', '-w1', ip_address]

        def ping():
            proc = subprocess.Popen(cmd,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE)
            proc.wait()
            if proc.returncode == 0:
                return True

        # TODO Allow configuration of execution and sleep duration.
        return fuel_health.test.call_until_true(ping, 40, 1)

    def _is_reachable_via_ssh(self, ip_address, username, private_key,
                              timeout=120):
        ssh_client = ssh.Client(ip_address, username,
                                pkey=private_key,
                                timeout=timeout)
        return ssh_client.test_connection_auth()

    def _check_vm_connectivity(self, ip_address, username, private_key,
                               timeout=120):
        self.assertTrue(self._ping_ip_address(ip_address),
                        "Timed out waiting for %s to become "
                        "reachable. Please, check Network "
                        "configuration" % ip_address)

    @classmethod
    def tearDownClass(cls):
        super(NovaNetworkScenarioTest, cls).tearDownClass()
        cls._clear_networks()
import logging
import subprocess
@@ -20,6 +199,7 @@ from fuel_health.common.utils.data_utils import rand_name
from fuel_health import exceptions
import fuel_health.manager
import fuel_health.test
from fuel_health import config

LOG = logging.getLogger(__name__)
@@ -305,7 +485,7 @@ class NovaNetworkScenarioTest(OfficialClientTest):
    def _create_server(self, client, network, name, key_name, security_groups):
        flavor_id = self.config.compute.flavor_ref
        base_image_id = self.config.compute.image_ref
        base_image_id = self.config.compute.image_name
        create_kwargs = {
            'nics': [
                {'net-id': network.id},
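_ping_ip_address() in the class added above delegates retries to fuel_health.test.call_until_true(ping, 40, 1). A sketch of the contract that call assumes -- invoke the predicate every sleep_for seconds until it returns True or duration elapses -- not necessarily the project's exact implementation:

import time

def call_until_true(func, duration, sleep_for):
    """Call func every sleep_for seconds until it returns True,
    or until duration seconds have passed; return the last result."""
    timeout = time.time() + duration
    while time.time() < timeout:
        if func():
            return True
        time.sleep(sleep_for)
    return False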

View File

@@ -27,6 +27,7 @@ class SanityInfrastructureTest(base.BaseComputeAdminTest):
        cls.host = cls.config.compute.controller_node
        cls.usr = cls.config.compute.controller_node_ssh_user
        cls.pwd = cls.config.compute.controller_node_ssh_password
        cls.key = cls.config.compute.controller_node_ssh_key_path
        cls.hostname = cls.config.compute.controller_node_name

    @classmethod
@@ -55,8 +56,8 @@ class SanityInfrastructureTest(base.BaseComputeAdminTest):
        4. Check that the number of normally running services (with the ':-)'
           state) is equal to the number of expected services
        """
        with ExecutionTimeout(5):
            output = SSHClient(self.host, self.usr, self.pwd).exec_command(
        with ExecutionTimeout(300):
            output = SSHClient(self.host, self.usr, self.pwd, pkey=self.key).exec_command(
                "nova-manage service list")
        self.assertFalse(u'XXX' in output)
        self.assertEqual(len(self.list_of_expected_services),
@@ -88,7 +89,7 @@ class SanityInfrastructureTest(base.BaseComputeAdminTest):
        expected_output = "in-addr.arpa domain name pointer " + self.hostname
        with ExecutionTimeout(10):
            try:
                output = SSHClient(self.host, self.usr, self.pwd).exec_command(
                output = SSHClient(self.host, self.usr, self.pwd, pkey=self.key).exec_command(
                    "host " + self.host)
            except SSHExecCommandFailed:
                output = "'host' command failed."

View File

@@ -3,11 +3,11 @@ import time
from fuel_health import clients
from fuel_health import exceptions
from fuel_health import clients
import fuel_health.test
from fuel_health.common import log as logging
from fuel_health.common.utils.data_utils import rand_name, rand_int_id
from fuel_health.tests import smoke
from fuel_health import nmanager

LOG = logging.getLogger(__name__)
@@ -160,7 +160,8 @@ class BaseComputeTest(fuel_health.test.BaseTestCase):
        if 'name' in kwargs:
            name = kwargs.pop('name')
        flavor = kwargs.get('flavor', cls.flavor_ref)
        image_id = kwargs.get('image_id', cls.image_ref)
        # image_id = kwargs.get('image_id', cls.image_ref)
        image_id = kwargs.get('image_id', nmanager.get_image_from_name())
        resp, body = cls.servers_client.create_server(
            name, image_id, flavor, **kwargs)

View File

@@ -71,7 +71,7 @@ class TestImageAction(nmanager.OfficialClientTest):
        #self._create_security_group_rule()
        # boot an instance and create a timestamp file in it
        server = self._boot_image(self.config.compute.image_ref)
        server = self._boot_image(self.config.compute.image_name)
        # snapshot the instance
        snapshot_image_id = self._create_image(server)

View File

@@ -1,4 +1,3 @@
from fuel_health.common import network_common as net_common
from fuel_health.common.utils.data_utils import rand_name
from fuel_health import nmanager