Add first version of common scenarios

Change the context for shaker and add common functions
for l3 and dhcp agents to the base scenario.
Create Neutron disaster scenarios that use the common
l3 and dhcp agent functions.

Change-Id: Iba459ff3a78a9e13d8c73abba75bc5ee372fb428
Kristina Kuznetsova
2015-04-27 17:28:14 +03:00
parent 7d6af5ca68
commit 2f0fcb13fe
14 changed files with 988 additions and 73 deletions
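
For orientation, here is a minimal sketch of how the common helpers introduced in this commit are meant to be composed into a scenario. The class name ExampleDisaster and the method name are hypothetical; the imports and helper calls are the ones added in haos/rally/plugin/base_disaster.py below.

from rally.benchmark.scenarios import base

from haos.rally.plugin import base_disaster


class ExampleDisaster(base_disaster.BaseDisaster):

    @base.scenario()
    def ping_across_networks(self):
        # two isolated topologies, one VM in each
        network1, subnets1, router1 = self.create_network_subnet_router()
        network2, subnets2, router2 = self.create_network_subnet_router()
        vm1 = self.boot_server("VM1",
                               nics=[{"net-id": network1["network"]["id"]}])
        vm2 = self.boot_server("VM2",
                               nics=[{"net-id": network2["network"]["id"]}])
        # allow icmp/ssh and attach floating IPs
        self.add_rules_for_ping()
        self.associate_floating_ip(vm1)
        self.associate_floating_ip(vm2)
        vm2_floating_ip = self.define_floating_ip_for_vm(
            vm2, network2["network"]["name"])
        # ping VM2's floating IP from VM1 through its shaker agent;
        # check_connectivity returns True when the ping succeeds
        if not self.check_connectivity("VM1", vm2_floating_ip):
            raise Exception("VM1 cannot reach VM2's floating IP")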

haos/__init__.py (new file)

@@ -0,0 +1 @@
__author__ = 'kkuznetsova'

haos/rally/__init__.py (new file)

@@ -0,0 +1 @@
__author__ = 'kkuznetsova'


@@ -0,0 +1 @@
__author__ = 'kkuznetsova'


@@ -30,6 +30,10 @@ class CloudNodesContext(base.Context):
"default_flavor": {
"type": "string",
"default": "m1.micro"
},
"shaker": {
"type": "object",
"default": {}
}
}
}


@@ -1,10 +1,8 @@
import time
from cloud_utils import run_command
from rally.benchmark.context import base
from rally import exceptions
from rally import consts
from haos.rally.utils import run_command
@base.context(name="recover_cloud", order=900)
class CloudNodesContext(base.Context):
@@ -54,13 +52,13 @@ class CloudNodesContext(base.Context):
def cleanup(self):
"""This method is called after the task finish."""
for action in self.context["recover_commands"]:
run_command(self.context, action["node"], action["command"],
action["executor"])
time.sleep(action.get("timeout", 0))
controllers = self.context["controllers"]
if "rabbitmq_cluster_status" in self.context["checks"]:
if self.check_rabbitmq_cluster_status(controllers) is False:
raise Exception("RabbitMQ cluster wasn't recovered")
pass
# for action in self.context["recover_commands"]:
# run_command(self.context, action["node"], action["command"],
# action["executor"])
# time.sleep(action.get("timeout", 0))
#
# controllers = self.context["controllers"]
# if "rabbitmq_cluster_status" in self.context["checks"]:
# if self.check_rabbitmq_cluster_status(controllers) is False:
# raise Exception("RabbitMQ cluster wasn't recovered")


@@ -0,0 +1 @@
__author__ = 'kkuznetsova'


@@ -0,0 +1,212 @@
from haos.rally import utils
from rally.benchmark.scenarios.neutron import utils as neutron_utils
from rally.benchmark.scenarios.nova import utils as nova_utils
from rally.benchmark.scenarios.vm import utils as vm_utils
from rally.common import log as logging
import time
LOG = logging.getLogger(__name__)
class BaseDisaster(neutron_utils.NeutronScenario,
nova_utils.NovaScenario,
vm_utils.VMScenario):
def wait_shaker_agent(self, agent_id, timeout=300):
result = utils.run_command(self.context, agent_id, "hostname",
executor="shaker", timeout=timeout)
LOG.debug(result)
def boot_server(self, name, nics=None):
USER_DATA = """#!/bin/bash
shaker-agent --agent-id %(agent_id)s \
--server-endpoint %(server_endpoint)s --debug \
--log-file /var/log/shaker.log
"""
shaker_endpoint = self.context['shaker_endpoint']
userdata = USER_DATA % dict(agent_id=name,
server_endpoint=shaker_endpoint)
key_name = "my_key"
kwargs = {"userdata": userdata, "key_name": key_name}
if nics is not None:
kwargs['nics'] = nics
vm = self._boot_server(name=name,
image_id=self.context["shaker_image"],
flavor_id=self.context["default_flavor"],
auto_assign_nic=True,
**kwargs)
self.wait_shaker_agent(name, timeout=850)
return vm
def power_off_controller(self, controller):
control_node = self.context["power_control_node"]
utils.run_command(self.context, control_node["agent_endpoint"],
command=controller["hardware_power_off_cmd"],
recover_command=controller["hardware_power_on_cmd"],
recover_timeout=controller["power_on_timeout"])
time.sleep(controller["power_off_timeout"])
# This function creates a router, a network and a subnet, and joins them
def create_network_subnet_router(self):
self._clients = self._admin_clients
router = self._create_router({}, external_gw=True)
network, subnets = self._create_network_and_subnets()
self._add_interface_router(subnets[0]["subnet"], router["router"])
return network, subnets, router
# This function associates a floating IP with the given VM
def associate_floating_ip(self, server=None):
self._clients = self._admin_clients
nets = self._list_networks()
for network in nets:
if network["router:external"]:
external_network = network
self._attach_floating_ip(server, external_network)
return
# This function returns the floating IP of the given VM on the named network
def define_floating_ip_for_vm(self, vm, net_name):
# vm - instance: type(vm) = <class 'novaclient.v2.servers.Server'>
# net_name - name of network on which we boot vm
addresses = vm.addresses[net_name]
for address in addresses:
if address["OS-EXT-IPS:type"] == 'floating':
return address["addr"]
return None
# This function returns the fixed (internal) IP of the given VM
# on the named network
def define_fixed_ip_for_vm(self, vm, net_name):
# vm - instance: type(vm) = <class 'novaclient.v2.servers.Server'>
# net_name - name of network on which we boot vm
addresses = vm.addresses[net_name]
for address in addresses:
if address["OS-EXT-IPS:type"] == 'fixed':
return address["addr"]
return None
# This function pings address_ip from the given server
def check_connectivity(self, server, address_ip):
# server - the server (shaker agent id) to ping from
# address_ip - the IP address to ping
command = "ping -c1 %s 1>/dev/null;echo $?" % address_ip
output = utils.run_command(self.context, server, command=command,
executor="shaker")
return output and output[0] == "0"
# Get the node that hosts the l3-agent for the given router,
# using the neutron API
def get_node_on_what_is_agent_for_router(self, router):
# router - router dict as returned by the Neutron client
router_id = router["router"]["id"]
neutron_client = self.clients("neutron")
agents = neutron_client.list_l3_agent_hosting_routers(router_id)
for agent in agents["agents"]:
return agent['host']
raise Exception("Router doesn't have any l3-agent")
# Add a tcp rule for port 22 and an icmp rule
def add_rules_for_ping(self):
self._clients = self._admin_clients
sec_groups = self._list_security_groups()
self.clients("nova").security_group_rules.create(
sec_groups[0].id,
from_port=22,
to_port=22,
ip_protocol="tcp",
cidr="0.0.0.0/0")
self.clients("nova").security_group_rules.create(
sec_groups[0].id,
from_port=-1,
to_port=-1,
ip_protocol="icmp",
cidr="0.0.0.0/0")
# Get the list of dhcp agents
def get_list_dhcp_agents(self):
list_agents = self.clients("neutron").list_agents()
list_dhcp_agents = []
for agent in list_agents["agents"]:
if agent["agent_type"] == "DHCP agent":
list_dhcp_agents.append(agent)
return list_dhcp_agents
# Get the list of l3 agents
def get_list_l3_agents(self):
list_agents = self.clients("neutron").list_agents()
list_l3_agents = []
for agent in list_agents["agents"]:
if agent["agent_type"] == "L3 agent":
list_l3_agents.append(agent)
return list_l3_agents
# Get dhcp agent for chosen network on chosen node
def get_dhcp_on_chosen_node(self, node, net):
# node - name of the controller node that hosts the router
# net - the network whose dhcp agent should be checked
neutron_client = self.clients("neutron")
net_id = net["network"]["id"]
dhcp_agents = neutron_client.list_dhcp_agent_hosting_networks(net_id)
need_manually_rescheduling = True
for agent in dhcp_agents["agents"]:
if agent["host"] == node:
# the agent is already on the chosen node, no manual rescheduling needed
need_manually_rescheduling = False
break
if need_manually_rescheduling:
first_dhcp_agent_id = dhcp_agents["agents"][0]["id"]
neutron_client.remove_network_from_dhcp_agent(first_dhcp_agent_id,
net_id)
list_dhcp_agents = self.get_list_dhcp_agents()
need_agent = None
for agent in list_dhcp_agents:
if agent["host"] == node:
need_agent = agent
break
if need_agent:
agent_id = need_agent['id']
body = {"network_id": net_id}
neutron_client.add_network_to_dhcp_agent(dhcp_agent=agent_id,
body=body)
else:
raise Exception("No dhcp agent found on node %s" % node)
def get_l3_on_chosen_node(self, node, router):
"""Get l3 agent for chosen router on chosen node.
:param node: name of the controller node
:param router: router dict as returned by the Neutron client
"""
neutron_client = self.clients("neutron")
router_id = router["router"]["id"]
l3_agents = neutron_client.list_l3_agent_hosting_routers(router_id)
need_manually_rescheduling = True
for agent in l3_agents["agents"]:
if agent["host"] == node:
# the agent is already on the chosen node, no manual rescheduling needed
need_manually_rescheduling = False
break
if need_manually_rescheduling:
first_l3_agent_id = l3_agents["agents"][0]["id"]
neutron_client.remove_router_from_l3_agent(first_l3_agent_id,
router_id)
list_l3_agents = self.get_list_l3_agents()
need_agent = None
for agent in list_l3_agents:
if agent["host"] == node:
need_agent = agent
break
if need_agent:
agent_id = need_agent['id']
body = {"router_id": router_id}
neutron_client.add_router_to_l3_agent(l3_agent=agent_id,
body=body)
else:
raise Exception("No l3 agent found on node %s" % node)


@@ -0,0 +1,703 @@
from rally.benchmark.scenarios import base
from haos.rally.plugin import base_disaster
from haos.rally import utils
from rally.common import log as logging
LOG = logging.getLogger(__name__)
class NeutronDisaster(base_disaster.BaseDisaster):
def check_all_rescedule(self, node):
list_agents = self.clients("neutron").list_agents()
dhcp_for_node = None
l3_for_node = None
for agent in list_agents["agents"]:
if (agent["host"] == node):
if (agent["agent_type"] == "DHCP agent"):
dhcp_for_node = agent
elif (agent["agent_type"] == "L3 agent"):
l3_for_node = agent
if l3_for_node and dhcp_for_node:
list_networks = self.clients(
"neutron").list_networks_on_dhcp_agent(dhcp_for_node["id"])
if len(list_networks) == 0:
raise Exception("DHCP agent on node %s has no networks" % node)
list_routers = self.clients(
"neutron").list_routers_on_l3_agent(l3_for_node["id"])
if len(list_routers) == 0:
raise Exception("L3 agent on node %s has no routers" % node)
# TODO(sbelous): create function find_primary_controller()
def find_primary_controller(self):
for controller in self.context["controllers"]:
node = controller["agent_endpoint"]
command = "ifconfig | grep br-ex-hapr 1>/dev/null;echo $?"
result = utils.run_command(self.context, node, command=command,
executor="shaker")
if result and result[0] == "0":
return node
return None
# TODO(sbelous): write function wait some time
def wait_some_time(self):
pass
@base.scenario()
def drop_mysql_port(self):
"""Drop mysql port
Setup:
OpenStack cloud with at least 3 controllers
Scenario:
1. Create router1, net1 and subnetwork1 and join router1 with net1
2. Create router2, net2 and subnetwork2 and join router2 with net2
3. Start vm1 in network1
4. Start vm2 in network2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. ping 8.8.8.8 from vm2
9. ping vm1 from vm2 and vm2 from vm1
10. Run udhcp on vm1
11. Make l3-agent for router1 and dhcp-agent for net1 on the same node
12. Drop mysql port 3306 on the node where the l3-agent for router1 is running
13. Boot vm3 in network1
14. ping 8.8.8.8 from vm3
15. ping between vm1 and vm3 by internal ip
16. ping between vm2 and vm3 by floating ip
17. Run udhcp on vm1 and vm3
"""
# Add security group rules to allow ping
self.add_rules_for_ping()
# Create the first network, subnet and router and join them together
network1, subnets1, router1 = self.create_network_subnet_router()
# Create the second network, subnet and router and join them together
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find the node that hosts the l3-agent for router1
node = self.get_node_on_what_is_agent_for_router(router1)
self.get_dhcp_on_chosen_node(node, network1)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Drop the mysql port
command = "iptables -I INPUT 1 -p tcp --dport 3306 -j DROP"
utils.run_command(self.context, node, command=command)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3", vm1_floating_ip)
@base.scenario()
def reboot_primary_controller(self):
"""Reboot primary controller
Setup:
OpenStack cloud with at least 3 controllers and 1 compute
Scenario:
1. Create router1, net1 and subnet1 and join router1 with net1
2. Create router2, net2 and subnet2 and join router2 with net2
3. Start vm1 in net1
4. Start vm2 in net2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. Find primary controller
9. Get l3 agent for router1 and dhcp-agent for net1
on primary controller
10. ping 8.8.8.8 from vm2
11. ping vm1 from vm2 and vm2 from vm1
12. Run udhcp on vm1
13. Reboot primary controller
14. Wait some time
15. Boot vm3 in net1
16. ping 8.8.8.8 from vm3
17. ping between vm1 and vm3 by internal ip
18. ping between vm2 and vm3 by floating ip
19. Run udhcp on vm1 and vm3
"""
# Create the first network, subnet and router and join them together
network1, subnets1, router1 = self.create_network_subnet_router()
# Create the second network, subnet and router and join them together
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add security group rules to allow ping
self.add_rules_for_ping()
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
primary_controller = self.find_primary_controller()
# Get l3 agent for router1 and one dhcp agent
# for network1 on primary controller
self.get_dhcp_on_chosen_node(primary_controller, network1)
self.get_l3_on_chosen_node(primary_controller, router1)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
primary_context_controller = None
for controller in self.context["controllers"]:
if controller["agent_endpoint"] == primary_controller:
primary_context_controller = controller
break
if primary_context_controller:
self.power_off_controller(primary_context_controller)
else:
raise Exception("Primary controller wasn't found in the context")
# TODO(sbelous): wait some time
self.check_all_rescedule(primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3", vm1_floating_ip)
@base.scenario()
def drop_rabbit_port(self):
"""Drop rabbit port
Setup:
OpenStack cloud with at least 3 controllers
Scenario:
1. Create router1, net1 and subnet1 and join router1 with net1
2. Create router2, net2 and subnet2 and join router2 with net2
3. Start vm1 in net1
4. Start vm2 in net2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. ping 8.8.8.8 from vm2
9. ping vm1 from vm2 and vm2 from vm1
10. Run udhcp on vm1
11. Make l3-agent for router1 and one dhcp-agent for net1
on the same node
12. Drop rabbit port 5673 on the node where the l3-agent for router1 is running
13. Boot vm3 in net1
14. ping 8.8.8.8 from vm3
15. ping between vm1 and vm3 by internal ip
16. ping between vm2 and vm3 by floating ip
17. Run udhcp on vm1 and vm3
"""
# Add security group rules to allow ping
self.add_rules_for_ping()
# Create the first network, subnet and router and join them together
network1, subnets1, router1 = self.create_network_subnet_router()
# Create the second network, subnet and router and join them together
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find the node that hosts the l3-agent for router1
node = self.get_node_on_what_is_agent_for_router(router1)
self.get_dhcp_on_chosen_node(node, network1)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Drop rabbit MQ port
command = "iptables -I OUTPUT 1 -p tcp --dport 5673 -j DROP"
utils.run_command(self.context, node, command=command,
executor="shaker")
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3", vm1_floating_ip)
@base.scenario()
def reset_primary_controller(self):
"""Reset primary controller
Setup:
OpenStack cloud with at least 3 controllers and 1 compute
Scenario:
1. Create router1, net1 and subnet1 and join router1 with net1
2. Create router2, net2 and subnet2 and join router2 with net2
3. Start vm1 in net1
4. Start vm2 in net2
5. Define floating ip for vm1 and vm2
6. Define internal ip for vm1
7. Add rules for ping
8. Find primary controller
9. Get l3 agent for router1 and one dhcp agent for net1
on primary controller
10. ping 8.8.8.8 from vm2
11. ping vm1 from vm2 and vm2 from vm1
12. Run udhcp on vm1
13. Reset primary controller
14. Wait some time
15. Boot vm3 in net1
16. ping 8.8.8.8 from vm3
17. ping between vm1 and vm3 by internal ip
18. ping between vm2 and vm3 by floating ip
19. Run udhcp on vm1 and vm3
"""
# Create the first network, subnet and router and join them together
network1, subnets1, router1 = self.create_network_subnet_router()
# Create the second network, subnet and router and join them together
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add security group rules to allow ping
self.add_rules_for_ping()
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
primary_controller = self.find_primary_controller()
# Get l3 agent for router1 and one dhcp agent for network1
# on primary controller
self.get_dhcp_on_chosen_node(primary_controller, network1)
self.get_l3_on_chosen_node(primary_controller, router1)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
primary_context_controller = None
for controller in self.context["controllers"]:
if controller["agent_endpoint"] == primary_controller:
primary_context_controller = controller
break
if primary_context_controller:
self.power_off_controller(primary_context_controller)
else:
raise Exception("Primary controller wasn't found in the context")
# TODO(sbelous): wait some time
self.check_all_rescedule(primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3", vm1_floating_ip)
@base.scenario()
def destroy_primary_controller(self):
"""Destroy primary controller
Scenario:
1. Create network1, subnets1, router1
2. Create network2, subnets2, router2
3. Launch 2 instances (vm1 and vm2) and associate floating ip
4. Add rules for ping
5. Find primary controller
6. Reschedule network1 and router1 to the primary controller
7. ping 8.8.8.8 from vm2
8. ping vm1 from vm2 and vm2 from vm1
9. Run udhcp on vm1
10. Destroy primary controller (virsh destroy <primary_controller>)
11. Wait some time
12. Check that all networks and routers are rescheduled
from the primary controller
13. Boot vm3 in network1
14. ping 8.8.8.8 from vm3
15. ping between vm1 and vm3 by internal ip
16. ping between vm2 and vm3 by floating ip
17. Run udhcp on vm1 and vm3
"""
# Create the first network, subnet and router and join them together
network1, subnets1, router1 = self.create_network_subnet_router()
# Create the second network, subnet and router and join them together
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add security group rules to allow ping
self.add_rules_for_ping()
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
primary_controller = self.find_primary_controller()
# Get l3 agent for router1 and one dhcp agent for network1
# on primary controller
self.get_dhcp_on_chosen_node(primary_controller, network1)
self.get_l3_on_chosen_node(primary_controller, router1)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
primary_context_controller = None
for controller in self.context["controllers"]:
if controller["agent_endpoint"] == primary_controller:
primary_context_controller = controller
break
if primary_context_controller:
self.power_off_controller(primary_context_controller)
else:
raise Exception("Primary controller wasn't found in the context")
# TODO(sbelous): wait some time
self.check_all_rescedule(primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3", vm1_floating_ip)
@base.scenario()
def destroy_non_primary_controller(self):
"""Destroy non primary controller
Scenario:
1. Create network1, subnets1, router1
2. Create network2, subnets2, router2
3. Launch 2 instances (vm1 and vm2) and associate floating ip
4. Add rules for ping
5. Choose one non-primary controller
6. Reschedule network1 and router1 to the chosen non-primary controller
7. ping 8.8.8.8 from vm2
8. ping vm1 from vm2 and vm2 from vm1
9. Run udhcp on vm1
10. Destroy the non-primary controller
(virsh destroy <non_primary_controller>)
11. Wait some time
12. Check that all networks and routers are rescheduled
from the non-primary controller
13. Boot vm3 in network1
14. ping 8.8.8.8 from vm3
15. ping between vm1 and vm3 by internal ip
16. ping between vm2 and vm3 by floating ip
17. Run udhcp on vm1 and vm3
"""
# Create the first network, subnet and router and join them together
network1, subnets1, router1 = self.create_network_subnet_router()
# Create the second network, subnet and router and join them together
network2, subnets2, router2 = self.create_network_subnet_router()
# boot vms
net1_id = network1["network"]["id"]
net2_id = network2["network"]["id"]
vm1 = self.boot_server("VM1", nics=[{"net-id": net1_id}])
vm2 = self.boot_server("VM2", nics=[{"net-id": net2_id}])
# Add security group rules to allow ping
self.add_rules_for_ping()
# floatingIp for VMs
self.associate_floating_ip(vm1)
self.associate_floating_ip(vm2)
# Define internal IP and floating IP
net1_name = network1["network"]["name"]
net2_name = network2["network"]["name"]
vm1_internal_ip = self.define_fixed_ip_for_vm(vm1, net1_name)
vm1_floating_ip = self.define_floating_ip_for_vm(vm1, net1_name)
vm2_floating_ip = self.define_floating_ip_for_vm(vm2, net2_name)
# Find primary controller
primary_controller = self.find_primary_controller()
# Get l3 agent for router1 and one dhcp agent for network1
# on primary controller
self.get_dhcp_on_chosen_node(primary_controller, network1)
self.get_l3_on_chosen_node(primary_controller, router1)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM2", "8.8.8.8")
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
non_primary_context_controller = None
non_primary_controller = None
for controller in self.context["controllers"]:
if controller["agent_endpoint"] != primary_controller:
non_primary_context_controller = controller
non_primary_controller = controller["agent_endpoint"]
break
if non_primary_context_controller:
self.power_off_controller(non_primary_context_controller)
else:
raise Exception("Non-primary controller wasn't found in the context")
# TODO(sbelous): wait some time
self.check_all_rescedule(non_primary_controller)
vm3 = self.boot_server("VM3", nics=[{"net-id": net1_id}])
vm3_internal_ip = self.define_fixed_ip_for_vm(vm3, net1_name)
vm3_floating_ip = self.define_floating_ip_for_vm(vm3, net1_name)
# dhcp work
output = utils.run_command(self.context, "VM1", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
output = utils.run_command(self.context, "VM3", command="udhcpc",
executor="shaker")
LOG.debug("output = %s", output)
# Check connectivity
self.check_connectivity("VM3", "8.8.8.8")
self.check_connectivity("VM1", vm3_internal_ip)
self.check_connectivity("VM3", vm1_internal_ip)
self.check_connectivity("VM1", vm2_floating_ip)
self.check_connectivity("VM2", vm1_floating_ip)
self.check_connectivity("VM2", vm3_floating_ip)
self.check_connectivity("VM3", vm2_floating_ip)
self.check_connectivity("VM1", vm3_floating_ip)
self.check_connectivity("VM3", vm1_floating_ip)


@@ -1,13 +1,15 @@
import random
import base_disaster_scenario
from rally.benchmark.scenarios import base
from haos.rally.plugin import base_disaster
class RabbitMQDisasterScenarios(base_disaster_scenario.BaseDisasterScenario):
class RabbitMQDisaster(base_disaster.BaseDisaster):
@base.scenario()
def power_off_one_controller(self):
""" Poweroff one contoller and verify cloud
"""Poweroff one contoller and verify cloud
Setup:
OpenStack cloud with at least 3 controllers
@@ -17,7 +19,7 @@ class RabbitMQDisasterScenarios(base_disaster_scenario.BaseDisasterScenario):
2. Verify cloud: create VM 10 times
"""
controller_id = random.randint(0, len(self.context["controllers"])-1)
controller_id = random.randint(0, len(self.context["controllers"]) - 1)
self.power_off_controller(controller_id)
for i in xrange(0, 10):


@@ -1,9 +1,9 @@
import json
import requests
import signal
from rally import exceptions
from shaker.lib import Shaker
import requests
from shaker import lib
import signal
def timeout_alarm(signum, frame):
@@ -20,13 +20,16 @@ def run_command(context, node, command, recover_command=None,
signal.signal(signal.SIGALRM, timeout_alarm)
signal.alarm(timeout)
if executor == "dummy":
r = requests.post("http://{0}/run_command".format(node),
headers={"Content-Type": "application/json"},
data=json.dumps({"command": command}))
return r.text
elif executor == "shaker":
shaker = Shaker(context["shaker_endpoint"], [node])
r = shaker.run_program(node, command)
return r.get('stdout')
shaker = context.get("shaker")
if not shaker:
shaker = lib.Shaker(context["shaker_endpoint"], [],
agent_loss_timeout=600)
context["shaker"] = shaker
r = shaker.run_script(node, command)
return r['stdout']
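
Since the removed and added lines in this hunk are rendered without +/- markers, here is a short usage sketch of the reworked shaker executor: the first call builds a Shaker client and caches it in the context, later calls reuse it. The context dict below is illustrative (the endpoint value is taken from the sample task file); in a real run Rally supplies the benchmark context.

from haos.rally import utils

# illustrative context; in a real run this is the Rally benchmark context
context = {"shaker_endpoint": "172.18.76.21:5999"}

# first call: creates lib.Shaker(...) and caches it as context["shaker"]
output = utils.run_command(context, "node-1.domain.tld", "hostname",
                           executor="shaker")
# later calls reuse the cached instance instead of reconnecting
output = utils.run_command(context, "node-1.domain.tld", "uname -a",
                           executor="shaker")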


@@ -0,0 +1,35 @@
{
"NeutronDisaster.drop_rabbit_port": [
{
"runner": {
"type": "serial",
"times": 1
},
"context": {
"users": {
"tenants": 1,
"users_per_tenant": 1
},
"recover_cloud": {
"checks": ["rabbitmq_cluster_status"]
},
"cloud_nodes": {
"controllers": [
{
"agent_endpoint": "node-1.domain.tld"
},
{
"agent_endpoint": "node-2.domain.tld"
},
{
"agent_endpoint": "node-3.domain.tld"
}
],
"shaker_endpoint": "172.18.76.21:5999",
"shaker_image": "2fb29a22-b351-4466-83ff-21446097b8c9",
"default_flavor": "18"
}
}
}
]
}


@@ -1,45 +0,0 @@
import time
from cloud_utils import run_command
from rally.benchmark.scenarios import base
from rally.benchmark.scenarios.nova import utils as nova_utils
class BaseDisasterScenario(nova_utils.NovaScenario):
USER_DATA = """#!/bin/bash
shaker-agent --agent-id \"$(hostname)\" --server-endpoint {0}
"""
def wait_shaker_agent(self, agent_id, timeout=300):
result = run_command(self.context, agent_id, "hostname",
executor="shaker", timeout=timeout)
print result
def boot_server(self, name):
nova = self.admin_clients("nova")
userdata = self.USER_DATA.format(self.context["shaker_endpoint"])
kwargs = {"userdata": userdata}
vm = self._boot_server(name=name,
image_id=self.context["shaker_image"],
flavor_id=self.context["default_flavor"],
auto_assign_nic=True,
**kwargs)
self.wait_shaker_agent(name, timeout=300)
return vm
def power_off_controller(self, controller_id):
control_node = self.context["power_control_node"]
controller = self.context["controllers"][controller_id]
run_command(self.context, control_node["agent_endpoint"],
command=controller["hardware_power_off_cmd"],
recover_command=controller["hardware_power_on_cmd"],
recover_timeout=controller["power_on_timeout"])
time.sleep(controller["power_off_timeout"])
def power_off_main_controller(self):
pass


@@ -23,13 +23,12 @@ commands =
bash tools/verify.sh
bash tools/install_rally.sh
bash tools/install_shaker_agents.sh
rally --plugin-path {toxinidir}/rally-contexts,{toxinidir}/rally-scenarios task start {posargs}
rally --plugin-path {toxinidir}/haos/rally/context,{toxinidir}/haos/rally/plugin task start {posargs}
whitelist_externals = bash
[flake8]
# E125 continuation line does not distinguish itself from next logical line
# all others are enabled temporary
ignore = E125,F401,H233,F841,H304,H306,H401,E226,H302
ignore = E125
show-source = true
builtins = _
exclude=.venv,.git,.tox,dist,doc,*openstack/common*,*lib/python*,*egg,tools,build