Fix mesos monitor for handling multiple masters

Magnum is going to enable HA mode for the mesos bay, in which there
can be more than one master node. Only one of the master nodes (the
leader) is active; the rest are standbys. The mesos monitor therefore
needs to identify the leader and pull metrics from the leader only.

Change-Id: Ic7d6cb64964db2758163bd231dec4190302155b2
Partial-Implements: blueprint mesos-multi-master-node
Hongbin Lu 2015-11-30 15:42:17 -05:00
parent 64c5496536
commit d95f400c63
2 changed files with 42 additions and 15 deletions
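
For background on the approach: each Mesos master serves a /state endpoint on
port 5050 that reports both the elected leader's pid ('leader') and the
responding master's own pid ('pid'), and the two match only on the leader.
Below is a minimal sketch of that check, reusing the sample values from the
unit tests in this commit; the helper name is illustrative, not part of the
change itself.

    # Illustrative only: mirrors the _is_leader() check this commit adds.
    def is_leader(state):
        # The leading master advertises its own pid as the leader pid.
        return state['leader'] == state['pid']

    leader = {'leader': 'master@10.0.0.6:5050', 'pid': 'master@10.0.0.6:5050'}
    standby = {'leader': 'master@10.0.0.6:5050', 'pid': 'master@1.1.1.1:5050'}
    assert is_leader(leader)
    assert not is_leader(standby)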


@@ -34,16 +34,21 @@ class MesosMonitor(MonitorBase):
     def _build_url(self, url, protocol='http', port='80', path='/'):
         return protocol + '://' + url + ':' + port + path
 
+    def _is_leader(self, state):
+        return state['leader'] == state['pid']
+
     def pull_data(self):
-        mesos_master_url = self._build_url(self.bay.api_address,
-                                           port='5050',
-                                           path='/state')
-        state_json = jsonutils.loads(urlfetch.get(mesos_master_url))
         self.data['mem_total'] = 0
         self.data['mem_used'] = 0
-        for slave in state_json['slaves']:
-            self.data['mem_total'] += slave['resources']['mem']
-            self.data['mem_used'] += slave['used_resources']['mem']
+        for master_addr in self.bay.master_addresses:
+            mesos_master_url = self._build_url(master_addr, port='5050',
+                                               path='/state')
+            master = jsonutils.loads(urlfetch.get(mesos_master_url))
+            if self._is_leader(master):
+                for slave in master['slaves']:
+                    self.data['mem_total'] += slave['resources']['mem']
+                    self.data['mem_used'] += slave['used_resources']['mem']
+                break
 
     def compute_memory_util(self):
         if self.data['mem_total'] == 0:
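
For a concrete picture of the new control flow in pull_data() above, here is a
self-contained sketch that replaces the urlfetch.get() call with canned /state
payloads per master address; the addresses and payloads are taken from the
unit tests below, and the dict-based stand-in is illustrative only.

    # Walk every master, but only aggregate slave memory from the elected
    # leader, then stop -- the same shape as the loop added above.
    canned_state = {
        '1.1.1.1': {'leader': 'master@10.0.0.6:5050',
                    'pid': 'master@1.1.1.1:5050',
                    'slaves': []},
        '10.0.0.6': {'leader': 'master@10.0.0.6:5050',
                     'pid': 'master@10.0.0.6:5050',
                     'slaves': [{'resources': {'mem': 100},
                                 'used_resources': {'mem': 50}}]},
    }

    data = {'mem_total': 0, 'mem_used': 0}
    for master_addr in ['1.1.1.1', '10.0.0.6']:
        master = canned_state[master_addr]     # stands in for urlfetch.get()
        if master['leader'] == master['pid']:  # the _is_leader() check
            for slave in master['slaves']:
                data['mem_total'] += slave['resources']['mem']
                data['mem_used'] += slave['used_resources']['mem']
            break

    print(data)  # {'mem_total': 100, 'mem_used': 50}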


@@ -43,7 +43,8 @@ class MonitorsTestCase(base.TestCase):
         super(MonitorsTestCase, self).setUp()
         bay = utils.get_test_bay(node_addresses=['1.2.3.4'],
-                                 api_address='https://5.6.7.8:2376')
+                                 api_address='https://5.6.7.8:2376',
+                                 master_addresses=['10.0.0.6'])
         self.bay = objects.Bay(self.context, **bay)
         self.monitor = swarm_monitor.SwarmMonitor(self.context, self.bay)
         self.k8s_monitor = k8s_monitor.K8sMonitor(self.context, self.bay)
@@ -230,9 +231,22 @@ class MonitorsTestCase(base.TestCase):
         mem_util = self.k8s_monitor.compute_memory_util()
         self.assertEqual(0, mem_util)
 
+    def _test_mesos_monitor_pull_data(
+            self, mock_url_get, state_json, expected_mem_total,
+            expected_mem_used):
+        state_json = jsonutils.dumps(state_json)
+        mock_url_get.return_value = state_json
+        self.mesos_monitor.pull_data()
+        self.assertEqual(self.mesos_monitor.data['mem_total'],
+                         expected_mem_total)
+        self.assertEqual(self.mesos_monitor.data['mem_used'],
+                         expected_mem_used)
+
     @mock.patch('magnum.common.urlfetch.get')
     def test_mesos_monitor_pull_data_success(self, mock_url_get):
         state_json = {
+            'leader': 'master@10.0.0.6:5050',
+            'pid': 'master@10.0.0.6:5050',
             'slaves': [{
                 'resources': {
                     'mem': 100
@@ -242,13 +256,21 @@ class MonitorsTestCase(base.TestCase):
                 }
             }]
         }
-        state_json = jsonutils.dumps(state_json)
-        mock_url_get.return_value = state_json
-        self.mesos_monitor.pull_data()
-        self.assertEqual(self.mesos_monitor.data['mem_total'],
-                         100)
-        self.assertEqual(self.mesos_monitor.data['mem_used'],
-                         50)
+        self._test_mesos_monitor_pull_data(mock_url_get, state_json, 100, 50)
+
+    @mock.patch('magnum.common.urlfetch.get')
+    def test_mesos_monitor_pull_data_success_not_leader(self, mock_url_get):
+        state_json = {
+            'leader': 'master@10.0.0.6:5050',
+            'pid': 'master@1.1.1.1:5050',
+            'slaves': []
+        }
+        self._test_mesos_monitor_pull_data(mock_url_get, state_json, 0, 0)
+
+    @mock.patch('magnum.common.urlfetch.get')
+    def test_mesos_monitor_pull_data_success_no_master(self, mock_url_get):
+        self.bay.master_addresses = []
+        self._test_mesos_monitor_pull_data(mock_url_get, {}, 0, 0)
 
     def test_mesos_monitor_get_metric_names(self):
         mesos_metric_spec = 'magnum.conductor.mesos_monitor.MesosMonitor.'\