PR Plugin now uses service_name to identify hosts
The Physical Resource plugin now uses the service name instead of the hypervisor hostname to identify hosts. In Havana (and possibly in Icehouse as well), the host name of a compute node is not guaranteed to match its service name.

Change-Id: Id958f57539d2e3bcbc93698cb6151dcedfcb549f
Closes-Bug: #1301294
parent 376e30641c
commit 7f9b1516dc
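For context on the change itself: the hypervisor records returned by python-novaclient carry both a hypervisor_hostname and a service dict whose 'host' entry is the nova-compute service name that this patch now stores and uses. A minimal sketch of the distinction, assuming an already-authenticated novaclient instance named `nova` (the variable name is illustrative only):

# Sketch only: `nova` is assumed to be an authenticated novaclient instance.
for hypervisor in nova.hypervisors.list():
    # Since Havana these two values are not guaranteed to match, which is
    # why the plugin now keys hosts on the service name.
    print('%s -> %s' % (hypervisor.hypervisor_hostname,
                        hypervisor.service['host']))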
@@ -0,0 +1,50 @@
+# Copyright 2014 OpenStack Foundation.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Add service name to ComputeHost
+
+Revision ID: 10e34bba18e8
+Revises: 0_1
+Create Date: 2014-04-04 11:00:57.542857
+
+"""
+
+# revision identifiers, used by Alembic.
+revision = '10e34bba18e8'
+down_revision = '0_1'
+
+from alembic import op
+import sqlalchemy as sa
+
+
+def upgrade():
+    op.add_column('computehosts', sa.Column(
+        'service_name', sa.String(length=255), nullable=True))
+
+
+def downgrade():
+    engine = op.get_bind().engine
+    if engine.name == 'sqlite':
+        # Only for testing purposes with sqlite
+        op.execute('CREATE TABLE tmp_computehosts as SELECT id, '
+                   'vcpus, cpu_info, hypervisor_type, '
+                   'hypervisor_version, hypervisor_hostname, memory_mb, '
+                   'local_gb, status '
+                   'FROM computehosts')
+        op.execute('DROP TABLE computehosts')
+        op.execute('ALTER TABLE tmp_computehosts RENAME TO computehosts')
+        return
+
+    op.drop_column('computehosts', 'service_name')
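The _check_10e34bba18e8 test added further down verifies this migration during the walk-through tests; outside that harness, the same check can be done with SQLAlchemy's inspector. A small sketch, with an illustrative engine URL:

# Sketch only: confirm the new column after running the migration.
import sqlalchemy as sa

engine = sa.create_engine('sqlite:///climate.sqlite')  # illustrative URL
columns = [col['name'] for col in sa.inspect(engine).get_columns('computehosts')]
assert 'service_name' in columns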
@@ -178,6 +178,7 @@ class ComputeHost(mb.ClimateBase):
     hypervisor_type = sa.Column(MediumText(), nullable=False)
     hypervisor_version = sa.Column(sa.Integer, nullable=False)
     hypervisor_hostname = sa.Column(sa.String(255), nullable=True)
+    service_name = sa.Column(sa.String(255), nullable=True)
     memory_mb = sa.Column(sa.Integer, nullable=False)
     local_gb = sa.Column(sa.Integer, nullable=False)
     status = sa.Column(sa.String(13))
@@ -160,9 +160,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
                                            'reservation_id': reservation_id})
         if hosts_in_pool:
             host = db_api.host_get(host_id)
-            host_name = host['hypervisor_hostname']
             self.pool.add_computehost(reservation['resource_id'],
-                                      host_name)
+                                      host['service_name'])

     def on_start(self, resource_id):
         """Add the hosts in the pool."""
@@ -173,8 +172,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
         for allocation in db_api.host_allocation_get_all_by_values(
                 reservation_id=reservation['id']):
             host = db_api.host_get(allocation['compute_host_id'])
-            host_name = host['hypervisor_hostname']
-            pool.add_computehost(reservation['resource_id'], host_name)
+            pool.add_computehost(reservation['resource_id'],
+                                 host['service_name'])

     def on_end(self, resource_id):
         """Remove the hosts from the pool."""
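These hunks (and the similar ones below) pass the service name rather than the hypervisor hostname into the ReservationPool helpers. The reason is that Nova's aggregate API identifies hosts by the nova-compute service host, so a differing hypervisor hostname would not be accepted. Roughly, the underlying call looks like the sketch below (`aggregate_id` and `service_name` are illustrative names, and `nova` is again assumed to be an authenticated novaclient instance):

# Sketch only: aggregates are populated with the nova-compute service host,
# not the hypervisor hostname.
nova.aggregates.add_host(aggregate_id, service_name)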
@@ -255,7 +254,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
         extra_capabilities = dict(
             (key, host_values[key]) for key in extra_capabilities_keys
         )
-        self.pool.add_computehost(self.freepool_name, host_ref)
+        self.pool.add_computehost(self.freepool_name,
+                                  host_details['service_name'])

         host = None
         cantaddextracapability = []
@@ -264,7 +264,8 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
         except db_ex.ClimateDBException:
             #We need to rollback
             # TODO(sbauza): Investigate use of Taskflow for atomic transactions
-            self.pool.remove_computehost(self.freepool_name, host_ref)
+            self.pool.remove_computehost(self.freepool_name,
+                                         host_details['service_name'])
         if host:
             for key in extra_capabilities:
                 values = {'computehost_id': host['id'],
@@ -325,7 +326,7 @@ class PhysicalHostPlugin(base.BasePlugin, nova.NovaClientWrapper):
                 host=host['hypervisor_hostname'], servers=servers)
         try:
             self.pool.remove_computehost(self.freepool_name,
-                                         host['hypervisor_hostname'])
+                                         host['service_name'])
             # NOTE(sbauza): Extracapabilities will be destroyed thanks to
             # the DB FK.
             db_api.host_destroy(host_id)
@@ -57,6 +57,7 @@ class NovaInventory(nova.NovaClientWrapper):
         try:
             return {'id': hypervisor.id,
                     'hypervisor_hostname': hypervisor.hypervisor_hostname,
+                    'service_name': hypervisor.service['host'],
                     'vcpus': hypervisor.vcpus,
                     'cpu_info': hypervisor.cpu_info,
                     'hypervisor_type': hypervisor.hypervisor_type,
@@ -112,3 +112,6 @@ class TestMigrations(migration.BaseWalkMigrationTestCase,
         self.assertTableExists(engine, 'events')
         self.assertTableExists(engine, 'computehost_allocations')
         self.assertTableExists(engine, 'computehost_reservations')
+
+    def _check_10e34bba18e8(self, engine, data):
+        self.assertColumnExists(engine, 'computehosts', 'service_name')
@@ -36,6 +36,7 @@ class FakeNovaHypervisors(object):
         local_gb = 10

         servers = ['server1', 'server2']
+        service = {'host': 'fake_name'}

     @classmethod
     def get(cls, host):
@@ -52,7 +53,7 @@
     def search(cls, host, servers=False):
         if host == 'multiple':
             return [cls.FakeHost, cls.FakeHost]
-        if host == cls.FakeHost.hypervisor_hostname:
+        if host == cls.FakeHost.service['host']:
             return [cls.FakeHost]
         else:
             raise nova_exceptions.NotFound(404)
@@ -61,6 +62,7 @@
     def expected(cls):
         return {'id': cls.FakeHost.id,
                 'hypervisor_hostname': cls.FakeHost.hypervisor_hostname,
+                'service_name': cls.FakeHost.service['host'],
                 'vcpus': cls.FakeHost.vcpus,
                 'cpu_info': cls.FakeHost.cpu_info,
                 'hypervisor_type': cls.FakeHost.hypervisor_type,
@@ -107,6 +107,7 @@ class PhysicalHostPluginTestCase(tests.TestCase):
         self.fake_host = {
             'id': self.fake_host_id,
             'hypervisor_hostname': 'foo',
+            'service_name': 'foo',
             'vcpus': 4,
             'cpu_info': 'foo',
             'hypervisor_type': 'xen',
@@ -556,7 +557,7 @@
             'end_date': datetime.datetime(2013, 12, 19, 21, 00)
         }
         host_get = self.patch(self.db_api, 'host_get')
-        host_get.return_value = {'hypervisor_hostname': 'host2'}
+        host_get.return_value = {'service_name': 'host2'}
         host_reservation_get_by_reservation_id = self.patch(
             self.db_api,
             'host_reservation_get_by_reservation_id')
@@ -627,7 +628,7 @@
             {'compute_host_id': 'host1'},
         ]
         host_get = self.patch(self.db_api, 'host_get')
-        host_get.return_value = {'hypervisor_hostname': 'host1_hostname'}
+        host_get.return_value = {'service_name': 'host1_hostname'}
         add_computehost = self.patch(
             self.rp.ReservationPool, 'add_computehost')
