Remove support for nodepool_id

This was a temporary measure to keep the production nodepool from
deleting nodes created by the v3 nodepool. We don't need to carry
it over.

This is an alternative to: https://review.openstack.org/449375

Change-Id: Ib24395e30a118c0ea57f8958a8dca4407fe1b55b
David Shrewsbury 2017-03-30 12:08:04 -04:00
parent 73f3b56376
commit 92f375c70b
4 changed files with 1 addition and 88 deletions
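For context on what is being removed: the provider manager stamped every instance it launched with the configured nodepool-id, and the leaked-instance cleanup refused to delete servers whose stamp did not match its own. The sketch below is only an illustration of that flow, not nodepool code; the helper names are invented here, while the 'nodepool_nodepool_id' metadata key and the provider.nodepool_id attribute come from the diffs that follow.

# Illustrative sketch only -- helper names are hypothetical; the metadata key
# and provider.nodepool_id are the pieces this commit deletes.

def tag_instance_metadata(provider, meta):
    # Launch side: stamp the instance with this nodepool's configured id.
    if provider.nodepool_id:
        meta['nodepool_nodepool_id'] = provider.nodepool_id
    return meta


def launched_by_us(provider, server_meta):
    # Cleanup side: skip instances stamped by a different nodepool
    # (e.g. the production nodepool vs. the v3 deployment).
    nodepool_id = server_meta.get('nodepool_nodepool_id', None)
    if provider.nodepool_id is not None and nodepool_id != provider.nodepool_id:
        return False
    return True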


@@ -40,7 +40,6 @@ class Config(ConfigValue):
 class Provider(ConfigValue):
     def __eq__(self, other):
         if (other.cloud_config != self.cloud_config or
-            other.nodepool_id != self.nodepool_id or
             other.pools != self.pools or
             other.image_type != self.image_type or
             other.rate != self.rate or

@@ -194,7 +193,6 @@ def loadConfig(config_path):
         cloud_kwargs = _cloudKwargsFromProvider(provider)
         p.cloud_config = _get_one_cloud(cloud_config, cloud_kwargs)
-        p.nodepool_id = provider.get('nodepool-id', None)
         p.region_name = provider.get('region-name')
         p.max_concurrency = provider.get('max-concurrency', -1)
         p.rate = provider.get('rate', 1.0)


@@ -1175,15 +1175,6 @@ class CleanupWorker(BaseCleanupWorker):
                 if 'nodepool_provider_name' not in meta:
                     continue
 
-                nodepool_id = meta.get('nodepool_nodepool_id', None)
-                if provider.nodepool_id is not None and \
-                   nodepool_id != provider.nodepool_id:
-                    self.log.debug("Instance %s (%s) in %s "
-                                   "was not launched by us" % (
-                                       server['name'], server['id'],
-                                       provider.name))
-                    continue
-
                 if meta['nodepool_provider_name'] != provider.name:
                     # Another launcher, sharing this provider but configured
                     # with a different name, owns this.


@@ -205,8 +205,6 @@ class ProviderManager(object):
             groups=",".join(groups_list),
             nodepool_provider_name=self.provider.name,
         )
-        if self.provider.nodepool_id:
-            meta['nodepool_nodepool_id'] = self.provider.nodepool_id
         if nodepool_node_id:
             meta['nodepool_node_id'] = nodepool_node_id
         if nodepool_image_name:


@@ -16,7 +16,6 @@
 import logging
 import time
 
 import fixtures
-from unittest import skip
 from nodepool import tests
 from nodepool import zk

@@ -404,15 +403,9 @@ class TestNodepool(tests.DBTestCase):
         self.assertEqual(len(nodes), 1)
         self.assertEqual(nodes[0].provider, 'fake-provider')
 
-    def test_leaked_node_with_nodepool_id(self):
-        self._test_leaked_node('leaked_node_nodepool_id.yaml')
-
     def test_leaked_node(self):
-        self._test_leaked_node('leaked_node.yaml')
-
-    def _test_leaked_node(self, cfgfile):
         """Test that a leaked node is deleted"""
-        configfile = self.setup_config(cfgfile)
+        configfile = self.setup_config('leaked_node.yaml')
         pool = self.useNodepool(configfile, watermark_sleep=1)
         self._useBuilder(configfile)
         pool.start()
@@ -446,73 +439,6 @@ class TestNodepool(tests.DBTestCase):
         servers = manager.listServers()
         self.assertEqual(len(servers), 1)
 
-    @skip("Disabled while merging master into feature/zuulv3. Needs rework.")
-    def test_leaked_node_not_deleted(self):
-        """Test that a leaked node is not deleted"""
-        # TODOv3(jhesketh): Fix this up
-        nodedb = object()
-        configfile = self.setup_config('leaked_node_nodepool_id.yaml')
-        pool = self.useNodepool(configfile, watermark_sleep=1)
-        self._useBuilder(configfile)
-        pool.start()
-        self.waitForImage('fake-provider', 'fake-image')
-        self.log.debug("Waiting for initial pool...")
-        self.waitForNodes(pool)
-        self.log.debug("...done waiting for initial pool.")
-        pool.stop()
-
-        # Make sure we have a node built and ready
-        provider = pool.config.providers['fake-provider']
-        manager = pool.getProviderManager(provider)
-        servers = manager.listServers()
-        self.assertEqual(len(servers), 1)
-
-        with pool.getDB().getSession() as session:
-            nodes = session.getNodes(provider_name='fake-provider',
-                                     label_name='fake-label',
-                                     target_name='fake-target',
-                                     state=nodedb.READY)
-            self.assertEqual(len(nodes), 1)
-
-            # Delete the node from the db, but leave the instance
-            # so it is leaked.
-            self.log.debug("Delete node db record so instance is leaked...")
-            for node in nodes:
-                node.delete()
-            self.log.debug("...deleted node db so instance is leaked.")
-            nodes = session.getNodes(provider_name='fake-provider',
-                                     label_name='fake-label',
-                                     target_name='fake-target',
-                                     state=nodedb.READY)
-            self.assertEqual(len(nodes), 0)
-
-        # Wait for nodepool to replace it, which should be enough
-        # time for it to also delete the leaked node
-        configfile = self.setup_config('leaked_node.yaml')
-        pool = self.useNodepool(configfile, watermark_sleep=1)
-        pool.start()
-        self.log.debug("Waiting for replacement pool...")
-        self.waitForNodes(pool)
-        self.log.debug("...done waiting for replacement pool.")
-
-        # Make sure we end up with only one server (the replacement)
-        provider = pool.config.providers['fake-provider']
-        manager = pool.getProviderManager(provider)
-        foobar_servers = manager.listServers()
-        self.assertEqual(len(servers), 1)
-        self.assertEqual(len(foobar_servers), 1)
-
-        with pool.getDB().getSession() as session:
-            nodes = session.getNodes(provider_name='fake-provider',
-                                     label_name='fake-label',
-                                     target_name='fake-target',
-                                     state=nodedb.READY)
-            self.assertEqual(len(nodes), 1)
-
-        # Just to be safe, ensure we have 2 nodes again.
-        self.assertEqual(len(servers), 1)
-        self.assertEqual(len(foobar_servers), 1)
-
     def test_label_provider(self):
         """Test that only providers listed in the label satisfy the request"""
         configfile = self.setup_config('node_label_provider.yaml')