 7533f8c80a
			
		
	
	7533f8c80a
	
	
	
		
			
			In MOS 9 memcache is not enabled for consoleauth by default, so in a multi-controller environment authentication fails because the token cannot be retrieved from memcache. This fix also moves the deprecated settings from the DEFAULT section to the vnc or cache sections. The nova-compute.conf on the controller was never actually used, so it is removed. Change-Id: Ic9438172b6c1ed5074aaa0b6d0e036287efe1982 Related-Bug: #1576218
		
			
				
	
	
		
			124 lines
		
	
	
		
			4.1 KiB
		
	
	
	
		
			Python
		
	
	
		
			Executable File
		
	
	
	
	
			
		
		
	
	
			124 lines
		
	
	
		
			4.1 KiB
		
	
	
	
		
			Python
		
	
	
		
			Executable File
		
	
	
	
	
| #!/usr/bin/env python
 | |
| 
 | |
| from glanceclient import Client
 | |
| from keystoneauth1 import loading
 | |
| from keystoneauth1 import session
 | |
| import os
 | |
| from time import sleep
 | |
| import utils
 | |
| import yaml
 | |
| 
 | |
| 
 | |
# Module-level initialization: route all log output from this script
# (via the project-local ``utils`` helper) to a dedicated log file.
utils.setup_logging('primary_controller_post_deployment.log')
LOG = utils.LOG
 | |
| 
 | |
| 
 | |
def get_keystone_creds():
    """Collect Keystone password-auth credentials from the environment.

    Returns a dict with the keys ``username``, ``password``, ``auth_url``
    and ``tenant_name``, each read from the corresponding ``OS_*``
    environment variable (value is None when the variable is unset).
    """
    option_names = ('username', 'password', 'auth_url', 'tenant_name')
    # Every option maps to an OS_-prefixed, upper-cased variable name.
    return {opt: os.environ.get('OS_' + opt.upper()) for opt in option_names}
 | |
| 
 | |
| 
 | |
def get_keystone_session():
    """Build an authenticated keystoneauth1 session from OS_* env vars."""
    password_loader = loading.get_plugin_loader('password')
    auth_plugin = password_loader.load_from_options(**get_keystone_creds())
    return session.Session(auth=auth_plugin)
 | |
| 
 | |
| 
 | |
def list_images(sess):
    """Log one summary line for every image registered in Glance.

    :param sess: authenticated keystoneauth1 session.
    """
    LOG.info('Listing images:')
    glance = Client('2', session=sess)
    # Glance v2 image objects are dict-like, so they can feed format().
    line_template = ('+ {name} container_format:{container_format} '
                     'disk_format:{disk_format} visibility:{visibility} '
                     'file:{file}')
    for image in glance.images.list():
        LOG.info(line_template.format(**image))
 | |
| 
 | |
| 
 | |
def del_images(sess, image_name):
    """Delete every Glance image whose name equals *image_name*.

    Glance allows duplicate image names, so all matches are removed.

    :param sess: authenticated keystoneauth1 session.
    :param image_name: exact image name to delete.
    """
    glance = Client('2', session=sess)
    for image in glance.images.list():
        if image.name == image_name:
            glance.images.delete(image.id)
            # Lazy %-style logging args instead of eager string
            # interpolation (the message is rendered only if emitted).
            LOG.info('Image %s has been deleted', image_name)
 | |
| 
 | |
| 
 | |
def add_image(sess, image_name, vm_mode, image_file):
    """Create a public Glance image and upload its payload from disk.

    :param sess: authenticated keystoneauth1 session.
    :param image_name: name for the new image.
    :param vm_mode: Xen vm_mode image property (e.g. 'xen').
    :param image_file: path of the local file to upload as the image data.
    """
    glance = Client('2', session=sess)
    image = glance.images.create(name=image_name, container_format="ovf",
                                 disk_format="vhd", visibility="public",
                                 vm_mode=vm_mode)
    with open(image_file, 'rb') as f:
        glance.images.upload(image.id, f)
    # Lazy %-style logging args instead of eager string interpolation.
    LOG.info('Image %s (mode: %s, file: %s) has been added',
             image_name, vm_mode, image_file)
 | |
| 
 | |
| 
 | |
def wait_ocf_resource_started(timeout, interval):
    """Block until every ocf::fuel pacemaker resource reports Started.

    Polls ``pcs resource show`` every *interval* seconds; if *timeout*
    seconds elapse without all resources starting, the failure is
    reported via ``utils.reportError``.

    :param timeout: overall time budget in seconds.
    :param interval: sleep between polls, in seconds.
    """
    LOG.info("Waiting for all ocf resources to start")
    remain_time = timeout
    while remain_time > 0:
        resources = utils.execute('pcs', 'resource', 'show')
        if resources:
            exists_not_started = any("Started" not in line
                                     for line in resources.split('\n')
                                     if "ocf::fuel" in line)
            # All started
            if not exists_not_started:
                return
        sleep(interval)
        # BUG FIX: the original executed ``remain_time = timeout - interval``
        # each pass, which assigns the same constant forever and never
        # counts down, so the loop could spin indefinitely. Decrement
        # cumulatively so the timeout actually expires.
        remain_time -= interval

    utils.reportError("Timeout for waiting resources to start")
 | |
| 
 | |
| 
 | |
def mod_ceilometer():
    """Exclude the network.services.* meters from the ceilometer pipeline.

    Only acts when the ``p_ceilometer-agent-central`` pacemaker resource
    exists (``pcs`` returns 0); after patching pipeline.yaml the agent is
    restarted so the new meter list takes effect.
    """
    rc, out, err = utils.detailed_execute(
        'pcs', 'resource', 'show', 'p_ceilometer-agent-central',
        allowed_return_codes=[0, 1])

    # Wait until all ocf resources are started, otherwise there is a risk
    # of a race condition: running "pcs resource restart" while some
    # resources are still restarting or initiating may fail both.
    if rc == 0:
        wait_ocf_resource_started(300, 10)
        # FIX: the original message said "network.servers.*" (and, via a
        # backslash-continued string literal, embedded a run of indent
        # spaces); the meter actually excluded is network.services.*.
        LOG.info("Patching ceilometer pipeline.yaml to exclude "
                 "network.services.*")
        # Exclude network.services.* to avoid error 404
        pipeline = '/etc/ceilometer/pipeline.yaml'
        if not os.path.exists(pipeline):
            utils.reportError('%s not found' % pipeline)
        with open(pipeline) as f:
            ceilometer = yaml.safe_load(f)
        sources = utils.astute_get(ceilometer, ('sources',))
        if len(sources) != 1:
            utils.reportError('ceilometer has none or more than one sources')
        source = sources[0]
        meters = utils.astute_get(source, ('meters',))
        new_meter = '!network.services.*'
        if new_meter not in meters:
            meters.append(new_meter)
        with open(pipeline, "w") as f:
            # FIX: safe_dump writes to the stream and returns None; the
            # original rebound the parsed document to that None for no
            # reason — just dump, keep the variable intact.
            yaml.safe_dump(ceilometer, f)

        restart_info = utils.execute(
            'pcs', 'resource', 'restart', 'p_ceilometer-agent-central')

        LOG.info(restart_info)
 | |
| 
 | |
| 
 | |
if __name__ == '__main__':
    # Replace the stock TestVM image with a Xen PV cirros image, then
    # patch the ceilometer pipeline on the primary controller.
    keystone_session = get_keystone_session()
    list_images(keystone_session)
    del_images(keystone_session, "TestVM")
    add_image(keystone_session, "TestVM", "xen",
              "cirros-0.3.4-x86_64-disk.vhd.tgz")
    list_images(keystone_session)
    mod_ceilometer()
 |