Currently we pass a reference to a global "rollback" list into create() to collect rollback functions. Other nodes don't need to know about global rollback state, and by passing it by reference we give each node the chance to corrupt it for everyone else.

Add an add_rollback() function to NodeBase so that create() calls can register rollback functions within their own node. As they hit rollback points, they add a new entry. Registering a lambda versus a function with arguments is much of a muchness, but the latter is similar to the standard atexit() call, so we go with that pattern.

A new rollback() call is added that the driver invokes on each node as it works its way backwards in case of failure. On error, nodes have rollback() called in reverse order, and each node then calls its registered rollbacks in reverse order.

A unit test is added to test the rollback behaviour.

Change-Id: I65214e72c7ef607dd08f750a6d32a0b10fe97ac3
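For illustration, here is a minimal sketch of the registration pattern the change describes, modelled on atexit.register(): add_rollback() stores (func, args, kwargs) tuples and rollback() replays them last-in, first-out. The constructor signature and the driver loop below are assumptions for illustration, not the exact diskimage-builder code.

class NodeBase(object):
    def __init__(self, name):
        self.name = name
        # rollback points registered by create(), in the order they were hit
        self.rollbacks = []

    def add_rollback(self, func, *args, **kwargs):
        # atexit-style registration: func(*args, **kwargs) runs on rollback
        self.rollbacks.append((func, args, kwargs))

    def rollback(self):
        # unwind this node's rollback points in reverse order
        for func, args, kwargs in reversed(self.rollbacks):
            func(*args, **kwargs)

The driver's error path is then a reverse walk over the nodes it has already created (completed_nodes is a hypothetical name):

for node in reversed(completed_nodes):
    node.rollback()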
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import logging
import mock

import diskimage_builder.block_device.tests.test_config as tc

from diskimage_builder.block_device.config import create_graph
from diskimage_builder.block_device.level3.mount import MountPointNode

logger = logging.getLogger(__name__)


class TestMountOrder(tc.TestGraphGeneration):

    @mock.patch('diskimage_builder.block_device.level3.mount.exec_sudo')
    def test_mount_order(self, mock_exec_sudo):

        config = self.load_config_file('multiple_partitions_graph.yaml')

        state = {}

        graph, call_order = create_graph(config, self.fake_default_config,
                                         state)

        # build up some fake state so that we don't have to mock out
        # all the parent calls that would really make these values, as
        # we just want to test MountPointNode
        state['filesys'] = {}
        state['filesys']['mkfs_root'] = {}
        state['filesys']['mkfs_root']['device'] = 'fake'
        state['filesys']['mkfs_var'] = {}
        state['filesys']['mkfs_var']['device'] = 'fake'
        state['filesys']['mkfs_var_log'] = {}
        state['filesys']['mkfs_var_log']['device'] = 'fake'

        for node in call_order:
            if isinstance(node, MountPointNode):
                # XXX: do we even need to create?  We could test the
                # sudo arguments from the mock in the below asserts
                # too
                node.create()

        # ensure that partitions are mounted in order root->var->var/log
        self.assertListEqual(state['mount_order'], ['/', '/var', '/var/log'])
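The rollback unit test itself is not shown above; a self-contained sketch of what it might check, using the NodeBase sketch from the commit message (the test name and setup are assumptions), is:

import unittest


class TestRollback(unittest.TestCase):

    def test_rollback_runs_in_reverse(self):
        calls = []
        node = NodeBase('test')
        # register two rollback points, as create() would on the way up
        node.add_rollback(calls.append, 'first')
        node.add_rollback(calls.append, 'second')
        node.rollback()
        # later rollback points must unwind first
        self.assertListEqual(calls, ['second', 'first'])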