Revised policy comments and some details

The base policy class provides 'attach()', 'detach()', 'pre_op()' and
'post_op()' operations, each with a default (empty) implementation.
Subclasses may override any of these methods to affect action execution
in specific ways.

The 'enforce()' method is not needed for the time being, so we remove it.
tengqm 2015-02-11 14:17:58 +08:00
parent ef5d91dbbe
commit 3f61639a44
7 changed files with 106 additions and 42 deletions
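For orientation, here is a minimal sketch of the hook interface the commit message describes. The four default hooks mirror the patch below; the subclass name and its behaviour are invented purely for illustration.

class Policy(object):
    '''Illustrative sketch of the hook interface described above.'''

    def attach(self, cluster_id, **kwargs):
        # Invoked before the policy is attached to a cluster; no-op by default.
        return True

    def detach(self, cluster_id, **kwargs):
        # Invoked before the policy is detached from a cluster; no-op by default.
        return True

    def pre_op(self, cluster_id, action, policy_data):
        # Invoked before an action executes; default passes policy_data through.
        return policy_data

    def post_op(self, cluster_id, action, policy_data):
        # Invoked after an action executes; default passes policy_data through.
        return policy_data


class AuditPolicy(Policy):
    '''Hypothetical subclass overriding only the hooks it needs.'''

    def pre_op(self, cluster_id, action, policy_data):
        policy_data.setdefault('audit', []).append((cluster_id, action))
        return policy_data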

View File

@@ -170,17 +170,25 @@ class Policy(object):
         return self.id
-    def pre_op(self, cluster_id, action, **kwargs):
-        '''Force all subclasses to implement an operation that will be invoked
-        before an action.
+    def attach(self, cluster_id, **kwargs):
+        '''Method to be invoked before the policy is attached to a cluster.
         '''
-        return NotImplemented
+        return True
-    def post_op(self, cluster_id, action, **kwargs):
-        '''Force all subclasses to implement an operation that will be
-        performed after an action.
+    def detach(self, cluster_id, **kwargs):
+        '''Method to be invoked before the policy is detached from a cluster.
         '''
-        return NotImplemented
+        return True
+    def pre_op(self, cluster_id, action, policy_data):
+        '''A method that will be invoked before an action execution.
+        '''
+        return policy_data
+    def post_op(self, cluster_id, action, policy_data):
+        '''A method that will be invoked after an action execution.
+        '''
+        return policy_data
     def to_dict(self):
         pb_dict = {

View File

@@ -20,8 +20,8 @@ Input:
   cluster: cluster whose nodes can be deleted
   policy_data.deletion:
     - count: number of nodes to delete; it can be customized by a
-      scaling policy for example. If no scaling policy is
-      effective, deletion count is assumed to be 1
+      scaling policy for example. If no scaling policy is in
+      effect, deletion count is assumed to be 1
   self.criteria: list of criteria for sorting nodes
 Output: policy_data
 {
@@ -142,7 +142,3 @@ class DeletionPolicy(base.Policy):
         pd['grace_period'] = self.grace_period
         policy_data.update({'deletion': pd})
         return policy_data
-    def post_op(self, cluster_id, action, policy_data):
-        # TODO(anyone): reduce desired_capacity if needed
-        return policy_data
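Putting the two hunks above together, a deletion policy's pre_op would leave something like the following in policy_data. The shape follows the docstring and the 'deletion' update shown above; the concrete values and the 'status' key are illustrative assumptions.

# Hypothetical result of DeletionPolicy.pre_op (values are illustrative only):
policy_data = {
    'status': 'OK',
    'deletion': {
        'count': 1,          # assumed to be 1 when no scaling policy is in effect
        'grace_period': 60,  # seconds, taken from the policy spec
    },
}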

View File

@@ -50,6 +50,21 @@ class HealthPolicy(base.Policy):
         self.grace_period = self.spec.get('grace_period')
         self.check_type = self.spec.get('check_type')
+    def attach(self, cluster_id):
+        '''Hook for policy attach.
+        Initialize the health check mechanism for existing nodes in cluster.
+        '''
+        # TODO(anyone): implement this
+        return True
+    def detach(self, cluster_id):
+        '''Hook for policy detach.
+        Deinitialize the health check mechanism (for the cluster).
+        '''
+        return True
     def pre_op(self, cluster_id, action, **args):
         # Ignore actions that are not required to be processed at this stage
         if action not in (consts.CLUSTER_SCALE_IN,
@@ -60,9 +75,6 @@ class HealthPolicy(base.Policy):
         # infrastructure
         return True
-    def enforce(self, cluster_id, action, **args):
-        pass
     def post_op(self, cluster_id, action, **args):
         # Ignore irrelevant action here
         if action not in (consts.CLUSTER_SCALE_OUT,

View File

@@ -75,17 +75,17 @@ class LoadBalancingPolicy(base.Policy):
     def pre_op(self, cluster_id, action, policy_data):
         if action not in (consts.CLUSTER_DEL_NODES, consts.CLUSTER_SCALE_IN):
-            return True
+            return policy_data
         nodes = policy_data.get('nodes', [])
         for node in nodes:
             member_id = node.data.get('lb_member')
             neutron.delete_member(member_id)
-        return True
+        return policy_data
     def post_op(self, cluster_id, action, policy_data):
         if action not in (consts.CLUSTER_ADD_NODES, consts.CLUSTER_SCALE_OUT):
-            return True
+            return policy_data
         nodes = policy_data.get('nodes', [])
         for node in nodes:
@@ -98,4 +98,4 @@ class LoadBalancingPolicy(base.Policy):
             member = neutron.create_member({'member': params})['member']
             node.data.update('lb_member', member['id'])
-        return True
+        return policy_data
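The change from 'return True' to 'return policy_data' suggests the same dict is meant to be threaded through every policy attached to a cluster. A hedged sketch of such a driver loop (not part of this patch, and the function name is invented) follows.

def run_pre_ops(policies, cluster_id, action, policy_data):
    # Hypothetical driver loop: each policy receives the dict returned by the
    # previous one, so decisions such as 'deletion' or 'placement' accumulate.
    for policy in policies:
        policy_data = policy.pre_op(cluster_id, action, policy_data)
    return policy_data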

View File

@@ -10,6 +10,38 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+'''
+Policy for placing nodes across AZs and/or regions.
+'''
+'''
+NOTE: How placement policy works
+Input:
+  cluster: cluster whose nodes are to be manipulated.
+  policy_data.placement:
+    - count: number of nodes to create; it can be a decision from a scaling
+      policy. If no scaling policy is in effect, the count will be
+      assumed to be 1.
+Output:
+  policy_data: A dictionary containing scheduling decisions made.
+  {
+    'status': 'OK',
+    'placement': {
+      'count': 2,
+      'placements': [
+        {
+          'AZ': 'nova-1',
+          'region': 'RegionOne',
+        },
+        {
+          'AZ': 'nova-2',
+          'region': 'RegionTwo',
+        }
+      ]
+    }
+  }
+'''
 from senlin.common import consts
 from senlin.policies import base
@@ -38,15 +70,8 @@ class PlacementPolicy(base.Policy):
         self.regions = self.spec.get('regions')
         self.AZs = self.spec.get('AZs')
-    def pre_op(self, cluster_id, action, **args):
+    def pre_op(self, cluster_id, action, policy_data):
         '''Call back when new nodes are created for a cluster.
         '''
         # TODO(anyone): calculate available AZs and or regions
-        return True
-    def enforce(self, cluster_id, action, **kwargs):
-        # we expect kwargs to carry node profile information before the node
-        # is created.
-        # TODO(anyone): modify node's scheduler hints and return them
-        return True
-    def post_op(self, cluster_id, action, **kwargs):
-        pass
+        return policy_data
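The pre_op above is still a TODO. The sketch below only illustrates the output shape documented in the NOTE docstring; the helper name, the selection strategy and the AZ/region values are invented.

def build_placement(policy_data, count):
    # Illustration of the 'placement' structure from the NOTE docstring above.
    candidates = [{'AZ': 'nova-1', 'region': 'RegionOne'},
                  {'AZ': 'nova-2', 'region': 'RegionTwo'}]
    policy_data['placement'] = {
        'count': count,
        'placements': [candidates[i % len(candidates)] for i in range(count)],
    }
    return policy_data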

View File

@@ -57,7 +57,6 @@ class ScalingPolicy(base.Policy):
         # TODO(anyone): check if new size will break min_size or max_size
         # constraints
-        policy_data['status'] = self.CHECK_OK
         adjustment = self.adjustment_number
         nodes = db_api.node_get_all_by_cluster(cluster_id)
         current_size = len(nodes)
@@ -66,14 +65,10 @@ class ScalingPolicy(base.Policy):
         elif current_size + adjustment < self.min_size:
             adjustment = current_size - self.min_size
-        policy_data['count'] = adjustment
+        pd = {'count': adjustment}
+        if action == consts.CLUSTER_SCALE_OUT:
+            policy_data['placement'] = pd
+        elif action == consts.CLUSTER_SCALE_IN:
+            policy_data['deletion'] = pd
         return policy_data
-    def enforce(self, cluster_id, action, policy_data):
-        policy_data['status'] = self.CHECK_OK
-        return policy_data
     def post_op(self, cluster_id, action, policy_data):
         policy_data['status'] = self.CHECK_OK
         return policy_data
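The hunk above routes the computed adjustment into 'placement' for scale-out and 'deletion' for scale-in. A standalone restatement of that routing, with plain strings standing in for the consts module and a hypothetical function name, is shown below.

def route_adjustment(policy_data, action, adjustment):
    # Mirrors the logic in the hunk above; the action names are stand-ins for
    # consts.CLUSTER_SCALE_OUT and consts.CLUSTER_SCALE_IN.
    pd = {'count': adjustment}
    if action == 'CLUSTER_SCALE_OUT':
        policy_data['placement'] = pd   # consumed later by a placement policy
    elif action == 'CLUSTER_SCALE_IN':
        policy_data['deletion'] = pd    # consumed later by a deletion policy
    return policy_data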

View File

@@ -10,6 +10,34 @@
 # License for the specific language governing permissions and limitations
 # under the License.
+'''
+Policy for updating a cluster.
+'''
+'''
+NOTE: How update policy works
+Input:
+  cluster: the cluster whose nodes are to be updated.
+Output:
+  policy_data: A dictionary containing a detailed update schedule.
+  {
+    'status': 'OK',
+    'update': {
+      'pause_time': 2,
+      'plan': [{
+          'node-id-1',
+          'node-id-2',
+        }, {
+          'node-id-3',
+          'node-id-4',
+        }, {
+          'node-id-5',
+        }
+      ]
+    }
+  }
+'''
 from senlin.common import consts
 from senlin.policies import base
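As a rough illustration of how the schedule documented in the NOTE above could be consumed (no such consumer exists in this patch; the function and the update_node callback are invented), nodes would be updated batch by batch with a pause between batches:

import time

def apply_update_plan(policy_data, update_node):
    # Hypothetical consumer of policy_data['update']: 'plan' holds batches of
    # node ids; 'pause_time' is the delay in seconds between batches.
    update = policy_data.get('update', {})
    pause_time = update.get('pause_time', 0)
    for batch in update.get('plan', []):
        for node_id in batch:
            update_node(node_id)
        time.sleep(pause_time)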