Add support for is_public and is_protected fields

Add new fields that allow resources to be shared across tenants (is_public)
and to be protected from unintentional changes (is_protected).

Partially-Implements: blueprint shared-protected-resources

Change-Id: I200944ac876e476ea0659d502a55e5f9b2a158ee
Authored by Andrey Pavlov on 2015-08-20 10:31:35 +03:00
parent 0716c0d990
commit f084139cd7
7 changed files with 153 additions and 3 deletions


@@ -30,6 +30,8 @@ CLUSTER_DEFAULTS = {
"info": {},
"rollback_info": {},
"sahara_info": {},
"is_public": False,
"is_protected": False
}
NODE_GROUP_DEFAULTS = {
@@ -48,12 +50,38 @@ NODE_GROUP_DEFAULTS = {
"volume_local_to_instance": False,
}
NODE_GROUP_TEMPLATE_DEFAULTS = copy.deepcopy(NODE_GROUP_DEFAULTS)
NODE_GROUP_TEMPLATE_DEFAULTS.update({"is_public": False,
"is_protected": False})
INSTANCE_DEFAULTS = {
"volumes": []
}
DATA_SOURCE_DEFAULTS = {
"credentials": {}
"credentials": {},
"is_public": False,
"is_protected": False
}
JOB_DEFAULTS = {
"is_public": False,
"is_protected": False
}
JOB_BINARY_DEFAULTS = {
"is_public": False,
"is_protected": False
}
JOB_BINARY_INTERNAL_DEFAULTS = {
"is_public": False,
"is_protected": False
}
JOB_EXECUTION_DEFAULTS = {
"is_public": False,
"is_protected": False
}
@@ -283,7 +311,7 @@ class ConductorManager(db_base.Base):
def node_group_template_create(self, context, values):
"""Create a Node Group Template from the values dictionary."""
values = copy.deepcopy(values)
values = _apply_defaults(values, NODE_GROUP_DEFAULTS)
values = _apply_defaults(values, NODE_GROUP_TEMPLATE_DEFAULTS)
values['tenant_id'] = context.tenant_id
return self.db.node_group_template_create(context, values)
@@ -375,6 +403,7 @@ class ConductorManager(db_base.Base):
def job_execution_create(self, context, values):
"""Create a JobExecution from the values dictionary."""
values = copy.deepcopy(values)
values = _apply_defaults(values, JOB_EXECUTION_DEFAULTS)
values['tenant_id'] = context.tenant_id
return self.db.job_execution_create(context, values)
@@ -402,6 +431,7 @@ class ConductorManager(db_base.Base):
def job_create(self, context, values):
"""Create a Job from the values dictionary."""
values = copy.deepcopy(values)
values = _apply_defaults(values, JOB_DEFAULTS)
values['tenant_id'] = context.tenant_id
return self.db.job_create(context, values)
@@ -430,6 +460,7 @@ class ConductorManager(db_base.Base):
"""Create a JobBinary from the values dictionary."""
values = copy.deepcopy(values)
values = _apply_defaults(values, JOB_BINARY_DEFAULTS)
values['tenant_id'] = context.tenant_id
return self.db.job_binary_create(context, values)
@@ -469,6 +500,7 @@ class ConductorManager(db_base.Base):
# here the deepcopy of values only incs a reference count on data.
# This is nice, since data could be big...
values = copy.deepcopy(values)
values = _apply_defaults(values, JOB_BINARY_INTERNAL_DEFAULTS)
values['tenant_id'] = context.tenant_id
return self.db.job_binary_internal_create(context, values)
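
For context, every create path above funnels caller-supplied values through the module's pre-existing _apply_defaults helper before the new *_DEFAULTS dictionaries take effect. A minimal sketch of that merge, assuming defaults only fill in keys the caller did not supply (the real helper in the conductor module may differ in detail):

import copy

def _apply_defaults(values, defaults):
    # start from a private copy of the defaults so callers never share state
    new_values = copy.deepcopy(defaults)
    # caller-supplied values always override the defaults
    new_values.update(values)
    return new_values

With this behaviour, a job_create call that omits the flags ends up with is_public=False and is_protected=False, while any value explicitly passed by the caller is preserved.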


@@ -62,6 +62,8 @@ class Cluster(object):
cluster_template_id
cluster_template - ClusterTemplate object
use_autoconfig
is_public
is_protected
"""
def has_proxy_gateway(self):
@@ -182,6 +184,8 @@ class ClusterTemplate(object):
plugin_name
hadoop_version
node_groups - list of NodeGroup objects
is_public
is_protected
"""
@@ -210,6 +214,8 @@ class NodeGroupTemplate(object):
availability_zone
is_proxy_gateway
volume_local_to_instance
is_public
is_protected
"""
@@ -225,6 +231,8 @@ class DataSource(object):
type
url
credentials
is_public
is_protected
"""
@@ -246,6 +254,8 @@ class JobExecution(object):
interface
extra
data_source_urls
is_public
is_protected
"""
@@ -260,6 +270,8 @@ class Job(object):
mains
libs
interface
is_public
is_protected
"""
@@ -272,6 +284,8 @@ class JobBinary(object):
description
url - URLs may be the following: internal-db://URL, swift://
extra - extra may contain not only user-password but e.g. auth-token
is_public
is_protected
"""
@@ -286,6 +300,8 @@ class JobBinaryInternal(object):
tenant_id
name
datasize
is_public
is_protected
"""
# Events ops
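
The objects above only gain the two attributes in this change; enforcement is left to follow-up patches (the commit only partially implements the blueprint). As a hypothetical illustration of the intended semantics, with helper names that are purely illustrative and not Sahara's actual API:

def can_modify(resource, request_tenant_id):
    # hypothetical sketch: protected resources must not be changed at all
    if resource.is_protected:
        return False
    # public resources are visible to every tenant, but only the owning
    # tenant is allowed to modify them
    return resource.tenant_id == request_tenant_id

def is_visible(resource, request_tenant_id):
    # hypothetical sketch: a resource is visible to its owner and,
    # when marked public, to every other tenant as well
    return resource.is_public or resource.tenant_id == request_tenant_id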


@@ -0,0 +1,65 @@
# Copyright 2015 OpenStack Foundation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""add is_public and is_protected flags
Revision ID: 026
Revises: 025
Create Date: 2015-06-24 12:41:52.571258
"""
# revision identifiers, used by Alembic.
revision = '026'
down_revision = '025'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('clusters',
sa.Column('is_public', sa.Boolean()))
op.add_column('cluster_templates',
sa.Column('is_public', sa.Boolean()))
op.add_column('node_group_templates',
sa.Column('is_public', sa.Boolean()))
op.add_column('data_sources',
sa.Column('is_public', sa.Boolean()))
op.add_column('job_executions',
sa.Column('is_public', sa.Boolean()))
op.add_column('jobs',
sa.Column('is_public', sa.Boolean()))
op.add_column('job_binary_internal',
sa.Column('is_public', sa.Boolean()))
op.add_column('job_binaries',
sa.Column('is_public', sa.Boolean()))
op.add_column('clusters',
sa.Column('is_protected', sa.Boolean()))
op.add_column('cluster_templates',
sa.Column('is_protected', sa.Boolean()))
op.add_column('node_group_templates',
sa.Column('is_protected', sa.Boolean()))
op.add_column('data_sources',
sa.Column('is_protected', sa.Boolean()))
op.add_column('job_executions',
sa.Column('is_protected', sa.Boolean()))
op.add_column('jobs',
sa.Column('is_protected', sa.Boolean()))
op.add_column('job_binary_internal',
sa.Column('is_protected', sa.Boolean()))
op.add_column('job_binaries',
sa.Column('is_protected', sa.Boolean()))
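
Since every affected table receives the same pair of nullable Boolean columns, the upgrade can also be expressed as a loop; a sketch of an equivalent form, reusing the op and sa imports above (the migration as merged uses the explicit calls shown):

_TABLES = ('clusters', 'cluster_templates', 'node_group_templates',
           'data_sources', 'job_executions', 'jobs',
           'job_binary_internal', 'job_binaries')

def upgrade():
    # add both flags to each table; the columns are nullable, so existing
    # rows simply hold NULL until the application layer supplies defaults
    for table in _TABLES:
        op.add_column(table, sa.Column('is_public', sa.Boolean()))
        op.add_column(table, sa.Column('is_protected', sa.Boolean()))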


@@ -79,6 +79,8 @@ class Cluster(mb.SaharaBase):
cluster_template = relationship('ClusterTemplate',
backref="clusters", lazy='joined')
shares = sa.Column(st.JsonListType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
def to_dict(self, show_progress=False):
d = super(Cluster, self).to_dict()
@@ -183,6 +185,8 @@ class ClusterTemplate(mb.SaharaBase):
is_default = sa.Column(sa.Boolean(), default=False)
use_autoconfig = sa.Column(sa.Boolean(), default=True)
shares = sa.Column(st.JsonListType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
def to_dict(self):
d = super(ClusterTemplate, self).to_dict()
@@ -224,6 +228,8 @@ class NodeGroupTemplate(mb.SaharaBase):
is_default = sa.Column(sa.Boolean(), default=False)
use_autoconfig = sa.Column(sa.Boolean(), default=True)
shares = sa.Column(st.JsonListType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
class TemplatesRelation(mb.SaharaBase):
@@ -286,6 +292,8 @@ class DataSource(mb.SaharaBase):
type = sa.Column(sa.String(80), nullable=False)
url = sa.Column(sa.String(256), nullable=False)
credentials = sa.Column(st.JsonDictType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
class JobExecution(mb.SaharaBase):
@@ -311,6 +319,8 @@ class JobExecution(mb.SaharaBase):
job_configs = sa.Column(st.JsonDictType())
extra = sa.Column(st.JsonDictType())
data_source_urls = sa.Column(st.JsonDictType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
mains_association = sa.Table("mains_association",
@@ -349,6 +359,8 @@ class Job(mb.SaharaBase):
name = sa.Column(sa.String(80), nullable=False)
description = sa.Column(sa.Text())
type = sa.Column(sa.String(80), nullable=False)
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
mains = relationship("JobBinary",
secondary=mains_association, lazy="joined")
@@ -408,6 +420,8 @@ class JobBinaryInternal(mb.SaharaBase):
name = sa.Column(sa.String(80), nullable=False)
data = sa.orm.deferred(sa.Column(st.LargeBinary()))
datasize = sa.Column(sa.BIGINT)
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
class JobBinary(mb.SaharaBase):
@@ -425,6 +439,8 @@ class JobBinary(mb.SaharaBase):
description = sa.Column(sa.Text())
url = sa.Column(sa.String(256), nullable=False)
extra = sa.Column(st.JsonDictType())
is_public = sa.Column(sa.Boolean())
is_protected = sa.Column(sa.Boolean())
class ClusterEvent(mb.SaharaBase):
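
The model changes are plain nullable Boolean columns; the False defaults come from the conductor-level *_DEFAULTS dictionaries at create time. As a hypothetical illustration (not part of this change) of how is_public could later be used when listing resources across tenants, assuming an open SQLAlchemy session and the Cluster model above:

import sqlalchemy as sa

def visible_clusters(session, tenant_id):
    # hypothetical sketch: clusters owned by the requesting tenant plus
    # any cluster that has been marked public by its owner
    return session.query(Cluster).filter(
        sa.or_(Cluster.tenant_id == tenant_id,
               Cluster.is_public == sa.true())).all()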


@@ -58,6 +58,8 @@ SAMPLE_CLUSTER = {
"config_1": "value_1"
}
},
"is_public": False,
"is_protected": False
}


@@ -57,6 +57,8 @@ SAMPLE_NGT = {
"is_proxy_gateway": False,
"volume_local_to_instance": False,
'use_autoconfig': True,
"is_public": False,
"is_protected": False
}
SAMPLE_CLT = {
@@ -100,7 +102,9 @@ SAMPLE_CLT = {
"anti_affinity": ["datanode"],
"description": "my template",
"neutron_management_network": str(uuid.uuid4()),
"shares": None
"shares": None,
"is_public": False,
"is_protected": False
}


@@ -519,6 +519,21 @@ class SaharaMigrationsCheckers(object):
self.assertColumnType(engine, 'instances', 'management_ip',
'VARCHAR(45)')
def _check_026(self, engine, data):
tables = [
'clusters',
'cluster_templates',
'node_group_templates',
'data_sources',
'job_executions',
'jobs',
'job_binary_internal',
'job_binaries',
]
for table in tables:
self.assertColumnExists(engine, table, 'is_public')
self.assertColumnExists(engine, table, 'is_protected')
class TestMigrationsMySQL(SaharaMigrationsCheckers,
base.BaseWalkMigrationTestCase,