Browse Source

Add compute per model feature

Change-Id: I11b66c3eb9e195f49871456084a12db42676cd75
bharath 5 months ago
parent
commit
40b7b896ac

+ 0
- 2
gyan/api/controllers/v1/flavors.py View File

@@ -164,11 +164,9 @@ class FlavorController(base.Controller):
164 164
         policy.enforce(context, "flavor:create",
165 165
                        action="flavor:create")
166 166
 
167
-        LOG.debug("bhaaaaaaaaaaaaaaaaaaaaaaaaaaa")
168 167
         LOG.debug(flavor_dict)
169 168
         flavor_dict["additional_details"] = json.dumps(flavor_dict["additional_details"])
170 169
         LOG.debug(flavor_dict)
171
-        # flavor_dict["model_data"] = open("/home/bharath/model.zip", "rb").read()
172 170
         new_flavor = objects.Flavor(context, **flavor_dict)
173 171
         flavor = new_flavor.create(context)
174 172
         LOG.debug(new_flavor)

+ 97
- 28
gyan/api/controllers/v1/ml_models.py View File

@@ -12,6 +12,11 @@
12 12
 
13 13
 import base64
14 14
 import shlex
15
+import os
16
+import time
17
+import yaml
18
+
19
+from eventlet import greenthread
15 20
 
16 21
 from oslo_log import log as logging
17 22
 from oslo_utils import strutils
@@ -31,6 +36,7 @@ from gyan.common import context as gyan_context
31 36
 from gyan.common import exception
32 37
 from gyan.common.i18n import _
33 38
 from gyan.common.policies import ml_model as policies
39
+from gyan.common import clients
34 40
 from gyan.common import policy
35 41
 from gyan.common import utils
36 42
 import gyan.conf
@@ -38,7 +44,7 @@ from gyan import objects
38 44
 
39 45
 CONF = gyan.conf.CONF
40 46
 LOG = logging.getLogger(__name__)
41
-
47
+BASE_TEMPLATE = os.path.join(CONF.state_path, "base.yaml")
42 48
 
43 49
 def check_policy_on_ml_model(ml_model, action):
44 50
     context = pecan.request.context
@@ -81,7 +87,6 @@ class MLModelController(base.Controller):
81 87
         'predict': ['POST']
82 88
     }
83 89
 
84
-
85 90
     @pecan.expose('json')
86 91
     @exception.wrap_pecan_controller_exception
87 92
     def get_all(self, **kwargs):
@@ -107,7 +112,7 @@ class MLModelController(base.Controller):
107 112
         expand = kwargs.pop('expand', None)
108 113
 
109 114
         ml_model_allowed_filters = ['name', 'status', 'project_id', 'user_id',
110
-                                     'type']
115
+                                    'type']
111 116
         filters = {}
112 117
         for filter_key in ml_model_allowed_filters:
113 118
             if filter_key in kwargs:
@@ -119,23 +124,23 @@ class MLModelController(base.Controller):
119 124
         marker = kwargs.pop('marker', None)
120 125
         if marker:
121 126
             marker_obj = objects.ML_Model.get_by_uuid(context,
122
-                                                       marker)
127
+                                                      marker)
123 128
         if kwargs:
124 129
             unknown_params = [str(k) for k in kwargs]
125 130
             msg = _("Unknown parameters: %s") % ", ".join(unknown_params)
126 131
             raise exception.InvalidValue(msg)
127 132
 
128 133
         ml_models = objects.ML_Model.list(context,
129
-                                            limit,
130
-                                            marker_obj,
131
-                                            sort_key,
132
-                                            sort_dir,
133
-                                            filters=filters)
134
+                                          limit,
135
+                                          marker_obj,
136
+                                          sort_key,
137
+                                          sort_dir,
138
+                                          filters=filters)
134 139
         return MLModelCollection.convert_with_links(ml_models, limit,
135
-                                                      url=resource_url,
136
-                                                      expand=expand,
137
-                                                      sort_key=sort_key,
138
-                                                      sort_dir=sort_dir)
140
+                                                    url=resource_url,
141
+                                                    expand=expand,
142
+                                                    sort_key=sort_key,
143
+                                                    sort_dir=sort_dir)
139 144
 
140 145
     @pecan.expose('json')
141 146
     @exception.wrap_pecan_controller_exception
@@ -152,7 +157,7 @@ class MLModelController(base.Controller):
152 157
         ml_model = utils.get_ml_model(ml_model_ident)
153 158
         check_policy_on_ml_model(ml_model.as_dict(), "ml_model:get_one")
154 159
         return view.format_ml_model(context, pecan.request.host_url,
155
-                                     ml_model.as_dict())
160
+                                    ml_model.as_dict())
156 161
 
157 162
     @base.Controller.api_version("1.0")
158 163
     @pecan.expose('json')
@@ -168,9 +173,9 @@ class MLModelController(base.Controller):
168 173
         compute_api = pecan.request.compute_api
169 174
         new_model = view.format_ml_model(context, pecan.request.host_url,
170 175
                                          ml_model.as_dict())
171
-        compute_api.ml_model_create(context, new_model)
176
+        # compute_api.ml_model_create(context, new_model)
172 177
         return new_model
173
-    
178
+
174 179
     @base.Controller.api_version("1.0")
175 180
     @pecan.expose('json')
176 181
     @exception.wrap_pecan_controller_exception
@@ -180,10 +185,13 @@ class MLModelController(base.Controller):
180 185
         ml_model = utils.get_ml_model(ml_model_ident)
181 186
         pecan.response.status = 200
182 187
         compute_api = pecan.request.compute_api
188
+        host_ip = ml_model.deployed_on
189
+        LOG.debug(pecan.request.POST['file'])
183 190
         predict_dict = {
184 191
             "data": base64.b64encode(pecan.request.POST['file'].file.read())
185 192
         }
186
-        prediction = compute_api.ml_model_predict(context, ml_model_ident, **predict_dict)
193
+        LOG.debug(predict_dict)
194
+        prediction = compute_api.ml_model_predict(context, ml_model_ident, host_ip, **predict_dict)
187 195
         return prediction
188 196
 
189 197
     @base.Controller.api_version("1.0")
@@ -210,20 +218,22 @@ class MLModelController(base.Controller):
210 218
 
211 219
         ml_model_dict['status'] = consts.CREATED
212 220
         ml_model_dict['ml_type'] = ml_model_dict['type']
221
+        ml_model_dict['flavor_id'] = ml_model_dict['flavor_id']
213 222
         extra_spec = {}
214 223
         extra_spec['hints'] = ml_model_dict.get('hints', None)
215
-        #ml_model_dict["model_data"] = open("/home/bharath/model.zip", "rb").read()
224
+        # ml_model_dict["model_data"] = open("/home/bharath/model.zip", "rb").read()
216 225
         new_ml_model = objects.ML_Model(context, **ml_model_dict)
226
+        # heat_client = clients.OpenStackClients(context).heat()
227
+
217 228
         ml_model = new_ml_model.create(context)
218 229
         LOG.debug(new_ml_model)
219
-        #compute_api.ml_model_create(context, new_ml_model)
230
+        # compute_api.ml_model_create(context, new_ml_model)
220 231
         # Set the HTTP Location Header
221 232
         pecan.response.location = link.build_url('ml_models',
222 233
                                                  ml_model.id)
223 234
         pecan.response.status = 201
224 235
         return view.format_ml_model(context, pecan.request.host_url,
225
-                                     ml_model.as_dict())
226
-
236
+                                    ml_model.as_dict())
227 237
 
228 238
     @pecan.expose('json')
229 239
     @exception.wrap_pecan_controller_exception
@@ -241,8 +251,7 @@ class MLModelController(base.Controller):
241 251
         compute_api = pecan.request.compute_api
242 252
         ml_model = compute_api.ml_model_update(context, ml_model, patch)
243 253
         return view.format_ml_model(context, pecan.request.node_url,
244
-                                     ml_model.as_dict())
245
-
254
+                                    ml_model.as_dict())
246 255
 
247 256
     @pecan.expose('json')
248 257
     @exception.wrap_pecan_controller_exception
@@ -259,6 +268,58 @@ class MLModelController(base.Controller):
259 268
         ml_model.destroy(context)
260 269
         pecan.response.status = 204
261 270
 
271
+    def _do_compute_node_schedule(self, context, ml_model, url, compute_api, host_url):
272
+        target_status = "COMPLETE"
273
+        timeout = 500
274
+        sleep_interval = 5
275
+        stack_data = {
276
+            "files": {},
277
+            "disable_rollback": True,
278
+            "parameters": {},
279
+            "stack_name": "TENSORFLOW",
280
+            "environment": {}
281
+        }
282
+        stack_data["template"] = yaml.safe_load(open(BASE_TEMPLATE))
283
+        LOG.debug(stack_data)
284
+        heat_client = clients.OpenStackClients(context).heat()
285
+        stack = heat_client.stacks.create(**stack_data)
286
+        LOG.debug(stack)
287
+        stack_id = stack["stack"]["id"]
288
+        while True:
289
+            stack_result = heat_client.stacks.get(stack_id)
290
+            status = stack_result.status
291
+            if (status == target_status):
292
+                break
293
+        if status == target_status:
294
+            ml_model.status = consts.DEPLOYED_COMPUTE_NODE
295
+            ml_model.save(context)
296
+        else:
297
+            ml_model.status = consts.DEPLOYMENT_FAILED
298
+            ml_model.save(context)
299
+            return
300
+        host_ip = None
301
+        stack_result = heat_client.stacks.get(stack_id)
302
+        for output in stack_result.outputs:
303
+            if "public" in output["output_key"]:
304
+                host_ip = output["output_value"]
305
+        ml_model.deployed_on = host_ip
306
+        ml_model.save(context)
307
+        while True:
308
+            hosts = objects.ComputeHost.list(context)
309
+            LOG.debug(hosts)
310
+            LOG.debug(host_ip)
311
+            for host in hosts:
312
+                if host_ip == host.hostname:
313
+                    ml_model.status = consts.DEPLOYED
314
+                    ml_model.url = url
315
+                    ml_model.save(context)
316
+                    ml_model.ml_data = None
317
+                    new_model = view.format_ml_model(context, host_url,
318
+                                                     ml_model.as_dict())
319
+                    compute_api.ml_model_create(context, new_model, host_ip)
320
+                    return
321
+            greenthread.sleep(sleep_interval)
322
+        return None
262 323
 
263 324
     @pecan.expose('json')
264 325
     @exception.wrap_pecan_controller_exception
@@ -267,19 +328,27 @@ class MLModelController(base.Controller):
267 328
 
268 329
         :param ml_model_ident: UUID or Name of a ML Model.
269 330
         """
331
+
270 332
         context = pecan.request.context
271 333
         ml_model = utils.get_ml_model(ml_model_ident)
334
+
335
+        @utils.synchronized(ml_model.id)
336
+        def do_compute_schedule(context, ml_model, url, compute_api, host_url):
337
+            self._do_compute_node_schedule(context, ml_model, url, compute_api, host_url)
338
+
272 339
         check_policy_on_ml_model(ml_model.as_dict(), "ml_model:deploy")
273 340
         utils.validate_ml_model_state(ml_model, 'deploy')
274 341
         LOG.debug('Calling compute.ml_model_deploy with %s',
275 342
                   ml_model.id)
276
-        ml_model.status =  consts.DEPLOYED
343
+        ml_model.status = consts.DEPLOYMENT_STARTED
277 344
         url = pecan.request.url.replace("deploy", "predict")
278
-        ml_model.url = url
345
+        # ml_model.url = url
346
+        compute_api = pecan.request.compute_api
347
+        utils.spawn_n(do_compute_schedule, context, ml_model, url, compute_api, pecan.request.host_url)
279 348
         ml_model.save(context)
280 349
         pecan.response.status = 202
281 350
         return view.format_ml_model(context, pecan.request.host_url,
282
-                                     ml_model.as_dict())
351
+                                    ml_model.as_dict())
283 352
 
284 353
     @pecan.expose('json')
285 354
     @exception.wrap_pecan_controller_exception
@@ -294,9 +363,9 @@ class MLModelController(base.Controller):
294 363
         utils.validate_ml_model_state(ml_model, 'undeploy')
295 364
         LOG.debug('Calling compute.ml_model_deploy with %s',
296 365
                   ml_model.id)
297
-        ml_model.status = consts.SCHEDULED
366
+        ml_model.status = consts.CREATED
298 367
         ml_model.url = None
299 368
         ml_model.save(context)
300 369
         pecan.response.status = 202
301 370
         return view.format_ml_model(context, pecan.request.host_url,
302
-                                     ml_model.as_dict())
371
+                                    ml_model.as_dict())

+ 2
- 1
gyan/api/controllers/v1/schemas/ml_models.py View File

@@ -20,7 +20,8 @@ ml_model_create = {
20 20
     'type': 'object',
21 21
     'properties': {
22 22
         "name": parameter_types.ml_model_name,
23
-        "type": parameter_types.ml_model_type
23
+        "type": parameter_types.ml_model_type,
24
+        "flavor_id": parameter_types.ml_model_flavor_id
24 25
     },
25 26
     'required': ['name', 'type'],
26 27
     'additionalProperties': False

+ 7
- 0
gyan/api/controllers/v1/schemas/parameter_types.py View File

@@ -147,6 +147,13 @@ ml_model_name = {
147 147
     'pattern': '^[a-zA-Z0-9-._]*$'
148 148
 }
149 149
 
150
+ml_model_flavor_id = {
151
+    'type': 'string',
152
+    'minLength': 1,
153
+    'maxLength': 255,
154
+    'pattern': '^[a-zA-Z0-9-._]*$'
155
+}
156
+
150 157
 ml_model_type = {
151 158
     'type': 'string',
152 159
     'minLength': 1,

+ 4
- 5
gyan/api/controllers/v1/views/ml_models_view.py View File

@@ -27,8 +27,9 @@ _basic_keys = (
27 27
     'status',
28 28
     'status_reason',
29 29
     'host_id',
30
-    'deployed',
31
-    'ml_type'
30
+    'ml_type',
31
+    'flavor_id',
32
+    'deployed_on'
32 33
 )
33 34
 
34 35
 LOG = logging.getLogger(__name__)
@@ -36,8 +37,6 @@ LOG = logging.getLogger(__name__)
36 37
 
37 38
 def format_ml_model(context, url, ml_model):
38 39
     def transform(key, value):
39
-        LOG.debug(key)
40
-        LOG.debug(value)
41 40
         if key not in _basic_keys:
42 41
             return
43 42
         # strip the key if it is not allowed by policy
@@ -57,4 +56,4 @@ def format_ml_model(context, url, ml_model):
57 56
             yield (key, value)
58 57
 
59 58
     return dict(itertools.chain.from_iterable(
60
-        transform(k, v) for k, v in ml_model.items()))
59
+        transform(k, v) for k, v in ml_model.items()))

+ 1
- 1
gyan/api/utils.py View File

@@ -63,7 +63,7 @@ def get_resource(resource, resource_ident):
63 63
     context = pecan.request.context
64 64
     if context.is_admin:
65 65
         context.all_projects = True
66
-    if uuidutils.is_uuid_like(resource_ident):
66
+    if uuidutils.is_uuid_like(resource_ident) or 'gyan-' in resource_ident:
67 67
         return resource.get_by_uuid(context, resource_ident)
68 68
 
69 69
     return resource.get_by_name(context, resource_ident)

+ 62
- 0
gyan/common/clients.py View File

@@ -0,0 +1,62 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+
13
+from heatclient import client as heatclient
14
+
15
+from gyan.common import exception
16
+from gyan.common import keystone
17
+import gyan.conf
18
+
19
+
20
class OpenStackClients(object):
    """Convenience class to create and cache OpenStack client instances."""

    def __init__(self, context):
        self.context = context
        self._keystone = None
        self._heat = None

    def url_for(self, **kwargs):
        """Resolve an endpoint URL through the Keystone session."""
        return self.keystone().session.get_endpoint(**kwargs)

    def gyan_url(self):
        """Return the Gyan (ml-infra) endpoint for the configured region."""
        interface = self._get_client_option('gyan', 'endpoint_type')
        region = self._get_client_option('gyan', 'region_name')
        return self.url_for(service_type='ml-infra',
                            interface=interface,
                            region_name=region)

    @property
    def auth_token(self):
        """Auth token from the request context, falling back to Keystone."""
        return self.context.auth_token or self.keystone().auth_token

    def keystone(self):
        """Return the cached Keystone client, creating it on first use."""
        if not self._keystone:
            self._keystone = keystone.KeystoneClientV3(self.context)
        return self._keystone

    def _get_client_option(self, client, option):
        """Read ``option`` from the ``<client>_client`` config group."""
        group = getattr(gyan.conf.CONF, '%s_client' % client)
        return getattr(group, option)

    @exception.wrap_keystone_exception
    def heat(self):
        """Return the cached Heat client bound to the Keystone session."""
        if not self._heat:
            version = self._get_client_option('heat', 'api_version')
            self._heat = heatclient.Client(version,
                                           session=self.keystone().session)
        return self._heat

+ 5
- 1
gyan/common/consts.py View File

@@ -17,4 +17,8 @@ UNDEPLOYED = 'undeployed'
17 17
 DEPLOYED = 'deployed'
18 18
 CREATING = 'CREATING'
19 19
 CREATED = 'CREATED'
20
-SCHEDULED = 'SCHEDULED'
20
+SCHEDULED = 'SCHEDULED'
21
+DEPLOYED_COMPUTE_NODE = 'DEPLOYED COMPUTE NODE'
22
+DEPLOYMENT_FAILED = 'DEPLOYMENT FAILED'
23
+DEPLOYMENT_STARTED = 'DEPLOYMENT STARTED'
24
+NAME_PREFIX='gyan-'

+ 10
- 2
gyan/common/utils.py View File

@@ -25,8 +25,11 @@ import json
25 25
 import mimetypes
26 26
 import os
27 27
 import zipfile
28
+import ctypes, os
28 29
 
30
+from oslo_concurrency import lockutils
29 31
 from oslo_concurrency import processutils
32
+from oslo_utils import importutils
30 33
 from oslo_context import context as common_context
31 34
 from oslo_log import log as logging
32 35
 from oslo_utils import excutils
@@ -42,12 +45,16 @@ from gyan.common import privileged
42 45
 import gyan.conf
43 46
 from gyan import objects
44 47
 
48
+eventlet.monkey_patch()
49
+
45 50
 CONF = gyan.conf.CONF
46 51
 LOG = logging.getLogger(__name__)
47 52
 
53
+synchronized = lockutils.synchronized_with_prefix(consts.NAME_PREFIX)
54
+
48 55
 VALID_STATES = {
49 56
     'deploy': [consts.CREATED, consts.UNDEPLOYED, consts.SCHEDULED],
50
-    'undeploy': [consts.DEPLOYED]
57
+    'undeploy': [consts.DEPLOYED, consts.DEPLOYED_COMPUTE_NODE, consts.DEPLOYMENT_FAILED, consts.DEPLOYMENT_STARTED]
51 58
 }
52 59
 def safe_rstrip(value, chars=None):
53 60
     """Removes trailing characters from a string if that does not make it empty
@@ -101,6 +108,7 @@ def spawn_n(func, *args, **kwargs):
101 108
     def context_wrapper(*args, **kwargs):
102 109
         # NOTE: If update_store is not called after spawn_n it won't be
103 110
         # available for the logger to pull from threadlocal storage.
111
+        _context.update_store()
104 112
         if _context is not None:
105 113
             _context.update_store()
106 114
         func(*args, **kwargs)
@@ -271,4 +279,4 @@ def save_model(path, model):
271 279
         f.write(model.ml_data)
272 280
     zip_ref = zipfile.ZipFile(file_path+'.zip', 'r')
273 281
     zip_ref.extractall(file_path)
274
-    zip_ref.close()
282
+    zip_ref.close()

+ 9
- 10
gyan/compute/api.py View File

@@ -23,7 +23,6 @@ from gyan.compute import rpcapi
23 23
 import gyan.conf
24 24
 from gyan import objects
25 25
 
26
-
27 26
 CONF = gyan.conf.CONF
28 27
 LOG = logging.getLogger(__name__)
29 28
 
@@ -35,11 +34,11 @@ class API(object):
35 34
         self.rpcapi = rpcapi.API(context=context)
36 35
         super(API, self).__init__()
37 36
 
38
-    def ml_model_create(self, context, new_ml_model, **extra_spec):
37
+    def ml_model_create(self, context, new_ml_model, host_ip, **extra_spec):
39 38
         try:
40 39
             host_state = {
41
-                "host": "localhost"
42
-            } #self._schedule_ml_model(context, ml_model, extra_spec)
40
+                "host": host_ip
41
+            }
43 42
         except exception.NoValidHost:
44 43
             new_ml_model.status = consts.ERROR
45 44
             new_ml_model.status_reason = _(
@@ -53,15 +52,15 @@ class API(object):
53 52
             raise
54 53
         LOG.debug(host_state)
55 54
         return self.rpcapi.ml_model_create(context, host_state['host'],
56
-                                     new_ml_model)
57
-    
58
-    def ml_model_predict(self, context, ml_model_id, **kwargs):
59
-        return self.rpcapi.ml_model_predict(context, ml_model_id,
60
-                                     **kwargs)
55
+                                           new_ml_model)
56
+
57
+    def ml_model_predict(self, context, ml_model_id, host_ip, **kwargs):
58
+        return self.rpcapi.ml_model_predict(context, ml_model_id, host_ip,
59
+                                            **kwargs)
61 60
 
62 61
     def ml_model_delete(self, context, ml_model, *args):
63 62
         self._record_action_start(context, ml_model, ml_model_actions.DELETE)
64 63
         return self.rpcapi.ml_model_delete(context, ml_model, *args)
65 64
 
66 65
     def ml_model_show(self, context, ml_model):
67
-        return self.rpcapi.ml_model_show(context, ml_model)
66
+        return self.rpcapi.ml_model_show(context, ml_model)

+ 2
- 2
gyan/compute/manager.py View File

@@ -53,8 +53,8 @@ class Manager(periodic_task.PeriodicTasks):
53 53
         db_ml_model = objects.ML_Model.get_by_uuid_db(context, ml_model["id"])
54 54
         utils.save_model(CONF.state_path, db_ml_model)
55 55
         obj_ml_model = objects.ML_Model.get_by_uuid(context, ml_model["id"])
56
-        obj_ml_model.status = consts.SCHEDULED
57
-        obj_ml_model.status_reason = "The ML Model is scheduled and saved to the host %s" % self.host
56
+        #obj_ml_model.status = consts.SCHEDULED
57
+        #obj_ml_model.status_reason = "The ML Model is scheduled and saved to the host %s" % self.host
58 58
         obj_ml_model.save(context)
59 59
 
60 60
     def ml_model_predict(self, context, ml_model_id, kwargs):

+ 3
- 3
gyan/compute/rpcapi.py View File

@@ -48,8 +48,8 @@ class API(rpc_service.API):
48 48
         self._cast(host, 'ml_model_create', 
49 49
                    ml_model=ml_model)
50 50
 
51
-    def ml_model_predict(self, context, ml_model_id, **kwargs):
52
-        return self._call("localhost", 'ml_model_predict', 
51
+    def ml_model_predict(self, context, ml_model_id, host_ip, **kwargs):
52
+        return self._call(host_ip, 'ml_model_predict',
53 53
                    ml_model_id=ml_model_id, kwargs=kwargs)
54 54
 
55 55
     @check_ml_model_host
@@ -66,4 +66,4 @@ class API(rpc_service.API):
66 66
     @check_ml_model_host
67 67
     def ml_model_update(self, context, ml_model, patch):
68 68
         return self._call(ml_model.host, 'ml_model_update',
69
-                          ml_model=ml_model, patch=patch)
69
+                          ml_model=ml_model, patch=patch)

+ 2
- 0
gyan/conf/__init__.py View File

@@ -24,6 +24,7 @@ from gyan.conf import services
24 24
 from gyan.conf import ssl
25 25
 from gyan.conf import utils
26 26
 from gyan.conf import gyan_client
27
+from gyan.conf import heat_client
27 28
 
28 29
 CONF = cfg.CONF
29 30
 
@@ -36,6 +37,7 @@ path.register_opts(CONF)
36 37
 scheduler.register_opts(CONF)
37 38
 services.register_opts(CONF)
38 39
 gyan_client.register_opts(CONF)
40
+heat_client.register_opts(CONF)
39 41
 ssl.register_opts(CONF)
40 42
 profiler.register_opts(CONF)
41 43
 utils.register_opts(CONF)

+ 54
- 0
gyan/conf/heat_client.py View File

@@ -0,0 +1,54 @@
1
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
2
+# not use this file except in compliance with the License. You may obtain
3
+# a copy of the License at
4
+#
5
+#      http://www.apache.org/licenses/LICENSE-2.0
6
+#
7
+# Unless required by applicable law or agreed to in writing, software
8
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
9
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
10
+# License for the specific language governing permissions and limitations
11
+# under the License.
12
+
13
from oslo_config import cfg


# Config group holding all Heat client options.
heat_group = cfg.OptGroup(name='heat_client',
                          title='Options for the Heat client')

# TLS/SSL options shared with other OpenStack clients.
common_security_opts = [
    cfg.StrOpt('ca_file',
               help='Optional CA cert file to use in SSL connections.'),
    cfg.StrOpt('cert_file',
               help='Optional PEM-formatted certificate chain file.'),
    cfg.StrOpt('key_file',
               help='Optional PEM-formatted file that contains the '
                    'private key.'),
    cfg.BoolOpt('insecure',
                default=False,
                help="If set, then the server's certificate will not "
                     "be verified.")]

# Heat-specific endpoint/version options.
heat_client_opts = [
    cfg.StrOpt('region_name',
               help='Region in Identity service catalog to use for '
                    'communication with the OpenStack service.'),
    cfg.StrOpt('endpoint_type',
               default='publicURL',
               help='Type of endpoint in Identity service catalog to use '
                    'for communication with the OpenStack service.'),
    cfg.StrOpt('api_version',
               default='1',
               help='Version of Heat API to use in heatclient.')]


ALL_OPTS = (heat_client_opts + common_security_opts)


def register_opts(conf):
    """Register the heat_client group and its options on ``conf``."""
    conf.register_group(heat_group)
    conf.register_opts(ALL_OPTS, group=heat_group)


def list_opts():
    """Return the option list keyed by group, for oslo.config generators."""
    return {heat_group: ALL_OPTS}

+ 26
- 0
gyan/db/sqlalchemy/alembic/versions/319fc86b7f72_add_flavor_column_to_ml_model.py View File

@@ -0,0 +1,26 @@
1
+"""Add flavor column to ML Model
2
+
3
+Revision ID: 319fc86b7f72
4
+Revises: 395aff469925
5
+Create Date: 2018-11-06 15:31:35.600670
6
+
7
+"""
8
+
9
+# revision identifiers, used by Alembic.
10
+revision = '319fc86b7f72'
11
+down_revision = '395aff469925'
12
+branch_labels = None
13
+depends_on = None
14
+
15
+from alembic import op
16
+import sqlalchemy as sa
17
+
18
+
19
+def upgrade():
20
+    # ### commands auto generated by Alembic - please adjust! ###
21
+    with op.batch_alter_table('ml_model', schema=None) as batch_op:
22
+        batch_op.add_column(sa.Column('deployed_on', sa.Text(), nullable=True))
23
+        batch_op.add_column(sa.Column('flavor_id', sa.String(length=255), nullable=False))
24
+        batch_op.drop_column('deployed')
25
+
26
+    # ### end Alembic commands ###

+ 0
- 1
gyan/db/sqlalchemy/alembic/versions/f3bf9414f399_add_ml_type_and_ml_data_to_ml_model_.py View File

@@ -34,6 +34,5 @@ def upgrade():
34 34
         batch_op.add_column(sa.Column('ml_type', sa.String(length=255), nullable=True))
35 35
         batch_op.add_column(sa.Column('started_at', sa.DateTime(), nullable=True))
36 36
         batch_op.create_unique_constraint('uniq_mlmodel0uuid', ['id'])
37
-        batch_op.drop_constraint(u'ml_model_ibfk_1', type_='foreignkey')
38 37
         
39 38
     # ### end Alembic commands ###

+ 3
- 1
gyan/db/sqlalchemy/api.py View File

@@ -91,6 +91,8 @@ def add_identity_filter(query, value):
91 91
         return query.filter_by(id=value)
92 92
     elif uuidutils.is_uuid_like(value):
93 93
         return query.filter_by(id=value)
94
+    elif "gyan-" in value:
95
+        return query.filter_by(id=value)
94 96
     else:
95 97
         raise exception.InvalidIdentity(identity=value)
96 98
 
@@ -240,7 +242,7 @@ class Connection(object):
240 242
     def create_ml_model(self, context, values):
241 243
         # ensure defaults are present for new ml_models
242 244
         if not values.get('id'):
243
-            values['id'] = uuidutils.generate_uuid()
245
+            values['id'] = 'gyan-' + uuidutils.generate_uuid()
244 246
         ml_model = models.ML_Model()
245 247
         ml_model.update(values)
246 248
         try:

+ 2
- 1
gyan/db/sqlalchemy/models.py View File

@@ -122,7 +122,8 @@ class ML_Model(Base):
122 122
     status = Column(String(20))
123 123
     status_reason = Column(Text, nullable=True)
124 124
     host_id = Column(String(255), nullable=True)
125
-    deployed = Column(Text, nullable=True)
125
+    deployed_on = Column(Text, nullable=True)
126
+    flavor_id = Column(String(255), nullable=False)
126 127
     url = Column(Text, nullable=True)
127 128
     hints = Column(Text, nullable=True)
128 129
     ml_type = Column(String(255), nullable=True)

+ 1
- 1
gyan/objects/flavor.py View File

@@ -33,7 +33,7 @@ class Flavor(base.GyanPersistentObject, base.GyanObject):
33 33
         'cpu': fields.StringField(nullable=True),
34 34
         'memory': fields.StringField(nullable=True),
35 35
         'python_version': fields.StringField(nullable=True),
36
-        'disk': fields.BooleanField(nullable=True),
36
+        'disk': fields.StringField(nullable=True),
37 37
         'additional_details': fields.StringField(nullable=True),
38 38
         'created_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
39 39
         'updated_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),

+ 3
- 2
gyan/objects/ml_model.py View File

@@ -28,14 +28,15 @@ class ML_Model(base.GyanPersistentObject, base.GyanObject):
28 28
     VERSION = '1'
29 29
 
30 30
     fields = {
31
-        'id': fields.UUIDField(nullable=True),
31
+        'id': fields.StringField(nullable=True),
32 32
         'name': fields.StringField(nullable=True),
33 33
         'project_id': fields.StringField(nullable=True),
34 34
         'user_id': fields.StringField(nullable=True),
35 35
         'status': fields.StringField(nullable=True),
36 36
         'status_reason': fields.StringField(nullable=True),
37 37
         'url': fields.StringField(nullable=True),
38
-        'deployed': fields.BooleanField(nullable=True),
38
+        'deployed_on': fields.StringField(nullable=True),
39
+        'flavor_id': fields.StringField(nullable=True),
39 40
         'hints': fields.StringField(nullable=True),
40 41
         'created_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),
41 42
         'updated_at': fields.DateTimeField(tzinfo_aware=False, nullable=True),

Loading…
Cancel
Save