Kubernetes on senlin
Done:
- Add ability to do actions on cluster creation/deletion.
- Add more interfaces with openstacksdk.
- Add kubernetes master profile; use kubeadm to set up one master node.
- Add kubernetes node profile; automatically retrieve kubernetes data from the master cluster.

TODO: See TODO.rst

Change-Id: Ia6a3f562cc776e1df7367a3278aa136db974ff7f
parent d4a9cb8287
commit 15ca5e41a1

contrib/kubernetes/README.rst (new file)

Kubernetes Profile
==================

Installation
------------

::

  pip install --editable .
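
A quick way to confirm the profile types are available (the senlin engine may
need a restart so that it picks up the new entry points)::

  openstack cluster profile type list | grep kubernetes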

Usage
-----

Prepare a profile for master nodes
..................................

Copy the example profile file `kubemaster.yaml` from examples/kubemaster.yaml
and modify the related parameters to match your OpenStack environment.
For now, only the official Ubuntu 16.04 cloud image is supported.

::

  openstack cluster profile create --spec-file kubemaster.yaml profile-master

Create a cluster for master nodes
.................................

For now, please create exactly one node in this cluster. This profile does not
support running multiple master nodes in a high-availability configuration.

::

  openstack cluster create --min-size 1 --desired-capacity 1 --max-size 1 --profile profile-master cm
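
Cluster creation takes a few minutes while the server boots and kubeadm runs.
The profile stores the generated kubeadm token and the master's floating IP in
the cluster's `data` field, so one way to watch progress is::

  openstack cluster show cm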

Prepare a profile for worker nodes
..................................

Copy the example profile file `kubenode.yaml`, modify the related parameters,
and change `master_cluster` to the senlin cluster you just created.

::

  openstack cluster profile create --spec-file kubenode.yaml profile-node

Create a cluster for worker nodes
.................................

::

  openstack cluster create --desired-capacity 2 --profile profile-node cn

Operate kubernetes
------------------

About kubeconfig
................

The config file for `kubectl` is located at `/root/.kube/config` on the master
node. Copy this file out and place it at `$HOME/.kube/config` on your
workstation, then change the server IP in it to the master node's floating IP.
Run `kubectl get nodes` to see if it works.
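
A minimal sketch of those steps, assuming the master's floating IP is
172.24.4.10 (an illustrative address; use the floating IP recorded in the
master cluster's `data`)::

  # after saving the copied file as $HOME/.kube/config on your workstation,
  # point the server entry at the floating IP and test the connection
  sed -i 's#server: https://.*:6443#server: https://172.24.4.10:6443#' $HOME/.kube/config
  kubectl get nodes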

Dashboard
.........

Prepare the following file to skip dashboard authentication::

  $ cat ./dashboard-admin.yaml
  apiVersion: rbac.authorization.k8s.io/v1beta1
  kind: ClusterRoleBinding
  metadata:
    name: kubernetes-dashboard
    labels:
      k8s-app: kubernetes-dashboard
  roleRef:
    apiGroup: rbac.authorization.k8s.io
    kind: ClusterRole
    name: cluster-admin
  subjects:
  - kind: ServiceAccount
    name: kubernetes-dashboard
    namespace: kube-system

Apply this config::

  kubectl apply -f ./dashboard-admin.yaml

Start a proxy using `kubectl`::

  kubectl proxy

Open the dashboard in a browser at
`http://localhost:8001/api/v1/namespaces/kube-system/services/https:kubernetes-dashboard:/proxy/`
and skip the login step.

contrib/kubernetes/TODO.rst (new file)

TODO:

- Forbid deleting the master cluster before the worker cluster is deleted.
- Limit the master cluster to no more than one node.
- Drain a worker node before deleting it.
- More validation before cluster creation.
- More exception handling in the code.

Done:

- Add ability to do actions on cluster creation/deletion.
- Add more network interfaces in drivers.
- Add kubernetes master profile; use kubeadm to set up one master node.
- Add kubernetes node profile; automatically retrieve kubernetes data from the master cluster.

contrib/kubernetes/examples/kubemaster.yaml (new file)

type: senlin.kubernetes.master
version: 1.0
properties:
  flavor: k8s.master
  image: ubuntu-16.04
  key_name: elynn
  public_network: public

contrib/kubernetes/examples/kubenode.yaml (new file)

type: senlin.kubernetes.worker
version: 1.0
properties:
  flavor: k8s.worker
  image: ubuntu-16.04
  key_name: elynn
  master_cluster: cm

contrib/kubernetes/kube/__init__.py (new, empty file)


contrib/kubernetes/kube/base.py (new file)

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import random
import string

from oslo_log import log as logging
import six

from senlin.common import context
from senlin.common import exception as exc
from senlin.objects import cluster as cluster_obj
from senlin.profiles.os.nova import server

LOG = logging.getLogger(__name__)


def GenKubeToken():
    # kubeadm expects a bootstrap token of the form "<6 chars>.<16 chars>"
    token_id = ''.join([random.choice(
        string.digits + string.ascii_lowercase) for i in range(6)])
    token_secret = ''.join([random.choice(
        string.digits + string.ascii_lowercase) for i in range(16)])
    token = '.'.join((token_id, token_secret))
    return token


def loadScript(path):
    script_file = os.path.join(os.path.dirname(__file__), path)
    with open(script_file, "r") as f:
        content = f.read()
    return content


class KubeBaseProfile(server.ServerProfile):
    """Kubernetes Base Profile."""

    def __init__(self, type_name, name, **kwargs):
        super(KubeBaseProfile, self).__init__(type_name, name, **kwargs)
        self.server_id = None

    def _generate_kubeadm_token(self, obj):
        token = GenKubeToken()
        # store generated token
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        data = obj.data
        data[self.KUBEADM_TOKEN] = token
        cluster_obj.Cluster.update(ctx, obj.id, {'data': data})
        return token

    def _get_kubeadm_token(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            return cluster.data.get(self.KUBEADM_TOKEN)
        return None

    def _update_master_ip(self, obj, ip):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            cluster.data['kube_master_ip'] = ip
            cluster.update(ctx, obj.cluster_id, {'data': cluster.data})

    def _create_network(self, obj):
        client = self.network(obj)
        net = client.network_create()
        subnet = client.subnet_create(network_id=net.id, cidr='10.7.0.0/24',
                                      ip_version=4)
        pub_net = client.network_get(self.properties[self.PUBLIC_NETWORK])
        router = client.router_create(
            external_gateway_info={"network_id": pub_net.id})
        client.add_interface_to_router(router, subnet_id=subnet.id)
        fip = client.floatingip_create(floating_network_id=pub_net.id)

        ctx = context.get_service_context(user=obj.user, project=obj.project)
        data = obj.data
        data[self.PRIVATE_NETWORK] = net.id
        data[self.PRIVATE_SUBNET] = subnet.id
        data[self.PRIVATE_ROUTER] = router.id
        data[self.KUBE_MASTER_FLOATINGIP] = fip.floating_ip_address
        data[self.KUBE_MASTER_FLOATINGIP_ID] = fip.id

        cluster_obj.Cluster.update(ctx, obj.id, {'data': data})

        return net.id

    def _delete_network(self, obj):
        client = self.network(obj)
        fip_id = obj.data.get(self.KUBE_MASTER_FLOATINGIP_ID)
        client.floatingip_delete(fip_id)

        router = obj.data.get(self.PRIVATE_ROUTER)
        subnet = obj.data.get(self.PRIVATE_SUBNET)
        client.remove_interface_from_router(router, subnet_id=subnet)

        # delete router and network
        client.router_delete(router, ignore_missing=True)
        net = obj.data.get(self.PRIVATE_NETWORK)
        client.network_delete(net, ignore_missing=True)

    def _associate_floatingip(self, obj, server):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            fip = cluster.data.get(self.KUBE_MASTER_FLOATINGIP)
            self.compute(obj).server_floatingip_associate(server, fip)

    def _disassociate_floatingip(self, obj, server):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            fip = cluster.data.get(self.KUBE_MASTER_FLOATINGIP)
            try:
                self.compute(obj).server_floatingip_disassociate(server, fip)
            except Exception:
                pass

    def _get_cluster_data(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            return cluster.data
        return {}

    def _get_network(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            return cluster.data.get(self.PRIVATE_NETWORK)
        return None

    def _create_security_group(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        sgid = obj.data.get(self.SECURITY_GROUP, None)
        if sgid:
            return sgid

        client = self.network(obj)
        try:
            sg = client.security_group_create(name=self.name)
        except Exception as ex:
            raise exc.EResourceCreation(type='kubernetes.master',
                                        message=six.text_type(ex))
        data = obj.data
        data[self.SECURITY_GROUP] = sg.id
        cluster_obj.Cluster.update(ctx, obj.id, {'data': data})
        self._set_security_group_rules(obj, sg.id)

        return sg.id

    def _get_security_group(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            return cluster.data.get(self.SECURITY_GROUP)
        return None

    def _set_security_group_rules(self, obj, sgid):
        client = self.network(obj)
        open_ports = {
            'tcp': [22, 80, 8000, 8080, 6443, 8001, 8443, 443,
                    179, 8082, 8086],
            'udp': [8285, 8472],
            'icmp': [None]
        }
        for p in open_ports.keys():
            for port in open_ports[p]:
                try:
                    client.security_group_rule_create(sgid, port, protocol=p)
                except Exception as ex:
                    raise exc.EResourceCreation(type='kubernetes.master',
                                                message=six.text_type(ex))

    def _delete_security_group(self, obj):
        sgid = obj.data.get(self.SECURITY_GROUP)
        if sgid:
            self.network(obj).security_group_delete(sgid, ignore_missing=True)

    def do_validate(self, obj):
        """Validate if the spec has provided valid info for server creation.

        :param obj: The node object.
        """
        # validate flavor
        flavor = self.properties[self.FLAVOR]
        self._validate_flavor(obj, flavor)

        # validate image
        image = self.properties[self.IMAGE]
        if image is not None:
            self._validate_image(obj, image)

        # validate key_name
        keypair = self.properties[self.KEY_NAME]
        if keypair is not None:
            self._validate_keypair(obj, keypair)

        return True

contrib/kubernetes/kube/master.py (new file)

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64

import jinja2
from oslo_log import log as logging
from oslo_utils import encodeutils
import six

from kube import base
from senlin.common import consts
from senlin.common import exception as exc
from senlin.common.i18n import _
from senlin.common import schema

LOG = logging.getLogger(__name__)


class ServerProfile(base.KubeBaseProfile):
    """Profile for a kubernetes master server."""

    VERSIONS = {
        '1.0': [
            {'status': consts.EXPERIMENTAL, 'since': '2017.10'}
        ]
    }
    KEYS = (
        CONTEXT, FLAVOR, IMAGE, KEY_NAME,
        PUBLIC_NETWORK,
    ) = (
        'context', 'flavor', 'image', 'key_name',
        'public_network',
    )

    INTERNAL_KEYS = (
        KUBEADM_TOKEN, KUBE_MASTER_IP, SECURITY_GROUP,
        PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER,
        KUBE_MASTER_FLOATINGIP, KUBE_MASTER_FLOATINGIP_ID,
        SCALE_OUT_RECV_ID, SCALE_OUT_URL,
    ) = (
        'kubeadm_token', 'kube_master_ip', 'security_group',
        'private_network', 'private_subnet', 'private_router',
        'kube_master_floatingip', 'kube_master_floatingip_id',
        'scale_out_recv_id', 'scale_out_url',
    )

    NETWORK_KEYS = (
        PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS,
        FLOATING_NETWORK, FLOATING_IP,
    ) = (
        'port', 'fixed_ip', 'network', 'security_groups',
        'floating_network', 'floating_ip',
    )

    properties_schema = {
        CONTEXT: schema.Map(
            _('Customized security context for operating servers.'),
        ),
        FLAVOR: schema.String(
            _('ID of flavor used for the server.'),
            required=True,
            updatable=True,
        ),
        IMAGE: schema.String(
            # IMAGE is not required, because there could be BDM or BDMv2
            # support and the corresponding settings effective
            _('ID of image to be used for the new server.'),
            updatable=True,
        ),
        KEY_NAME: schema.String(
            _('Name of Nova keypair to be injected to server.'),
        ),
        PUBLIC_NETWORK: schema.String(
            _('Public network for kubernetes.'),
            required=True,
        ),
    }

    def __init__(self, type_name, name, **kwargs):
        super(ServerProfile, self).__init__(type_name, name, **kwargs)
        self.server_id = None

    def do_cluster_create(self, obj):
        self._generate_kubeadm_token(obj)
        self._create_security_group(obj)
        self._create_network(obj)

    def do_cluster_delete(self, obj):
        self._delete_network(obj)
        self._delete_security_group(obj)

    def do_create(self, obj):
        """Create a server for the node object.

        :param obj: The node object for which a server will be created.
        """
        kwargs = {}
        for key in self.KEYS:
            if self.properties[key] is not None:
                kwargs[key] = self.properties[key]

        image_ident = self.properties[self.IMAGE]
        if image_ident is not None:
            image = self._validate_image(obj, image_ident, 'create')
            kwargs.pop(self.IMAGE)
            kwargs['imageRef'] = image.id

        flavor_ident = self.properties[self.FLAVOR]
        flavor = self._validate_flavor(obj, flavor_ident, 'create')
        kwargs.pop(self.FLAVOR)
        kwargs['flavorRef'] = flavor.id

        keypair_name = self.properties[self.KEY_NAME]
        if keypair_name:
            keypair = self._validate_keypair(obj, keypair_name, 'create')
            kwargs['key_name'] = keypair.name

        kwargs['name'] = obj.name

        metadata = self._build_metadata(obj, {})
        kwargs['metadata'] = metadata

        jj_vars = {}
        cluster_data = self._get_cluster_data(obj)
        kwargs['networks'] = [{'uuid': cluster_data[self.PRIVATE_NETWORK]}]

        # Get user_data parameters from the cluster data
        jj_vars['KUBETOKEN'] = cluster_data[self.KUBEADM_TOKEN]
        jj_vars['MASTER_FLOATINGIP'] = cluster_data[
            self.KUBE_MASTER_FLOATINGIP]

        # user_data = self.properties[self.USER_DATA]
        user_data = base.loadScript('./scripts/master.sh')
        if user_data is not None:
            # Use jinja2 to replace variables defined in user_data
            try:
                jj_t = jinja2.Template(user_data)
                user_data = jj_t.render(**jj_vars)
            except (jinja2.exceptions.UndefinedError, ValueError) as ex:
                # TODO(anyone) Handle jinja2 error
                pass
            ud = encodeutils.safe_encode(user_data)
            kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud))

        sgid = self._get_security_group(obj)
        kwargs['security_groups'] = [{'name': sgid}]

        server = None
        resource_id = None
        try:
            server = self.compute(obj).server_create(**kwargs)
            self.compute(obj).wait_for_server(server.id)
            server = self.compute(obj).server_get(server.id)
            self._update_master_ip(obj, server.addresses[''][0]['addr'])
            self._associate_floatingip(obj, server)
            LOG.info("Created master node: %s" % server.id)
            return server.id
        except exc.InternalError as ex:
            if server and server.id:
                resource_id = server.id
            raise exc.EResourceCreation(type='server', message=ex.message,
                                        resource_id=resource_id)

    def do_delete(self, obj, **params):
        """Delete the physical resource associated with the specified node.

        :param obj: The node object to operate on.
        :param kwargs params: Optional keyword arguments for the delete
                              operation.
        :returns: This operation always returns True unless an exception is
                  caught.
        :raises: `EResourceDeletion` if interaction with compute service fails.
        """
        if not obj.physical_id:
            return True

        server_id = obj.physical_id
        ignore_missing = params.get('ignore_missing', True)
        internal_ports = obj.data.get('internal_ports', [])
        force = params.get('force', False)

        try:
            self._disassociate_floatingip(obj, server_id)
            driver = self.compute(obj)
            if force:
                driver.server_force_delete(server_id, ignore_missing)
            else:
                driver.server_delete(server_id, ignore_missing)
            driver.wait_for_server_delete(server_id)
            if internal_ports:
                ex = self._delete_ports(obj, internal_ports)
                if ex:
                    raise ex
            return True
        except exc.InternalError as ex:
            raise exc.EResourceDeletion(type='server', id=server_id,
                                        message=six.text_type(ex))

contrib/kubernetes/kube/scripts/master.sh (new file)

#!/bin/sh
HOSTNAME=`hostname`
echo "127.0.0.1 $HOSTNAME" >> /etc/hosts
apt-get update && apt-get install -y docker.io curl apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
PODNETWORKCIDR=10.244.0.0/16
kubeadm init --token {{ KUBETOKEN }} --skip-preflight-checks --pod-network-cidr=$PODNETWORKCIDR --apiserver-cert-extra-sans={{ MASTER_FLOATINGIP }} --token-ttl 0
mkdir -p $HOME/.kube
cp -i /etc/kubernetes/admin.conf $HOME/.kube/config
chown $(id -u):$(id -g) $HOME/.kube/config
mkdir -p /root/.kube
cp -i /etc/kubernetes/admin.conf /root/.kube/config
chown root:root /root/.kube/config
cp -i /etc/kubernetes/admin.conf /opt/admin.kubeconf
echo "# Set up pod network"
kubectl apply -f https://raw.githubusercontent.com/coreos/flannel/v0.9.0/Documentation/kube-flannel.yml
echo "# Install kubernetes dashboard"
kubectl create -f https://raw.githubusercontent.com/kubernetes/dashboard/master/src/deploy/recommended/kubernetes-dashboard.yaml
echo "# Install heapster"
kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/grafana.yaml
kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/heapster.yaml
kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/influxdb/influxdb.yaml
kubectl create -f https://raw.githubusercontent.com/kubernetes/heapster/master/deploy/kube-config/rbac/heapster-rbac.yaml
echo "# Download monitor script"
curl -o /opt/monitor.sh https://raw.githubusercontent.com/lynic/templates/master/k8s/monitor.sh
chmod a+x /opt/monitor.sh
echo "*/1 * * * * root bash /opt/monitor.sh 2>&1 >> /var/log/kube-monitor.log" > /etc/cron.d/kube-monitor
systemctl restart cron
echo "# Get status"
kubectl get nodes

contrib/kubernetes/kube/scripts/worker.sh (new file)

#!/bin/sh
HOSTNAME=`hostname`
echo "127.0.0.1 $HOSTNAME" >> /etc/hosts
apt-get update && apt-get install -y docker.io curl apt-transport-https
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list
apt-get update
apt-get install -y kubelet kubeadm kubectl
MASTER_IP={{ MASTERIP }}
kubeadm join --token {{ KUBETOKEN }} --skip-preflight-checks --discovery-token-unsafe-skip-ca-verification $MASTER_IP:6443

contrib/kubernetes/kube/worker.py (new file)

# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import base64

import jinja2
from oslo_log import log as logging
from oslo_utils import encodeutils
import six

from kube import base
from senlin.common import consts
from senlin.common import context
from senlin.common import exception as exc
from senlin.common.i18n import _
from senlin.common import schema
from senlin.objects import cluster as cluster_obj

LOG = logging.getLogger(__name__)


class ServerProfile(base.KubeBaseProfile):
    """Profile for a kubernetes worker node server."""

    VERSIONS = {
        '1.0': [
            {'status': consts.EXPERIMENTAL, 'since': '2017.10'}
        ]
    }
    KEYS = (
        CONTEXT, FLAVOR, IMAGE, KEY_NAME,
    ) = (
        'context', 'flavor', 'image', 'key_name',
    )

    KUBE_KEYS = (
        MASTER_CLUSTER,
    ) = (
        'master_cluster',
    )

    MASTER_CLUSTER_KEYS = (
        KUBEADM_TOKEN, KUBE_MASTER_IP,
        PRIVATE_NETWORK, PRIVATE_SUBNET, PRIVATE_ROUTER,
    ) = (
        'kubeadm_token', 'kube_master_ip',
        'private_network', 'private_subnet', 'private_router',
    )

    INTERNAL_KEYS = (
        SECURITY_GROUP, SCALE_OUT_RECV_ID, SCALE_OUT_URL,
    ) = (
        'security_group', 'scale_out_recv_id', 'scale_out_url',
    )

    NETWORK_KEYS = (
        PORT, FIXED_IP, NETWORK, PORT_SECURITY_GROUPS,
        FLOATING_NETWORK, FLOATING_IP,
    ) = (
        'port', 'fixed_ip', 'network', 'security_groups',
        'floating_network', 'floating_ip',
    )

    properties_schema = {
        CONTEXT: schema.Map(
            _('Customized security context for operating servers.'),
        ),
        FLAVOR: schema.String(
            _('ID of flavor used for the server.'),
            required=True,
            updatable=True,
        ),
        IMAGE: schema.String(
            # IMAGE is not required, because there could be BDM or BDMv2
            # support and the corresponding settings effective
            _('ID of image to be used for the new server.'),
            updatable=True,
        ),
        KEY_NAME: schema.String(
            _('Name of Nova keypair to be injected to server.'),
        ),
        MASTER_CLUSTER: schema.String(
            _('Cluster running kubernetes master.'),
            required=True,
        ),
    }

    def __init__(self, type_name, name, **kwargs):
        super(ServerProfile, self).__init__(type_name, name, **kwargs)
        self.server_id = None

    def _get_master_cluster_info(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        master = self.properties[self.MASTER_CLUSTER]
        try:
            cluster = cluster_obj.Cluster.find(ctx, master)
        except Exception as ex:
            raise exc.EResourceCreation(type='kubernetes.worker',
                                        message=six.text_type(ex))
        for key in self.MASTER_CLUSTER_KEYS:
            if key not in cluster.data:
                raise exc.EResourceCreation(
                    type='kubernetes.worker',
                    message="Can't find %s in cluster %s" % (key, master))

        return cluster.data

    def _get_cluster_data(self, obj):
        ctx = context.get_service_context(user=obj.user, project=obj.project)
        if obj.cluster_id:
            cluster = cluster_obj.Cluster.get(ctx, obj.cluster_id)
            return cluster.data

        return {}

    def do_cluster_create(self, obj):
        self._create_security_group(obj)

    def do_cluster_delete(self, obj):
        self._delete_security_group(obj)

    def do_validate(self, obj):
        """Validate if the spec has provided valid info for server creation.

        :param obj: The node object.
        """
        # validate flavor
        flavor = self.properties[self.FLAVOR]
        self._validate_flavor(obj, flavor)

        # validate image
        image = self.properties[self.IMAGE]
        if image is not None:
            self._validate_image(obj, image)

        # validate key_name
        keypair = self.properties[self.KEY_NAME]
        if keypair is not None:
            self._validate_keypair(obj, keypair)

        return True

    def do_create(self, obj):
        """Create a server for the node object.

        :param obj: The node object for which a server will be created.
        """
        kwargs = {}
        for key in self.KEYS:
            if self.properties[key] is not None:
                kwargs[key] = self.properties[key]

        image_ident = self.properties[self.IMAGE]
        if image_ident is not None:
            image = self._validate_image(obj, image_ident, 'create')
            kwargs.pop(self.IMAGE)
            kwargs['imageRef'] = image.id

        flavor_ident = self.properties[self.FLAVOR]
        flavor = self._validate_flavor(obj, flavor_ident, 'create')
        kwargs.pop(self.FLAVOR)
        kwargs['flavorRef'] = flavor.id

        keypair_name = self.properties[self.KEY_NAME]
        if keypair_name:
            keypair = self._validate_keypair(obj, keypair_name, 'create')
            kwargs['key_name'] = keypair.name

        kwargs['name'] = obj.name

        metadata = self._build_metadata(obj, {})
        kwargs['metadata'] = metadata

        sgid = self._get_security_group(obj)
        kwargs['security_groups'] = [{'name': sgid}]

        jj_vars = {}
        master_cluster = self._get_master_cluster_info(obj)
        kwargs['networks'] = [{'uuid': master_cluster[self.PRIVATE_NETWORK]}]
        jj_vars['KUBETOKEN'] = master_cluster[self.KUBEADM_TOKEN]
        jj_vars['MASTERIP'] = master_cluster[self.KUBE_MASTER_IP]

        user_data = base.loadScript('./scripts/worker.sh')
        if user_data is not None:
            # Use jinja2 to replace variables defined in user_data
            try:
                jj_t = jinja2.Template(user_data)
                user_data = jj_t.render(**jj_vars)
            except (jinja2.exceptions.UndefinedError, ValueError) as ex:
                # TODO(anyone) Handle jinja2 error
                pass
            ud = encodeutils.safe_encode(user_data)
            kwargs['user_data'] = encodeutils.safe_decode(base64.b64encode(ud))

        server = None
        resource_id = None
        try:
            server = self.compute(obj).server_create(**kwargs)
            self.compute(obj).wait_for_server(server.id)
            server = self.compute(obj).server_get(server.id)
            return server.id
        except exc.InternalError as ex:
            if server and server.id:
                resource_id = server.id
            raise exc.EResourceCreation(type='server', message=ex.message,
                                        resource_id=resource_id)

    def do_delete(self, obj, **params):
        """Delete the physical resource associated with the specified node.

        :param obj: The node object to operate on.
        :param kwargs params: Optional keyword arguments for the delete
                              operation.
        :returns: This operation always returns True unless an exception is
                  caught.
        :raises: `EResourceDeletion` if interaction with compute service fails.
        """
        if not obj.physical_id:
            return True

        server_id = obj.physical_id
        ignore_missing = params.get('ignore_missing', True)
        internal_ports = obj.data.get('internal_ports', [])
        force = params.get('force', False)

        try:
            driver = self.compute(obj)
            if force:
                driver.server_force_delete(server_id, ignore_missing)
            else:
                driver.server_delete(server_id, ignore_missing)
            driver.wait_for_server_delete(server_id)
            if internal_ports:
                ex = self._delete_ports(obj, internal_ports)
                if ex:
                    raise ex
            return True
        except exc.InternalError as ex:
            raise exc.EResourceDeletion(type='server', id=server_id,
                                        message=six.text_type(ex))

contrib/kubernetes/requirements.txt (new file)

Jinja2>=2.8,!=2.9.0,!=2.9.1,!=2.9.2,!=2.9.3,!=2.9.4 # BSD License (3 clause)

contrib/kubernetes/setup.cfg (new file)

[metadata]
name = senlin-kubernetes
summary = Kubernetes profile for senlin
description-file =
    README.rst
author = OpenStack
author-email = openstack-dev@lists.openstack.org
home-page = https://docs.openstack.org/senlin/latest/
classifier =
    Environment :: OpenStack
    Intended Audience :: Information Technology
    Intended Audience :: System Administrators
    License :: OSI Approved :: Apache Software License
    Operating System :: POSIX :: Linux
    Programming Language :: Python
    Programming Language :: Python :: 2
    Programming Language :: Python :: 2.7
    Programming Language :: Python :: 3
    Programming Language :: Python :: 3.5

[entry_points]
senlin.profiles =
    senlin.kubernetes.master-1.0 = kube.master:ServerProfile
    senlin.kubernetes.worker-1.0 = kube.worker:ServerProfile

[global]
setup-hooks =
    pbr.hooks.setup_hook

contrib/kubernetes/setup.py (new file)

# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools

# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
    import multiprocessing  # noqa
except ImportError:
    pass

setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)

@@ -26,6 +26,17 @@ class NeutronClient(base.DriverBase):
         network = self.conn.network.find_network(name_or_id, ignore_missing)
         return network
 
+    @sdk.translate_exception
+    def network_create(self, **attr):
+        network = self.conn.network.create_network(**attr)
+        return network
+
+    @sdk.translate_exception
+    def network_delete(self, network, ignore_missing=False):
+        ret = self.conn.network.delete_network(
+            network, ignore_missing=ignore_missing)
+        return ret
+
     @sdk.translate_exception
     def port_find(self, name_or_id, ignore_missing=False):
         port = self.conn.network.find_port(name_or_id, ignore_missing)
@@ -36,11 +47,74 @@ class NeutronClient(base.DriverBase):
         sg = self.conn.network.find_security_group(name_or_id, ignore_missing)
         return sg
 
+    @sdk.translate_exception
+    def security_group_create(self, name, description=''):
+        attr = {
+            'name': name,
+            'description': description,
+        }
+        sg = self.conn.network.create_security_group(**attr)
+        return sg
+
+    @sdk.translate_exception
+    def security_group_delete(self, security_group_id, ignore_missing=False):
+        sg = self.conn.network.delete_security_group(
+            security_group_id, ignore_missing)
+        return sg
+
+    @sdk.translate_exception
+    def security_group_rule_create(self, security_group_id, port_range_min,
+                                   port_range_max=None, ethertype='IPv4',
+                                   remote_ip_prefix='0.0.0.0/0',
+                                   direction='ingress', protocol='tcp'):
+        if port_range_max is None:
+            port_range_max = port_range_min
+        attr = {
+            'direction': direction,
+            'remote_ip_prefix': remote_ip_prefix,
+            'protocol': protocol,
+            'port_range_max': port_range_max,
+            'port_range_min': port_range_min,
+            'security_group_id': security_group_id,
+            'ethertype': ethertype,
+        }
+        rule = self.conn.network.create_security_group_rule(**attr)
+        return rule
+
     @sdk.translate_exception
     def subnet_get(self, name_or_id, ignore_missing=False):
         subnet = self.conn.network.find_subnet(name_or_id, ignore_missing)
         return subnet
 
+    @sdk.translate_exception
+    def subnet_create(self, **attr):
+        subnet = self.conn.network.create_subnet(**attr)
+        return subnet
+
+    @sdk.translate_exception
+    def router_create(self, **attr):
+        router = self.conn.network.create_router(**attr)
+        return router
+
+    @sdk.translate_exception
+    def router_delete(self, router, ignore_missing=False):
+        ret = self.conn.network.delete_router(
+            router, ignore_missing=ignore_missing)
+        return ret
+
+    @sdk.translate_exception
+    def add_interface_to_router(self, router, subnet_id=None, port_id=None):
+        interface = self.conn.network.add_interface_to_router(
+            router, subnet_id=subnet_id, port_id=port_id)
+        return interface
+
+    @sdk.translate_exception
+    def remove_interface_from_router(self, router, subnet_id=None,
+                                     port_id=None):
+        interface = self.conn.network.remove_interface_from_router(
+            router, subnet_id=subnet_id, port_id=port_id)
+        return interface
+
     @sdk.translate_exception
     def port_create(self, **attr):
         res = self.conn.network.create_port(**attr)