diff --git a/magnum/common/exception.py b/magnum/common/exception.py index 441eb7e0a5..1c5cea958b 100644 --- a/magnum/common/exception.py +++ b/magnum/common/exception.py @@ -175,6 +175,10 @@ class InvalidCsr(Invalid): message = _("Received invalid csr %(csr)s.") +class InvalidSubnet(Invalid): + message = _("Received invalid subnet %(subnet)s.") + + class HTTPNotFound(ResourceNotFound): pass diff --git a/magnum/drivers/common/template_def.py b/magnum/drivers/common/template_def.py index 65022bd276..1f6f9b28cc 100644 --- a/magnum/drivers/common/template_def.py +++ b/magnum/drivers/common/template_def.py @@ -36,8 +36,9 @@ template_def_opts = [ default='https://discovery.etcd.io/new?size=%(size)d', help=_('Url for etcd public discovery endpoint.')), cfg.ListOpt('enabled_definitions', - default=['magnum_vm_atomic_k8s', 'magnum_vm_coreos_k8s', - 'magnum_vm_atomic_swarm', 'magnum_vm_ubuntu_mesos'], + default=['magnum_vm_atomic_k8s', 'magnum_bm_fedora_k8s', + 'magnum_vm_coreos_k8s', 'magnum_vm_atomic_swarm', + 'magnum_vm_ubuntu_mesos'], help=_('Enabled bay definition entry points.')), ] diff --git a/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh b/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh index 26971a2608..9a9eb80e6c 100644 --- a/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh +++ b/magnum/drivers/common/templates/fragments/configure_docker_storage_driver_atomic.sh @@ -15,7 +15,7 @@ configure_overlay () { rm -rf /var/lib/docker/* - mkfs.xfs ${device_path} + mkfs.xfs -f ${device_path} echo "${device_path} /var/lib/docker xfs defaults 0 0" >> /etc/fstab mount -a @@ -31,7 +31,7 @@ configure_overlay () { configure_devicemapper () { clear_docker_storage_congiguration - pvcreate ${device_path} + pvcreate -f ${device_path} vgcreate docker ${device_path} echo "VG=docker" > /etc/sysconfig/docker-storage-setup diff --git a/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/Readme.md b/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/Readme.md new file mode 100644 index 0000000000..861430a4c6 --- /dev/null +++ b/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/Readme.md @@ -0,0 +1,42 @@ +Kubernetes elements +=================== + +This directory contains `[diskimage-builder](https://github.com/openstack/diskimage-builder)` +elements to build an image which contains kubernetes required to use kubecluster-fedora-ironic.yaml. 
+ +An example fedora based image and uploaded to glance with the following: + + git clone https://git.openstack.org/openstack/magnum + git clone https://git.openstack.org/openstack/diskimage-builder.git + git clone https://git.openstack.org/openstack/dib-utils.git + export PATH="${PWD}/dib-utils/bin:$PATH" + export ELEMENTS_PATH=diskimage-builder/elements + export ELEMENTS_PATH=${ELEMENTS_PATH}:magnum/magnum/drivers/k8s_fedora_atomic_v1/image/ironic + export DIB_RELEASE=23 + diskimage-builder/bin/disk-image-create baremetal \ + fedora selinux-permissive \ + kubernetes \ + -o fedora-23-kubernetes.qcow2 + + KERNEL_ID=`glance image-create --name fedora-k8s-kernel \ + --visibility public \ + --disk-format=aki \ + --container-format=aki \ + --file=fedora-23-kubernetes.vmlinuz \ + | grep id | tr -d '| ' | cut --bytes=3-57` + RAMDISK_ID=`glance image-create --name fedora-k8s-ramdisk \ + --visibility public \ + --disk-format=ari \ + --container-format=ari \ + --file=fedora-23-kubernetes.initrd \ + | grep id | tr -d '| ' | cut --bytes=3-57` + BASE_ID=`glance image-create --name fedora-k8s \ + --os-distro fedora \ + --visibility public \ + --disk-format=qcow2 \ + --container-format=bare \ + --property kernel_id=$KERNEL_ID \ + --property ramdisk_id=$RAMDISK_ID \ + --file=fedora-23-kubernetes.qcow2 \ + | grep -v kernel | grep -v ramdisk \ + | grep id | tr -d '| ' | cut --bytes=3-57` diff --git a/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/elements-deps b/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/elements-deps new file mode 100644 index 0000000000..7076aba945 --- /dev/null +++ b/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/elements-deps @@ -0,0 +1 @@ +package-installs diff --git a/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/package-installs.yaml b/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/package-installs.yaml new file mode 100644 index 0000000000..ea99a147d8 --- /dev/null +++ b/magnum/drivers/k8s_fedora_atomic_v1/image/ironic/kubernetes/package-installs.yaml @@ -0,0 +1,4 @@ +kubernetes: +etcd: +flannel: +docker-io: \ No newline at end of file diff --git a/magnum/drivers/k8s_fedora_atomic_v1/template_def.py b/magnum/drivers/k8s_fedora_atomic_v1/template_def.py index ad30115aad..634a7af5a4 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/template_def.py +++ b/magnum/drivers/k8s_fedora_atomic_v1/template_def.py @@ -12,8 +12,11 @@ # License for the specific language governing permissions and limitations # under the License. 
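The template_def.py changes that follow add FedoraK8sIronicTemplateDefinition, whose get_fixed_network_id resolves the baymodel's fixed_subnet (name or UUID) to the owning network ID through neutronclient. As a rough standalone sketch of that lookup, with `neutron_client` and `subnet_ref` as placeholder names and ValueError standing in for magnum's InvalidSubnet:

    from neutronclient.common import exceptions as n_exception
    from neutronclient.neutron import v2_0 as neutronV20

    def resolve_network_id(neutron_client, subnet_ref):
        # Accepts either a subnet name or UUID, as the driver code below does.
        try:
            subnet = neutronV20.find_resource_by_name_or_id(
                neutron_client, 'subnet', subnet_ref)
        except n_exception.NeutronException as e:
            # 4xx responses (bad or duplicated subnet name) are user errors;
            # 5xx responses are re-raised unchanged.
            if getattr(e, 'status_code', 400) < 500:
                raise ValueError("invalid subnet: %s" % e)
            raise
        if subnet['ip_version'] != 4:
            raise ValueError("Subnet IP version should be 4")
        return subnet['network_id']
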
+from neutronclient.common import exceptions as n_exception +from neutronclient.neutron import v2_0 as neutronV20 import os +from magnum.common import exception from magnum.drivers.common import template_def from oslo_config import cfg @@ -158,3 +161,60 @@ class AtomicK8sTemplateDefinition(K8sTemplateDefinition): def template_path(self): return os.path.join(os.path.dirname(os.path.realpath(__file__)), 'templates/kubecluster.yaml') + + +class FedoraK8sIronicTemplateDefinition(AtomicK8sTemplateDefinition): + """Kubernetes template for a Fedora Baremetal.""" + + provides = [ + {'server_type': 'bm', + 'os': 'fedora', + 'coe': 'kubernetes'}, + ] + + def __init__(self): + super(FedoraK8sIronicTemplateDefinition, self).__init__() + self.add_parameter('fixed_subnet', + baymodel_attr='fixed_subnet', + param_type=str, + required=True) + + def get_fixed_network_id(self, osc, baymodel): + try: + subnet = neutronV20.find_resource_by_name_or_id( + osc.neutron(), + 'subnet', + baymodel.fixed_subnet + ) + except n_exception.NeutronException as e: + # NOTE(yuanying): NeutronCLIError doesn't have status_code + # if subnet name is duplicated, NeutronClientNoUniqueMatch + # (which is kind of NeutronCLIError) will be raised. + if getattr(e, 'status_code', 400) < 500: + raise exception.InvalidSubnet(message=("%s" % e)) + else: + raise e + + if subnet['ip_version'] != 4: + raise exception.InvalidSubnet( + message="Subnet IP version should be 4" + ) + + return subnet['network_id'] + + def get_params(self, context, baymodel, bay, **kwargs): + extra_params = kwargs.pop('extra_params', {}) + + osc = self.get_osc(context) + extra_params['fixed_network'] = self.get_fixed_network_id(osc, + baymodel) + + return super(FedoraK8sIronicTemplateDefinition, + self).get_params(context, baymodel, bay, + extra_params=extra_params, + **kwargs) + + @property + def template_path(self): + return os.path.join(os.path.dirname(os.path.realpath(__file__)), + 'templates/kubecluster-fedora-ironic.yaml') diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-docker-storage.sh b/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-docker-storage.sh index 7e5f2d52dc..5695b184fd 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-docker-storage.sh +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-docker-storage.sh @@ -2,25 +2,32 @@ . /etc/sysconfig/heat-params -attempts=60 -while [ ${attempts} -gt 0 ]; do - device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) - if [ -n "${device_name}" ]; then - break - fi - echo "waiting for disk device" - sleep 0.5 - udevadm trigger - let attempts-- -done +if [ "$ENABLE_CINDER" == "False" ]; then + # FIXME(yuanying): Use ephemeral disk for docker storage + # Currently Ironic doesn't support cinder volumes, + # so we must use preserved ephemeral disk instead of a cinder volume. 
+ device_path=$(readlink -f /dev/disk/by-label/ephemeral0) +else + attempts=60 + while [ ${attempts} -gt 0 ]; do + device_name=$(ls /dev/disk/by-id | grep ${DOCKER_VOLUME:0:20}$) + if [ -n "${device_name}" ]; then + break + fi + echo "waiting for disk device" + sleep 0.5 + udevadm trigger + let attempts-- + done -if [ -z "${device_name}" ]; then - echo "ERROR: disk device does not exist" >&2 - exit 1 + if [ -z "${device_name}" ]; then + echo "ERROR: disk device does not exist" >&2 + exit 1 + fi + + device_path=/dev/disk/by-id/${device_name} fi -device_path=/dev/disk/by-id/${device_name} - $configure_docker_storage_driver if [ "$DOCKER_STORAGE_DRIVER" = "overlay" ]; then diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-etcd.sh b/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-etcd.sh index 79350b671d..74563c8ab4 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-etcd.sh +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/fragments/configure-etcd.sh @@ -2,7 +2,12 @@ . /etc/sysconfig/heat-params -myip="$KUBE_NODE_IP" +if [ -z "$KUBE_NODE_IP" ]; then + # FIXME(yuanying): Set KUBE_NODE_IP correctly + KUBE_NODE_IP=$(curl -s http://169.254.169.254/latest/meta-data/local-ipv4) +fi + +myip="${KUBE_NODE_IP}" cat > /etc/etcd/etcd.conf < + size of a cinder volume to allocate to docker for container/image + storage + default: 25 + + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + constraints: + - allowed_values: ["devicemapper", "overlay"] + + wait_condition_timeout: + type: number + description: > + timeout for the Wait Conditions + default: 6000 + minions_to_remove: type: comma_delimited_list description: > @@ -79,15 +124,53 @@ parameters: be empty when doing an create. default: [] - wait_condition_timeout: - type: number - description : > - timeout for the Wait Conditions - default: 6000 - - auth_url: + discovery_url: type: string - description: url for keystone + description: > + Discovery URL used for bootstrapping the etcd cluster. + + registry_enabled: + type: boolean + description: > + Indicates whether the docker registry is enabled. + default: false + + registry_port: + type: number + description: port of registry service + default: 5000 + + swift_region: + type: string + description: region of swift service + default: "" + + registry_container: + type: string + description: > + name of swift container which docker registry stores images in + default: "container" + + registry_insecure: + type: boolean + description: > + indicates whether to skip TLS verification between registry and backend storage + default: true + + registry_chunksize: + type: number + description: > + size fo the data segments for the swift dynamic large objects + default: 5242880 + + volume_driver: + type: string + description: volume driver to use for container storage + default: "" + + region_name: + type: string + description: A logically separate section of the cluster username: type: string @@ -108,6 +191,50 @@ parameters: description: > tenant name + loadbalancing_protocol: + type: string + description: > + The protocol which is used for load balancing. If you want to change + tls_disabled option to 'True', please change this to "HTTP". 
+ default: TCP + constraints: + - allowed_values: ["TCP", "HTTP"] + + tls_disabled: + type: boolean + description: whether or not to disable TLS + default: False + + kubernetes_port: + type: number + description: > + The port which are used by kube-apiserver to provide Kubernetes + service. + default: 6443 + + bay_uuid: + type: string + description: identifier for the bay this template is generating + + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + + http_proxy: + type: string + description: http proxy address for docker + default: "" + + https_proxy: + type: string + description: https proxy address for docker + default: "" + + no_proxy: + type: string + description: no proxies for docker + default: "" + trustee_domain_id: type: string description: domain id of the trustee @@ -135,43 +262,194 @@ parameters: default: "" hidden: true + auth_url: + type: string + description: url for keystone + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + default: v1.2.0 + + insecure_registry_url: + type: string + description: insecure registry url + default: "" + resources: ###################################################################### # - # kubernetes masters. This is a resource group that will create - # 1 master. + # security groups. we need to permit network traffic of various + # sorts. # - kube_master: - type: OS::Heat::ResourceGroup - depends_on: - - extrouter_inside + secgroup_base: + type: OS::Neutron::SecurityGroup properties: - count: 1 + rules: + - protocol: icmp + - protocol: tcp + port_range_min: 22 + port_range_max: 22 + + secgroup_kube_master: + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: tcp + port_range_min: 7080 + port_range_max: 7080 + - protocol: tcp + port_range_min: 8080 + port_range_max: 8080 + - protocol: tcp + port_range_min: 2379 + port_range_max: 2379 + - protocol: tcp + port_range_min: 2380 + port_range_max: 2380 + - protocol: tcp + port_range_min: 6443 + port_range_max: 6443 + - protocol: tcp + port_range_min: 30000 + port_range_max: 32767 + + secgroup_kube_minion: + type: OS::Neutron::SecurityGroup + properties: + rules: + - protocol: icmp + - protocol: tcp + - protocol: udp + + ###################################################################### + # + # load balancers. + # + + api_monitor: + type: Magnum::Optional::Neutron::Pool::HealthMonitor + properties: + type: TCP + delay: 5 + max_retries: 5 + timeout: 5 + + api_pool: + type: Magnum::Optional::Neutron::Pool + properties: + protocol: {get_param: loadbalancing_protocol} + monitors: [{get_resource: api_monitor}] + subnet: {get_param: fixed_subnet} + lb_method: ROUND_ROBIN + vip: + protocol_port: {get_param: kubernetes_port} + + api_pool_floating: + type: Magnum::Optional::Neutron::Pool::FloatingIP + properties: + floating_network: {get_param: external_network} + port_id: {get_attr: [api_pool, vip, port_id]} + + etcd_monitor: + type: Magnum::Optional::Neutron::Pool::HealthMonitor + properties: + type: TCP + delay: 5 + max_retries: 5 + timeout: 5 + + etcd_pool: + type: Magnum::Optional::Neutron::Pool + properties: + protocol: HTTP + monitors: [{get_resource: etcd_monitor}] + subnet: {get_param: fixed_subnet} + lb_method: ROUND_ROBIN + vip: + protocol_port: 2379 + + ###################################################################### + # + # resources that expose the IPs of either the kube master or a given + # LBaaS pool depending on whether LBaaS is enabled for the bay. 
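In behaviour, the Magnum::ApiGatewaySwitcher resources defined just below act as simple selectors: when the bay was created with LBaaS support they surface the pool addresses, otherwise they fall back to the master's addresses. A rough model of that selection (not the Heat resource's actual implementation; the property names match the template):

    def api_gateway_switch(attrs, lbaas_enabled):
        # attrs holds the four properties passed to Magnum::ApiGatewaySwitcher;
        # the return values correspond to its public_ip / private_ip attributes.
        if lbaas_enabled:
            return attrs['pool_public_ip'], attrs['pool_private_ip']
        return attrs['master_public_ip'], attrs['master_private_ip']
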
+ # + + api_address_switch: + type: Magnum::ApiGatewaySwitcher + properties: + pool_public_ip: {get_attr: [api_pool_floating, floating_ip_address]} + pool_private_ip: {get_attr: [api_pool, vip, address]} + master_public_ip: {get_attr: [kube_masters, resource.0.kube_master_external_ip]} + master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} + + etcd_address_switch: + type: Magnum::ApiGatewaySwitcher + properties: + pool_private_ip: {get_attr: [etcd_pool, vip, address]} + master_private_ip: {get_attr: [kube_masters, resource.0.kube_master_ip]} + + ###################################################################### + # + # kubernetes masters. This is a resource group that will create + # masters. + # + + kube_masters: + type: OS::Heat::ResourceGroup + properties: + count: {get_param: number_of_masters} resource_def: - type: kubemaster.yaml + type: kubemaster-fedora-ironic.yaml properties: + api_public_address: {get_attr: [api_pool_floating, floating_ip_address]} + api_private_address: {get_attr: [api_pool, vip, address]} ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} master_flavor: {get_param: master_flavor} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} + docker_storage_driver: {get_param: docker_storage_driver} wait_condition_timeout: {get_param: wait_condition_timeout} + network_driver: {get_param: network_driver} flannel_network_cidr: {get_param: flannel_network_cidr} flannel_network_subnetlen: {get_param: flannel_network_subnetlen} flannel_backend: {get_param: flannel_backend} portal_network_cidr: {get_param: portal_network_cidr} - fixed_network: {get_resource: fixed_network} - auth_url: {get_param: auth_url} + discovery_url: {get_param: discovery_url} + bay_uuid: {get_param: bay_uuid} + magnum_url: {get_param: magnum_url} + fixed_network: {get_param: fixed_network} + fixed_subnet: {get_param: fixed_subnet} + api_pool_id: {get_resource: api_pool} + etcd_pool_id: {get_resource: etcd_pool} username: {get_param: username} password: {get_param: password} tenant_name: {get_param: tenant_name} + kubernetes_port: {get_param: kubernetes_port} + tls_disabled: {get_param: tls_disabled} + secgroup_base_id: {get_resource: secgroup_base} + secgroup_kube_master_id: {get_resource: secgroup_kube_master} + http_proxy: {get_param: http_proxy} + https_proxy: {get_param: https_proxy} + no_proxy: {get_param: no_proxy} + kube_version: {get_param: kube_version} + trustee_user_id: {get_param: trustee_user_id} + trustee_password: {get_param: trustee_password} + trust_id: {get_param: trust_id} + auth_url: {get_param: auth_url} + insecure_registry_url: {get_param: insecure_registry_url} + + ###################################################################### + # + # kubernetes minions. This is an resource group that will initially + # create minions, and needs to be manually scaled. 
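"Manually scaled" here means resizing the ResourceGroup by updating the parent stack's number_of_minions parameter. A hedged sketch of doing that with python-heatclient (the keystoneauth session and stack_id are assumed to exist already, and the exact update keywords can vary between heatclient releases):

    from heatclient.client import Client

    def scale_minions(session, stack_id, count):
        heat = Client('1', session=session)
        # PATCH-style update: only number_of_minions changes; the rest of the
        # stack definition is left untouched.
        heat.stacks.update(stack_id,
                           existing=True,
                           parameters={'number_of_minions': count})
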
+ # kube_minions: type: OS::Heat::ResourceGroup - depends_on: - - kube_master properties: count: {get_param: number_of_minions} removal_policies: [{resource_list: {get_param: minions_to_remove}}] @@ -180,39 +458,86 @@ resources: properties: ssh_key_name: {get_param: ssh_key_name} server_image: {get_param: server_image} - server_flavor: {get_param: server_flavor} + minion_flavor: {get_param: minion_flavor} fixed_network: {get_param: fixed_network} - kube_master_ip: {get_attr: [kube_master, kube_master_external_ip]} + fixed_subnet: {get_param: fixed_subnet} + network_driver: {get_param: network_driver} + flannel_network_cidr: {get_param: flannel_network_cidr} + kube_master_ip: {get_attr: [api_address_switch, private_ip]} + etcd_server_ip: {get_attr: [etcd_address_switch, private_ip]} external_network: {get_param: external_network} kube_allow_priv: {get_param: kube_allow_priv} + docker_storage_driver: {get_param: docker_storage_driver} + wait_condition_timeout: {get_param: wait_condition_timeout} + registry_enabled: {get_param: registry_enabled} + registry_port: {get_param: registry_port} + swift_region: {get_param: swift_region} + registry_container: {get_param: registry_container} + registry_insecure: {get_param: registry_insecure} + registry_chunksize: {get_param: registry_chunksize} + bay_uuid: {get_param: bay_uuid} + magnum_url: {get_param: magnum_url} + volume_driver: {get_param: volume_driver} + region_name: {get_param: region_name} + tenant_name: {get_param: tenant_name} + auth_url: {get_param: auth_url} + username: {get_param: username} + password: {get_param: password} + kubernetes_port: {get_param: kubernetes_port} + tls_disabled: {get_param: tls_disabled} + secgroup_kube_minion_id: {get_resource: secgroup_kube_minion} + http_proxy: {get_param: http_proxy} + https_proxy: {get_param: https_proxy} + no_proxy: {get_param: no_proxy} + kube_version: {get_param: kube_version} + trustee_user_id: {get_param: trustee_user_id} + trustee_username: {get_param: trustee_username} + trustee_password: {get_param: trustee_password} + trustee_domain_id: {get_param: trustee_domain_id} + trust_id: {get_param: trust_id} + auth_url: {get_param: auth_url} + insecure_registry_url: {get_param: insecure_registry_url} outputs: api_address: - value: {get_attr: [kube_master, kube_master_external_ip]} + value: + str_replace: + template: api_ip_address + params: + api_ip_address: {get_attr: [api_address_switch, public_ip]} description: > This is the API endpoint of the Kubernetes cluster. Use this to access the Kubernetes API. + registry_address: + value: + str_replace: + template: localhost:port + params: + port: {get_param: registry_port} + description: + This is the url of docker registry server where you can store docker + images. + kube_masters_private: - value: {get_attr: [kube_master, kube_master_ip]} + value: {get_attr: [kube_masters, kube_master_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes masters. kube_masters: - value: {get_attr: [kube_master, kube_master_external_ip]} + value: {get_attr: [kube_masters, kube_master_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes masters. - Use these IP addresses to log in to the Kubernetes masters via ssh or to access - the Kubernetes API. + Use these IP addresses to log in to the Kubernetes masters via ssh. 
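The outputs in this section (API address, registry address, and the master/minion IP lists) are what operators and Magnum read back once the stack completes. For reference, a small sketch of listing them with an already-constructed python-heatclient client (`heat` and `stack_id` are placeholders):

    def show_bay_outputs(heat, stack_id):
        stack = heat.stacks.get(stack_id)
        # Each output carries output_key, output_value and description.
        for output in stack.outputs:
            print('%s = %s' % (output['output_key'], output['output_value']))
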
kube_minions_private: - value: {get_attr: [kube_minions, kube_node_ip]} + value: {get_attr: [kube_minions, kube_minion_ip]} description: > This is a list of the "private" IP addresses of all the Kubernetes minions. kube_minions: - value: {get_attr: [kube_minions, kube_node_external_ip]} + value: {get_attr: [kube_minions, kube_minion_external_ip]} description: > This is a list of the "public" IP addresses of all the Kubernetes minions. Use these IP addresses to log in to the Kubernetes minions via ssh. diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster-fedora-ironic.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster-fedora-ironic.yaml index 822a70b8e4..c4631001c6 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster-fedora-ironic.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubemaster-fedora-ironic.yaml @@ -1,24 +1,12 @@ heat_template_version: 2014-10-16 description: > - This template will create a group of kubernetes masters with the - number of masters specified by the number_of_masters parameter, which - defaults to 1. + This is a nested stack that defines a single Kubernetes master, This stack is + included by an ResourceGroup resource in the parent template + (kubecluster.yaml). parameters: - ssh_key_name: - type: string - description: name of ssh key to be provisioned on our server - - external_network: - type: string - description: uuid/name of a network to use for floating ip addresses - - fixed_network: - type: string - description: name of private network into which servers get deployed - server_image: type: string description: glance image used to boot the server @@ -27,26 +15,19 @@ parameters: type: string description: flavor to use when booting the server + ssh_key_name: + type: string + description: name of ssh key to be provisioned on our server + + external_network: + type: string + description: uuid/name of a network to use for floating ip addresses + portal_network_cidr: type: string description: > address range used by kubernetes for service portals - flannel_network_cidr: - type: string - description: network range for flannel overlay network - - flannel_network_subnetlen: - type: number - description: size of subnet assigned to each minion - - flannel_backend: - type: string - description: > - specify the backend for flannel, default udp backend - constraints: - - allowed_values: ["udp", "vxlan", "host-gw"] - kube_allow_priv: type: string description: > @@ -54,16 +35,98 @@ parameters: constraints: - allowed_values: ["true", "false"] + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + constraints: + - allowed_values: ["devicemapper", "overlay"] + + flannel_network_cidr: + type: string + description: network range for flannel overlay network + + flannel_network_subnetlen: + type: number + description: size of subnet assigned to each master + + flannel_backend: + type: string + description: > + specify the backend for flannel, default udp backend + constraints: + - allowed_values: ["udp", "vxlan", "host-gw"] + + discovery_url: + type: string + description: > + Discovery URL used for bootstrapping the etcd cluster. + + tls_disabled: + type: boolean + description: whether or not to enable TLS + + kubernetes_port: + type: number + description: > + The port which are used by kube-apiserver to provide Kubernetes + service. 
+ + bay_uuid: + type: string + description: identifier for the bay this template is generating + + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + + api_public_address: + type: string + description: Public IP address of the Kubernetes master server. + default: "" + + api_private_address: + type: string + description: Private IP address of the Kubernetes master server. + default: "" + + fixed_network: + type: string + description: Network from which to allocate fixed addresses. + + fixed_subnet: + type: string + description: Subnet from which to allocate fixed addresses. + + network_driver: + type: string + description: network driver to use for instantiating container networks + wait_condition_timeout: type: number description : > timeout for the Wait Conditions + secgroup_base_id: + type: string + description: ID of the security group for base. + + secgroup_kube_master_id: + type: string + description: ID of the security group for kubernetes master. + + api_pool_id: + type: string + description: ID of the load balancer pool of k8s API server. + + etcd_pool_id: + type: string + description: ID of the load balancer pool of etcd server. + auth_url: type: string description: > - url for kubernetes to authenticate before sending request to neutron - must be v2 since kubernetes backend only suppor v2 at this point + url for kubernetes to authenticate username: type: string @@ -80,6 +143,40 @@ parameters: description: > tenant name + http_proxy: + type: string + description: http proxy address for docker + + https_proxy: + type: string + description: https proxy address for docker + + no_proxy: + type: string + description: no proxies for docker + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + + trustee_user_id: + type: string + description: user id of the trustee + + trustee_password: + type: string + description: password of the trustee + hidden: true + + trust_id: + type: string + description: id of the trust which is used by the trustee + hidden: true + + insecure_registry_url: + type: string + description: insecure registry url + resources: master_wait_handle: @@ -94,38 +191,23 @@ resources: ###################################################################### # - # software configs + # resource that exposes the IPs of either the kube master or the API + # LBaaS pool depending on whether LBaaS is enabled for the bay. 
# - disable_selinux: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: {get_file: fragments/disable-selinux.sh} - kube_master_init: - type: OS::Heat::MultipartMime + api_address_switch: + type: Magnum::ApiGatewaySwitcher properties: - parts: - - config: {get_resource: disable_selinux} - - config: {get_resource: write_heat_params} - - config: {get_resource: enable_etcd} - - config: {get_resource: write_kube_os_config} - - config: {get_resource: configure_kubernetes} - - config: {get_resource: enable_services} - - config: {get_resource: configure_flannel} - - config: {get_resource: master_wc_notify} + pool_public_ip: {get_param: api_public_address} + pool_private_ip: {get_param: api_private_address} + master_public_ip: '' + master_private_ip: '' - master_wc_notify: - type: OS::Heat::SoftwareConfig - properties: - group: ungrouped - config: - str_replace: - template: | - #!/bin/bash -v - wc_notify --data-binary '{"status": "SUCCESS"}' - params: - wc_notify: {get_attr: [master_wait_handle, curl_cli]} + ###################################################################### + # + # software configs. these are components that are combined into + # a multipart MIME user-data archive. + # write_heat_params: type: OS::Heat::SoftwareConfig @@ -135,34 +217,58 @@ resources: str_replace: template: {get_file: fragments/write-heat-params-master.yaml} params: + "$KUBE_API_PUBLIC_ADDRESS": {get_attr: [api_address_switch, public_ip]} + "$KUBE_API_PRIVATE_ADDRESS": {get_attr: [api_address_switch, private_ip]} + "$KUBE_API_PORT": {get_param: kubernetes_port} "$KUBE_ALLOW_PRIV": {get_param: kube_allow_priv} + "$DOCKER_VOLUME": 'None' + "$DOCKER_STORAGE_DRIVER": {get_param: docker_storage_driver} + "$NETWORK_DRIVER": {get_param: network_driver} "$FLANNEL_NETWORK_CIDR": {get_param: flannel_network_cidr} "$FLANNEL_NETWORK_SUBNETLEN": {get_param: flannel_network_subnetlen} "$FLANNEL_BACKEND": {get_param: flannel_backend} "$PORTAL_NETWORK_CIDR": {get_param: portal_network_cidr} + "$ETCD_DISCOVERY_URL": {get_param: discovery_url} "$AUTH_URL": {get_param: auth_url} "$USERNAME": {get_param: username} "$PASSWORD": {get_param: password} "$TENANT_NAME": {get_param: tenant_name} "$CLUSTER_SUBNET": {get_param: fixed_subnet} + "$TLS_DISABLED": {get_param: tls_disabled} + "$BAY_UUID": {get_param: bay_uuid} + "$MAGNUM_URL": {get_param: magnum_url} + "$HTTP_PROXY": {get_param: http_proxy} + "$HTTPS_PROXY": {get_param: https_proxy} + "$NO_PROXY": {get_param: no_proxy} + "$KUBE_VERSION": {get_param: kube_version} + "$WAIT_CURL": {get_attr: [master_wait_handle, curl_cli]} + "$TRUSTEE_USER_ID": {get_param: trustee_user_id} + "$TRUSTEE_PASSWORD": {get_param: trustee_password} + "$TRUST_ID": {get_param: trust_id} + "$INSECURE_REGISTRY_URL": {get_param: insecure_registry_url} + "$ENABLE_CINDER": "False" - configure_kubernetes: + make_cert: type: OS::Heat::SoftwareConfig properties: group: ungrouped - config: {get_file: fragments/configure-kubernetes-master.sh} + config: {get_file: fragments/make-cert.sh} - enable_etcd: + configure_docker_storage: type: OS::Heat::SoftwareConfig properties: group: ungrouped - config: {get_file: fragments/enable-etcd.sh} + config: + str_replace: + params: + $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} + template: {get_file: fragments/configure-docker-storage.sh} - configure_flannel: + configure_etcd: type: OS::Heat::SoftwareConfig properties: group: ungrouped - config: {get_file: 
fragments/configure-flannel.sh} + config: {get_file: fragments/configure-etcd.sh} write_kube_os_config: type: OS::Heat::SoftwareConfig @@ -170,12 +276,123 @@ resources: group: ungrouped config: {get_file: fragments/write-kube-os-config.sh} + configure_kubernetes: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/configure-kubernetes-master.sh} + + write_network_config: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/write-network-config.sh} + + network_config_service: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/network-config-service.sh} + enable_services: type: OS::Heat::SoftwareConfig properties: group: ungrouped config: {get_file: fragments/enable-services-master.sh} + kube_examples: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/kube-examples.yaml} + + network_service: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/network-service.sh} + + enable_kube_podmaster: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/enable-kube-podmaster.sh} + + kube_system_namespace_service: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/kube-system-namespace-service.sh} + + kube_ui_service: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/kube-ui-service.sh} + + enable_kube_proxy: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/enable-kube-proxy-master.sh} + + master_wc_notify: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/wc-notify-master.sh} + + disable_selinux: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/disable-selinux.sh} + + add_proxy: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/add-proxy.sh} + + kube_master_init: + type: OS::Heat::MultipartMime + properties: + parts: + - config: {get_resource: disable_selinux} + - config: {get_resource: write_heat_params} + - config: {get_resource: configure_etcd} + - config: {get_resource: write_kube_os_config} + - config: {get_resource: make_cert} + - config: {get_resource: configure_docker_storage} + - config: {get_resource: configure_kubernetes} + - config: {get_resource: add_proxy} + - config: {get_resource: enable_services} + - config: {get_resource: write_network_config} + - config: {get_resource: network_config_service} + - config: {get_resource: network_service} + - config: {get_resource: kube_system_namespace_service} + - config: {get_resource: enable_kube_podmaster} + - config: {get_resource: enable_kube_proxy} + - config: {get_resource: kube_ui_service} + - config: {get_resource: kube_examples} + - config: {get_resource: master_wc_notify} + + ###################################################################### + # + # a single kubernetes master. 
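The kube_master_init resource above stitches the ordered SoftwareConfig fragments into a single multipart MIME document that cloud-init runs part by part on first boot. Roughly, the payload OS::Heat::MultipartMime hands to the server looks like the output of this sketch (the fragment names are illustrative, not taken from the template):

    from email.mime.multipart import MIMEMultipart
    from email.mime.text import MIMEText

    def build_user_data(fragments):
        # fragments: ordered (name, shell_script_text) pairs, mirroring the
        # order of the `parts` list above.
        outer = MIMEMultipart()
        for name, body in fragments:
            part = MIMEText(body, 'x-shellscript')
            part.add_header('Content-Disposition', 'attachment', filename=name)
            outer.attach(part)
        return outer.as_string()
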
+ # + + kube_master: + type: OS::Nova::Server + properties: + image: {get_param: server_image} + flavor: {get_param: master_flavor} + key_name: {get_param: ssh_key_name} + user_data_format: RAW + user_data: {get_resource: kube_master_init} + networks: + - network: {get_param: fixed_network} + kube_master_floating: type: OS::Neutron::FloatingIP properties: @@ -184,16 +401,19 @@ resources: port_id: get_attr: [kube_master, addresses, {get_param: fixed_network}, 0, port] - kube_master: - type: OS::Nova::Server + api_pool_member: + type: Magnum::Optional::Neutron::PoolMember properties: - image: {get_param: server_image} - flavor: {get_param: master_flavor} - key_name: {get_param: ssh_key_name} - networks: - - network: {get_param: fixed_network} - user_data_format: RAW - user_data: {get_resource: kube_master_init} + pool_id: {get_param: api_pool_id} + address: {get_attr: [kube_master, networks, private, 0]} + protocol_port: {get_param: kubernetes_port} + + etcd_pool_member: + type: Magnum::Optional::Neutron::PoolMember + properties: + pool_id: {get_param: etcd_pool_id} + address: {get_attr: [kube_master, networks, private, 0]} + protocol_port: 2379 outputs: diff --git a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion-fedora-ironic.yaml b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion-fedora-ironic.yaml index 2b6c5c0132..c51af8fdff 100644 --- a/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion-fedora-ironic.yaml +++ b/magnum/drivers/k8s_fedora_atomic_v1/templates/kubeminion-fedora-ironic.yaml @@ -1,9 +1,9 @@ heat_template_version: 2014-10-16 description: > - This is a nested stack that defines a single Kubernetes minion, - based on a vanilla Fedora 20 cloud image. This stack is included by - a ResourceGroup resource in the parent template (kubecluster.yaml). + This is a nested stack that defines a single Kubernetes minion, This stack is + included by an AutoScalingGroup resource in the parent template + (kubecluster.yaml). parameters: @@ -30,19 +30,164 @@ parameters: constraints: - allowed_values: ["true", "false"] + docker_storage_driver: + type: string + description: docker storage driver name + default: "devicemapper" + constraints: + - allowed_values: ["devicemapper", "overlay"] + + tls_disabled: + type: boolean + description: whether or not to enable TLS + + kubernetes_port: + type: number + description: > + The port which are used by kube-apiserver to provide Kubernetes + service. + + bay_uuid: + type: string + description: identifier for the bay this template is generating + + magnum_url: + type: string + description: endpoint to retrieve TLS certs from + kube_master_ip: type: string description: IP address of the Kubernetes master server. + etcd_server_ip: + type: string + description: IP address of the Etcd server. + fixed_network: type: string description: Network from which to allocate fixed addresses. + fixed_subnet: + type: string + description: Subnet from which to allocate fixed addresses. + + network_driver: + type: string + description: network driver to use for instantiating container networks + + flannel_network_cidr: + type: string + description: network range for flannel overlay network + wait_condition_timeout: type: number description : > timeout for the Wait Conditions + registry_enabled: + type: boolean + description: > + Indicates whether the docker registry is enabled. 
+ + registry_port: + type: number + description: port of registry service + + swift_region: + type: string + description: region of swift service + + registry_container: + type: string + description: > + name of swift container which docker registry stores images in + + registry_insecure: + type: boolean + description: > + indicates whether to skip TLS verification between registry and backend storage + + registry_chunksize: + type: number + description: > + size fo the data segments for the swift dynamic large objects + + secgroup_kube_minion_id: + type: string + description: ID of the security group for kubernetes minion. + + volume_driver: + type: string + description: volume driver to use for container storage + + region_name: + type: string + description: A logically separate section of the cluster + + tenant_name: + type: string + description: an alternative term for a project + + username: + type: string + description: > + user account + + password: + type: string + description: > + user password, not set in current implementation, only used to + fill in for Kubernetes config file + hidden: true + + http_proxy: + type: string + description: http proxy address for docker + + https_proxy: + type: string + description: https proxy address for docker + + no_proxy: + type: string + description: no proxies for docker + + kube_version: + type: string + description: version of kubernetes used for kubernetes cluster + + trustee_domain_id: + type: string + description: domain id of the trustee + + trustee_user_id: + type: string + description: user id of the trustee + + trustee_username: + type: string + description: username of the trustee + + trustee_password: + type: string + description: password of the trustee + hidden: true + + trust_id: + type: string + description: id of the trust which is used by the trustee + hidden: true + + auth_url: + type: string + description: > + url for keystone, must be v2 since k8s backend only support v2 + at this point + + insecure_registry_url: + type: string + description: insecure registry url + resources: minion_wait_handle: @@ -71,7 +216,66 @@ resources: params: $KUBE_ALLOW_PRIV: {get_param: kube_allow_priv} $KUBE_MASTER_IP: {get_param: kube_master_ip} - $WAIT_HANDLE: {get_resource: minion_wait_handle} + $KUBE_API_PORT: {get_param: kubernetes_port} + $ETCD_SERVER_IP: {get_param: etcd_server_ip} + $DOCKER_VOLUME: 'None' + $DOCKER_STORAGE_DRIVER: {get_param: docker_storage_driver} + $NETWORK_DRIVER: {get_param: network_driver} + $REGISTRY_ENABLED: {get_param: registry_enabled} + $REGISTRY_PORT: {get_param: registry_port} + $SWIFT_REGION: {get_param: swift_region} + $REGISTRY_CONTAINER: {get_param: registry_container} + $REGISTRY_INSECURE: {get_param: registry_insecure} + $REGISTRY_CHUNKSIZE: {get_param: registry_chunksize} + $TLS_DISABLED: {get_param: tls_disabled} + $BAY_UUID: {get_param: bay_uuid} + $MAGNUM_URL: {get_param: magnum_url} + $USERNAME: {get_param: username} + $PASSWORD: {get_param: password} + $VOLUME_DRIVER: {get_param: volume_driver} + $REGION_NAME: {get_param: region_name} + $TENANT_NAME: {get_param: tenant_name} + $HTTP_PROXY: {get_param: http_proxy} + $HTTPS_PROXY: {get_param: https_proxy} + $NO_PROXY: {get_param: no_proxy} + $KUBE_VERSION: {get_param: kube_version} + $WAIT_CURL: {get_attr: [minion_wait_handle, curl_cli]} + $TRUSTEE_DOMAIN_ID: {get_param: trustee_domain_id} + $TRUSTEE_USER_ID: {get_param: trustee_user_id} + $TRUSTEE_USERNAME: {get_param: trustee_username} + $TRUSTEE_PASSWORD: {get_param: trustee_password} + 
$TRUST_ID: {get_param: trust_id} + $AUTH_URL: {get_param: auth_url} + $INSECURE_REGISTRY_URL: {get_param: insecure_registry_url} + $ENABLE_CINDER: "False" + + write_kubeconfig: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/write-kubeconfig.yaml} + + make_cert: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/make-cert-client.sh} + + configure_docker_storage: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: + str_replace: + params: + $configure_docker_storage_driver: {get_file: ../../common/templates/fragments/configure_docker_storage_driver_atomic.sh} + template: {get_file: fragments/configure-docker-storage.sh} + + configure_docker_registry: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: ../../common/templates/fragments/configure-docker-registry.sh} configure_kubernetes_minion: type: OS::Heat::SoftwareConfig @@ -85,11 +289,11 @@ resources: group: ungrouped config: {get_file: fragments/kube-examples.yaml} - docker_service: + network_service: type: OS::Heat::SoftwareConfig properties: group: ungrouped - config: {get_file: fragments/docker.service.yaml} + config: {get_file: fragments/network-service.sh} enable_services: type: OS::Heat::SoftwareConfig @@ -97,6 +301,18 @@ resources: group: ungrouped config: {get_file: fragments/enable-services-minion.sh} + enable_docker_registry: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/enable-docker-registry.sh} + + enable_kube_proxy: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/enable-kube-proxy-minion.sh} + minion_wc_notify: type: OS::Heat::SoftwareConfig properties: @@ -115,24 +331,41 @@ resources: group: ungrouped config: {get_file: fragments/disable-selinux.sh} + add_proxy: + type: OS::Heat::SoftwareConfig + properties: + group: ungrouped + config: {get_file: fragments/add-proxy.sh} + kube_minion_init: type: OS::Heat::MultipartMime properties: parts: - config: {get_resource: disable_selinux} - config: {get_resource: write_heat_params} + - config: {get_resource: write_kubeconfig} + - config: {get_resource: make_cert} - config: {get_resource: kube_examples} + - config: {get_resource: configure_docker_storage} + - config: {get_resource: configure_docker_registry} - config: {get_resource: configure_kubernetes_minion} - - config: {get_resource: docker_service} + - config: {get_resource: network_service} + - config: {get_resource: add_proxy} - config: {get_resource: enable_services} + - config: {get_resource: enable_kube_proxy} + - config: {get_resource: enable_docker_registry} - config: {get_resource: minion_wc_notify} + ###################################################################### + # + # a single kubernetes minion. # Important: the name for the heat resource kube-minion below must # not contain "_" (underscore) because it will be used in the # hostname. Because DNS domain name does not allow "_", the "_" # will be converted to a "-" and this will make the hostname different # from the Nova instance name. This in turn will break the load # balancer feature in Kubernetes. 
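The constraint above exists because cloud-init derives the hostname from the Nova instance name, and DNS labels cannot contain underscores, so an "_" in the resource name would be rewritten to "-" and the hostname would stop matching the Nova name that Kubernetes' load-balancer integration looks up. A quick illustration of the rewrite:

    import re

    def dns_label(name):
        # DNS labels allow only letters, digits and hyphens.
        return re.sub(r'[^A-Za-z0-9-]', '-', name)

    print(dns_label('kube_minion'))  # 'kube-minion', no longer equal to 'kube_minion'
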
+ # kube-minion: type: OS::Nova::Server @@ -148,7 +381,8 @@ resources: kube_minion_floating: type: OS::Neutron::FloatingIP properties: - floating_network: {get_param: external_network} + floating_network: + get_param: external_network port_id: get_attr: [kube-minion, addresses, {get_param: fixed_network}, 0, port] @@ -157,7 +391,7 @@ outputs: kube_minion_ip: value: {get_attr: [kube-minion, networks, private, 0]} description: > - This is the "private" IP address of the Kubernetes minion node. + This is the "public" IP address of the Kubernetes minion node. kube_minion_external_ip: value: {get_attr: [kube_minion_floating, floating_ip_address]} diff --git a/magnum/tests/functional/k8s/test_templates.py b/magnum/tests/functional/k8s/test_templates.py index fd4f5b017a..c500db07b0 100644 --- a/magnum/tests/functional/k8s/test_templates.py +++ b/magnum/tests/functional/k8s/test_templates.py @@ -18,7 +18,7 @@ from magnum.tests import base class TestTemplates(base.TestCase): def test_templates_list(self): entry_points = list(tdef.TemplateDefinition.load_entry_points()) - self.assertEqual(4, len(entry_points)) + self.assertEqual(5, len(entry_points)) templates = [] for entry_point, def_class in entry_points: @@ -27,5 +27,6 @@ class TestTemplates(base.TestCase): self.assertEqual(['AtomicK8sTemplateDefinition', 'AtomicSwarmTemplateDefinition', 'CoreOSK8sTemplateDefinition', + 'FedoraK8sIronicTemplateDefinition', 'UbuntuMesosTemplateDefinition'], sorted(templates)) diff --git a/magnum/tests/unit/drivers/test_template_definition.py b/magnum/tests/unit/drivers/test_template_definition.py index 0f20f08244..18e31625ce 100644 --- a/magnum/tests/unit/drivers/test_template_definition.py +++ b/magnum/tests/unit/drivers/test_template_definition.py @@ -13,6 +13,7 @@ # under the License. 
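The functional test above simply counts what setuptools reports for the magnum.template_definitions entry-point group, which grows to five with this patch. The same group can be inspected outside the test suite (assuming magnum is installed):

    import pkg_resources

    # Prints lines such as
    # "magnum_bm_fedora_k8s = magnum.drivers.k8s_fedora_atomic_v1.template_def:FedoraK8sIronicTemplateDefinition"
    for entry_point in pkg_resources.iter_entry_points('magnum.template_definitions'):
        print(entry_point)
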
import mock +from neutronclient.common import exceptions as n_exception from oslo_config import cfg from magnum.common import exception @@ -64,6 +65,15 @@ class TemplateDefinitionTestCase(base.TestCase): self.assertIsInstance(definition, k8sa_tdef.AtomicK8sTemplateDefinition) + def test_get_bm_fedora_kubernetes_ironic_definition(self): + definition = cmn_tdef.TemplateDefinition.get_template_definition( + 'bm', + 'fedora', + 'kubernetes') + + self.assertIsInstance(definition, + k8sa_tdef.FedoraK8sIronicTemplateDefinition) + def test_get_vm_coreos_kubernetes_definition(self): definition = cmn_tdef.TemplateDefinition.get_template_definition( 'vm', @@ -486,6 +496,113 @@ class AtomicK8sTemplateDefinitionTestCase(base.TestCase): self._test_update_outputs_none_api_address('swarm', params) +class FedoraK8sIronicTemplateDefinitionTestCase(base.TestCase): + + def get_definition(self): + return cmn_tdef.TemplateDefinition.get_template_definition( + 'bm', + 'fedora', + 'kubernetes' + ) + + def assert_neutron_find(self, mock_neutron_v20_find, osc, baymodel): + mock_neutron_v20_find.assert_called_once_with( + osc.neutron(), + 'subnet', + baymodel.fixed_subnet + ) + + def assert_raises_from_get_fixed_network_id( + self, + mock_neutron_v20_find, + exeption_from_neutron_client, + expected_exception_class + ): + definition = self.get_definition() + osc = mock.MagicMock() + baymodel = mock.MagicMock() + mock_neutron_v20_find.side_effect = exeption_from_neutron_client + + self.assertRaises( + expected_exception_class, + definition.get_fixed_network_id, + osc, + baymodel + ) + + @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') + def test_get_fixed_network_id(self, mock_neutron_v20_find): + expected_network_id = 'expected_network_id' + + osc = mock.MagicMock() + baymodel = mock.MagicMock() + definition = self.get_definition() + mock_neutron_v20_find.return_value = { + 'ip_version': 4, + 'network_id': expected_network_id, + } + + self.assertEqual( + expected_network_id, + definition.get_fixed_network_id(osc, baymodel) + ) + self.assert_neutron_find(mock_neutron_v20_find, osc, baymodel) + + @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') + def test_get_fixed_network_id_with_invalid_ip_ver(self, + mock_neutron_v20_find): + osc = mock.MagicMock() + baymodel = mock.MagicMock() + definition = self.get_definition() + mock_neutron_v20_find.return_value = { + 'ip_version': 6, + 'network_id': 'expected_network_id', + } + + self.assertRaises( + exception.InvalidSubnet, + definition.get_fixed_network_id, + osc, + baymodel + ) + + @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') + def test_get_fixed_network_id_with_duplicated_name(self, + mock_neutron_v20_find): + ex = n_exception.NeutronClientNoUniqueMatch( + resource='subnet', + name='duplicated-name' + ) + + self.assert_raises_from_get_fixed_network_id( + mock_neutron_v20_find, + ex, + exception.InvalidSubnet, + ) + + @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') + def test_get_fixed_network_id_with_client_error(self, + mock_neutron_v20_find): + ex = n_exception.BadRequest() + + self.assert_raises_from_get_fixed_network_id( + mock_neutron_v20_find, + ex, + exception.InvalidSubnet, + ) + + @mock.patch('neutronclient.neutron.v2_0.find_resource_by_name_or_id') + def test_get_fixed_network_id_with_server_error(self, + mock_neutron_v20_find): + ex = n_exception.ServiceUnavailable() + + self.assert_raises_from_get_fixed_network_id( + mock_neutron_v20_find, + ex, + 
n_exception.ServiceUnavailable, + ) + + class AtomicSwarmTemplateDefinitionTestCase(base.TestCase): @mock.patch('magnum.common.clients.OpenStackClients') diff --git a/setup.cfg b/setup.cfg index 17f16c4254..603555f858 100644 --- a/setup.cfg +++ b/setup.cfg @@ -58,6 +58,7 @@ oslo.config.opts.defaults = magnum = magnum.common.config:set_cors_middleware_defaults magnum.template_definitions = + magnum_bm_fedora_k8s = magnum.drivers.k8s_fedora_atomic_v1.template_def:FedoraK8sIronicTemplateDefinition magnum_vm_atomic_k8s = magnum.drivers.k8s_fedora_atomic_v1.template_def:AtomicK8sTemplateDefinition magnum_vm_coreos_k8s = magnum.drivers.k8s_coreos_v1.template_def:CoreOSK8sTemplateDefinition magnum_vm_atomic_swarm = magnum.drivers.swarm_fedora_atomic_v1.template_def:AtomicSwarmTemplateDefinition
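
With the entry point registered in setup.cfg and ('bm', 'fedora', 'kubernetes') listed in the driver's provides tuple, the standard lookup exercised by the unit tests above should resolve to the new definition. A minimal usage sketch, assuming magnum is installed and magnum_bm_fedora_k8s is among the enabled_definitions (which this patch adds to the default list):

    from magnum.drivers.common import template_def as cmn_tdef

    definition = cmn_tdef.TemplateDefinition.get_template_definition(
        'bm', 'fedora', 'kubernetes')
    print(type(definition).__name__)   # FedoraK8sIronicTemplateDefinition
    print(definition.template_path)    # .../templates/kubecluster-fedora-ironic.yaml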