[WIP] cobbling together a command line - NOT READY FOR REVIEW
Change-Id: I9caefe557ac827ca7a3b8f9a1693d623cf369080
This commit is contained in:
parent
2100868fab
commit
854a0918ae
|
@ -0,0 +1,269 @@
|
||||||
|
---
|
||||||
|
# The options in this file can be overridden in 'globals.yml'
|
||||||
|
|
||||||
|
# The "temp" files that are created before merge need to stay persistent due
|
||||||
|
# to the fact that ansible will register a "change" if it has to create them
|
||||||
|
# again. Persistent files allow for idempotency
|
||||||
|
container_config_directory: "/var/lib/kolla/config_files"
|
||||||
|
|
||||||
|
# The directory to store the config files on the destination node
|
||||||
|
node_config_directory: "/etc/kolla"
|
||||||
|
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Kolla options
|
||||||
|
###################
|
||||||
|
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
|
||||||
|
config_strategy: "COPY_ALWAYS"
|
||||||
|
|
||||||
|
# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
|
||||||
|
kolla_base_distro: "centos"
|
||||||
|
# Valid options are [ binary, source ]
|
||||||
|
kolla_install_type: "binary"
|
||||||
|
|
||||||
|
kolla_internal_vip_address: "{{ kolla_internal_address }}"
|
||||||
|
kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
|
||||||
|
kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
|
||||||
|
kolla_external_fqdn: "{{ kolla_internal_fqdn if kolla_external_vip_address == kolla_internal_vip_address else kolla_external_vip_address }}"
|
||||||
|
|
||||||
|
kolla_enable_sanity_checks: "no"
|
||||||
|
|
||||||
|
kolla_enable_sanity_keystone: "{{ kolla_enable_sanity_checks }}"
|
||||||
|
kolla_enable_sanity_glance: "{{ kolla_enable_sanity_checks }}"
|
||||||
|
kolla_enable_sanity_cinder: "{{ kolla_enable_sanity_checks }}"
|
||||||
|
kolla_enable_sanity_swift: "{{ kolla_enable_sanity_checks }}"
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Database options
|
||||||
|
####################
|
||||||
|
database_address: "{{ kolla_internal_fqdn }}"
|
||||||
|
database_user: "root"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Docker options
|
||||||
|
####################
|
||||||
|
docker_registry_email:
|
||||||
|
docker_registry:
|
||||||
|
docker_namespace: "kollaglue"
|
||||||
|
docker_registry_username:
|
||||||
|
|
||||||
|
# Valid options are [ never, on-failure, always ]
|
||||||
|
docker_restart_policy: "always"
|
||||||
|
|
||||||
|
# '0' means unlimited retries
|
||||||
|
docker_restart_policy_retry: "10"
|
||||||
|
|
||||||
|
# Common options used throughout docker
|
||||||
|
docker_common_options:
|
||||||
|
auth_email: "{{ docker_registry_email }}"
|
||||||
|
auth_password: "{{ docker_registry_password }}"
|
||||||
|
auth_registry: "{{ docker_registry }}"
|
||||||
|
auth_username: "{{ docker_registry_username }}"
|
||||||
|
environment:
|
||||||
|
KOLLA_CONFIG_STRATEGY: "{{ config_strategy }}"
|
||||||
|
restart_policy: "{{ docker_restart_policy }}"
|
||||||
|
restart_retries: "{{ docker_restart_policy_retry }}"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# keepalived options
|
||||||
|
####################
|
||||||
|
# Arbitary unique number from 0..255
|
||||||
|
keepalived_virtual_router_id: "51"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Networking options
|
||||||
|
####################
|
||||||
|
kolla_external_vip_interface: "{{ network_interface }}"
|
||||||
|
api_interface: "{{ network_interface }}"
|
||||||
|
storage_interface: "{{ network_interface }}"
|
||||||
|
tunnel_interface: "{{ network_interface }}"
|
||||||
|
|
||||||
|
# Valid options are [ openvswitch, linuxbridge ]
|
||||||
|
neutron_plugin_agent: "openvswitch"
|
||||||
|
|
||||||
|
# The default ports used by each service.
|
||||||
|
iscsi_port: "3260"
|
||||||
|
|
||||||
|
mariadb_port: "3306"
|
||||||
|
mariadb_wsrep_port: "4567"
|
||||||
|
mariadb_ist_port: "4568"
|
||||||
|
mariadb_sst_port: "4444"
|
||||||
|
|
||||||
|
rabbitmq_port: "5672"
|
||||||
|
rabbitmq_management_port: "15672"
|
||||||
|
rabbitmq_cluster_port: "25672"
|
||||||
|
rabbitmq_epmd_port: "4369"
|
||||||
|
|
||||||
|
mongodb_port: "27017"
|
||||||
|
mongodb_web_port: "28017"
|
||||||
|
|
||||||
|
haproxy_stats_port: "1984"
|
||||||
|
|
||||||
|
keystone_public_port: "5000"
|
||||||
|
keystone_admin_port: "35357"
|
||||||
|
|
||||||
|
glance_api_port: "9292"
|
||||||
|
glance_registry_port: "9191"
|
||||||
|
|
||||||
|
nova_api_port: "8774"
|
||||||
|
nova_api_ec2_port: "8773"
|
||||||
|
nova_metadata_port: "8775"
|
||||||
|
nova_novncproxy_port: "6080"
|
||||||
|
nova_spicehtml5proxy_port: "6082"
|
||||||
|
|
||||||
|
neutron_server_port: "9696"
|
||||||
|
|
||||||
|
cinder_api_port: "8776"
|
||||||
|
|
||||||
|
memcached_port: "11211"
|
||||||
|
|
||||||
|
swift_proxy_server_port: "8080"
|
||||||
|
swift_object_server_port: "6000"
|
||||||
|
swift_account_server_port: "6001"
|
||||||
|
swift_container_server_port: "6002"
|
||||||
|
swift_rsync_port: "10873"
|
||||||
|
|
||||||
|
heat_api_port: "8004"
|
||||||
|
heat_api_cfn_port: "8000"
|
||||||
|
|
||||||
|
murano_api_port: "8082"
|
||||||
|
|
||||||
|
ironic_api_port: "6385"
|
||||||
|
|
||||||
|
magnum_api_port: "9511"
|
||||||
|
|
||||||
|
rgw_port: "6780"
|
||||||
|
|
||||||
|
mistral_api_port: "8989"
|
||||||
|
|
||||||
|
kibana_server_port: "5601"
|
||||||
|
|
||||||
|
elasticsearch_port: "9200"
|
||||||
|
|
||||||
|
manila_api_port: "8786"
|
||||||
|
|
||||||
|
public_protocol: "{{ 'https' if kolla_enable_tls_external | bool else 'http' }}"
|
||||||
|
internal_protocol: "http"
|
||||||
|
admin_protocol: "http"
|
||||||
|
|
||||||
|
####################
|
||||||
|
# OpenStack options
|
||||||
|
####################
|
||||||
|
openstack_release: "2.0.0"
|
||||||
|
openstack_logging_debug: "False"
|
||||||
|
|
||||||
|
openstack_region_name: "RegionOne"
|
||||||
|
|
||||||
|
# Optionally allow Kolla to set sysctl values
|
||||||
|
set_sysctl: "yes"
|
||||||
|
|
||||||
|
# Valid options are [ novnc, spice ]
|
||||||
|
nova_console: "novnc"
|
||||||
|
|
||||||
|
# OpenStack authentication string. You should only need to override these if you
|
||||||
|
# are changing the admin tenant/project or user.
|
||||||
|
openstack_auth:
|
||||||
|
auth_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}"
|
||||||
|
username: "admin"
|
||||||
|
password: "{{ keystone_admin_password }}"
|
||||||
|
project_name: "admin"
|
||||||
|
|
||||||
|
# These roles are required for Kolla to be operation, however a savvy deployer
|
||||||
|
# could disable some of these required roles and run their own services.
|
||||||
|
enable_glance: "yes"
|
||||||
|
enable_haproxy: "yes"
|
||||||
|
enable_keystone: "yes"
|
||||||
|
enable_mariadb: "yes"
|
||||||
|
enable_memcached: "yes"
|
||||||
|
enable_neutron: "yes"
|
||||||
|
enable_nova: "yes"
|
||||||
|
enable_rabbitmq: "yes"
|
||||||
|
|
||||||
|
# Additional optional OpenStack services are specified here
|
||||||
|
enable_central_logging: "no"
|
||||||
|
enable_ceph: "no"
|
||||||
|
enable_ceph_rgw: "no"
|
||||||
|
enable_cinder: "no"
|
||||||
|
enable_heat: "yes"
|
||||||
|
enable_horizon: "yes"
|
||||||
|
enable_ironic: "no"
|
||||||
|
enable_iscsi: "no"
|
||||||
|
enable_magnum: "no"
|
||||||
|
enable_manila: "no"
|
||||||
|
enable_mistral: "no"
|
||||||
|
enable_mongodb: "no"
|
||||||
|
enable_murano: "no"
|
||||||
|
enable_swift: "no"
|
||||||
|
|
||||||
|
ironic_keystone_user: "ironic"
|
||||||
|
neutron_keystone_user: "neutron"
|
||||||
|
nova_keystone_user: "nova"
|
||||||
|
|
||||||
|
# Nova fake driver and the number of fake driver per compute node
|
||||||
|
enable_nova_fake: "no"
|
||||||
|
num_nova_fake_per_node: 5
|
||||||
|
|
||||||
|
####################
|
||||||
|
# RabbitMQ options
|
||||||
|
####################
|
||||||
|
rabbitmq_user: "openstack"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# HAProxy options
|
||||||
|
####################
|
||||||
|
haproxy_user: "openstack"
|
||||||
|
haproxy_enable_external_vip: "{{ 'no' if kolla_external_vip_address == kolla_internal_vip_address else 'yes' }}"
|
||||||
|
kolla_enable_tls_external: "no"
|
||||||
|
kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Kibana options
|
||||||
|
####################
|
||||||
|
kibana_user: "kibana"
|
||||||
|
|
||||||
|
|
||||||
|
#################################
|
||||||
|
# Cinder - Block Storage options
|
||||||
|
#################################
|
||||||
|
cinder_volume_driver: "{{ 'ceph' if enable_ceph | bool else 'lvm' }}"
|
||||||
|
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Ceph options
|
||||||
|
###################
|
||||||
|
# Ceph can be setup with a caching to improve performance. To use the cache you
|
||||||
|
# must provide separate disks than those for the OSDs
|
||||||
|
ceph_enable_cache: "no"
|
||||||
|
# Valid options are [ forward, none, writeback ]
|
||||||
|
ceph_cache_mode: "writeback"
|
||||||
|
|
||||||
|
# Valid options are [ ext4, btrfs, xfs ]
|
||||||
|
ceph_osd_filesystem: "xfs"
|
||||||
|
|
||||||
|
# These are /etc/fstab options. Comma seperated, no spaces (see fstab(8))
|
||||||
|
ceph_osd_mount_options: "defaults,noatime"
|
||||||
|
|
||||||
|
# A requirement for using the erasure-coded pools is you must setup a cache tier
|
||||||
|
# Valid options are [ erasure, replicated ]
|
||||||
|
ceph_pool_type: "replicated"
|
||||||
|
|
||||||
|
ceph_cinder_pool_name: "volumes"
|
||||||
|
ceph_cinder_backup_pool_name: "backups"
|
||||||
|
ceph_glance_pool_name: "images"
|
||||||
|
ceph_nova_pool_name: "vms"
|
||||||
|
|
||||||
|
ceph_erasure_profile: "k=4 m=2 ruleset-failure-domain=host"
|
||||||
|
ceph_rule: "default host {{ 'indep' if ceph_pool_type == 'erasure' else 'firstn' }}"
|
||||||
|
ceph_cache_rule: "cache host firstn"
|
||||||
|
|
||||||
|
|
||||||
|
#######################################
|
||||||
|
# Manila - Shared File Systems Options
|
||||||
|
#######################################
|
||||||
|
manila_enable_dhss: "yes"
|
||||||
|
manila_dhss: "{{ 'True' if manila_enable_dhss | bool else 'False' }}"
|
|
@ -0,0 +1,27 @@
|
||||||
|
---
|
||||||
|
project_name: "keystone"
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Database
|
||||||
|
####################
|
||||||
|
keystone_database_name: "keystone"
|
||||||
|
keystone_database_user: "keystone"
|
||||||
|
keystone_database_address: "{{ kolla_internal_fqdn }}"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Docker
|
||||||
|
####################
|
||||||
|
keystone_image: "{{ docker_registry ~ '/' if docker_registry else '' }}{{ docker_namespace }}/{{ kolla_base_distro }}-{{ kolla_install_type }}-keystone"
|
||||||
|
keystone_tag: "{{ openstack_release }}"
|
||||||
|
keystone_image_full: "{{ keystone_image }}:{{ keystone_tag }}"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# OpenStack
|
||||||
|
####################
|
||||||
|
keystone_admin_url: "{{ admin_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_admin_port }}/v3"
|
||||||
|
keystone_internal_url: "{{ internal_protocol }}://{{ kolla_internal_fqdn }}:{{ keystone_public_port }}/v3"
|
||||||
|
keystone_public_url: "{{ public_protocol }}://{{ kolla_external_fqdn }}:{{ keystone_public_port }}/v3"
|
||||||
|
|
||||||
|
keystone_logging_debug: "{{ openstack_logging_debug }}"
|
|
@ -0,0 +1,159 @@
|
||||||
|
---
|
||||||
|
# You can use this file to override _any_ variable throughout Kolla.
|
||||||
|
# Additional options can be found in the 'kolla/ansible/group_vars/all.yml' file.
|
||||||
|
# Default value of all the commented parameters are shown here, To override
|
||||||
|
# the default value uncomment the parameter and change its value.
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Kolla options
|
||||||
|
###################
|
||||||
|
# Valid options are [ COPY_ONCE, COPY_ALWAYS ]
|
||||||
|
#config_strategy: "COPY_ALWAYS"
|
||||||
|
|
||||||
|
# Valid options are [ centos, fedora, oraclelinux, ubuntu ]
|
||||||
|
#kolla_base_distro: "centos"
|
||||||
|
|
||||||
|
# Valid options are [ binary, source ]
|
||||||
|
#kolla_install_type: "binary"
|
||||||
|
|
||||||
|
# Valid option is Docker repository tag
|
||||||
|
#openstack_release: "3.0.0"
|
||||||
|
|
||||||
|
# This should be a VIP, an unused IP on your network that will float between
|
||||||
|
# the hosts running keepalived for high-availability. When running an All-In-One
|
||||||
|
# without haproxy and keepalived, this should be the first IP on your
|
||||||
|
# 'network_interface' as set in the Networking section below.
|
||||||
|
kolla_internal_vip_address: "10.10.10.254"
|
||||||
|
|
||||||
|
# This is the DNS name that maps to the kolla_internal_vip_address VIP. By
|
||||||
|
# default it is the same as kolla_internal_vip_address.
|
||||||
|
#kolla_internal_fqdn: "{{ kolla_internal_vip_address }}"
|
||||||
|
|
||||||
|
# This should be a VIP, an unused IP on your network that will float between
|
||||||
|
# the hosts running keepalived for high-availability. It defaults to the
|
||||||
|
# kolla_internal_vip_address, allowing internal and external communication to
|
||||||
|
# share the same address. Specify a kolla_external_vip_address to separate
|
||||||
|
# internal and external requests between two VIPs.
|
||||||
|
#kolla_external_vip_address: "{{ kolla_internal_vip_address }}"
|
||||||
|
|
||||||
|
# The Public address used to communicate with OpenStack as set in the public_url
|
||||||
|
# for the endpoints that will be created. This DNS name should map to
|
||||||
|
# kolla_external_vip_address.
|
||||||
|
#kolla_external_fqdn: "{{ kolla_external_vip_address }}"
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Docker options
|
||||||
|
####################
|
||||||
|
### Example: Private repository with authentication
|
||||||
|
|
||||||
|
#docker_registry: "172.16.0.10:4000"
|
||||||
|
#docker_namespace: "companyname"
|
||||||
|
#docker_registry_username: "sam"
|
||||||
|
#docker_registry_password: "correcthorsebatterystaple"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Networking options
|
||||||
|
####################
|
||||||
|
# This interface is what all your api services will be bound to by default.
|
||||||
|
# Additionally, all vxlan/tunnel and storage network traffic will go over this
|
||||||
|
# interface by default. This interface must contain an IPv4 address.
|
||||||
|
network_interface: "eth0"
|
||||||
|
|
||||||
|
# These can be adjusted for even more customization. The default is the same as
|
||||||
|
# the 'network_interface'. These interfaces must container an IPv4 address.
|
||||||
|
#kolla_external_vip_interface: "{{ network_interface }}"
|
||||||
|
#api_interface: "{{ network_interface }}"
|
||||||
|
#storage_interface: "{{ network_interface }}"
|
||||||
|
#tunnel_interface: "{{ network_interface }}"
|
||||||
|
|
||||||
|
# This is the raw interface given to neutron as its external network port. Even
|
||||||
|
# though an IP address can exist on this interface, it will be unusable in most
|
||||||
|
# configurations. It is recommended this interface not be configured with any IP
|
||||||
|
# addresses for that reason.
|
||||||
|
neutron_external_interface: "eth1"
|
||||||
|
|
||||||
|
# Valid options are [ openvswitch, linuxbridge ]
|
||||||
|
#neutron_plugin_agent: "openvswitch"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# keepalived options
|
||||||
|
####################
|
||||||
|
# Arbitary unique number from 0..255
|
||||||
|
#keepalived_virtual_router_id: "51"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# TLS options
|
||||||
|
####################
|
||||||
|
# To provide encryption and authentication on the kolla_external_vip_interface,
|
||||||
|
# TLS can be enabled. When TLS is enabled, certificates must be provided to
|
||||||
|
# allow clients to perform authentication.
|
||||||
|
#kolla_enable_tls_external: "no"
|
||||||
|
#kolla_external_fqdn_cert: "{{ node_config_directory }}/certificates/haproxy.pem"
|
||||||
|
|
||||||
|
|
||||||
|
####################
|
||||||
|
# OpenStack options
|
||||||
|
####################
|
||||||
|
# Use these options to set the various log levels across all OpenStack projects
|
||||||
|
# Valid options are [ True, False ]
|
||||||
|
#openstack_logging_debug: "False"
|
||||||
|
|
||||||
|
# Valid options are [ novnc, spice ]
|
||||||
|
#nova_console: "novnc"
|
||||||
|
|
||||||
|
# OpenStack services can be enabled or disabled with these options
|
||||||
|
#enable_central_logging: "no"
|
||||||
|
#enable_ceph: "no"
|
||||||
|
#enable_ceph_rgw: "no"
|
||||||
|
#enable_cinder: "no"
|
||||||
|
#enable_heat: "yes"
|
||||||
|
#enable_horizon: "yes"
|
||||||
|
#enable_ironic: "no"
|
||||||
|
#enable_magnum: "no"
|
||||||
|
#enable_manila: "no"
|
||||||
|
#enable_mistral: "no"
|
||||||
|
#enable_mongodb: "no"
|
||||||
|
#enable_murano: "no"
|
||||||
|
#enable_swift: "no"
|
||||||
|
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Ceph options
|
||||||
|
###################
|
||||||
|
# Ceph can be setup with a caching to improve performance. To use the cache you
|
||||||
|
# must provide separate disks than those for the OSDs
|
||||||
|
#ceph_enable_cache: "no"
|
||||||
|
# Valid options are [ forward, none, writeback ]
|
||||||
|
#ceph_cache_mode: "writeback"
|
||||||
|
|
||||||
|
# A requirement for using the erasure-coded pools is you must setup a cache tier
|
||||||
|
# Valid options are [ erasure, replicated ]
|
||||||
|
#ceph_pool_type: "replicated"
|
||||||
|
|
||||||
|
|
||||||
|
#######################################
|
||||||
|
# Manila - Shared File Systems Options
|
||||||
|
#######################################
|
||||||
|
#manila_enable_dhss: "yes"
|
||||||
|
|
||||||
|
|
||||||
|
##################################
|
||||||
|
# Swift - Object Storage Options
|
||||||
|
##################################
|
||||||
|
# Swift expects block devices to be available for storage. Two types of storage
|
||||||
|
# are supported: 1 - storage device with a special partition name and filesystem
|
||||||
|
# label, 2 - unpartitioned disk with a filesystem. The label of this filesystem
|
||||||
|
# is used to detect the disk which Swift will be using.
|
||||||
|
|
||||||
|
# Swift support two mathcing modes, valid options are [ prefix, strict ]
|
||||||
|
#swift_devices_match_mode: "strict"
|
||||||
|
|
||||||
|
# This parameter defines matching pattern: if "strict" mode was selected,
|
||||||
|
# for swift_devices_match_mode then swift_device_name should specify the name of
|
||||||
|
# the special swift partition for example: "KOLLA_SWIFT_DATA", if "prefix" mode was
|
||||||
|
# selected then swift_devices_name should specify a pattern which would match to
|
||||||
|
# filesystems' labels prepared for swift.
|
||||||
|
#swift_devices_name: "KOLLA_SWIFT_DATA"
|
|
@ -0,0 +1,87 @@
|
||||||
|
---
|
||||||
|
###################
|
||||||
|
# Ceph options
|
||||||
|
####################
|
||||||
|
# These options must be UUID4 values in string format
|
||||||
|
# XXXXXXXX-XXXX-4XXX-XXXX-XXXXXXXXXXXX
|
||||||
|
ceph_cluster_fsid:
|
||||||
|
rbd_secret_uuid:
|
||||||
|
|
||||||
|
###################
|
||||||
|
# Database options
|
||||||
|
####################
|
||||||
|
database_password:
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Docker options
|
||||||
|
####################
|
||||||
|
# This should only be set if you require a password for your Docker registry
|
||||||
|
docker_registry_password:
|
||||||
|
|
||||||
|
####################
|
||||||
|
# OpenStack options
|
||||||
|
####################
|
||||||
|
keystone_admin_password:
|
||||||
|
keystone_database_password:
|
||||||
|
|
||||||
|
glance_database_password:
|
||||||
|
glance_keystone_password:
|
||||||
|
|
||||||
|
nova_database_password:
|
||||||
|
nova_api_database_password:
|
||||||
|
nova_keystone_password:
|
||||||
|
|
||||||
|
neutron_database_password:
|
||||||
|
neutron_keystone_password:
|
||||||
|
metadata_secret:
|
||||||
|
|
||||||
|
cinder_database_password:
|
||||||
|
cinder_keystone_password:
|
||||||
|
|
||||||
|
swift_keystone_password:
|
||||||
|
swift_hash_path_suffix:
|
||||||
|
swift_hash_path_prefix:
|
||||||
|
|
||||||
|
heat_database_password:
|
||||||
|
heat_keystone_password:
|
||||||
|
heat_domain_admin_password:
|
||||||
|
|
||||||
|
murano_database_password:
|
||||||
|
murano_keystone_password:
|
||||||
|
|
||||||
|
ironic_database_password:
|
||||||
|
ironic_keystone_password:
|
||||||
|
|
||||||
|
magnum_database_password:
|
||||||
|
magnum_keystone_password:
|
||||||
|
|
||||||
|
mistral_database_password:
|
||||||
|
mistral_keystone_password:
|
||||||
|
|
||||||
|
horizon_secret_key:
|
||||||
|
|
||||||
|
manila_database_password:
|
||||||
|
manila_keystone_password:
|
||||||
|
|
||||||
|
memcache_secret_key:
|
||||||
|
|
||||||
|
nova_ssh_key:
|
||||||
|
private_key:
|
||||||
|
public_key:
|
||||||
|
|
||||||
|
####################
|
||||||
|
# RabbitMQ options
|
||||||
|
####################
|
||||||
|
rabbitmq_password:
|
||||||
|
rabbitmq_cluster_cookie:
|
||||||
|
|
||||||
|
####################
|
||||||
|
# HAProxy options
|
||||||
|
####################
|
||||||
|
haproxy_password:
|
||||||
|
keepalived_password:
|
||||||
|
|
||||||
|
####################
|
||||||
|
# Kibana options
|
||||||
|
####################
|
||||||
|
kibana_password:
|
|
@ -0,0 +1,47 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from cliff import command
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
|
||||||
|
from kolla_kubernetes import service
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
LOG = log.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
class Run(command.Command):
|
||||||
|
"""Run a service."""
|
||||||
|
|
||||||
|
def get_parser(self, prog_name):
|
||||||
|
parser = super(Run, self).get_parser(prog_name)
|
||||||
|
parser.add_argument('service')
|
||||||
|
return parser
|
||||||
|
|
||||||
|
def take_action(self, parsed_args):
|
||||||
|
service.run_service(parsed_args.service,
|
||||||
|
CONF.service_dir)
|
||||||
|
|
||||||
|
|
||||||
|
class Kill(command.Command):
|
||||||
|
"""Kill a service."""
|
||||||
|
|
||||||
|
def get_parser(self, prog_name):
|
||||||
|
parser = super(Kill, self).get_parser(prog_name)
|
||||||
|
parser.add_argument('service')
|
||||||
|
return parser
|
||||||
|
|
||||||
|
def take_action(self, parsed_args):
|
||||||
|
service.kill_service(parsed_args.service,
|
||||||
|
CONF.service_dir)
|
|
@ -0,0 +1,160 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
import os.path
|
||||||
|
import shlex
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from cliff import app
|
||||||
|
from cliff import commandmanager
|
||||||
|
from cliff import interactive
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
from kolla_kubernetes.common import file_utils
|
||||||
|
from kolla_kubernetes.common import utils
|
||||||
|
|
||||||
|
PROJECT = 'kolla_kubernetes'
|
||||||
|
VERSION = '1.0'
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
CONF.import_group('kolla', 'kolla_kubernetes.config')
|
||||||
|
CONF.import_group('kolla_kubernetes', 'kolla_kubernetes.config')
|
||||||
|
|
||||||
|
log.register_options(CONF)
|
||||||
|
log.set_defaults(
|
||||||
|
default_log_levels='requests.packages.urllib3.connectionpool=WARNING')
|
||||||
|
|
||||||
|
cli_opts = [
|
||||||
|
cfg.StrOpt('service-dir',
|
||||||
|
default=utils.env(
|
||||||
|
'KM_SERVICE_DIR', default=os.path.join(
|
||||||
|
file_utils.find_base_dir(), 'services')),
|
||||||
|
help='Directory with services, (Env: KM_SERVICE_DIR)'),
|
||||||
|
]
|
||||||
|
CONF.register_cli_opts(cli_opts)
|
||||||
|
|
||||||
|
|
||||||
|
class KollaKubernetesInteractiveApp(interactive.InteractiveApp):
|
||||||
|
def do_run(self, arg):
|
||||||
|
self.default(arg)
|
||||||
|
|
||||||
|
def do_help(self, arg):
|
||||||
|
line_parts = shlex.split(arg)
|
||||||
|
try:
|
||||||
|
self.command_manager.find_command(line_parts)
|
||||||
|
return self.default(self.parsed('help ' + arg))
|
||||||
|
except ValueError:
|
||||||
|
# There is a builtin cmd2 command
|
||||||
|
pass
|
||||||
|
return interactive.InteractiveApp.do_help(self, arg)
|
||||||
|
|
||||||
|
|
||||||
|
class KollaKubernetesShell(app.App):
|
||||||
|
def __init__(self):
|
||||||
|
super(KollaKubernetesShell, self).__init__(
|
||||||
|
description='Kolla-kubernetes command-line interface',
|
||||||
|
version=VERSION,
|
||||||
|
command_manager=commandmanager.CommandManager(
|
||||||
|
'kolla_kubernetes.cli'),
|
||||||
|
deferred_help=True,
|
||||||
|
interactive_app_factory=KollaKubernetesInteractiveApp
|
||||||
|
)
|
||||||
|
|
||||||
|
def configure_logging(self):
|
||||||
|
return
|
||||||
|
|
||||||
|
def initialize_app(self, argv):
|
||||||
|
self.options.service_dir = CONF.service_dir
|
||||||
|
|
||||||
|
def print_help(self):
|
||||||
|
outputs = []
|
||||||
|
max_len = 0
|
||||||
|
self.stdout.write('\nCommands :\n')
|
||||||
|
|
||||||
|
for name, ep in sorted(self.command_manager):
|
||||||
|
factory = ep.load()
|
||||||
|
cmd = factory(self, None)
|
||||||
|
one_liner = cmd.get_description().split('\n')[0]
|
||||||
|
outputs.append((name, one_liner))
|
||||||
|
max_len = max(len(name), max_len)
|
||||||
|
|
||||||
|
for name, one_liner in outputs:
|
||||||
|
self.stdout.write(' %s %s\n' % (name.ljust(max_len), one_liner))
|
||||||
|
|
||||||
|
|
||||||
|
def _separate_args(argv):
|
||||||
|
conf_opts = _config_opts_map()
|
||||||
|
config_args = []
|
||||||
|
command_args = argv[:]
|
||||||
|
while command_args:
|
||||||
|
nargs = conf_opts.get(command_args[0])
|
||||||
|
if nargs:
|
||||||
|
config_args.extend(command_args[:nargs])
|
||||||
|
command_args = command_args[nargs:]
|
||||||
|
else:
|
||||||
|
break
|
||||||
|
return config_args, command_args
|
||||||
|
|
||||||
|
|
||||||
|
def _config_opts_map():
|
||||||
|
opts = {'--help': 1, '-h': 1, '--config-dir': 2, '--config-file': 2,
|
||||||
|
'--version': 1}
|
||||||
|
for opt in CONF._all_cli_opts():
|
||||||
|
if opt[1]:
|
||||||
|
arg = '%s-%s' % (opt[1].name, opt[0].name)
|
||||||
|
else:
|
||||||
|
arg = opt[0].name
|
||||||
|
|
||||||
|
if isinstance(opt[0], cfg.BoolOpt):
|
||||||
|
nargs = 1
|
||||||
|
opts['--no%s' % arg] = 1
|
||||||
|
else:
|
||||||
|
nargs = 2
|
||||||
|
opts['--%s' % arg] = nargs
|
||||||
|
|
||||||
|
if opt[0].short:
|
||||||
|
opts['-%s' % opt[0].short] = nargs
|
||||||
|
|
||||||
|
for dep_opt in opt[0].deprecated_opts:
|
||||||
|
if getattr(dep_opt, 'group'):
|
||||||
|
opts['--%s-%s' % (dep_opt.group, dep_opt.name)] = nargs
|
||||||
|
else:
|
||||||
|
opts['--%s' % dep_opt.name] = nargs
|
||||||
|
|
||||||
|
return opts
|
||||||
|
|
||||||
|
|
||||||
|
def main(argv=sys.argv[1:]):
|
||||||
|
config_args, command_args = _separate_args(argv)
|
||||||
|
|
||||||
|
need_help = (['help'] == command_args or '-h' in config_args or
|
||||||
|
'--help' in config_args)
|
||||||
|
if need_help:
|
||||||
|
CONF([], project=PROJECT, version=VERSION)
|
||||||
|
CONF.print_help()
|
||||||
|
return KollaKubernetesShell().print_help()
|
||||||
|
|
||||||
|
CONF(config_args, project=PROJECT, version=VERSION)
|
||||||
|
log.setup(CONF, PROJECT, VERSION)
|
||||||
|
|
||||||
|
if '-d' in config_args or '--debug' in config_args:
|
||||||
|
command_args.insert(0, '--debug')
|
||||||
|
CONF.log_opt_values(
|
||||||
|
log.getLogger(PROJECT), log.INFO)
|
||||||
|
|
||||||
|
return KollaKubernetesShell().run(command_args)
|
||||||
|
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
sys.exit(main(sys.argv[1:]))
|
|
@ -0,0 +1,94 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import errno
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
import platform
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from oslo_utils import importutils
|
||||||
|
|
||||||
|
from kolla_kubernetes import exception
|
||||||
|
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def find_os_type():
|
||||||
|
return platform.linux_distribution()[0]
|
||||||
|
|
||||||
|
|
||||||
|
def mkdir_p(path):
|
||||||
|
try:
|
||||||
|
os.makedirs(path)
|
||||||
|
except OSError as exc: # Python >2.5
|
||||||
|
if exc.errno == errno.EEXIST and os.path.isdir(path):
|
||||||
|
pass
|
||||||
|
else:
|
||||||
|
raise
|
||||||
|
|
||||||
|
|
||||||
|
def get_src_dir():
|
||||||
|
kolla_kubernetes = importutils.import_module('kolla_kubernetes')
|
||||||
|
mod_path = os.path.abspath(kolla_kubernetes.__file__)
|
||||||
|
# remove the file and module to get to the base.
|
||||||
|
return os.path.dirname(os.path.dirname(mod_path))
|
||||||
|
|
||||||
|
|
||||||
|
def find_base_dir():
|
||||||
|
script_path = os.path.dirname(os.path.realpath(sys.argv[0]))
|
||||||
|
base_script_path = os.path.basename(script_path)
|
||||||
|
if base_script_path == 'kolla-kubernetes':
|
||||||
|
return script_path
|
||||||
|
if base_script_path == 'kolla_kubernetes':
|
||||||
|
return os.path.join(script_path, '..')
|
||||||
|
if base_script_path == 'cmd':
|
||||||
|
return os.path.join(script_path, '..', '..')
|
||||||
|
if base_script_path == 'subunit':
|
||||||
|
return get_src_dir()
|
||||||
|
if base_script_path == 'bin':
|
||||||
|
if find_os_type() in ['Ubuntu', 'debian']:
|
||||||
|
base_dir = '/usr/local/share/kolla-kubernetes'
|
||||||
|
else:
|
||||||
|
base_dir = '/usr/share/kolla-kubernetes'
|
||||||
|
|
||||||
|
if os.path.exists(base_dir):
|
||||||
|
return base_dir
|
||||||
|
else:
|
||||||
|
return get_src_dir()
|
||||||
|
raise exception.KollaDirNotFoundException(
|
||||||
|
'Unable to detect kolla-kubernetes directory'
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def find_config_file(filename):
    """Return the path of 'filename', preferring /etc/kolla-kubernetes.

    Falls back to the 'etc' directory under the detected base directory
    when the system-wide copy is missing or unreadable.
    """
    system_path = os.path.join('/etc/kolla-kubernetes', filename)
    if os.access(system_path, os.R_OK):
        return system_path
    return os.path.join(find_base_dir(), 'etc', filename)
|
||||||
|
|
||||||
|
|
||||||
|
# Candidate roots searched by find_file().  NOTE(review): this set is built
# at import time, so merely importing this module executes find_base_dir()
# (and may raise KollaDirNotFoundException) before any caller code runs —
# confirm that is intended.
POSSIBLE_PATHS = {'/usr/share/kolla-kubernetes',
                  get_src_dir(),
                  find_base_dir()}
|
||||||
|
|
||||||
|
|
||||||
|
def find_file(filename):
    """Search POSSIBLE_PATHS for 'filename' and return the first hit.

    Raises:
        exception.KollaNotFoundException: when no candidate path has it.
    """
    for candidate_root in POSSIBLE_PATHS:
        candidate = os.path.join(candidate_root, filename)
        if os.path.exists(candidate):
            return candidate
    raise exception.KollaNotFoundException(filename, entity='file')
|
|
@ -0,0 +1,102 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import logging
|
||||||
|
import os
|
||||||
|
|
||||||
|
import jinja2
|
||||||
|
from jinja2 import meta
|
||||||
|
import six
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from kolla_kubernetes.common import type_utils
|
||||||
|
|
||||||
|
LOG = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# Customize PyYAML library to return the OrderedDict. That is needed, because
|
||||||
|
# when iterating on dict, we reuse its previous values when processing the
|
||||||
|
# next values and the order has to be preserved.
|
||||||
|
|
||||||
|
def ordered_dict_constructor(loader, node):
    """PyYAML constructor building an OrderedDict instead of a plain dict."""
    pairs = loader.construct_pairs(node)
    return collections.OrderedDict(pairs)
|
||||||
|
|
||||||
|
|
||||||
|
def ordered_dict_representer(dumper, data):
    """PyYAML representer serializing an OrderedDict as a plain mapping."""
    items = data.items()
    return dumper.represent_dict(items)
|
||||||
|
|
||||||
|
|
||||||
|
# Register the OrderedDict round-trip handlers with PyYAML's default
# Loader/Dumper so yaml.load()/yaml.dump() preserve mapping order.
# NOTE(review): this mutates global PyYAML state for the whole process,
# affecting any unrelated yaml users in the same interpreter.
yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
                     ordered_dict_constructor)
yaml.add_representer(collections.OrderedDict, ordered_dict_representer)
|
||||||
|
|
||||||
|
|
||||||
|
def jinja_render(fullpath, global_config, extra=None):
    """Render the jinja2 template file at 'fullpath' with the given variables.

    NOTE: 'global_config' is updated in place when 'extra' is given (the
    pre-existing behavior), so callers see the merged mapping afterwards.
    """
    variables = global_config
    if extra:
        variables.update(extra)

    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.dirname(fullpath)))
    environment.filters['bool'] = type_utils.str_to_bool
    template = environment.get_template(os.path.basename(fullpath))
    return template.render(variables)
|
||||||
|
|
||||||
|
|
||||||
|
def jinja_render_str(content, global_config, name='dafault_name', extra=None):
    """Render the in-memory template 'content' with the given variables.

    'name' is only the key used to register the template with jinja2; the
    (misspelled) default value is kept for backward compatibility.  As with
    jinja_render(), 'global_config' is updated in place when 'extra' is
    given.
    """
    variables = global_config
    if extra:
        variables.update(extra)

    environment = jinja2.Environment(
        loader=jinja2.DictLoader({name: content}))
    environment.filters['bool'] = type_utils.str_to_bool
    return environment.get_template(name).render(variables)
|
||||||
|
|
||||||
|
|
||||||
|
def jinja_find_required_variables(fullpath):
    """Return the set of undeclared variables the template at 'fullpath' uses."""
    environment = jinja2.Environment(
        loader=jinja2.FileSystemLoader(os.path.dirname(fullpath)))
    environment.filters['bool'] = type_utils.str_to_bool
    # get_source() returns (source, filename, uptodate); only the text is
    # needed here.
    source, _, _ = environment.loader.get_source(
        environment, os.path.basename(fullpath))
    return meta.find_undeclared_variables(environment.parse(source))
|
||||||
|
|
||||||
|
|
||||||
|
def dict_jinja_render(raw_dict, jvars):
    """Renders dict with jinja2 using provided variables and itself.

    By using itself, we mean reusing the previous values from dict for the
    potential render of the next value in dict.  Results accumulate in
    'jvars' (mutated in place); nothing is returned.
    """
    # Iteration order matters: each rendered value is stored into jvars
    # before the next one is processed, so later entries may reference
    # earlier ones (raw_dict is typically an OrderedDict thanks to the
    # PyYAML hooks registered above).
    for key, value in raw_dict.items():
        if isinstance(value, six.string_types):
            # Leaf string: render with everything known so far.
            value = jinja_render_str(value, jvars)
        elif isinstance(value, dict):
            # NOTE(review): dict_jinja_render() has no return statement, so
            # this assigns None and jvars[key] below becomes None for every
            # nested mapping — looks like a bug; confirm intended behavior
            # before relying on nested values.
            value = dict_jinja_render(value, jvars)
        jvars[key] = value
|
||||||
|
|
||||||
|
|
||||||
|
def yaml_jinja_render(filename, jvars):
    """Parse the YAML file and render its values with jinja2.

    1. The file is parsed as YAML.
    2. Every string value in the resulting mapping is rendered by jinja2,
       reusing earlier keys of the same mapping (see dict_jinja_render);
       results land in 'jvars', which is mutated in place.
    """
    with open(filename, 'r') as yaml_file:
        # NOTE(review): yaml.load() without an explicit Loader uses the
        # full (unsafe) loader — fine for trusted local config, never for
        # untrusted input.  safe_load() would bypass the OrderedDict
        # constructor registered on the default Loader above, so flagging
        # rather than changing.
        raw_dict = yaml.load(yaml_file)
    dict_jinja_render(raw_dict, jvars)
|
|
@ -0,0 +1,19 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
def str_to_bool(text):
    """Interpret 'text' as a boolean.

    Only the case-insensitive strings 'true' and 'yes' map to True;
    None/empty and every other value map to False.
    """
    if not text:
        return False
    return text.lower() in ('true', 'yes')
|
|
@ -0,0 +1,46 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import collections
try:
    import collections.abc  # Mapping lives here on Python 3
except ImportError:  # Python 2: collections.Mapping still exists
    pass
import os

from six.moves.urllib import parse
|
||||||
|
|
||||||
|
|
||||||
|
def env(*args, **kwargs):
    """Return the first non-empty environment variable among 'args'.

    Falls back to kwargs['default'] (or '') when none is set to a
    non-empty value.
    """
    for var_name in args:
        value = os.environ.get(var_name)
        if value:
            return value
    return kwargs.get('default', '')
|
||||||
|
|
||||||
|
|
||||||
|
def dict_update(d, u):
    """Recursively update 'd' with 'u' and return the result.

    Nested mappings are merged key-by-key; non-mapping values in 'u'
    overwrite the corresponding entry in 'd'.  When 'u' itself is not a
    mapping it is returned as-is.  'd' is modified in place.
    """
    # collections.Mapping was an alias removed in Python 3.10; use
    # collections.abc.Mapping there, falling back for Python 2.
    mapping_cls = getattr(collections, 'abc', collections).Mapping

    if not isinstance(u, mapping_cls):
        return u

    for k, v in u.items():
        if isinstance(v, mapping_cls):
            # Merge the nested mapping into whatever is there (default {}).
            d[k] = dict_update(d.get(k, {}), v)
        else:
            d[k] = u[k]
    return d
|
||||||
|
|
||||||
|
|
||||||
|
def get_query_string(search_opts):
    """Build a URL query string ('?k=v&...') from a mapping.

    Keys are emitted in sorted order for deterministic output; a None or
    empty mapping yields ''.
    """
    if not search_opts:
        return ""
    ordered = sorted(search_opts.items(), key=lambda kv: kv[0])
    return "?%s" % parse.urlencode(ordered, doseq=True)
|
|
@ -0,0 +1,55 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
|
||||||
|
from kolla_kubernetes.common import utils
|
||||||
|
|
||||||
|
CONF = cfg.CONF
|
||||||
|
|
||||||
|
# Options describing which Kolla Docker images to deploy.
kolla_opts = [
    cfg.StrOpt('namespace',
               default='kollaglue',
               help='The Docker namespace name'),
    cfg.StrOpt('tag',
               default='1.0.0',
               help='The Docker tag'),
    cfg.StrOpt('base',
               default='centos',
               help='The base distro which was used to build images'),
    cfg.StrOpt('base-tag',
               default='latest',
               help='The base distro image tag'),
    cfg.StrOpt('install-type',
               default='binary',
               help='The method of the OpenStack install'),
    # Defaults to the invoking user's name so parallel deployments by
    # different users do not collide.
    cfg.StrOpt('deployment-id',
               default=utils.env('USER', default='default'),
               help='Uniq name for deployment'),
    cfg.StrOpt('profile',
               default='default',
               help='Build profile which was used to build images')
]
kolla_opt_group = cfg.OptGroup(name='kolla',
                               title='Options for Kolla Docker images')
CONF.register_group(kolla_opt_group)
# NOTE(review): the same opts are registered both as CLI opts and as
# regular opts; oslo.config tolerates duplicate registration of identical
# opts, but one of the two calls is likely redundant — confirm intent.
CONF.register_cli_opts(kolla_opts, kolla_opt_group)
CONF.register_opts(kolla_opts, kolla_opt_group)
|
||||||
|
|
||||||
|
# Options controlling how to reach the Kubernetes API.
kubernetes_opts = [
    # API server endpoint passed to kubectl via --server.
    cfg.StrOpt('host', default='http://localhost:8080'),
    # Path to the kubectl binary; bare name relies on $PATH lookup.
    cfg.StrOpt('kubectl_path', default='kubectl')
]
kubernetes_opt_group = cfg.OptGroup(name='kolla_kubernetes')
CONF.register_group(kubernetes_opt_group)
# NOTE(review): same dual registration pattern as the kolla group above.
CONF.register_cli_opts(kubernetes_opts, kubernetes_opt_group)
CONF.register_opts(kubernetes_opts, kubernetes_opt_group)
|
|
@ -0,0 +1,31 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
class KollaException(Exception):
    """Base class for all kolla-kubernetes exceptions."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class KollaDirNotFoundException(KollaException):
    """Raised when the kolla-kubernetes base directory cannot be located."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
class KollaNotFoundException(KollaException):
    """Raised when a required entity (file, directory, ...) is missing."""

    def __init__(self, message, entity='file'):
        # 'message' names the missing thing; 'entity' describes its kind.
        text = 'The %s "%s" was not found' % (entity, message)
        super(KollaNotFoundException, self).__init__(text)
|
||||||
|
|
||||||
|
|
||||||
|
class KollaNotSupportedException(KollaNotFoundException):
    """Raised when an operation is not supported by a backend."""

    def __init__(self, operation='update', entity='kubernetes'):
        # NOTE(review): super() is given KollaNotFoundException, so the MRO
        # skips KollaNotFoundException.__init__ (and its '... was not
        # found' wording) and lands on Exception.__init__ with the fully
        # formatted message below.  If that skip is not intentional, this
        # looks like a copy-paste of the parent's super() call — confirm.
        super(KollaNotFoundException, self).__init__(
            'Operation "%s" is not supported by "%s"' % (operation, entity))
|
|
@ -0,0 +1,265 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import datetime
|
||||||
|
import functools
|
||||||
|
import os.path
|
||||||
|
import subprocess
|
||||||
|
import tempfile
|
||||||
|
import time
|
||||||
|
|
||||||
|
from oslo_config import cfg
|
||||||
|
from oslo_log import log as logging
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from kolla_kubernetes.common import file_utils
|
||||||
|
from kolla_kubernetes.common import jinja_utils
|
||||||
|
from kolla_kubernetes import service_definition
|
||||||
|
|
||||||
|
LOG = logging.getLogger()
|
||||||
|
CONF = cfg.CONF
|
||||||
|
CONF.import_group('kolla', 'kolla_kubernetes.config')
|
||||||
|
CONF.import_group('kolla_kubernetes', 'kolla_kubernetes.config')
|
||||||
|
|
||||||
|
|
||||||
|
def execute_if_enabled(f):
    """Decorator for executing methods only if the runner is enabled.

    When self._enabled is falsy, the wrapped method is skipped entirely
    and None is returned; otherwise the call goes through unchanged.
    """

    @functools.wraps(f)
    def wrapper(self, *args, **kwargs):
        if self._enabled:
            return f(self, *args, **kwargs)
        return None

    return wrapper
|
||||||
|
|
||||||
|
|
||||||
|
class File(object):
    """A config file belonging to a service.

    Currently only captures metadata; the merge/zookeeper machinery below
    is carried over commented-out (WIP) and is not executed.
    """

    def __init__(self, conf, name, service_name):
        # Raw config mapping for this file (presumably the
        # source/dest/owner/perm schema from service_definition.CNF_FIELDS
        # — confirm).
        self._conf = conf
        # File name as referenced by the service definition.
        self._name = name
        # Owning service, used for namespacing.
        self._service_name = service_name
        # NOTE(review): find_base_dir() may raise KollaDirNotFoundException
        # at construction time.
        self.base_dir = os.path.abspath(file_utils.find_base_dir())

    # def merge_ini_files(self, source_files):
    #     config_p = configparser.ConfigParser()
    #     for src_file in source_files:
    #         if not src_file.startswith('/'):
    #             src_file = os.path.join(self.base_dir, src_file)
    #         if not os.path.exists(src_file):
    #             LOG.warning('path missing %s' % src_file)
    #             continue
    #         config_p.read(src_file)
    #     merged_f = cStringIO()
    #     config_p.write(merged_f)
    #     return merged_f.getvalue()
    #
    # def write_to_zookeeper(self, zk, base_node):
    #     dest_node = os.path.join(base_node, self._service_name,
    #                              'files', self._name)
    #     zk.ensure_path(dest_node)
    #     if isinstance(self._conf['source'], list):
    #         content = self.merge_ini_files(self._conf['source'])
    #     else:
    #         src_file = self._conf['source']
    #         if not src_file.startswith('/'):
    #             src_file = file_utils.find_file(src_file)
    #         with open(src_file) as fp:
    #             content = fp.read()
    #     zk.set(dest_node, content.encode('utf-8'))
|
||||||
|
|
||||||
|
|
||||||
|
class Command(object):
    """A single command defined by a service definition.

    Only metadata is stored for now; the zookeeper writer below is
    commented-out WIP.
    """

    def __init__(self, conf, name, service_name):
        # Raw command config mapping.
        self._conf = conf
        # Command name as it appears in the service definition.
        self._name = name
        # Owning service.
        self._service_name = service_name

    # def write_to_zookeeper(self, zk, base_node):
    #     for fn in self._conf.get('files', []):
    #         fo = File(self._conf['files'][fn], fn, self._service_name)
    #         fo.write_to_zookeeper(zk, base_node)
|
||||||
|
|
||||||
|
|
||||||
|
class Runner(object):
    """Holds a rendered service definition and drives its deployment."""

    def __init__(self, conf):
        self._conf = conf
        # NOTE(review): find_base_dir() may raise at construction time.
        self.base_dir = os.path.abspath(file_utils.find_base_dir())
        self.type_name = None
        # Services may be switched off wholesale via 'enabled' in the conf.
        self._enabled = self._conf.get('enabled', True)
        if not self._enabled:
            # warn() is the deprecated alias of warning(); use the
            # canonical method.
            LOG.warning('Service %s disabled', self._conf['name'])
        self.app_file = None
        self.app_def = None

    def __new__(cls, conf):
        """Create a new Runner of the appropriate class for its type."""
        # Call is already for a subclass, so pass it through
        RunnerClass = cls
        return super(Runner, cls).__new__(RunnerClass)

    @classmethod
    def load_from_file(cls, service_file, variables):
        """Jinja-render 'service_file' and build a Runner from the YAML."""
        # NOTE(review): yaml.load() without a Loader is the unsafe full
        # loader — acceptable for local service definitions, never for
        # untrusted input (safe_load would drop the OrderedDict hook
        # registered in jinja_utils).
        return Runner(yaml.load(
            jinja_utils.jinja_render(service_file, variables)))

    def _list_commands(self):
        """Yield (name, conf) for the daemon plus any auxiliary commands."""
        if 'service' in self._conf:
            yield 'daemon', self._conf['service']['daemon']
        for key in self._conf.get('commands', []):
            yield key, self._conf['commands'][key]

    # @execute_if_enabled
    # def write_to_zookeeper(self, zk, base_node):
    #     for cmd_name, cmd_conf in self._list_commands():
    #         cmd = Command(cmd_conf, cmd_name, self._conf['name'])
    #         # cmd.write_to_zookeeper(zk, base_node)
    #
    #     dest_node = os.path.join(base_node, self._conf['name'])
    #     # zk.ensure_path(dest_node)
    #     # try:
    #     #     zk.set(dest_node, json.dumps(self._conf).encode('utf-8'))
    #     # except Exception as te:
    #     #     LOG.error('%s=%s -> %s' % (dest_node, self._conf, te))

    # @classmethod
    # def load_from_zk(cls, zk, service_name):
    #     variables = _load_variables_from_zk(zk)
    #     base_node = os.path.join('kolla', CONF.kolla.deployment_id)
    #     dest_node = os.path.join(base_node, "openstack",
    #                              service_name.split('-')[0], service_name)
    #     try:
    #         conf_raw, _st = zk.get(dest_node)
    #     except Exception as te:
    #         LOG.error('%s -> %s' % (dest_node, te))
    #         raise NameError(te)
    #     return Runner(yaml.load(
    #         jinja_utils.jinja_render_str(conf_raw.decode('utf-8'),
    #                                      variables)))
|
||||||
|
|
||||||
|
|
||||||
|
class JvarsDict(dict):
    """Mapping whose 'global_vars' keys are shielded from modification.

    Once set_global_vars() has been called, neither item assignment nor
    update() can overwrite those keys; set_force() (or update(...,
    force=True)) is the only escape hatch.
    """

    def __init__(self, *args, **kwargs):
        super(JvarsDict, self).__init__(*args, **kwargs)
        # Keys recorded here win over any later non-forced write.
        self.global_vars = {}

    def __setitem__(self, key, value, force=False):
        # Silently drop writes that would shadow a global variable.
        if key in self.global_vars and not force:
            return
        return super(JvarsDict, self).__setitem__(key, value)

    def set_force(self, key, value):
        """Set 'key' even if it overrides a global variable."""
        return self.__setitem__(key, value, force=True)

    def update(self, other_dict, force=False):
        """Merge 'other_dict' in, skipping protected keys unless forced."""
        if force:
            allowed = other_dict
        else:
            allowed = {key: value for key, value in other_dict.items()
                       if key not in self.global_vars}
        super(JvarsDict, self).update(allowed)

    def set_global_vars(self, global_vars):
        """Install 'global_vars' into the dict and mark them protected."""
        self.update(global_vars)
        self.global_vars = global_vars
|
||||||
|
|
||||||
|
|
||||||
|
def _load_variables_from_file(service_dir, project_name):
    """Assemble the jinja variable set for 'project_name'.

    Layering order (globals.yml entries are protected by JvarsDict and are
    never overridden by later layers): globals.yml -> passwords.yml ->
    built-in values -> config/all.yml -> config/<project>/defaults/main.yml.
    Returns the populated JvarsDict.
    """
    config_dir = os.path.join(service_dir, '..', 'config')
    jvars = JvarsDict()
    LOG.debug('globals path : %s', file_utils.find_config_file('globals.yml'))
    with open(file_utils.find_config_file('globals.yml'), 'r') as gf:
        # NOTE(review): yaml.load() without a Loader — unsafe full loader;
        # acceptable only because these are trusted local config files.
        jvars.set_global_vars(yaml.load(gf))
    with open(file_utils.find_config_file('passwords.yml'), 'r') as gf:
        jvars.update(yaml.load(gf))
    # Apply the basic variables that aren't defined in any config file.
    jvars.update({
        'deployment_id': CONF.kolla.deployment_id,
        'node_config_directory': '',
        'timestamp': str(time.time())
    })
    # Get the exact marathon framework name.
    # config.get_marathon_framework(jvars)
    # all.yml file uses some its variables to template itself by jinja2,
    # so its raw content is used to template the file
    all_yml_name = os.path.join(config_dir, 'all.yml')
    jinja_utils.yaml_jinja_render(all_yml_name, jvars)
    # Apply the dynamic deployment variables.
    # config.apply_deployment_vars(jvars)

    proj_yml_name = os.path.join(config_dir, project_name,
                                 'defaults', 'main.yml')
    if os.path.exists(proj_yml_name):
        jinja_utils.yaml_jinja_render(proj_yml_name, jvars)
    else:
        # Missing defaults are tolerated; rendering proceeds without them.
        LOG.warning('Path missing %s' % proj_yml_name)
    return jvars
|
||||||
|
|
||||||
|
|
||||||
|
def _build_runner(service_name, service_dir, variables=None):
    """Render every file of 'service_name' into a temp dir; return its path.

    NOTE(review): the 'variables' parameter is overwritten inside the loop
    by _load_variables_from_file(), so whatever the caller passes is
    effectively ignored — confirm whether that is intended.
    """
    # Timestamped prefix keeps successive runs distinguishable on disk.
    ts = time.time()
    ts = datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d_%H-%M-%S_')
    temp_dir = tempfile.mkdtemp(prefix='kolla-' + ts)
    working_dir = os.path.join(temp_dir, 'kubernetes')
    os.makedirs(working_dir)

    for filename in service_definition.find_service_files(service_name,
                                                          service_dir):
        # NOTE(review): '/'-based splitting assumes POSIX paths.
        proj_filename = filename.split('/')[-1]
        proj_name = filename.split('/')[-2]
        LOG.debug(
            'proj_filename : %s proj_name: %s' % (proj_filename, proj_name))

        # is this a snapshot or from original src?
        variables = _load_variables_from_file(service_dir, proj_name)

        # 1. validate the definition with the given variables
        service_definition.validate(service_name, service_dir, variables)

        # Render, re-parse, and re-dump so the output file is normalized
        # block-style YAML rather than raw template output.
        content = yaml.load(
            jinja_utils.jinja_render(filename, variables))
        with open(os.path.join(working_dir, proj_filename), 'w') as f:
            LOG.debug('_build_runner : service file : %s' %
                      os.path.join(working_dir, proj_filename))
            f.write(yaml.dump(content, default_flow_style=False))

    return working_dir
|
||||||
|
|
||||||
|
|
||||||
|
def run_service(service_name, service_dir, variables=None):
    """Render the service's manifests and create them via kubectl."""
    directory = _build_runner(service_name, service_dir, variables=variables)
    _deploy_instance(directory, service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def kill_service(service_name, service_dir, variables=None):
    """Render the service's manifests and delete them via kubectl."""
    directory = _build_runner(service_name, service_dir, variables=variables)
    _delete_instance(directory, service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def _deploy_instance(directory, service_name):
    """Run 'kubectl create -f <directory>' against the configured server.

    'service_name' is currently unused but kept for interface symmetry
    with _delete_instance().  The kubectl exit status is not checked.
    """
    cmd = [
        CONF.kolla_kubernetes.kubectl_path,
        "--server=" + CONF.kolla_kubernetes.host,
        "create",
        "-f",
        directory,
    ]
    LOG.info('Command : %r' % cmd)
    subprocess.call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def _delete_instance(directory, service_name):
    """Run 'kubectl delete -f <directory>' against the configured server.

    'service_name' is currently unused but kept for interface symmetry
    with _deploy_instance().  The kubectl exit status is not checked.
    """
    cmd = [
        CONF.kolla_kubernetes.kubectl_path,
        "--server=" + CONF.kolla_kubernetes.host,
        "delete",
        "-f",
        directory,
    ]
    LOG.info('Command : %r' % cmd)
    subprocess.call(cmd)
|
|
@ -0,0 +1,151 @@
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
|
||||||
|
import os.path
|
||||||
|
import socket
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
import jinja2
|
||||||
|
from oslo_log import log
|
||||||
|
|
||||||
|
from kolla_kubernetes.common import jinja_utils
|
||||||
|
from kolla_kubernetes import exception
|
||||||
|
|
||||||
|
# Allowed keys for a config-file entry in a service definition.
CNF_FIELDS = ('source', 'dest', 'owner', 'perm')
# Allowed keys for a command entry.
CMD_FIELDS = ('run_once', 'dependencies', 'command', 'env',
              'delay', 'retries', 'files')
# Allowed keys for a dependency entry.
DEP_FIELDS = ('path', 'scope')
# Valid values for a dependency's 'scope' key.
SCOPE_OPTS = ('global', 'local')
LOG = log.getLogger()
|
||||||
|
|
||||||
|
|
||||||
|
def find_service_file(service_name, service_dir):
    """Return the first file under 'service_dir' matching 'service_name'.

    Matching is by substring against the short name (last path component,
    with '_ansible_tasks' mapped to '-init').

    Raises:
        exception.KollaNotFoundException: missing directory or no match.
    """
    # let's be flexible with the input, to make life easy for users.
    if not os.path.exists(service_dir):
        raise exception.KollaNotFoundException(service_dir,
                                               entity='service directory')

    short_name = service_name.split('/')[-1].replace('_ansible_tasks',
                                                     '-init')
    for root, _dirs, file_names in os.walk(service_dir):
        for file_name in file_names:
            if short_name in file_name:
                return os.path.join(root, file_name)

    raise exception.KollaNotFoundException(service_name,
                                           entity='service definition')
|
||||||
|
|
||||||
|
|
||||||
|
def find_service_files(service_name, service_dir):
    """Return every file under 'service_dir' matching 'service_name'.

    Matching is by substring against the short name (last path component,
    with '_ansible_tasks' mapped to '-init').

    Raises:
        exception.KollaNotFoundException: missing directory or no match.
    """
    # let's be flexible with the input, to make life easy for users.
    if not os.path.exists(service_dir):
        raise exception.KollaNotFoundException(service_dir,
                                               entity='service directory')

    short_name = service_name.split('/')[-1].replace('_ansible_tasks',
                                                     '-init')
    matches = [os.path.join(root, file_name)
               for root, _dirs, file_names in os.walk(service_dir)
               for file_name in file_names
               if short_name in file_name]

    if not matches:
        raise exception.KollaNotFoundException(service_name,
                                               entity='service definition')
    return matches
|
||||||
|
|
||||||
|
|
||||||
|
def inspect(service_name, service_dir):
    """Report the jinja2 variables a service definition requires."""
    filename = find_service_file(service_name, service_dir)
    try:
        # The original used set.union() on the single result set, which is
        # just a copy; a plain set() constructor is equivalent.
        required_variables = set(
            jinja_utils.jinja_find_required_variables(filename))
    except jinja2.exceptions.TemplateNotFound:
        raise exception.KollaNotFoundException(filename,
                                               entity='service definition')
    return dict(required_variables=list(required_variables))
|
||||||
|
|
||||||
|
|
||||||
|
def validate(service_name, service_dir, variables=None, deps=None):
    """Render a service definition and sanity-check its commands.

    Populates and returns 'deps', a registry mapping dependency paths to
    their registrants and waiters (see _validate_command).
    """
    if variables is None:
        variables = {}
    if deps is None:
        deps = {}

    filename = find_service_file(service_name, service_dir)
    try:
        # NOTE(review): yaml.load() without a Loader — unsafe full loader;
        # acceptable only for trusted local service definitions.
        cnf = yaml.load(jinja_utils.jinja_render(filename, variables))
    except jinja2.exceptions.TemplateNotFound:
        raise exception.KollaNotFoundException(filename,
                                               entity='service definition')

    def get_commands():
        # Auxiliary commands first, then the long-running daemon (if any).
        for cmd in cnf.get('commands', {}):
            yield cmd, cnf['commands'][cmd]
        if 'service' in cnf:
            yield 'daemon', cnf['service']['daemon']

    # NOTE(review): the log line reads cnf['metadata']['name'] while the
    # call below passes cnf['name'] — the two expect different schemas and
    # one is likely wrong; confirm against the service-definition format.
    LOG.debug('%s: file found at %s' % (cnf['metadata']['name'], filename))
    for cmd, cmd_info in get_commands():
        _validate_command(filename, cmd, cmd_info, deps,
                          cnf['name'], service_dir)
    return deps
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_config(filename, conf, service_dir):
    """Assert every file entry uses known keys and its sources exist."""
    for file in conf:
        for key in conf[file]:
            assert key in CNF_FIELDS, '%s: %s not in %s' % (filename,
                                                            key, CNF_FIELDS)
        # 'source' may be a single path or a list of paths.
        # NOTE(review): isinstance(..., str) misses unicode on Python 2
        # even though sibling modules use six — confirm.
        srcs = conf[file]['source']
        if isinstance(srcs, str):
            srcs = [srcs]
        for src in srcs:
            file_path = os.path.join(service_dir, '..', src)
            # Absolute /etc paths are skipped — presumably they are
            # expected to exist on the target node, not here; verify.
            if not file_path.startswith('/etc'):
                assert os.path.exists(file_path), '%s missing' % file_path
|
||||||
|
|
||||||
|
|
||||||
|
def _validate_command(filename, cmd, cmd_info, deps,
                      service_name, service_dir):
    """Check one command's keys and record its dependency graph in 'deps'."""
    for key in cmd_info:
        assert key in CMD_FIELDS, '%s not in %s' % (key, CMD_FIELDS)

    # service_name is expected to look like '<prefix>/<group>/<role>'.
    _, group, role = service_name.split('/')
    # Each command registers under a global path and a host-local path.
    regs = ['%s/%s' % (role, cmd),
            '%s/%s/%s' % (socket.gethostname(), role, cmd)]
    reqs = cmd_info.get('dependencies', [])
    for reg in regs:
        if reg not in deps:
            deps[reg] = {'waiters': {}}
        deps[reg]['registered_by'] = cmd
        deps[reg]['name'] = cmd
        deps[reg]['run_by'] = filename
    for req in reqs:
        for key in req:
            assert key in DEP_FIELDS, '%s: %s not in %s' % (filename,
                                                            key, DEP_FIELDS)
        scope = req.get('scope', 'global')
        assert scope in SCOPE_OPTS, '%s: %s not in %s' % (filename,
                                                          scope, SCOPE_OPTS)
        req_path = req['path']
        if scope == 'local':
            # Local dependencies are namespaced by this host's name.
            req_path = os.path.join(socket.gethostname(), req_path)
        if req_path not in deps:
            deps[req_path] = {'waiters': {}}
        for reg in regs:
            # NOTE(review): the same 'cmd' key is overwritten on every
            # iteration, so only the last 'reg' survives — confirm whether
            # keying by 'reg' was intended instead.
            deps[req_path]['waiters'][cmd] = reg
    if 'files' in cmd_info:
        _validate_config(filename, cmd_info['files'], service_dir)
    LOG.debug('%s: command "%s" validated' % (service_name, cmd))
|
|
@ -3,3 +3,7 @@
|
||||||
# process, which may cause wedges in the gate later.
|
# process, which may cause wedges in the gate later.
|
||||||
|
|
||||||
pbr>=1.6
|
pbr>=1.6
|
||||||
|
cliff!=1.16.0,!=1.17.0,>=1.15.0 # Apache-2.0
|
||||||
|
oslo.config>=3.9.0 # Apache-2.0
|
||||||
|
oslo.utils>=3.5.0 # Apache-2.0
|
||||||
|
oslo.log>=1.14.0 # Apache-2.0
|
||||||
|
|
|
@ -0,0 +1,20 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Pod
|
||||||
|
spec:
|
||||||
|
hostNetwork: True
|
||||||
|
containers:
|
||||||
|
#TODO: Use a jinja2 template for image
|
||||||
|
- image: "{{ keystone_image_full }}"
|
||||||
|
name: keystone
|
||||||
|
volumeMounts:
|
||||||
|
- mountPath: "/var/lib/kolla/config_files"
|
||||||
|
name: keystone-config
|
||||||
|
env:
|
||||||
|
- name: KOLLA_CONFIG_STRATEGY
|
||||||
|
value: COPY_ALWAYS
|
||||||
|
volumes:
|
||||||
|
- name: keystone-config
|
||||||
|
hostPath:
|
||||||
|
path: "/etc/kolla/keystone"
|
||||||
|
metadata:
|
||||||
|
name: keystone
|
|
@ -0,0 +1,9 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
- port: 5000
|
||||||
|
selector:
|
||||||
|
name: keystone-public
|
||||||
|
metadata:
|
||||||
|
name: keystone-public
|
|
@ -0,0 +1,9 @@
|
||||||
|
apiVersion: v1
|
||||||
|
kind: Service
|
||||||
|
spec:
|
||||||
|
ports:
|
||||||
|
- port: 5000
|
||||||
|
selector:
|
||||||
|
name: keystone-public
|
||||||
|
metadata:
|
||||||
|
name: keystone-public
|
12
setup.cfg
12
setup.cfg
|
@ -21,7 +21,17 @@ classifier =
|
||||||
|
|
||||||
[files]
|
[files]
|
||||||
packages =
|
packages =
|
||||||
kolla-kubernetes
|
kolla_kubernetes
|
||||||
|
data_files =
|
||||||
|
share/kolla-kubernetes/services = services/*
|
||||||
|
|
||||||
|
[entry_points]
|
||||||
|
console_scripts =
|
||||||
|
kolla-kubernetes = kolla_kubernetes.cmd.shell:main
|
||||||
|
|
||||||
|
kolla_kubernetes.cli =
|
||||||
|
run = kolla_kubernetes.cli.service:Run
|
||||||
|
kill = kolla_kubernetes.cli.service:Kill
|
||||||
|
|
||||||
[build_sphinx]
|
[build_sphinx]
|
||||||
source-dir = doc/source
|
source-dir = doc/source
|
||||||
|
|
Loading…
Reference in New Issue