Implement scenario004 with Ceph Rados Gateway scenario

Like Puppet OpenStack CI, implement scenario004 with the Ceph RGW scenario,
where Glance uses it as an image storage backend.

Change-Id: If055ca225c456a738c5726ef1e76a4a4f9c566a8
Emilien Macchi 2016-11-29 17:47:36 -05:00
parent f33475840c
commit 072a06f3b2
2 changed files with 189 additions and 0 deletions
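
The first file below is the scenario004 Heat environment. In a TripleO environment file, the resource_registry section maps each composable service's resource type to the template that implements it, and mapping a type to OS::Heat::None disables that service; that is how the Swift services are switched off here in favor of Ceph RGW. A minimal sketch of the pattern, reusing mappings from the file below:

resource_registry:
  # Enable a service by pointing its type at an implementing template
  OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml
  # Disable a service by mapping its type to the no-op resource
  OS::TripleO::Services::SwiftProxy: OS::Heat::None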


@@ -0,0 +1,62 @@
resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
  OS::TripleO::Services::CephMon: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-mon.yaml
  OS::TripleO::Services::CephOSD: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-osd.yaml
  OS::TripleO::Services::CephClient: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-client.yaml
  OS::TripleO::Services::CephRgw: /usr/share/openstack-tripleo-heat-templates/puppet/services/ceph-rgw.yaml
  OS::TripleO::Services::SwiftProxy: OS::Heat::None
  OS::TripleO::Services::SwiftStorage: OS::Heat::None
  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None

parameter_defaults:
  ControllerServices:
    - OS::TripleO::Services::Kernel
    - OS::TripleO::Services::Keystone
    - OS::TripleO::Services::GlanceApi
    - OS::TripleO::Services::GlanceRegistry
    - OS::TripleO::Services::HeatApi
    - OS::TripleO::Services::HeatApiCfn
    - OS::TripleO::Services::HeatApiCloudwatch
    - OS::TripleO::Services::HeatEngine
    - OS::TripleO::Services::MySQL
    - OS::TripleO::Services::NeutronDhcpAgent
    - OS::TripleO::Services::NeutronL3Agent
    - OS::TripleO::Services::NeutronMetadataAgent
    - OS::TripleO::Services::NeutronServer
    - OS::TripleO::Services::NeutronCorePlugin
    - OS::TripleO::Services::NeutronOvsAgent
    - OS::TripleO::Services::RabbitMQ
    - OS::TripleO::Services::HAproxy
    - OS::TripleO::Services::Keepalived
    - OS::TripleO::Services::Memcached
    - OS::TripleO::Services::Pacemaker
    - OS::TripleO::Services::NovaConductor
    - OS::TripleO::Services::NovaApi
    - OS::TripleO::Services::NovaMetadata
    - OS::TripleO::Services::NovaScheduler
    - OS::TripleO::Services::Ntp
    - OS::TripleO::Services::Snmp
    - OS::TripleO::Services::Timezone
    - OS::TripleO::Services::NovaCompute
    - OS::TripleO::Services::NovaLibvirt
    - OS::TripleO::Services::CephMon
    - OS::TripleO::Services::CephOSD
    - OS::TripleO::Services::CephClient
    - OS::TripleO::Services::CephRgw
  ControllerExtraConfig:
    nova::compute::libvirt::services::libvirt_virt_type: qemu
    nova::compute::libvirt::libvirt_virt_type: qemu
  Debug: true
  # NOTE(gfidente): not great but we need this to deploy on ext4
  # http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
  ExtraConfig:
    ceph::profile::params::osd_max_object_name_len: 256
    ceph::profile::params::osd_max_object_namespace_len: 64
  # NOTE: These IDs and keys should be regenerated for
  # a production deployment. What is here is suitable for
  # developer and CI testing only.
  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
  CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
  CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
  CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
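
As the note above says, the FSID and keys in this file are suitable only for developer and CI use. For a production deployment they would be regenerated and supplied through an additional environment file; a minimal sketch, assuming new values come from uuidgen and ceph-authtool --gen-print-key (placeholders only, not real keys):

parameter_defaults:
  # Hypothetical production override; do not reuse the CI values above
  CephClusterFSID: '<output of uuidgen>'
  CephMonKey: '<output of ceph-authtool --gen-print-key>'
  CephAdminKey: '<output of ceph-authtool --gen-print-key>'
  CephClientKey: '<output of ceph-authtool --gen-print-key>'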


@@ -0,0 +1,127 @@
heat_template_version: 2013-05-23

description: >
  HOT template to create resources deployed by scenario004.
parameters:
  key_name:
    type: string
    description: Name of keypair to assign to servers
    default: 'pingtest_key'
  image:
    type: string
    description: Name of image to use for servers
    default: 'pingtest_image'
  public_net_name:
    type: string
    default: 'nova'
    description: >
      ID or name of public network for which floating IP addresses will be allocated
  private_net_name:
    type: string
    description: Name of private network to be created
    default: 'default-net'
  private_net_cidr:
    type: string
    description: Private network address (CIDR notation)
    default: '192.168.2.0/24'
  private_net_gateway:
    type: string
    description: Private network gateway address
    default: '192.168.2.1'
  private_net_pool_start:
    type: string
    description: Start of private network IP address allocation pool
    default: '192.168.2.100'
  private_net_pool_end:
    type: string
    default: '192.168.2.200'
    description: End of private network IP address allocation pool
resources:

  key_pair:
    type: OS::Nova::KeyPair
    properties:
      save_private_key: true
      name: { get_param: key_name }

  private_net:
    type: OS::Neutron::Net
    properties:
      name: { get_param: private_net_name }

  private_subnet:
    type: OS::Neutron::Subnet
    properties:
      network_id: { get_resource: private_net }
      cidr: { get_param: private_net_cidr }
      gateway_ip: { get_param: private_net_gateway }
      allocation_pools:
        - start: { get_param: private_net_pool_start }
          end: { get_param: private_net_pool_end }

  router:
    type: OS::Neutron::Router
    properties:
      external_gateway_info:
        network: { get_param: public_net_name }

  router_interface:
    type: OS::Neutron::RouterInterface
    properties:
      router_id: { get_resource: router }
      subnet_id: { get_resource: private_subnet }

  server1:
    type: OS::Nova::Server
    properties:
      name: Server1
      flavor: { get_resource: test_flavor }
      image: { get_param: image }
      key_name: { get_resource: key_pair }
      networks:
        - port: { get_resource: server1_port }

  server1_port:
    type: OS::Neutron::Port
    properties:
      network_id: { get_resource: private_net }
      fixed_ips:
        - subnet_id: { get_resource: private_subnet }
      security_groups: [{ get_resource: server_security_group }]

  server1_floating_ip:
    type: OS::Neutron::FloatingIP
    # TODO: investigate why we need this depends_on and if we could
    # replace it by router_id with get_resource: router_interface
    depends_on: router_interface
    properties:
      floating_network: { get_param: public_net_name }
      port_id: { get_resource: server1_port }

  server_security_group:
    type: OS::Neutron::SecurityGroup
    properties:
      description: Add security group rules for server
      name: pingtest-security-group
      rules:
        - remote_ip_prefix: 0.0.0.0/0
          protocol: tcp
          port_range_min: 22
          port_range_max: 22
        - remote_ip_prefix: 0.0.0.0/0
          protocol: icmp

  test_flavor:
    type: OS::Nova::Flavor
    properties:
      ram: 512
      vcpus: 1
outputs:
  server1_private_ip:
    description: IP address of server1 in private network
    value: { get_attr: [ server1, first_address ] }
  server1_public_ip:
    description: Floating IP address of server1 in public network
    value: { get_attr: [ server1_floating_ip, floating_ip_address ] }
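
The pingtest template above carries defaults for every parameter, so it can be launched as-is in CI. Outside CI, the defaults can be overridden with an ordinary Heat environment file passed at stack-create time; a minimal sketch (file name and values are examples only):

# pingtest-env.yaml (hypothetical): parameter overrides for the pingtest stack
parameters:
  key_name: pingtest_key
  image: pingtest_image
  public_net_name: nova
  private_net_cidr: 192.168.2.0/24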