From 8dfc414bc37155b049fbbd68274badeac0c00fb5 Mon Sep 17 00:00:00 2001 From: Dmitry Shulyak <dshulyak@mirantis.com> Date: Thu, 8 Oct 2015 17:54:55 +0300 Subject: [PATCH] Add ceph_mon resource and example --- examples/library_ceph/ceph.py | 55 +++++++++++++++++ resources/ceph_mon/actions/run.pp | 95 ++++++++++++++++++++++++++++++ resources/ceph_mon/actions/test.pp | 4 ++ resources/ceph_mon/meta.yaml | 43 ++++++++++++++ 4 files changed, 197 insertions(+) create mode 100644 examples/library_ceph/ceph.py create mode 100644 resources/ceph_mon/actions/run.pp create mode 100644 resources/ceph_mon/actions/test.pp create mode 100644 resources/ceph_mon/meta.yaml diff --git a/examples/library_ceph/ceph.py b/examples/library_ceph/ceph.py new file mode 100644 index 0000000..d89eb9a --- /dev/null +++ b/examples/library_ceph/ceph.py @@ -0,0 +1,55 @@ + +from solar.core.resource import virtual_resource as vr +from solar.interfaces.db import get_db + +import yaml + +db = get_db() + +STORAGE = {'objects_ceph': True, + 'osd_pool_size': 2, + 'pg_num': 128} + +KEYSTONE = {'admin_token': 'abcde'} + + +NETWORK_SCHEMA = { + 'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}}, + 'roles': {'ceph/replication': 'eth1', + 'ceph/public': 'eth1'} + } + +NETWORK_METADATA = yaml.load(""" + node-1: + uid: '1' + fqdn: node-1 + network_roles: + ceph/public: 10.0.0.3 + ceph/replication: 10.0.0.3 + node_roles: + - ceph-mon + name: node-1 + + """) + + +def deploy(): + db.clear() + resources = vr.create('nodes', 'templates/nodes.yaml', {'count': 1}) + first_node = next(x for x in resources if x.name.startswith('node')) + + ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon', + {'storage': STORAGE, + 'keystone': KEYSTONE, + 'network_scheme': NETWORK_SCHEMA, + 'ceph_monitor_nodes': NETWORK_METADATA, + 'ceph_primary_monitor_node': NETWORK_METADATA, + 'role': 'controller', + })[0] + first_node.connect(ceph_mon) + first_node.connect(ceph_mon, {'ip': 'public_vip'}) + first_node.connect(ceph_mon, {'ip': 'management_vip'}) + + +if __name__ == '__main__': + deploy() diff --git a/resources/ceph_mon/actions/run.pp b/resources/ceph_mon/actions/run.pp new file mode 100644 index 0000000..6b172a8 --- /dev/null +++ b/resources/ceph_mon/actions/run.pp @@ -0,0 +1,95 @@ +notice('MODULAR: ceph/mon.pp') + + +$storage_hash = hiera('storage', {}) +$public_vip = hiera('public_vip') +$management_vip = hiera('management_vip') +$use_syslog = hiera('use_syslog', true) +$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0') +$keystone_hash = hiera('keystone', {}) +$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public') + +if ($storage_hash['images_ceph']) { + $glance_backend = 'ceph' +} elsif ($storage_hash['images_vcenter']) { + $glance_backend = 'vmware' +} else { + $glance_backend = 'swift' +} + +if ($storage_hash['volumes_ceph'] or + $storage_hash['images_ceph'] or + $storage_hash['objects_ceph'] or + $storage_hash['ephemeral_ceph'] +) { + $use_ceph = true +} else { + $use_ceph = false +} + +if $use_ceph { + $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node') + $primary_mons = keys($ceph_primary_monitor_node) + $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name'] + + prepare_network_config(hiera_hash('network_scheme')) + $ceph_cluster_network = get_network_role_property('ceph/replication', 'network') + $ceph_public_network = get_network_role_property('ceph/public', 'network') + $mon_addr = get_network_role_property('ceph/public', 'ipaddr') + + class {'ceph': + 
primary_mon => $primary_mon, + mon_hosts => keys($mon_address_map), + mon_ip_addresses => values($mon_address_map), + mon_addr => $mon_addr, + cluster_node_address => $public_vip, + osd_pool_default_size => $storage_hash['osd_pool_size'], + osd_pool_default_pg_num => $storage_hash['pg_num'], + osd_pool_default_pgp_num => $storage_hash['pg_num'], + use_rgw => false, + glance_backend => $glance_backend, + rgw_pub_ip => $public_vip, + rgw_adm_ip => $management_vip, + rgw_int_ip => $management_vip, + cluster_network => $ceph_cluster_network, + public_network => $ceph_public_network, + use_syslog => $use_syslog, + syslog_log_level => hiera('syslog_log_level_ceph', 'info'), + syslog_log_facility => $syslog_log_facility_ceph, + rgw_keystone_admin_token => $keystone_hash['admin_token'], + ephemeral_ceph => $storage_hash['ephemeral_ceph'] + } + + if ($storage_hash['volumes_ceph']) { + include ::cinder::params + service { 'cinder-volume': + ensure => 'running', + name => $::cinder::params::volume_service, + hasstatus => true, + hasrestart => true, + } + + service { 'cinder-backup': + ensure => 'running', + name => $::cinder::params::backup_service, + hasstatus => true, + hasrestart => true, + } + + Class['ceph'] ~> Service['cinder-volume'] + Class['ceph'] ~> Service['cinder-backup'] + } + + if ($storage_hash['images_ceph']) { + include ::glance::params + service { 'glance-api': + ensure => 'running', + name => $::glance::params::api_service_name, + hasstatus => true, + hasrestart => true, + } + + Class['ceph'] ~> Service['glance-api'] + } + +} diff --git a/resources/ceph_mon/actions/test.pp b/resources/ceph_mon/actions/test.pp new file mode 100644 index 0000000..b5d0bbf --- /dev/null +++ b/resources/ceph_mon/actions/test.pp @@ -0,0 +1,4 @@ +prepare_network_config(hiera_hash('network_scheme')) +$ceph_cluster_network = get_network_role_property('ceph/replication', 'network') + +notify{"The value is: ${ceph_cluster_network}": } diff --git a/resources/ceph_mon/meta.yaml b/resources/ceph_mon/meta.yaml new file mode 100644 index 0000000..3e4c322 --- /dev/null +++ b/resources/ceph_mon/meta.yaml @@ -0,0 +1,43 @@ +id: ceph_mon +handler: puppet +version: 1.0.0 + + +input: + library: + schema: {repository: str!, branch: str!, puppet_modules: str} + value: {repository: 'https://github.com/stackforge/fuel-library', + branch: 'stable/7.0', + puppet_modules: 'deployment/puppet'} + ip: + schema: str! + value: + public_vip: + schema: str! + value: + management_vip: + schema: str! + value: + use_syslog: + schema: bool + value: true + keystone: + schema: {'admin_token': 'str'} + value: {} + ceph_monitor_nodes: + schema: [] + value: [] + ceph_primary_monitor_node: + schema: [] + value: [] + storage: + schema: {} + value: {} + network_scheme: + schema: {} + value: {} + role: + schema: str! + value: + +tags: []
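
Note (not part of the patch): run.pp builds $mon_address_map with the fuel-library
function get_node_to_ipaddr_map_by_network_role() and then feeds keys() and values()
of that map into mon_hosts and mon_ip_addresses of class {'ceph': ...}. The sketch
below is an illustrative Python stand-in for that lookup, applied to the same
ceph_monitor_nodes data that examples/library_ceph/ceph.py passes in.
node_to_ipaddr_map() is a hypothetical helper written only to show the expected
shape of the result; it is not an existing Solar or fuel-library API.

import yaml

NETWORK_METADATA = yaml.safe_load("""
node-1:
  uid: '1'
  fqdn: node-1
  network_roles:
    ceph/public: 10.0.0.3
    ceph/replication: 10.0.0.3
  node_roles:
    - ceph-mon
  name: node-1
""")

def node_to_ipaddr_map(nodes, role):
    # Mirrors what get_node_to_ipaddr_map_by_network_role() produces: a map of
    # node name -> address of the requested network role. keys() of this map
    # become mon_hosts and values() become mon_ip_addresses in
    # resources/ceph_mon/actions/run.pp.
    return {attrs['name']: attrs['network_roles'][role]
            for attrs in nodes.values()
            if role in attrs.get('network_roles', {})}

print(node_to_ipaddr_map(NETWORK_METADATA, 'ceph/public'))
# {'node-1': '10.0.0.3'}

With the STORAGE hash from the example ({'objects_ceph': True, ...}), run.pp sets
$use_ceph to true (objects_ceph satisfies the or-chain) and $glance_backend falls
through to 'swift', since neither images_ceph nor images_vcenter is provided.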