Commit 19366f66c9 (parent edbf342988): Add ceph_mon resource and example.
55
examples/library_ceph/ceph.py
Normal file
55
examples/library_ceph/ceph.py
Normal file
@ -0,0 +1,55 @@
|
||||
|
||||
from solar.core.resource import virtual_resource as vr
|
||||
from solar.interfaces.db import get_db
|
||||
|
||||
import yaml
|
||||
|
||||
# Shared DB handle; deploy() clears it before provisioning resources.
db = get_db()
|
||||
|
||||
# Storage configuration passed to the ceph_mon resource: enable the Ceph
# object store, keep 2 replicas per object, pre-create 128 placement groups.
STORAGE = {'objects_ceph': True,
           'osd_pool_size': 2,
           'pg_num': 128}

# Keystone settings consumed by the ceph_mon resource (admin token only).
KEYSTONE = {'admin_token': 'abcde'}


# Network layout handed through to the puppet manifest: a single NIC (eth1)
# carries both the Ceph replication and Ceph public roles.
NETWORK_SCHEMA = {
    'endpoints': {'eth1': {'IP': ['10.0.0.3/24']}},
    'roles': {'ceph/replication': 'eth1',
              'ceph/public': 'eth1'}
}

# Monitor-node metadata keyed by node name, in the shape the manifest's
# get_node_to_ipaddr_map_by_network_role() expects.
# FIX: use yaml.safe_load -- yaml.load without an explicit Loader is
# deprecated since PyYAML 5.1 and unsafe on untrusted input; safe_load is
# sufficient for this plain mapping and yields the identical structure.
NETWORK_METADATA = yaml.safe_load("""
node-1:
  uid: '1'
  fqdn: node-1
  network_roles:
    ceph/public: 10.0.0.3
    ceph/replication: 10.0.0.3
  node_roles:
    - ceph-mon
  name: node-1
""")
|
||||
|
||||
|
||||
def deploy():
    """Provision one node and attach a ceph_mon resource to it.

    Wipes the DB, creates a single node from the nodes template, then
    creates the ceph_mon resource and wires the node's inputs into it.
    """
    db.clear()

    nodes = vr.create('nodes', 'templates/nodes.yaml', {'count': 1})
    node = next(r for r in nodes if r.name.startswith('node'))

    ceph_mon_args = {
        'storage': STORAGE,
        'keystone': KEYSTONE,
        'network_scheme': NETWORK_SCHEMA,
        'ceph_monitor_nodes': NETWORK_METADATA,
        'ceph_primary_monitor_node': NETWORK_METADATA,
        'role': 'controller',
    }
    ceph_mon = vr.create('ceph_mon1', 'resources/ceph_mon', ceph_mon_args)[0]

    # Plain connect first, then map the node's ip onto both VIP inputs the
    # manifest reads from hiera (public_vip / management_vip).
    node.connect(ceph_mon)
    node.connect(ceph_mon, {'ip': 'public_vip'})
    node.connect(ceph_mon, {'ip': 'management_vip'})


if __name__ == '__main__':
    deploy()
|
95
resources/ceph_mon/actions/run.pp
Normal file
95
resources/ceph_mon/actions/run.pp
Normal file
@ -0,0 +1,95 @@
|
||||
# Configure a Ceph monitor on this node (Fuel's modular ceph/mon task).
# All inputs arrive via hiera; the solar puppet handler uploads them from
# the resource's args (see resources/ceph_mon/meta.yaml for the schema).
notice('MODULAR: ceph/mon.pp')

$storage_hash = hiera('storage', {})
$public_vip = hiera('public_vip')
$management_vip = hiera('management_vip')
$use_syslog = hiera('use_syslog', true)
$syslog_log_facility_ceph = hiera('syslog_log_facility_ceph','LOG_LOCAL0')
$keystone_hash = hiera('keystone', {})
# node-name -> ceph/public IP map, built from the ceph_monitor_nodes input.
$mon_address_map = get_node_to_ipaddr_map_by_network_role(hiera_hash('ceph_monitor_nodes'), 'ceph/public')

# Pick the glance backend that matches the image-storage choice.
if ($storage_hash['images_ceph']) {
  $glance_backend = 'ceph'
} elsif ($storage_hash['images_vcenter']) {
  $glance_backend = 'vmware'
} else {
  $glance_backend = 'swift'
}

# Ceph is deployed if any consumer (volumes, images, objects, ephemeral)
# is backed by it.
if ($storage_hash['volumes_ceph'] or
  $storage_hash['images_ceph'] or
  $storage_hash['objects_ceph'] or
  $storage_hash['ephemeral_ceph']
) {
  $use_ceph = true
} else {
  $use_ceph = false
}

if $use_ceph {
  # The primary monitor is the 'name' of the first (and, per the example
  # input, only) entry of ceph_primary_monitor_node.
  $ceph_primary_monitor_node = hiera('ceph_primary_monitor_node')
  $primary_mons = keys($ceph_primary_monitor_node)
  $primary_mon = $ceph_primary_monitor_node[$primary_mons[0]]['name']

  # Resolve networks/addresses from the network_scheme input.
  prepare_network_config(hiera_hash('network_scheme'))
  $ceph_cluster_network = get_network_role_property('ceph/replication', 'network')
  $ceph_public_network = get_network_role_property('ceph/public', 'network')
  $mon_addr = get_network_role_property('ceph/public', 'ipaddr')

  class {'ceph':
    primary_mon => $primary_mon,
    mon_hosts => keys($mon_address_map),
    mon_ip_addresses => values($mon_address_map),
    mon_addr => $mon_addr,
    cluster_node_address => $public_vip,
    osd_pool_default_size => $storage_hash['osd_pool_size'],
    osd_pool_default_pg_num => $storage_hash['pg_num'],
    osd_pool_default_pgp_num => $storage_hash['pg_num'],
    use_rgw => false,
    glance_backend => $glance_backend,
    rgw_pub_ip => $public_vip,
    rgw_adm_ip => $management_vip,
    rgw_int_ip => $management_vip,
    cluster_network => $ceph_cluster_network,
    public_network => $ceph_public_network,
    use_syslog => $use_syslog,
    syslog_log_level => hiera('syslog_log_level_ceph', 'info'),
    syslog_log_facility => $syslog_log_facility_ceph,
    rgw_keystone_admin_token => $keystone_hash['admin_token'],
    ephemeral_ceph => $storage_hash['ephemeral_ceph']
  }

  # When cinder is backed by Ceph, restart its services after the ceph
  # class converges so they pick up the new configuration (~> notifies).
  if ($storage_hash['volumes_ceph']) {
    include ::cinder::params
    service { 'cinder-volume':
      ensure => 'running',
      name => $::cinder::params::volume_service,
      hasstatus => true,
      hasrestart => true,
    }

    service { 'cinder-backup':
      ensure => 'running',
      name => $::cinder::params::backup_service,
      hasstatus => true,
      hasrestart => true,
    }

    Class['ceph'] ~> Service['cinder-volume']
    Class['ceph'] ~> Service['cinder-backup']
  }

  # Likewise restart glance-api when images live in Ceph.
  if ($storage_hash['images_ceph']) {
    include ::glance::params
    service { 'glance-api':
      ensure => 'running',
      name => $::glance::params::api_service_name,
      hasstatus => true,
      hasrestart => true,
    }

    Class['ceph'] ~> Service['glance-api']
  }

}
|
4
resources/ceph_mon/actions/test.pp
Normal file
4
resources/ceph_mon/actions/test.pp
Normal file
@ -0,0 +1,4 @@
|
||||
# Smoke test for the ceph_mon resource: parse the hiera-provided network
# scheme and resolve the replication network, then print it. A non-empty
# value in the notify output shows the network inputs reached hiera intact.
prepare_network_config(hiera_hash('network_scheme'))
$ceph_cluster_network = get_network_role_property('ceph/replication', 'network')

notify{"The value is: ${ceph_cluster_network}": }
|
43
resources/ceph_mon/meta.yaml
Normal file
43
resources/ceph_mon/meta.yaml
Normal file
@ -0,0 +1,43 @@
|
||||
# Solar resource definition for a Ceph monitor, applied via the puppet
# handler (actions/run.pp and actions/test.pp).
id: ceph_mon
handler: puppet
version: 1.0.0

input:
  # Git source of the fuel-library puppet modules the handler uploads.
  library:
    schema: {repository: str!, branch: str!, puppet_modules: str}
    value: {repository: 'https://github.com/stackforge/fuel-library',
            branch: 'stable/7.0',
            puppet_modules: 'deployment/puppet'}
  # Node address; connected from the node resource by the example.
  ip:
    schema: str!
    value:
  # VIPs read by run.pp via hiera; the example maps the node ip onto both.
  public_vip:
    schema: str!
    value:
  management_vip:
    schema: str!
    value:
  use_syslog:
    schema: bool
    value: true
  # NOTE(review): quoted 'str' differs from the bare str!/str style used by
  # the other schemas here -- confirm the solar schema parser accepts it.
  keystone:
    schema: {'admin_token': 'str'}
    value: {}
  # NOTE(review): schema says list, but the example feeds a mapping of
  # node-name -> metadata (NETWORK_METADATA) -- verify the intended type.
  ceph_monitor_nodes:
    schema: []
    value: []
  ceph_primary_monitor_node:
    schema: []
    value: []
  # Free-form dicts passed straight through to hiera.
  storage:
    schema: {}
    value: {}
  network_scheme:
    schema: {}
    value: {}
  role:
    schema: str!
    value:

tags: []
|
@ -16,6 +16,8 @@
|
||||
import os
|
||||
import yaml
|
||||
|
||||
from fabric import api as fabric_api
|
||||
|
||||
from solar.core.log import log
|
||||
from solar.core.handlers.base import TempFileHandler
|
||||
from solar.core.provider import GitProvider
|
||||
@ -131,9 +133,7 @@ class Puppet(TempFileHandler):
|
||||
|
||||
def upload_hiera_resource(self, resource):
|
||||
with open('/tmp/puppet_resource.yaml', 'w') as f:
|
||||
f.write(yaml.dump({
|
||||
resource.name: resource.to_dict()
|
||||
}))
|
||||
f.write(yaml.safe_dump(resource.args))
|
||||
|
||||
self.transport_sync.copy(
|
||||
resource,
|
||||
@ -154,9 +154,17 @@ class Puppet(TempFileHandler):
|
||||
def upload_library(self, resource):
|
||||
git = resource.args['library']
|
||||
p = GitProvider(git['repository'], branch=git['branch'])
|
||||
|
||||
#fabric_ai.local('cd {}/deployment && ./update_modules.sh'.format(
|
||||
# p.directory))
|
||||
|
||||
fabric_api.local(
|
||||
'ansible-playbook -i "localhost," -c local /tmp/git-provider.yaml'
|
||||
)
|
||||
|
||||
modules_path = os.path.join(p.directory, git['puppet_modules'])
|
||||
|
||||
fuel_modules = '/etc/fuel/modules'
|
||||
fuel_modules = '/etc/puppet/modules'
|
||||
self.transport_run.run(
|
||||
resource, 'sudo', 'mkdir', '-p', fuel_modules
|
||||
)
|
||||
|
@ -44,7 +44,7 @@ class GitProvider(BaseProvider):
|
||||
super(GitProvider, self).__init__(*args, **kwargs)
|
||||
|
||||
self.repository = repository
|
||||
self.branch = 'master'
|
||||
self.branch = branch
|
||||
self.path = path
|
||||
|
||||
directory = self._directory()
|
||||
|
Loading…
Reference in New Issue
Block a user