Fixup missing files
parent 7bdde70565
commit b5e6ec5fcd

17  .project  Normal file
@@ -0,0 +1,17 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<projectDescription>
+	<name>swift-proxy</name>
+	<comment></comment>
+	<projects>
+	</projects>
+	<buildSpec>
+		<buildCommand>
+			<name>org.python.pydev.PyDevBuilder</name>
+			<arguments>
+			</arguments>
+		</buildCommand>
+	</buildSpec>
+	<natures>
+		<nature>org.python.pydev.pythonNature</nature>
+	</natures>
+</projectDescription>

8  .pydevproject  Normal file
@@ -0,0 +1,8 @@
+<?xml version="1.0" encoding="UTF-8" standalone="no"?>
+<?eclipse-pydev version="1.0"?><pydev_project>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
+<pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
+<pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
+<path>/swift-proxy/hooks</path>
+</pydev_pathproperty>
+</pydev_project>

1  hooks/cluster-relation-changed  Symbolic link
@@ -0,0 +1 @@
+swift_hooks.py

1  hooks/cluster-relation-joined  Symbolic link
@@ -0,0 +1 @@
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py

1  hooks/ha-relation-changed  Symbolic link
@@ -0,0 +1 @@
+swift_hooks.py

1  hooks/ha-relation-joined  Symbolic link
@@ -0,0 +1 @@
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py

@@ -1 +1 @@
-swift-hooks.py
+swift_hooks.py
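
(The hook files above are one-line symlinks to swift_hooks.py. Juju invokes hooks/<hook-name>, so a single script can dispatch on the name it was called as. The sketch below is a hypothetical illustration of that common pattern, not code from this charm; the handlers and their wiring are stand-ins.)

import os
import sys

def cluster_changed():
    pass  # stand-in; the real handler lives in swift_hooks.py

def ha_relation_changed():
    pass  # stand-in; the real handler lives in swift_hooks.py

HOOKS = {
    'cluster-relation-changed': cluster_changed,
    'ha-relation-changed': ha_relation_changed,
}

if __name__ == '__main__':
    # The symlink name (e.g. hooks/ha-relation-changed) selects the handler.
    HOOKS[os.path.basename(sys.argv[0])]()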

@@ -63,7 +63,7 @@ def install():


 def keystone_joined(relid=None):
-    if is_clustered():
+    if utils.is_clustered():
         hostname = utils.config_get('vip')
     else:
         hostname = utils.unit_get('private-address')

@@ -102,17 +102,17 @@ def balance_rings():
         shutil.copyfile(os.path.join(swift.SWIFT_CONF_DIR, f),
                         os.path.join(swift.WWW_DIR, f))

-    if eligible_leader():
-        msg = 'Broadcasting notification to all storage nodes that new '\
-              'ring is ready for consumption.'
-        utils.juju_log('INFO', msg)
-        www_dir = swift.WWW_DIR.split('/var/www/')[1]
-        trigger = uuid.uuid4()
-        swift_hash = swift.get_swift_hash()
-        # notify storage nodes that there is a new ring to fetch.
-        for relid in utils.relation_ids('swift-storage'):
-            utils.relation_set(rid=relid, swift_hash=swift_hash,
-                               www_dir=www_dir, trigger=trigger)
+    if utils.eligible_leader():
+        msg = 'Broadcasting notification to all storage nodes that new '\
+              'ring is ready for consumption.'
+        utils.juju_log('INFO', msg)
+        www_dir = swift.WWW_DIR.split('/var/www/')[1]
+        trigger = uuid.uuid4()
+        swift_hash = swift.get_swift_hash()
+        # notify storage nodes that there is a new ring to fetch.
+        for relid in utils.relation_ids('swift-storage'):
+            utils.relation_set(rid=relid, swift_hash=swift_hash,
+                               www_dir=www_dir, trigger=trigger)

     swift.proxy_control('restart')

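
(The balance_rings() hunk above broadcasts swift_hash, www_dir and trigger to every unit on the swift-storage relation via utils.relation_set(). Below is a minimal hypothetical sketch of what such a helper usually wraps, namely the standard Juju relation-set hook tool; it is not the charm's actual utils module.)

import subprocess

def relation_set(rid=None, **kwargs):
    # Push key=value settings onto a relation; -r targets a specific relation id.
    cmd = ['relation-set']
    if rid is not None:
        cmd += ['-r', str(rid)]
    cmd += ['%s=%s' % (k, v) for k, v in kwargs.items()]
    subprocess.check_call(cmd)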

@@ -168,29 +168,31 @@ SERVICE_PORTS = {
     ]
 }


 def cluster_changed():
     cluster_hosts = {}
-    cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/','-')] = \
-        utils.util_get('private-address')
-    for r_id in relation_ids('cluster'):
-        for unit in relation_list(r_id):
-            cluster_hosts[unit.replace('/','-')] = \
+    cluster_hosts[os.getenv('JUJU_UNIT_NAME').replace('/', '-')] = \
+        utils.unit_get('private-address')
+    for r_id in utils.relation_ids('cluster'):
+        for unit in utils.relation_list(r_id):
+            cluster_hosts[unit.replace('/', '-')] = \
                 utils.relation_get(attribute='private-address',
                                    rid=r_id,
                                    unit=unit)
-    configure_haproxy(cluster_hosts,
-                      SERVICE_PORTS)
+    openstack.configure_haproxy(cluster_hosts,
+                                SERVICE_PORTS)
     utils.restart('haproxy')

+
 def ha_relation_changed():
     clustered = utils.relation_get('clustered')
-    if clustered and is_leader():
-        juju_log('Cluster configured, notifying other services and updating'
-                 'keystone endpoint configuration')
+    if clustered and utils.is_leader():
+        utils.juju_log('INFO',
+                       'Cluster configured, notifying other services and'
+                       'updating keystone endpoint configuration')
         # Tell all related services to start using
         # the VIP and haproxy ports instead
-        for r_id in relation_ids('identity-service'):
+        for r_id in utils.relation_ids('identity-service'):
             keystone_joined(relid=r_id)


35  templates/haproxy.cfg  Normal file
@@ -0,0 +1,35 @@
+global
+    log 127.0.0.1 local0
+    log 127.0.0.1 local1 notice
+    maxconn 20000
+    user haproxy
+    group haproxy
+    spread-checks 0
+
+defaults
+    log global
+    mode http
+    option httplog
+    option dontlognull
+    retries 3
+    timeout queue 1000
+    timeout connect 1000
+    timeout client 30000
+    timeout server 30000
+
+listen stats :8888
+    mode http
+    stats enable
+    stats hide-version
+    stats realm Haproxy\ Statistics
+    stats uri /
+    stats auth admin:password
+
+{% for service, ports in service_ports.iteritems() -%}
+listen {{ service }} 0.0.0.0:{{ ports[0] }}
+    balance roundrobin
+    option tcplog
+    {% for unit, address in units.iteritems() -%}
+    server {{ unit }} {{ address }}:{{ ports[1] }} check
+    {% endfor %}
+{% endfor %}
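
(templates/haproxy.cfg is a Jinja2 template keyed on service_ports and units, which presumably correspond to the SERVICE_PORTS and cluster_hosts values that cluster_changed() passes to openstack.configure_haproxy(). A minimal sketch of rendering it is shown below, under Python 2.7 as named in .pydevproject, since the template calls dict.iteritems(); configure_haproxy() itself is not part of this commit, and the ports and addresses are illustrative only.)

from jinja2 import Template

SERVICE_PORTS = {'swift': [8080, 8070]}           # front-end port, backend port (illustrative)
cluster_hosts = {'swift-proxy-0': '10.0.0.10',    # unit name -> private-address,
                 'swift-proxy-1': '10.0.0.11'}    # as collected in cluster_changed()

with open('templates/haproxy.cfg') as f:
    haproxy_cfg = Template(f.read()).render(service_ports=SERVICE_PORTS,
                                            units=cluster_hosts)

with open('/etc/haproxy/haproxy.cfg', 'w') as f:
    f.write(haproxy_cfg)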