diff --git a/Vagrantfile b/Vagrantfile
index 2283604..061f512 100644
--- a/Vagrantfile
+++ b/Vagrantfile
@@ -43,10 +43,18 @@ Vagrant.configure(2) do |config|
   end
   config.vm.provision 'ansible' do |ansible|
     ansible.playbook = 'playbook.yaml'
+    ansible.extra_vars = {
+      install_proxy: CONFIG['proxy']['install']
+    }
   end
 
-  if CONFIG['use_cache'] && Vagrant.has_plugin?('vagrant-cachier')
-    config.cache.scope = :machine
+  if CONFIG['proxy']['use'] && Vagrant.has_plugin?('vagrant-proxyconf')
+    if CONFIG['proxy']['install']
+      config.proxy.http = "http://#{CONFIG['address']['controller']}:3128/"
+    else
+      config.proxy.http = CONFIG['proxy']['address']
+    end
+    config.proxy.no_proxy = 'localhost,127.0.0.1'
   end
 
   if Vagrant.has_plugin?("vagrant-vbguest")
@@ -67,7 +75,7 @@ Vagrant.configure(2) do |config|
     end
   end
 
-  %w('network' 'storage' 'shared').each do |name|
+  %w(network storage shared).each do |name|
     config.vm.define name do |node|
       node.vm.hostname = name
       node.vm.network :public_network,
@@ -108,5 +116,8 @@ Vagrant.configure(2) do |config|
       end
       add_block_device(node, 1, CONFIG['resources']['storage'])
       add_block_device(node, 2, CONFIG['resources']['storage'])
+      if CONFIG['proxy']['install'] && CONFIG['proxy']['use']
+        node.proxy.enabled = false
+      end
     end
   end
diff --git a/ansible/initialize.yaml b/ansible/initialize.yaml
index a5fe62f..1ee1962 100644
--- a/ansible/initialize.yaml
+++ b/ansible/initialize.yaml
@@ -1,6 +1,7 @@
 ---
+- lineinfile: dest=/etc/yum/pluginconf.d/fastestmirror.conf regexp=^enabled= line=enabled=0
 - yum: name=deltarpm state=present
-- yum: name=https://rdo.fedorapeople.org/rdo-release.rpm state=present
+- yum: name=http://rdo.fedorapeople.org/rdo-release.rpm state=present
 - yum: name=* state=latest
 - yum: name=openstack-selinux state=present
 - yum: name=vim-enhanced state=present
diff --git a/ansible/proxy.yaml b/ansible/proxy.yaml
new file mode 100644
index 0000000..514cc81
--- /dev/null
+++ b/ansible/proxy.yaml
@@ -0,0 +1,4 @@
+---
+- yum: name=squid state=present
+- copy: src=files/squid.conf dest=/etc/squid/squid.conf
+- service: name=squid state=started enabled=yes
diff --git a/config.yaml.sample b/config.yaml.sample
index a039734..78ca4cb 100644
--- a/config.yaml.sample
+++ b/config.yaml.sample
@@ -7,7 +7,10 @@ bridge_external: tap1
 network_agent: openvswitch
 network_type: gre
 netmask_internal: 255.255.0.0
-use_cache: true
+proxy:
+  address: 'http://proxy.company.site:3128'
+  install: true
+  use: true
 storage_backend: nfs
 address:
   controller: 10.100.50.10
diff --git a/doc/source/configuration.rst b/doc/source/configuration.rst
index 1b428a5..c258d29 100644
--- a/doc/source/configuration.rst
+++ b/doc/source/configuration.rst
@@ -116,16 +116,32 @@ Timezone
 At the moment it is not possible to configure the timezone with Packstack.
 
-Caching
--------
+Caching / Proxying
+------------------
 
 To speed up the provisioning the Vagrant plugin
-`vagrant-cachier <https://github.com/fgrehm/vagrant-cachier>`__
-can be used.
+`vagrant-proxyconf <https://github.com/tmatilai/vagrant-proxyconf>`__
+configures an HTTP proxy to be used by ``yum``.
 
 ::
 
-  $ vagrant plugin install vagrant-cachier
+  $ vagrant plugin install vagrant-proxyconf
 
-When the plugin is installed caching is enabled by default. To explicitly
-disable caching when the plugin is installed set ``use_cache: false``.
+When the plugin is installed, caching/proxying is enabled by default and the
+HTTP proxy `Squid <http://www.squid-cache.org/>`__ will be installed on the
+controller node.
+
+::
+
+  proxy:
+    use: true
+    address: 'http://proxy.company.site:3128'
+    install: true
+
+To explicitly disable caching/proxying when ``vagrant-proxyconf`` is installed,
+set ``use`` to ``false``.
+
+To skip the installation of Squid on the controller node, set ``install``
+to ``false``. ``address`` has to point to an existing HTTP proxy server (e.g.
+``http://proxy.company.site:3128``) when Squid is not installed. ``address``
+does not have to be set when Squid is installed on the controller node.
 
diff --git a/files/squid.conf b/files/squid.conf
new file mode 100644
index 0000000..1f875f0
--- /dev/null
+++ b/files/squid.conf
@@ -0,0 +1,76 @@
+#
+# Recommended minimum configuration:
+#
+
+# Example rule allowing access from your local networks.
+# Adapt to list your (internal) IP networks from where browsing
+# should be allowed
+acl localnet src 10.0.0.0/8     # RFC1918 possible internal network
+acl localnet src 172.16.0.0/12  # RFC1918 possible internal network
+acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
+acl localnet src fc00::/7       # RFC 4193 local private network range
+acl localnet src fe80::/10      # RFC 4291 link-local (directly plugged) machines
+
+acl SSL_ports port 443
+acl Safe_ports port 80          # http
+acl Safe_ports port 21          # ftp
+acl Safe_ports port 443         # https
+acl Safe_ports port 70          # gopher
+acl Safe_ports port 210         # wais
+acl Safe_ports port 1025-65535  # unregistered ports
+acl Safe_ports port 280         # http-mgmt
+acl Safe_ports port 488         # gss-http
+acl Safe_ports port 591         # filemaker
+acl Safe_ports port 777         # multiling http
+acl CONNECT method CONNECT
+
+#
+# Recommended minimum Access Permission configuration:
+#
+# Deny requests to certain unsafe ports
+http_access deny !Safe_ports
+
+# Deny CONNECT to other than secure SSL ports
+http_access deny CONNECT !SSL_ports
+
+# Only allow cachemgr access from localhost
+http_access allow localhost manager
+http_access deny manager
+
+# We strongly recommend the following be uncommented to protect innocent
+# web applications running on the proxy server who think the only
+# one who can access services on "localhost" is a local user
+#http_access deny to_localhost
+
+#
+# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
+#
+
+# Example rule allowing access from your local networks.
+# Adapt localnet in the ACL section to list your (internal) IP networks
+# from where browsing should be allowed
+http_access allow localnet
+http_access allow localhost
+
+# And finally deny all other access to this proxy
+http_access deny all
+
+# Squid normally listens to port 3128
+http_port 3128
+
+# Uncomment and adjust the following to add a disk cache directory.
+cache_dir ufs /var/spool/squid 1024 16 256
+
+maximum_object_size 256 MB
+maximum_object_size_in_memory 4 MB
+
+# Leave coredumps in the first cache dir
+coredump_dir /var/spool/squid
+
+#
+# Add any of your own refresh_pattern entries above these.
+#
+refresh_pattern ^ftp:           1440    20%     10080
+refresh_pattern ^gopher:        1440    0%      1440
+refresh_pattern -i (/cgi-bin/|\?) 0     0%      0
+refresh_pattern .               0       20%     4320
diff --git a/playbook.yaml b/playbook.yaml
index d42e4ff..dec7a44 100644
--- a/playbook.yaml
+++ b/playbook.yaml
@@ -9,3 +9,5 @@
       when: inventory_hostname_short == 'network'
     - include: ansible/nfs.yaml
       when: inventory_hostname_short == 'nfs'
+    - include: ansible/proxy.yaml
+      when: inventory_hostname_short == 'controller' and install_proxy
diff --git a/scripts/bootstrap.sh b/scripts/bootstrap.sh
index 4eee1cf..43101f7 100755
--- a/scripts/bootstrap.sh
+++ b/scripts/bootstrap.sh
@@ -3,7 +3,8 @@
 run() {
     number=$1
     shift
-    python scripts/get_hosts.py | xargs -n 1 -P $number -I BOX sh -c "echo - BOX && (vagrant $* BOX 2>&1 >> log/BOX.log)"
+    python scripts/get_hosts.py | grep -v controller | xargs -n 1 -P $number \
+        -I BOX sh -c "echo - BOX && (vagrant $* BOX 2>&1 >> log/BOX.log)"
 }
 
 if [[ ! -e config.yaml ]]; then
@@ -15,13 +16,17 @@ echo "$(date) cleaning up"
 rm -f log/*
 vagrant destroy
 
+echo "$(date) bringing up, provisioning and reloading the controller VM"
+vagrant up controller >> log/controller.log
+vagrant reload controller >> log/controller.log
+
 echo "$(date) brining up all VMs"
 run 2 up --no-provision
 
-echo "$(date) provisioning all VMs"
+echo "$(date) provisioning all other VMs"
 run 4 provision
 
-echo "$(date) reloading all VMs"
+echo "$(date) reloading all other VMs"
 run 4 reload
 
 echo "$(date) initializing the controller node"