Add infrastructure scripts to prepare rh2
Add scripts to prepare rh2 (an OVB based cloud) for CI. This patch only includes what's needed to prepare the cloud for CI; the changes to the CI scripts themselves will be part of another patch. Change-Id: Ie2d1c607f283e6babb00ea19d32bebae5383867a
This commit is contained in:
parent
db2abf835c
commit
2a10f319a7
|
@ -0,0 +1,100 @@
|
|||
#!/bin/bash
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

# Ensure that a keystone user exists, creating the user, its tenant and
# its role assignment as needed.  Generated passwords are recorded in the
# ./os-asserted-users file so repeated runs are idempotent.

set -eu

SCRIPT_NAME=$(basename "$0")
SCRIPT_HOME=$(dirname "$0")

# Print usage and exit with the supplied status code.
function show_options {
    echo "Usage: $SCRIPT_NAME [options]"
    echo
    echo "Ensure that a given user exists."
    echo
    echo "Options:"
    echo "    -h -- this help"
    echo "    -e -- email"
    echo "    -n -- name"
    echo "    -t -- tenant"
    echo "    -u -- usercode"
    echo
    exit $1
}

EMAIL=''
NAME=''
TENANT=''
USERCODE=''

# BUGFIX: under `set -e` a failing `getopt` aborts the script on the
# assignment itself, so the old `TEMP=\`getopt ...\`; if [ $? != 0 ]`
# pattern was dead code.  Detect the failure on the assignment instead.
if ! TEMP=$(getopt -o hu:e:n:t: -n "$SCRIPT_NAME" -- "$@"); then
    echo "Terminating..." >&2
    exit 1
fi

# Note the quotes around "$TEMP": they are essential!
eval set -- "$TEMP"

while true ; do
    case "$1" in
        -h) show_options 0;;
        -e) EMAIL=$2; shift 2 ;;
        -n) NAME=$2; shift 2 ;;
        -t) TENANT=$2; shift 2 ;;
        -u) USERCODE=$2; shift 2 ;;
        --) shift ; break ;;
        *) echo "Error: unsupported option $1." ; exit 1 ;;
    esac
done

EXTRA_ARGS=${1:-''}

# All of -e/-n/-t/-u are mandatory and no positional args are accepted.
# NOTE(review): NAME (-n) is collected but not used below -- confirm
# whether it was meant to be passed to `openstack user create`.
if [ -z "$EMAIL" -o -z "$NAME" -o -z "$TENANT" -o -z "$USERCODE" -o -n "$EXTRA_ARGS" ]; then
    show_options 1
fi

echo "Checking for user $USERCODE"
# Case-insensitive match: lower-case the listing and the requested name.
#TODO: fix after bug 1392035 in the keystone client library
USER_ID=$(openstack user list | awk '{print tolower($0)}' | grep " ${USERCODE,,} " | awk '{print$2}')
if [ -z "$USER_ID" ]; then
    # Reuse a previously recorded password when we have one so reruns
    # keep users' credentials stable; otherwise generate and record one.
    PASSWORD=''
    if [ -e os-asserted-users ]; then
        PASSWORD=$(awk "\$1==\"$USERCODE\" { print \$2 }" < os-asserted-users)
    fi
    if [ -z "$PASSWORD" ]; then
        PASSWORD=$(os-make-password)
        echo "$USERCODE $PASSWORD" >> os-asserted-users
    fi
    USER_ID=$(openstack user create --password "$PASSWORD" --email "$EMAIL" "$USERCODE" | awk '$2=="id" {print $4}')
fi
#TODO: fix after bug 1392035 in the keystone client library
TENANT_ID=$(openstack project list | awk '{print tolower($0)}' | grep " ${TENANT,,} " | awk '{print$2}')
if [ -z "$TENANT_ID" ]; then
    TENANT_ID=$(openstack project create "$TENANT" | awk '$2=="id" {print $4}')
fi
# Users in the admin tenant get the admin role; everybody else is a
# plain member.
if [ "$TENANT" = "admin" ]; then
    ROLE="admin"
else
    ROLE="_member_"
fi
ROLE_ID=$(openstack role show "$ROLE" | awk '$2=="id" {print $4}')
if openstack user role list --project "$TENANT_ID" "$USER_ID" | grep "${ROLE_ID}.*${ROLE}.*${USER_ID}" ; then
    echo "User already has role '$ROLE'"
else
    openstack role add --project "$TENANT_ID" --user "$USER_ID" "$ROLE_ID"
fi
echo "User $USERCODE configured."
|
|
@ -0,0 +1,15 @@
|
|||
#!/bin/bash

# Prepare the bmc-template image: install the packages needed to run
# openstack-virtual-baremetal's BMC service on a CentOS 7 guest, then
# flush to disk and signal readiness so the caller can snapshot us.
# taken from openstack-virtual-baremetal/bin/install_openstackbmc.sh
yum -y update centos-release # required for rdo-release install to work
yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
yum install -y https://rdo.fedorapeople.org/rdo-release.rpm
yum install -y python-pip python2-crypto os-net-config python-novaclient python-neutronclient git jq
pip install pyghmi

# The CI cloud is using an unsafe disk caching mode, so the sync will be
# ignored by the host.  We sync data on the VM, then give the host 5
# seconds to write it to disk and hope that's long enough.
sync
sleep 5

# The deploy script polls for this file over ssh before snapshotting.
touch /var/tmp/ready
|
|
@ -0,0 +1,17 @@
|
|||
#!/bin/bash
# Tear down the rh2 CI infrastructure created by the deploy script:
# the infra servers, the openstack-nodepool user/tenant and the
# public/private networking.  Order matters: interfaces and subnets
# must be removed before their routers/networks can be deleted.
set -x

# Delete the three infrastructure servers in one call; the command
# substitution intentionally word-splits into one ID per server.
nova delete $(nova list --all-tenants | grep -e te-broker -e mirror-server -e proxy-server| awk '{print $2}')

sleep 5
keystone user-delete openstack-nodepool
keystone tenant-delete openstack-nodepool

# Unwind the networking in reverse order of creation.
neutron router-interface-delete private_router private_subnet
neutron router-delete private_router
neutron subnet-delete private_subnet
neutron net-delete private
neutron subnet-delete public_subnet
neutron net-delete public
|
||||
|
|
@ -4,12 +4,12 @@
|
|||
# to run based on the hostname e.g. to create a mirror server then one can simply
|
||||
# nova boot --image <id> --flavor <id> --user-data scripts/deploy-server.sh --nic net-id=<id> --nic net-id=<id>,v4-fixed-ip=192.168.1.101 mirror-server
|
||||
|
||||
|
||||
yum install -y https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
|
||||
yum install -y puppet git
|
||||
|
||||
echo puppetlabs-apache adrien-filemapper | xargs -n 1 sudo puppet module install
|
||||
echo puppetlabs-apache adrien-filemapper | xargs -n 1 puppet module install
|
||||
|
||||
sudo git clone https://github.com/puppetlabs/puppetlabs-vcsrepo.git /etc/puppet/modules/vcsrepo
|
||||
git clone https://github.com/puppetlabs/puppetlabs-vcsrepo.git /etc/puppet/modules/vcsrepo
|
||||
|
||||
if [ -e /sys/class/net/eth1 ] ; then
|
||||
echo -e 'DEVICE=eth1\nBOOTPROTO=dhcp\nONBOOT=yes\nPERSISTENT_DHCLIENT=yes\nPEERDNS=no' > /etc/sysconfig/network-scripts/ifcfg-eth1
|
||||
|
@ -17,4 +17,14 @@ if [ -e /sys/class/net/eth1 ] ; then
|
|||
ifup eth1
|
||||
fi
|
||||
|
||||
curl http://git.openstack.org/cgit/openstack-infra/tripleo-ci/plain/scripts/$(hostname)/$(hostname).pp | puppet apply
|
||||
CIREPO=/opt/stack/tripleo-ci
|
||||
mkdir -p $CIREPO
|
||||
git clone https://git.openstack.org/openstack-infra/tripleo-ci $CIREPO
|
||||
|
||||
if [ -f $CIREPO/scripts/$(hostname)/$(hostname).sh ] ; then
|
||||
bash $CIREPO/scripts/$(hostname)/$(hostname).sh
|
||||
fi
|
||||
|
||||
if [ -f $CIREPO/scripts/$(hostname)/$(hostname).pp ] ; then
|
||||
puppet apply $CIREPO/scripts/$(hostname)/$(hostname).pp
|
||||
fi
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
Exec { path => [ "/bin/", "/sbin/" ] }
|
||||
|
||||
package{"wget": }
|
||||
package{"python3": }
|
||||
package{"python34": }
|
||||
|
||||
# The git repositories are created in a unconfined context
|
||||
# TODO: fix this
|
||||
|
|
|
@ -0,0 +1,60 @@
|
|||
#!/bin/bash
|
||||
#
|
||||
# Copyright 2013 Hewlett-Packard Development Company, L.P.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
set -eu
|
||||
set -o pipefail
|
||||
|
||||
SCRIPT_NAME=$(basename $0)
|
||||
SCRIPT_HOME=$(dirname $0)
|
||||
|
||||
function show_options {
|
||||
echo "Usage: $SCRIPT_NAME"
|
||||
echo
|
||||
echo "Create a random password."
|
||||
echo
|
||||
echo "This outputs a random password."
|
||||
echo
|
||||
echo "The password is made by taking a uuid and passing it though sha1sum."
|
||||
echo "We may change this in future to gain more entropy."
|
||||
echo
|
||||
exit $1
|
||||
}
|
||||
|
||||
TEMP=`getopt -o h -n $SCRIPT_NAME -- "$@"`
|
||||
if [ $? != 0 ]; then
|
||||
echo "Terminating..." >&2;
|
||||
exit 1;
|
||||
fi
|
||||
|
||||
# Note the quotes around `$TEMP': they are essential!
|
||||
eval set -- "$TEMP"
|
||||
|
||||
while true ; do
|
||||
case "$1" in
|
||||
-h) show_options 0;;
|
||||
--) shift ; break ;;
|
||||
*) echo "Error: unsupported option $1." ; exit 1 ;;
|
||||
esac
|
||||
done
|
||||
|
||||
EXTRA=${1:-""}
|
||||
|
||||
if [ -n "$EXTRA" ]; then
|
||||
show_options 1
|
||||
fi
|
||||
|
||||
uuidgen | sha1sum | awk '{print $1}'
|
|
@ -0,0 +1,84 @@
|
|||
#!/bin/bash
# Deploy the base infrastructure on the rh2 cloud that the ovb-common and
# ovb-testenv stacks need.  $1 is an env file (e.g. scripts/rh2.env)
# supplying the PUBLIC_IP_*, QUOTA_* and *IP variables.
set -ex

export PATH=$PATH:scripts
source $1

# Script to deploy the base infrastructure required to create the ovb-common and ovb-testenv stacks
# Parts of this script could have been a heat stack but not all

# We can't use heat to create the flavors as they can't be given a name with the heat resource
nova flavor-show bmc || nova flavor-create bmc auto 512 20 1
nova flavor-show baremetal || nova flavor-create baremetal auto 5120 41 1
nova flavor-show undercloud || nova flavor-create undercloud auto 6144 40 2

# Remove the flavors that provide most disk space, the disks on rh2 are small and we've over committed
# disk space, so this will help protect against a single instance filling the disk on a compute node
nova flavor-delete m1.large || true
nova flavor-delete m1.xlarge || true

# Base images used for infra servers and the undercloud.
glance image-show 'CentOS-7-x86_64-GenericCloud' || \
    glance image-create --progress --name 'CentOS-7-x86_64-GenericCloud' --is-public true --disk-format qcow2 --container-format bare \
        --copy-from http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2

glance image-show 'ipxe-boot' || \
    glance image-create --name ipxe-boot --is-public true --disk-format qcow2 --property os_shutdown_timeout=5 --container-format bare \
        --copy-from https://raw.githubusercontent.com/cybertron/openstack-virtual-baremetal/master/ipxe/ipxe-boot.qcow2

# Create a pool of floating IP's
neutron net-show public || neutron net-create public --router:external=True
neutron subnet-show public_subnet || neutron subnet-create --name public_subnet --enable_dhcp=False --allocation_pool start=$PUBLIC_IP_FLOATING_START,end=$PUBLIC_IP_FLOATING_END --gateway $PUBLIC_IP_GATWAY public $PUBLIC_IP_NET

# Create a shared private network
neutron net-show private || neutron net-create --shared private
neutron subnet-show private_subnet || neutron subnet-create --name private_subnet --gateway 192.168.100.1 --allocation-pool start=192.168.100.2,end=192.168.103.254 --dns-nameserver 8.8.8.8 private 192.168.100.0/22

# Give outside access to the private network
if ! neutron router-show private_router ; then
    neutron router-create private_router
    neutron router-gateway-set private_router public
    neutron router-interface-add private_router private_subnet
fi

# Keys to be used in infrastructure
nova keypair-show tripleo-cd-admins || nova keypair-add --pub-key scripts/tripleo-cd-admins tripleo-cd-admins

# Create a new project/user whose creds will be injected into the te-broker for creating heat stacks
./scripts/assert-user -n openstack-nodepool -t openstack-nodepool -u openstack-nodepool -e openstack-nodepool@noreply.org || true
NODEPOOLUSERID=$(openstack user show openstack-nodepool | awk '$2=="id" {print $4}')
NODEPOOLPROJECTID=$(openstack project show openstack-nodepool | awk '$2=="id" {print $4}')
nova quota-update --instances 9999 --cores 9999 --ram $QUOTA_RAM --floating-ips $QUOTA_FIPS $NODEPOOLPROJECTID
nova quota-update --instances 9999 --cores 9999 --ram $QUOTA_RAM --floating-ips $QUOTA_FIPS --user $NODEPOOLUSERID $NODEPOOLPROJECTID
neutron quota-update --network $QUOTA_NETS --subnet $QUOTA_NETS --port $QUOTA_PORTS --floatingip $QUOTA_FIPS --tenant-id $NODEPOOLPROJECTID

# Write out credentials for the nodepool user; chmod before writing the
# password so the file is never world-readable with secrets in it.
touch ~/nodepoolrc
chmod 600 ~/nodepoolrc
echo -e "export OS_USERNAME=openstack-nodepool\nexport OS_TENANT_NAME=openstack-nodepool" > ~/nodepoolrc
echo "export OS_AUTH_URL=$OS_AUTH_URL" >> ~/nodepoolrc

# Turn off tracing so the password is not echoed into the log.
set +x
PASSWORD=$(grep openstack-nodepool os-asserted-users | awk '{print $2}')
echo "export OS_PASSWORD=$PASSWORD" >> ~/nodepoolrc
set -x

# Everything below runs as the openstack-nodepool user.
source ~/nodepoolrc

# The keypair must also exist in the nodepool tenant.
nova keypair-show tripleo-cd-admins || nova keypair-add --pub-key scripts/tripleo-cd-admins tripleo-cd-admins
# And finally some servers we need
nova show te-broker || nova boot --flavor m1.medium --image "CentOS-7-x86_64-GenericCloud" --key-name tripleo-cd-admins --nic net-name=private,v4-fixed-ip=$TEBROKERIP --user-data scripts/deploy-server.sh --file "/etc/nodepoolrc=$HOME/nodepoolrc" te-broker
nova show mirror-server || nova boot --flavor m1.medium --image "CentOS-7-x86_64-GenericCloud" --key-name tripleo-cd-admins --nic net-name=private,v4-fixed-ip=$MIRRORIP --user-data scripts/deploy-server.sh mirror-server
nova show proxy-server || nova boot --flavor m1.medium --image "CentOS-7-x86_64-GenericCloud" --key-name tripleo-cd-admins --nic net-name=private,v4-fixed-ip=$PROXYIP --user-data scripts/deploy-server.sh proxy-server
# Build the bmc-template snapshot: boot a throwaway VM, wait for its
# user-data script to touch /var/tmp/ready, snapshot it, clean up.
# NOTE(review): $EXTNET is not defined in rh2.env as seen here --
# presumably it comes from the sourced env file; confirm.
if ! nova image-show bmc-template ; then
    nova keypair-add --pub-key ~/.ssh/id_rsa.pub undercloud
    nova boot --flavor bmc --image "CentOS-7-x86_64-GenericCloud" --key-name undercloud --user-data scripts/deploy-server.sh bmc-template
    FLOATINGIP=$(nova floating-ip-create $EXTNET | grep public | awk '{print $4}')
    nova floating-ip-associate bmc-template $FLOATINGIP
    while ! ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=2 centos@$FLOATINGIP ls /var/tmp/ready ; do
        sleep 10
    done
    nova image-create --poll bmc-template bmc-template
    nova delete bmc-template
    nova keypair-delete undercloud
fi
|
|
@ -0,0 +1,21 @@
|
|||
# Puppet manifest for the rh2 proxy-server: keep a tripleo-ci checkout
# fresh and run a squid caching proxy with our config.
Exec { path => [ "/bin/", "/sbin/" ] }

vcsrepo {"/opt/stack/tripleo-ci":
    source => "https://git.openstack.org/openstack-infra/tripleo-ci",
    provider => git,
    ensure => latest,
}

# Re-apply periodically so the checkout above stays current.
# NOTE(review): this applies te-broker.pp, not this manifest -- looks
# copied from te-broker.pp; confirm it is intended on the proxy server.
cron {"refresh-server":
    command => "timeout 20m puppet apply /opt/stack/tripleo-ci/scripts/te-broker/te-broker.pp",
    minute => "*/30"
}

# Install squid, drop in our config from the checkout, and (re)start the
# service whenever the config file changes.
package{"squid": } ->
file {"/etc/squid/squid.conf":
    source => "/opt/stack/tripleo-ci/scripts/proxy-server/squid.conf",
} ~>
service {"squid":
    ensure => "running",
}
|
||||
|
|
@ -0,0 +1,78 @@
|
|||
# Squid configuration for the rh2 CI proxy-server.  Mostly the upstream
# recommended minimum config; local deviations (large object cache for
# cloud images, no caching of yum repomd.xml) are commented inline.
#
# Recommended minimum configuration:
#

# Example rule allowing access from your local networks.
# Adapt to list your (internal) IP networks from where browsing
# should be allowed
acl localnet src 10.0.0.0/8     # RFC1918 possible internal network
acl localnet src 172.16.0.0/12  # RFC1918 possible internal network
acl localnet src 192.168.0.0/16 # RFC1918 possible internal network
acl localnet src fc00::/7       # RFC 4193 local private network range
acl localnet src fe80::/10      # RFC 4291 link-local (directly plugged) machines

acl SSL_ports port 443
acl Safe_ports port 80          # http
acl Safe_ports port 21          # ftp
acl Safe_ports port 443         # https
acl Safe_ports port 70          # gopher
acl Safe_ports port 210         # wais
acl Safe_ports port 1025-65535  # unregistered ports
acl Safe_ports port 280         # http-mgmt
acl Safe_ports port 488         # gss-http
acl Safe_ports port 591         # filemaker
acl Safe_ports port 777         # multiling http
acl CONNECT method CONNECT

#
# Recommended minimum Access Permission configuration:
#
# Deny requests to certain unsafe ports
http_access deny !Safe_ports

# Deny CONNECT to other than secure SSL ports
http_access deny CONNECT !SSL_ports

# Only allow cachemgr access from localhost
http_access allow localhost manager
http_access deny manager

# We strongly recommend the following be uncommented to protect innocent
# web applications running on the proxy server who think the only
# one who can access services on "localhost" is a local user
#http_access deny to_localhost

#
# INSERT YOUR OWN RULE(S) HERE TO ALLOW ACCESS FROM YOUR CLIENTS
#

# Example rule allowing access from your local networks.
# Adapt localnet in the ACL section to list your (internal) IP networks
# from where browsing should be allowed
http_access allow localnet
http_access allow localhost

# And finally deny all other access to this proxy
http_access deny all

# Squid normally listens to port 3128
http_port 3128

# stray from the default here so that we can cache cloud images
# (derekh): carried over from rh1 may no longer be needed
maximum_object_size 1024 MB
cache_dir aufs /var/spool/squid 16384 16 256

# Leave coredumps in the first cache dir
coredump_dir /var/spool/squid

#
# Add any of your own refresh_pattern entries above these.
#
refresh_pattern ^ftp: 1440 20% 10080
refresh_pattern ^gopher: 1440 0% 1440
# Never cache repomd.xml in yum repositories as serving an old one
# causes yum installs to fail
refresh_pattern -i repomd.xml$ 0 0% 0
refresh_pattern -i (/cgi-bin/|\?) 0 0% 0
refresh_pattern . 0 20% 4320
|
|
@ -0,0 +1,75 @@
|
|||
# TripleO environment file for deploying the rh2 ci-overcloud.
parameter_defaults:
  CloudName: ci-overcloud.rh2.tripleo.org
  controllerExtraConfig:
    tripleo::loadbalancer::public_virtual_ip: 8.43.87.224
    neutron::agents::ml2::ovs::prevent_arp_spoofing: false
    # https://bugs.launchpad.net/tripleo/+bug/1590101
    # Tripleo sets this to 1400, the mtu of most physical networks without jumbo frames is 1500
    # Tripleo also forces dhcp-option-force=26,1400 , this leaves no overhead room for vxlan
    # we probably shouldn't force this as neutron automatically subtracts the overlay protocol overhead from global_physnet_mtu.
    # TODO: investigate properly
    neutron::global_physnet_mtu: 1500
    # this is deprecated but takes precedence ??
    neutron::network_device_mtu: 1500
    # rh2 disks are small, we're relying on the fact that CI jobs at different stages won't
    # ever use peak disk usage together (also they don't use all that's allocated in the flavor)
    nova::scheduler::filter::disk_allocation_ratio: 3
  NovaComputeExtraConfig:
    neutron::agents::ml2::ovs::prevent_arp_spoofing: false
    neutron::plugins::ml2::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
    neutron::agents::ml2::ovs::firewall_driver: neutron.agent.firewall.NoopFirewallDriver
    neutron::global_physnet_mtu: 1500
    # this is deprecated but takes precedence ??
    neutron::network_device_mtu: 1500
    # Allow file injection so that the nodepool cloud creds can be injected into the te-broker
    nova::compute::libvirt::libvirt_inject_partition: -1
    # This should be OK if the cloud is exclusively for CI but it might end in tears
    nova::compute::libvirt::libvirt_disk_cachemodes:
      - file=unsafe
  EndpointMap:
    AodhAdmin: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
    AodhInternal: {protocol: 'http', port: '8042', host: 'IP_ADDRESS'}
    AodhPublic: {protocol: 'https', port: '13042', host: 'CLOUDNAME'}
    CeilometerAdmin: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
    CeilometerInternal: {protocol: 'http', port: '8777', host: 'IP_ADDRESS'}
    CeilometerPublic: {protocol: 'https', port: '13777', host: 'CLOUDNAME'}
    CinderAdmin: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
    CinderInternal: {protocol: 'http', port: '8776', host: 'IP_ADDRESS'}
    CinderPublic: {protocol: 'https', port: '13776', host: 'CLOUDNAME'}
    GlanceAdmin: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
    GlanceInternal: {protocol: 'http', port: '9292', host: 'IP_ADDRESS'}
    GlancePublic: {protocol: 'https', port: '13292', host: 'CLOUDNAME'}
    GnocchiAdmin: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
    GnocchiInternal: {protocol: 'http', port: '8041', host: 'IP_ADDRESS'}
    GnocchiPublic: {protocol: 'https', port: '13041', host: 'CLOUDNAME'}
    HeatAdmin: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
    HeatInternal: {protocol: 'http', port: '8004', host: 'IP_ADDRESS'}
    HeatPublic: {protocol: 'https', port: '13004', host: 'CLOUDNAME'}
    HorizonPublic: {protocol: 'https', port: '443', host: 'CLOUDNAME'}
    KeystoneAdmin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
    KeystoneInternal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
    KeystonePublic: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
    KeystoneV3Admin: {protocol: 'http', port: '35357', host: 'IP_ADDRESS'}
    KeystoneV3Internal: {protocol: 'http', port: '5000', host: 'IP_ADDRESS'}
    KeystoneV3Public: {protocol: 'https', port: '13000', host: 'CLOUDNAME'}
    NeutronAdmin: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
    NeutronInternal: {protocol: 'http', port: '9696', host: 'IP_ADDRESS'}
    NeutronPublic: {protocol: 'https', port: '13696', host: 'CLOUDNAME'}
    NovaAdmin: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
    NovaInternal: {protocol: 'http', port: '8774', host: 'IP_ADDRESS'}
    NovaPublic: {protocol: 'https', port: '13774', host: 'CLOUDNAME'}
    NovaEC2Admin: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
    NovaEC2Internal: {protocol: 'http', port: '8773', host: 'IP_ADDRESS'}
    NovaEC2Public: {protocol: 'https', port: '13773', host: 'CLOUDNAME'}
    NovaVNCProxyAdmin: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
    NovaVNCProxyInternal: {protocol: 'http', port: '6080', host: 'IP_ADDRESS'}
    NovaVNCProxyPublic: {protocol: 'https', port: '13080', host: 'CLOUDNAME'}
    SaharaAdmin: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
    SaharaInternal: {protocol: 'http', port: '8386', host: 'IP_ADDRESS'}
    SaharaPublic: {protocol: 'https', port: '13386', host: 'CLOUDNAME'}
    SwiftAdmin: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
    SwiftInternal: {protocol: 'http', port: '8080', host: 'IP_ADDRESS'}
    SwiftPublic: {protocol: 'https', port: '13808', host: 'CLOUDNAME'}

resource_registry:
  OS::TripleO::NodeTLSData: /usr/share/openstack-tripleo-heat-templates/puppet/extraconfig/tls/tls-cert-inject.yaml
|
|
@ -0,0 +1,16 @@
|
|||
#!/bin/bash
# Site-specific settings for the rh2 cloud, sourced by the infra deploy
# script and by te-broker/start_workers.sh.

# Public (floating IP) network layout.
export PUBLIC_IP_NET=8.43.86.0/23
# NOTE(review): the "GATWAY" spelling is part of the variable name as
# consumed by the deploy script -- do not rename it in isolation.
export PUBLIC_IP_GATWAY=8.43.87.254
export PUBLIC_IP_FLOATING_START=8.43.87.225
export PUBLIC_IP_FLOATING_END=8.43.87.253

# Number of concurrent OVB test environments to maintain.
export TOTALOVBENVS=15
# Quotas applied to the openstack-nodepool project.
export QUOTA_RAM=524288
export QUOTA_FIPS=20
export QUOTA_NETS=20
export QUOTA_PORTS=200

# Fixed addresses for the infra servers on the shared private network.
export TEBROKERIP=192.168.103.254
export MIRRORIP=192.168.103.253
export PROXYIP=192.168.103.252
|
|
@ -0,0 +1,48 @@
|
|||
#!/bin/bash
# Create an OVB test environment: instantiate the example env file for
# this environment number, create its provisioning network, deploy the
# baremetal_<num> heat stack and write the resulting nodes json to
# $TE_DATAFILE.  Args: [env-number] [node-count] [undercloud-instance-id]
set -ex

# If we hit any problem then pause this env so it stops
# trying to dish out testenvs
trap "echo There was a problem, going to sleep ; sleep infinity" ERR

ENVNUM=${1:-$(date +%s)}
NODECOUNT=${2:-2}
UCINSTANCE=${3:-}
PROVISIONNET=provision_${ENVNUM}
ENVFILE=env_${ENVNUM}.yaml

# Credentials for the nodepool tenant (injected at server build time).
source /etc/nodepoolrc

cd /opt/stack/openstack-virtual-baremetal/

# Build this environment's env file from the example, filling in the
# per-environment names and the cloud credentials.
/bin/cp --remove-destination templates/env.yaml.example $ENVFILE
sed -i -e "s/baremetal_prefix:.*/baremetal_prefix: baremetal_${ENVNUM}/" $ENVFILE
sed -i -e "s/bmc_image:.*/bmc_image: bmc-template/" $ENVFILE
sed -i -e "s/bmc_prefix:.*/bmc_prefix: bmc_${ENVNUM}/" $ENVFILE
sed -i -e "s/key_name:.*/key_name: tripleo-cd-admins/" $ENVFILE
sed -i -e "s/node_count:.*/node_count: ${NODECOUNT}/" $ENVFILE
# The expansion escapes the slashes in the URL for use inside sed's s///.
sed -i -e "s/os_auth_url:.*/os_auth_url: ${OS_AUTH_URL//\//\/}/" $ENVFILE
sed -i -e "s/os_password:.*/os_password: $OS_PASSWORD/" $ENVFILE
sed -i -e "s/os_tenant:.*/os_tenant: $OS_TENANT_NAME/" $ENVFILE
sed -i -e "s/os_user:.*/os_user: $OS_USERNAME/" $ENVFILE
sed -i -e "s/provision_net:.*/provision_net: $PROVISIONNET/" $ENVFILE

# Dedicated provisioning network for this environment; optionally attach
# the undercloud instance to it.
PROVISIONNETID=$(neutron net-create $PROVISIONNET | awk '$2=="id" {print $4}')
neutron subnet-create --name provision_subnet_${ENVNUM} --enable_dhcp=False $PROVISIONNET 192.0.2.0/24
if [ -n "$UCINSTANCE" ] ; then
    nova interface-attach --net-id $PROVISIONNETID $UCINSTANCE
fi
/opt/stack/openstack-virtual-baremetal/bin/deploy.py --env $ENVFILE --name baremetal_${ENVNUM}

# Wait for the stack; on failure report it via $TE_DATAFILE and exit 0
# so the worker can hand the result back rather than hanging in the trap.
while ! heat stack-show baremetal_${ENVNUM} | grep CREATE_COMPLETE ; do
    sleep 10
    if heat stack-show baremetal_${ENVNUM} | grep CREATE_FAILED ; then
        echo "Failed creating OVB stack" > $TE_DATAFILE
        exit 0
    fi
done

/opt/stack/openstack-virtual-baremetal/bin/build-nodes-json --env $ENVFILE --nodes_json $TE_DATAFILE
|
||||
|
|
@ -0,0 +1,28 @@
|
|||
#!/bin/bash
# Tear down an OVB test environment created by create-env: delete the
# baremetal_<num> heat stack, any ports on its provisioning subnet
# (including the one attached to the undercloud), then the subnet/net.
set -xe

# If we hit any problem then pause this env so it stops
# trying to dish out testenvs
trap "echo There was a problem, going to sleep ; sleep infinity" ERR

ENVNUM=${1:-$(date +%s)}
PROVISIONNET=provision_${ENVNUM}
ENVFILE=env_${ENVNUM}.yaml

rm -f /opt/stack/openstack-virtual-baremetal/env_${ENVNUM}.yaml

source /etc/nodepoolrc
# BUGFIX: the original `2>&1 > /dev/null` only discarded stdout and left
# stderr attached to the old stdout, so every existence probe spammed the
# log; the intended ordering is `> /dev/null 2>&1` (status still drives
# the loop either way).
while heat stack-show baremetal_${ENVNUM} > /dev/null 2>&1 ; do
    # keep calling delete until its gone
    heat stack-delete -y baremetal_${ENVNUM} || true
    sleep 20
done

# Delete the port that has been attached to the undercloud
SUBNETID=$(neutron subnet-show provision_subnet_${ENVNUM} | awk '$2=="id" {print $4}')
for PORT in $(neutron port-list | grep $SUBNETID | awk '{print $2}') ; do
    neutron port-delete $PORT
done

neutron subnet-delete provision_subnet_${ENVNUM}
neutron net-delete $PROVISIONNET
|
|
@ -0,0 +1,6 @@
|
|||
# Systemd unit for the gear job-server used by the te-broker; installed
# into /lib/systemd/system by scripts/te-broker/setup and kept running by
# the service resource in te-broker.pp.
[Unit]
Description=Geard daemon

[Service]
ExecStart=/bin/geard -d
|
||||
|
|
@ -0,0 +1,19 @@
|
|||
#!/bin/bash

# Keep X number of testenv workers running, each testenv worker exits after processing a single job
BASEPATH=$(realpath $(dirname $0)/../..)
ENVFILE=$BASEPATH/scripts/rh2.env

TENUM=0
while true ; do
    # Count our still-running background workers.
    NUMCURRENTJOBS=$(jobs -p -r | wc -l)
    # Re-source the env file each pass so TOTALOVBENVS can be tuned
    # without restarting this service.
    source $ENVFILE
    if [ $NUMCURRENTJOBS -lt $TOTALOVBENVS ] ; then
        TENUM=$(($TENUM+1))
        echo "Starting testenv-worker $TENUM"
        python $BASEPATH/scripts/te-broker/testenv-worker --tenum $TENUM $BASEPATH/scripts/te-broker/create-env $BASEPATH/scripts/te-broker/destroy-env &
    fi
    # Throttle a little so we don't end up hitting the openstack APIs too hard
    sleep 10
done
|
|
@ -0,0 +1,26 @@
|
|||
# Puppet manifest for the te-broker node: keep the OVB and tripleo-ci
# checkouts fresh and make sure the gear daemon and testenv workers run.
Exec { path => [ "/bin/", "/sbin/" ] }

vcsrepo {"/opt/stack/openstack-virtual-baremetal":
    source => "https://github.com/cybertron/openstack-virtual-baremetal.git",
    provider => git,
    ensure => latest,
}

vcsrepo {"/opt/stack/tripleo-ci":
    source => "https://git.openstack.org/openstack-infra/tripleo-ci",
    provider => git,
    ensure => latest,
}

# Re-apply this manifest periodically so the checkouts stay current.
cron {"refresh-server":
    command => "timeout 20m puppet apply /opt/stack/tripleo-ci/scripts/te-broker/te-broker.pp",
    minute => "*/30"
}

# Unit files for these two services are installed by
# scripts/te-broker/setup.
service{"te_workers":
    ensure => "running",
}
service{"geard":
    ensure => "running",
}
|
||||
|
|
@ -0,0 +1,14 @@
|
|||
#!/bin/bash

# One-off setup for the te-broker host: enable the current-tripleo
# delorean repos, install the openstack clients plus gear, and install
# the systemd units for geard and the testenv workers.
curl http://trunk.rdoproject.org/centos7/current-tripleo/delorean.repo > /etc/yum.repos.d/delorean.repo
curl http://trunk.rdoproject.org/centos7/delorean-deps.repo > /etc/yum.repos.d/delorean-deps.repo

yum install -y python-pip python-heatclient python-neutronclient python-novaclient python-swiftclient

pip install gear

BASEPATH=$(realpath $(dirname $0))

# NOTE(review): no systemctl daemon-reload/enable here -- presumably the
# service resources in te-broker.pp start these units; confirm.
cp $BASEPATH/geard.service /lib/systemd/system/geard.service
cp $BASEPATH/te_workers.service /lib/systemd/system/te_workers.service
|
||||
|
|
@ -0,0 +1,6 @@
|
|||
# Systemd unit for the testenv worker pool; installed into
# /lib/systemd/system by scripts/te-broker/setup.
[Unit]
Description=TE Workers

[Service]
ExecStart=/opt/stack/tripleo-ci/scripts/te-broker/start_workers.sh
|
||||
|
|
@ -0,0 +1,217 @@
|
|||
#!/usr/bin/python
|
||||
#
|
||||
# Runs a tripleo-ci test-worker
|
||||
#
|
||||
# Copyright 2013 Red Hat, Inc.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
#
|
||||
|
||||
import argparse
|
||||
import json
|
||||
import logging
|
||||
import logging.handlers
|
||||
import os
|
||||
import sys
|
||||
import subprocess
|
||||
import tempfile
|
||||
import threading
|
||||
import time
|
||||
import uuid
|
||||
|
||||
import gear
|
||||
|
||||
# 100Mb log files
# NOTE(review): maxBytes is not passed to any handler in this chunk --
# basicConfig below performs no rotation; presumably intended for a
# logging.handlers.RotatingFileHandler. TODO confirm.
maxBytes=1024*1024*100

# Log to a fixed file; note the module-level `logger` used further down
# is not defined in this chunk -- presumably assigned later in the file.
logging.basicConfig(filename="/var/log/testenv-worker.log", format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||
|
||||
|
||||
class CallbackClient(gear.Client):
    """gear.Client that records when a submitted job reaches a terminal state.

    Any of the three terminal packets (complete, exception, fail) sets an
    internal event that ``wait`` blocks on.
    """

    def __init__(self):
        super(CallbackClient, self).__init__()
        # Set once the job completes, errors or fails.
        self.event = threading.Event()

    def handleWorkComplete(self, packet):
        """Release any waiter when the job completes successfully."""
        super(CallbackClient, self).handleWorkComplete(packet)
        self.event.set()

    def handleWorkException(self, packet):
        """Release any waiter when the worker raised an exception."""
        super(CallbackClient, self).handleWorkException(packet)
        self.event.set()

    def handleWorkFail(self, packet):
        """Release any waiter when the job failed."""
        super(CallbackClient, self).handleWorkFail(packet)
        self.event.set()

    def wait(self, timeout=None):
        """Wait for notification of completion, error or failure.

        :param timeout: a timeout for the operation in seconds
        :type timeout: float
        :returns: True if a notification was received, False on timeout
        """
        self.event.wait(timeout)
        return self.event.is_set()
|
||||
|
||||
|
||||
class TEWorkerThread(threading.Thread):
    """Thread that registers a test environment with gearman and locks it.

    The thread registers a 'lockenv' gearman function. When a client grabs
    the job, the worker runs scriptfiles[0] to provision the environment,
    calls back to the client with the resulting env data, holds (locks) the
    environment until the client finishes or a timeout expires, and finally
    runs scriptfiles[1] to free the environment again.
    """

    def __init__(self, geard, num, timeout, scriptfiles):
        """
        :param geard: comma separated list of gearman brokers (host[:port])
        :param num: unique identifier for this environment on this host
        :param timeout: default number of seconds to hold the env for;
            the client may override it per job
        :param scriptfiles: two paths: [0] creates the env, [1] destroys it
        """
        super(TEWorkerThread, self).__init__()
        self.geard = geard
        self.timeout = timeout
        self.scriptfiles = scriptfiles
        # Polled by main(); cleared once this thread has finished.
        self.running = True
        self.num = num
        self.worker = None

    def run(self):
        try:
            logger.info('running TE worker')
            self.runJob()
        except gear.InterruptedError:
            logger.info('getJob interrupted...')
        except Exception:
            # Was a bare "except:", which would also swallow SystemExit and
            # KeyboardInterrupt; Exception still guarantees the running
            # flag below is cleared on any ordinary failure.
            logger.exception('Error while run_te_worker worker')
        self.running = False

    def runJob(self):
        """Register with the brokers, take one 'lockenv' job and service it."""
        self.worker = gear.Worker('testenv-worker-%s' % self.num)
        try:
            self._add_servers(self.worker, self.geard)
            self.worker.waitForServer()

            self.worker.registerFunction('lockenv')

            logger.info('Getting new job...')
            # Blocks until a client submits a lockenv job.
            job = self.worker.getJob()
            logger.info('Received job : %s', job.arguments)

            arguments = json.loads(job.arguments)
            call_back = arguments["callback_name"]
            job_timeout = int(arguments.get("timeout", self.timeout))

            # Once this Job is called we call back to the client to run its
            # commands while this environment is locked
            self._run_callback(job_timeout, call_back, arguments)

            job.sendWorkComplete("")
        finally:
            self.worker.shutdown()

    def _add_servers(self, client, servers):
        """Add each broker from the comma separated 'servers' string to
        client, defaulting the port to 4730 when none is given."""
        for server in servers.split(','):
            server = server.rsplit(':', 1)
            if len(server) == 1:
                server.append('4730')
            client.addServer(server[0], int(server[1]))

    def _run_callback(self, timeout, callback_name, arguments):
        """Provision the env and call back to the client while it is locked.

        Runs scriptfiles[0] to set up the environment (its output lands in
        TE_DATAFILE), submits the client's callback job with that data,
        waits up to 'timeout' seconds for the client to finish, then runs
        scriptfiles[1] to free the environment.
        """
        client = CallbackClient()
        self._add_servers(client, self.geard)
        client.waitForServer()

        try:
            try:
                with tempfile.NamedTemporaryFile('r') as fp:
                    os.environ["TE_DATAFILE"] = fp.name
                    logger.info(
                        subprocess.check_output([self.scriptfiles[0],
                                                 self.num,
                                                 arguments.get("envsize", "2"),
                                                 arguments.get("ucinstance",
                                                               "")]))
                    clientdata = fp.read()
            except subprocess.CalledProcessError as e:
                logger.error(e.output)
                clientdata = "Couldn't retrieve env"

            cb_job = gear.Job(callback_name, clientdata)
            client.submitJob(cb_job)

            # Wait for 30 seconds, then test the status of the job
            if not client.wait(30):
                # Request the job status from the broker
                cb_job.connection.sendPacket(gear.Packet(gear.constants.REQ,
                                                         gear.constants.GET_STATUS,
                                                         cb_job.handle))
                # Let a little time pass for the STATUS_RES to return, If
                # we're in here we've already waited 30 seconds so another 10
                # wont make much difference
                time.sleep(10)
                if not cb_job.running:
                    # NOTE: the two fragments previously concatenated with no
                    # separating space ("starting,assuming").
                    logger.error("No sign of the Callback job starting, "
                                 "assuming its no longer present")
                    clientdata = subprocess.check_output(
                        [self.scriptfiles[1], self.num])
                    logger.info(clientdata)
                    return

            # We timeout after the configured timeout - the 40 second sleep
            # that we perform during initial handshaking. Note that after
            # this timeout we offer the environment for other test clients,
            # but the prior client's credentials are still valid, so very
            # confusing errors can occur if we were ever to timeout without
            # the client timing out first.
            client.wait(timeout - 40)
            if cb_job.failure:
                logger.error("The Job appears to have failed.")
            elif not cb_job.complete:
                logger.error("No sign of Job completing, Freeing environment.")
            else:
                logger.info('Returned from Job : %s', cb_job.data)
            try:
                clientdata = subprocess.check_output(
                    [self.scriptfiles[1], self.num])
            except subprocess.CalledProcessError as e:
                logger.error(e.output)
                raise
            logger.info(clientdata)
        finally:
            # Always disconnect from the brokers, even when freeing the env
            # raises or the early-return path above is taken (previously the
            # shutdown was skipped if check_output raised here).
            client.shutdown()
||||
|
||||
|
||||
def main(args=sys.argv[1:]):
    """Parse CLI options, set up logging and run one test-env worker.

    Blocks until the worker thread finishes (one job serviced or an
    error occurred).
    """
    parser = argparse.ArgumentParser(
        description='Registers a test environment with a gearman broker, the '
                    'registered job "lockenv" then holds the environment in a '
                    '"locked" state while it calls back to the client. The '
                    'clients job is provided with data (contents of datafile)'
    )
    parser.add_argument('scriptfiles', nargs=2,
                        help='Path to a script whos output is provided to the client')
    parser.add_argument('--timeout', '-t', type=int, default=10800,
                        help='The maximum number of seconds to hold the '
                             'testenv for, can be overridden by the client.')
    parser.add_argument('--tenum', '-n', default=uuid.uuid4().hex,
                        help='A unique identifier identifing this env on '
                             'this host.')
    parser.add_argument('--geard', '-b', default='127.0.0.1:4730',
                        help='A comma separated list of gearman brokers to '
                             'connect to.')
    parser.add_argument('--debug', '-d', action='store_true',
                        help='Set to debug mode.')
    opts = parser.parse_args(args)

    # The worker thread logs through this module-level logger.
    global logger
    logger = logging.getLogger('testenv-worker-' + opts.tenum)
    logger.addHandler(logging.handlers.RotatingFileHandler("/var/log/testenv-worker.log", maxBytes=maxBytes, backupCount=5))
    logger.setLevel(logging.INFO)
    # NOTE(review): this removes the handler added two lines above (it is the
    # only handler on this child logger), leaving records to propagate to the
    # root logger configured by basicConfig at module import. The net effect
    # is that the RotatingFileHandler is created and immediately discarded —
    # possibly the intent was to remove the root's plain FileHandler instead;
    # confirm before changing.
    logger.removeHandler(logger.handlers[0])

    if opts.debug:
        logger.setLevel(logging.DEBUG)

    logger.info('Starting test-env worker with data %r', opts.scriptfiles)
    te_worker = TEWorkerThread(opts.geard, opts.tenum, opts.timeout, opts.scriptfiles)

    te_worker.start()
    # Keep the main thread alive (and responsive to signals) while the
    # worker thread services its job; 'running' is cleared by the thread.
    while te_worker.running:
        time.sleep(1)
|
||||
# Script entry point: run the worker with arguments from the command line.
if __name__ == '__main__':
    main()
|
|
@ -0,0 +1,4 @@
|
|||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCdK/KnJPXaycb8hOX9j1f/lU1HuB9GMUhCN7of4KrRs3OjzCtMUNZJSTCQaeUlR6VijNvUIe6Wx5zWCDR1DgNv/InQjr+7RhSgMofStz1TfJrlkGBfZw2mrhpPnX3fLaCf9Dyl+8+cuTjxGTMtYngJx2/9aNpgl75hMgaM6BRh/ULM+lLBDlCI+r9dFSsVx9UKTQUyenDh4A3J3eszP1CTT1sBWDKU5zz39MqWpzIxRaBgk+nI8+Uxbih7FUPaLLqlrl4Nxdr9A/Aul9pB8OSk/BK1oX/8mYW5mDE3usbeXIVU53ojEG0pkKaq4N9oeMorGB65xG7uAf845Av0qOpr derekh@Tea540
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA+yNMzUrQXa0EOfv+WJtfmLO1WdoOaD47G9qwllSUc4GPRkYzkTNdxcEPrR3XBR94ctOeWOHZ/w7ymhvwK5LLsoNBK+WgRz/mg8oHcii2GoL0fNojdwUMyFMIJxJT+iwjF/omyhyrW/aLAztAKRO7BdOkNlXMAAcMxKzQtFqdZm09ghoImu3BPYUTyDKHMp+t0P1d7mkHdd719oDfMf+5miHxQeJZJCWAsGwroN7k8a46rvezDHEygBsDAF2ZpS2iGMABos/vTp1oyHkCgCqc3rM0OoKqcKB5iQ9Qaqi5ung08BXP/PHfVynXzdGMjTh4w+6jiMw7Dx2GrQIJsDolKQ== dan.prince@dovetail
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDSP0DZRAwaTAvHk7mHlLfSwVq6QCRqKn8mE6nwW1UzBmTzKdq9pK5XPqEAQgUKoarl+M+QhCNrBaNKpUqPF1dH76S0+2k2HARrxubTlXsQ9UDQQHQZxGjsrYW9sZ/F7yh4Yac7HW4pZANumyAxt0yKE0BLTZX9JojaiBn7bMzw1i5BS6qXIyH7oohd3YThxkpMCqP4O6W6wX90FSDYPtbSaZ1Q+9hzNkS29bXcsoy6uwTixkfedsCgkLb2wa9jcDHCely94Tn/oR+JjT9OQ19Tq8p/rjL8lullIrkHsEEsQ/4sIlB6441DgbeLtQAPPA7pyw50KfBCyTfHQZWPsacN jslagle@redhat.com
|
||||
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDv9l/F0vq7nzAT5hdrBfqDv0rD76nHNn+siS6s5gaFyAuvXJG808pqFk5bJLbRdRIA1/cLxIQeB+bB7IjeTS7Afbz/baAOPTtoumwEU8wLPzR7IyTg60R4o7pKOJG2cP45s3TGODsYt5eEAr96EGp9ayyanfuJZZf2wQWdNp1+vQXain8WHv9KIKI5XmcKI80x8RBWV86OKKsmbqV4yYxAkuLitq4h3Bhw3LP+VOxaqApevnpt7fcrvn8QR3XMsLKNZsJhT9r1qeLEZisundZPN+0EuiC7seu5zAuCBcKjRrBo7Ime8TYn5sjz9DTMcWvY3xHF2DZN2YdVxp4O8/iD bnemec@localhost.localdomain
|
Loading…
Reference in New Issue