Kubernetes integration with OpenStack networking
#!/bin/bash
#
# lib/kuryr
# Utilities for kuryr-kubernetes devstack

# ovs_bind_for_kubelet
# Description: Creates an OVS internal port so that baremetal kubelet will be
#              able to make both liveness and readiness http/tcp probes.
# Params:
#   project - Id or name of the project used for kuryr devstack
#   port    - Port to open for K8s API, relevant only for OpenStack infra
# Dependencies:
#   (none)
function ovs_bind_for_kubelet() {
    local port_id
    local port_mac
    local fixed_ips
    local port_ips
    local port_subnets
    local prefix
    local project_id
    local port_number
    local security_group
    local ifname
    local service_subnet_cidr
    local pod_subnet_gw
    local cidrs

    project_id="$1"
    port_number="$2"
    security_group=$(openstack security group list \
        --project "$project_id" -c ID -c Name -f value | \
        awk '{if ($2=="default") print $1}')
    port_id=$(openstack port create \
        --device-owner compute:kuryr \
        --project "$project_id" \
        --security-group "$security_group" \
        --host "${HOSTNAME}" \
        --network "${KURYR_NEUTRON_DEFAULT_POD_NET}" \
        -f value -c id \
        kubelet-"${HOSTNAME}")

    # Need to enable Amphorae subnet access to the kubelet iface for API
    # access
    openstack port set "$port_id" --security-group service_pod_access

    ifname="kubelet${port_id}"
    ifname="${ifname:0:14}"

    service_subnet_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
        -c cidr -f value)
    pod_subnet_gw=$(openstack subnet show "$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
        -c gateway_ip -f value)
    port_mac=$(openstack port show "$port_id" -c mac_address -f value)
    fixed_ips=$(openstack port show "$port_id" -f value -c fixed_ips)
    port_ips=($(python3 -c "print(${fixed_ips}[0]['ip_address'])"))
    port_subnets=($(python3 -c "print(${fixed_ips}[0]['subnet_id'])"))

    sudo ovs-vsctl -- --may-exist add-port $OVS_BRIDGE "$ifname" \
        -- set Interface "$ifname" type=internal \
        -- set Interface "$ifname" external-ids:iface-status=active \
        -- set Interface "$ifname" external-ids:attached-mac="$port_mac" \
        -- set Interface "$ifname" external-ids:iface-id="$port_id"

    sudo ip link set dev "$ifname" address "$port_mac"
    sudo ip link set dev "$ifname" up
    for ((i=0; i < ${#port_ips[@]}; i++)); do
        prefix=$(openstack subnet show "${port_subnets[$i]}" \
            -c cidr -f value | \
            cut -f2 -d/)
        sudo ip addr add "${port_ips[$i]}/${prefix}" dev "$ifname"
    done

    if [[ "$KURYR_SUBNET_DRIVER" == "namespace" ]]; then
        subnetpool_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-${SUBNETPOOL_V4_ID}}
        cidrs=$(openstack subnet pool show "${subnetpool_id}" -c prefixes -f value)
        subnetpool_cidr=$(python3 -c "print(${cidrs}[0])")
        sudo ip route add "$subnetpool_cidr" via "$pod_subnet_gw" dev "$ifname"
    else
        sudo ip route add "$service_subnet_cidr" via "$pod_subnet_gw" dev "$ifname"
    fi

    if [ -n "$port_number" ]; then
        # if openstack-INPUT chain doesn't exist we create it in INPUT (for
        # local development envs since openstack-INPUT is usually only in gates)
        sudo iptables -I openstack-INPUT 1 \
            -p tcp -s 0.0.0.0/0 -d 0.0.0.0/0 --dport $port_number -j ACCEPT || \
        sudo iptables -I INPUT 1 \
            -p tcp -m conntrack --ctstate NEW \
            -m tcp --dport "$port_number" \
            -m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT
    fi
}
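
# A minimal usage sketch (the project name and port below are placeholders for
# illustration, not values taken from this repo): bind the kubelet interface
# for the devstack project and open the API port on the node.
#
#   ovs_bind_for_kubelet "demo" 6443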

# get_container
# Description: Pulls a container from Dockerhub
# Params:
#   image_name - the name of the image in docker hub
#   version    - The version of the image to pull. Defaults to 'latest'
function get_container {
    local image
    local image_name
    local version

    image_name="$1"
    version="${2:-latest}"

    if [ "$image_name" == "" ]; then
        return 0
    fi

    image="${image_name}:${version}"
    if [ -z "$(container_runtime images -q "$image")" ]; then
        container_runtime pull "$image"
    fi
}

# run_container
# Description: Runs a container and attaches devstack's logging to it
# Params:
#   name - Name of the container to run
#   args - arguments to run the container with
function run_container {
    # Runs a detached container and uses devstack's run process to monitor
    # its logs
    local name
    local args

    name="$1"
    shift
    args="$@"

    container_runtime create --name $name $args

    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        run_process "$name" "$(which podman) start --attach $name" root root
    else
        run_process "$name" "$(which docker) start --attach $name"
    fi
}

# stop_container
# Description: stops a container and its devstack logging
# Params:
#   name - Name of the container to stop
function stop_container {
    local name

    name="$1"

    container_runtime kill "$name"
    container_runtime rm "$name"
    stop_process "$name"
}

# _allocation_range
# Description: Writes out tab separated usable ip range for a CIDR
# Params:
#   cidr             - The cidr to get the range for
#   gateway_position - Whether to reserve at 'beginning' or at 'end'
function _allocation_range {
  python3 - <<EOF "$@"
import sys

from netaddr import IPNetwork
import six

n = IPNetwork(six.text_type(sys.argv[1]))
gateway_position = sys.argv[2]

if gateway_position == 'beginning':
    beg_offset = 2
    end_offset = 2
elif gateway_position == 'end':
    beg_offset = 1
    end_offset = 3
else:
    raise ValueError('Disallowed gateway position %s' % gateway_position)

print("%s\\t%s" % (n[beg_offset], n[-end_offset]))
EOF
}
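
# Worked example (assuming the netaddr semantics above): for 10.0.0.0/24 the
# function prints the first and last usable addresses, tab separated, with
# either the last or the first addresses reserved for the gateway:
#
#   _allocation_range 10.0.0.0/24 end         # -> 10.0.0.1    10.0.0.253
#   _allocation_range 10.0.0.0/24 beginning   # -> 10.0.0.2    10.0.0.254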

# create_k8s_icmp_sg_rules
# Description: Creates icmp sg rules for Kuryr-Kubernetes pods
# Params:
#   sg_id     - Kuryr's security group id
#   direction - egress or ingress direction
function create_k8s_icmp_sg_rules {
    local sg_id=$1
    local direction="$2"
    local project_id

    project_id=$(get_or_create_project \
        "$KURYR_NEUTRON_DEFAULT_PROJECT" default)

    icmp_sg_rules=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        security group rule create \
        --project "$project_id" \
        --protocol icmp \
        --"$direction" "$sg_id")
    die_if_not_set $LINENO icmp_sg_rules \
        "Failure creating icmp sg ${direction} rule for ${sg_id}"
}

# create_k8s_subnet
# Description: Creates a network and subnet for Kuryr-Kubernetes usage
# Params:
#   project_id       - Kuryr's project uuid
#   net_name         - Name of the network to create
#   subnet_name      - Name of the subnet to create
#   subnetpool_id    - uuid of the subnet pool to use
#   router           - name of the router to plug the subnet to
#   split_allocation - Whether to allocate on all the subnet or only the
#                      latter half
function create_k8s_subnet {
    # REVISIT(apuimedo): add support for IPv6
    local project_id=$1
    local net_name="$2"
    local subnet_name="$3"
    local subnetpool_id="$4"
    local router="$5"
    local subnet_params="--project $project_id "
    local subnet_cidr
    local split_allocation
    split_allocation="${6:-False}"

    subnet_params+="--ip-version 4 "
    subnet_params+="--no-dhcp --gateway none "
    subnet_params+="--subnet-pool $subnetpool_id "

    local net_id
    net_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        network create --project "$project_id" \
        "$net_name" \
        -c id -f value)
    subnet_params+="--network $net_id $subnet_name"

    local subnet_id
    subnet_id=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet create $subnet_params \
        --project "$project_id" \
        -c id -f value)
    die_if_not_set $LINENO subnet_id \
        "Failure creating K8s ${subnet_name} IPv4 subnet for ${project_id}"

    subnet_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$subnet_id" \
        -c cidr -f value)
    die_if_not_set $LINENO subnet_cidr \
        "Failure getting K8s ${subnet_name} IPv4 subnet for $project_id"

    # Since K8s has its own IPAM for services and allocates the first IP from
    # service subnet CIDR to Kubernetes apiserver, we'll always put the router
    # interface at the end of the range.
    local router_ip
    local allocation_start
    local allocation_end
    local allocation_subnet
    router_ip=$(_cidr_range "$subnet_cidr" | cut -f2)
    if [[ "$split_allocation" == "True" ]]; then
        allocation_subnet=$(split_subnet "$subnet_cidr" | cut -f2)
        allocation_start=$(_allocation_range "$allocation_subnet" end | cut -f1)
        allocation_end=$(_allocation_range "$allocation_subnet" end | cut -f2)
    else
        allocation_start=$(_allocation_range "$subnet_cidr" end | cut -f1)
        allocation_end=$(_allocation_range "$subnet_cidr" end | cut -f2)
    fi
    die_if_not_set $LINENO router_ip \
        "Failed to determine K8s ${subnet_name} subnet router IP"
    openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" subnet set \
        --gateway "$router_ip" --no-allocation-pool "$subnet_id" \
        || die $LINENO "Failed to update K8s ${subnet_name} subnet"
    # Set a new allocation pool for the subnet so ports can be created again
    openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" subnet set \
        --allocation-pool "start=${allocation_start},end=${allocation_end}" \
        "$subnet_id" || die $LINENO "Failed to update K8s ${subnet_name} subnet"
    openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        router add subnet "$router" "$subnet_id" \
        || die $LINENO \
            "Failed to enable routing for K8s ${subnet_name} subnet"
}
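
# Example invocation (all IDs and names below are hypothetical, shown only to
# illustrate the argument order):
#
#   create_k8s_subnet "$project_id" k8s-pod-net k8s-pod-subnet \
#       "$subnetpool_id" router1 False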

# create_k8s_fake_service
# Description: Creates an endpoint-less kubernetes service to keep Kubernetes
#              API server from allocating this IP for another service
function create_k8s_fake_service {
    local fake_svc_name
    local fake_svc_ip

    fake_svc_name="$1"
    fake_svc_ip="$2"

    existing_svc_ip=$(/usr/local/bin/kubectl get svc --namespace kube-system -o jsonpath='{.items[?(@.metadata.name=='"\"${fake_svc_name}\""')].spec.clusterIP}')

    if [[ "$existing_svc_ip" == "" ]]; then
        # Create fake service so the clusterIP can't be reassigned
        cat <<EOF | /usr/local/bin/kubectl create -f -
kind: Service
apiVersion: v1
metadata:
  name: "${fake_svc_name}"
  namespace: kube-system
spec:
  type: ClusterIP
  clusterIP: "${fake_svc_ip}"
  ports:
  - protocol: TCP
    port: 80
EOF
    fi
}
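
# Example (hypothetical name and IP): reserve a cluster IP, e.g. for a DNS
# service, so the apiserver's IPAM never hands it out to another service:
#
#   create_k8s_fake_service kube-dns 10.0.0.10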

# build_kuryr_containers
# Description: Generates a Kuryr controller and Kuryr CNI docker images in
#              the local docker registry as kuryr/controller:latest and
#              kuryr/cni:latest respectively
function build_kuryr_containers() {
    local build_args
    local build_dir

    build_dir="${DEST}/kuryr-kubernetes"
    pushd "$build_dir"

    KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS=$(trueorfalse False KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS)
    if [[ "$KURYR_CONTAINERS_USE_LOWER_CONSTRAINTS" == "True" ]]; then
        build_args="--build-arg UPPER_CONSTRAINTS_FILE=/opt/kuryr-kubernetes/lower-constraints.txt"
    fi

    # Build images
    # FIXME(dulek): Until https://github.com/containers/buildah/issues/1206 is
    #               resolved instead of podman we need to use buildah directly,
    #               hence this awful if clause.
    if [[ ${CONTAINER_ENGINE} == 'crio' ]]; then
        sudo buildah bud -t docker.io/kuryr/controller -f controller.Dockerfile .
        sudo buildah bud -t docker.io/kuryr/cni -f cni.Dockerfile .
    else
        container_runtime build -t kuryr/controller -f controller.Dockerfile ${build_args} .
        container_runtime build -t kuryr/cni -f cni.Dockerfile ${build_args} .
    fi
    popd
}

function indent() {
    sed 's/^/    /';
}

function generate_kuryr_configmap() {
    local output_dir
    local conf_path

    output_dir=$1
    conf_path=${2:-""}
    mkdir -p "$output_dir"
    rm -f ${output_dir}/config_map.yml

    cat >> "${output_dir}/config_map.yml" << EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: kuryr-config
  namespace: kube-system
data:
  kuryr.conf: |
EOF

    cat $conf_path | indent >> "${output_dir}/config_map.yml"
}
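
# Example (paths are placeholders): render the ConfigMap manifest from an
# already generated kuryr.conf into the manifest output directory:
#
#   generate_kuryr_configmap /tmp/kuryr-output /etc/kuryr/kuryr.conf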

function generate_kuryr_certificates_secret() {
    local output_dir
    local certs_bundle_path

    output_dir=$1
    certs_bundle_path=${2:-""}
    mkdir -p "$output_dir"
    rm -f ${output_dir}/certificates_secret.yml

    CA_CERT=\"\"  # It's a "" string that will be inserted into yaml file.

    if [ "$certs_bundle_path" -a -f "$certs_bundle_path" ]; then
        CA_CERT=$(base64 -w0 < "$certs_bundle_path")
    fi

    cat >> "${output_dir}/certificates_secret.yml" << EOF
apiVersion: v1
kind: Secret
metadata:
  name: kuryr-certificates
  namespace: kube-system
type: Opaque
data:
  kuryr-ca-bundle.crt: $CA_CERT
EOF
}

function generate_kuryr_service_account() {
    output_dir=$1
    mkdir -p "$output_dir"
    rm -f ${output_dir}/service_account.yml
    cat >> "${output_dir}/service_account.yml" << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: kuryr-controller
  namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kuryr-controller
rules:
- apiGroups:
  - ""
  verbs: ["*"]
  resources:
  - endpoints
  - pods
  - nodes
  - services
  - services/status
  - namespaces
- apiGroups:
  - openstack.org
  verbs: ["*"]
  resources:
  - kuryrnets
  - kuryrnetpolicies
  - kuryrloadbalancers
- apiGroups: ["networking.k8s.io"]
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
  - update
  - patch
- apiGroups: ["k8s.cni.cncf.io"]
  resources:
  - network-attachment-definitions
  verbs:
  - get
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: kuryr-controller-global
subjects:
- kind: ServiceAccount
  name: kuryr-controller
  namespace: kube-system
roleRef:
  kind: ClusterRole
  name: kuryr-controller
  apiGroup: rbac.authorization.k8s.io
EOF
}

function generate_controller_deployment() {
    output_dir=$1
    health_server_port=$2
    controller_ha=$3
    mkdir -p "$output_dir"
    rm -f ${output_dir}/controller_deployment.yml
    cat >> "${output_dir}/controller_deployment.yml" << EOF
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    name: kuryr-controller
  name: kuryr-controller
  namespace: kube-system
spec:
  replicas: ${KURYR_CONTROLLER_REPLICAS:-1}
  selector:
    matchLabels:
      name: kuryr-controller
EOF

    # When running without HA we should make sure that we won't have more than
    # one kuryr-controller pod in the deployment.
    if [ "$controller_ha" == "False" ]; then
        cat >> "${output_dir}/controller_deployment.yml" << EOF
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 0
      maxUnavailable: 1
EOF
    fi

    cat >> "${output_dir}/controller_deployment.yml" << EOF
  template:
    metadata:
      labels:
        name: kuryr-controller
      name: kuryr-controller
    spec:
      serviceAccountName: kuryr-controller
      automountServiceAccountToken: true
      hostNetwork: true
      containers:
EOF

    if [ "$controller_ha" == "True" ]; then
        cat >> "${output_dir}/controller_deployment.yml" << EOF
      - image: gcr.io/google_containers/leader-elector:0.5
        name: leader-elector
        args:
        - "--election=kuryr-controller"
        - "--http=0.0.0.0:${KURYR_CONTROLLER_HA_PORT:-16401}"
        - "--election-namespace=kube-system"
        - "--ttl=5s"
        ports:
        - containerPort: ${KURYR_CONTROLLER_HA_PORT:-16401}
          protocol: TCP
EOF
    fi

    cat >> "${output_dir}/controller_deployment.yml" << EOF
      - image: kuryr/controller:latest
        imagePullPolicy: Never
        name: controller
        terminationMessagePath: "/dev/termination-log"
        volumeMounts:
        - name: config-volume
          mountPath: "/etc/kuryr"
        - name: certificates-volume
          mountPath: "/etc/ssl/certs"
          readOnly: true
        readinessProbe:
          httpGet:
            path: /ready
            port: ${health_server_port}
            scheme: HTTP
          timeoutSeconds: 5
        livenessProbe:
          httpGet:
            path: /alive
            port: ${health_server_port}
          initialDelaySeconds: 15
EOF

    cat >> "${output_dir}/controller_deployment.yml" << EOF
      volumes:
      - name: config-volume
        configMap:
          name: kuryr-config
      - name: certificates-volume
        secret:
          secretName: kuryr-certificates
      restartPolicy: Always
      tolerations:
      - key: "node-role.kubernetes.io/master"
        operator: "Exists"
        effect: "NoSchedule"
      - key: "node.kubernetes.io/not-ready"
        operator: "Exists"
        effect: "NoSchedule"
EOF
}

function generate_cni_daemon_set() {
    output_dir=$1
    cni_health_server_port=$2
    cni_bin_dir=${3:-/opt/cni/bin}
    cni_conf_dir=${4:-/etc/cni/net.d}
    mkdir -p "$output_dir"
    rm -f ${output_dir}/cni_ds.yml
    cat >> "${output_dir}/cni_ds.yml" << EOF
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: kuryr-cni-ds
  namespace: kube-system
  labels:
    tier: node
    app: kuryr-cni
spec:
  selector:
    matchLabels:
      app: kuryr-cni
  template:
    metadata:
      labels:
        tier: node
        app: kuryr-cni
    spec:
      hostNetwork: true
      tolerations:
      - key: node-role.kubernetes.io/master
        operator: Exists
        effect: NoSchedule
      - key: "node.kubernetes.io/not-ready"
        operator: "Exists"
        effect: "NoSchedule"
      serviceAccountName: kuryr-controller
      containers:
      - name: kuryr-cni
        image: kuryr/cni:latest
        imagePullPolicy: Never
        command: [ "cni_ds_init" ]
        env:
        - name: KUBERNETES_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: KURYR_CNI_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        securityContext:
          privileged: true
        volumeMounts:
        - name: bin
          mountPath: /opt/cni/bin
        - name: net-conf
          mountPath: /etc/cni/net.d
        - name: config-volume
          mountPath: /etc/kuryr
        - name: proc
          mountPath: /host_proc
        - name: var-pci
          mountPath: /var/pci_address
EOF

    if [[ -n "$VAR_RUN_PATH" ]]; then
        cat >> "${output_dir}/cni_ds.yml" << EOF
        - name: openvswitch
          mountPath: /var/run
EOF
    fi

    cat >> "${output_dir}/cni_ds.yml" << EOF
        readinessProbe:
          httpGet:
            path: /ready
            port: ${cni_health_server_port}
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 10
        livenessProbe:
          httpGet:
            path: /alive
            port: ${cni_health_server_port}
          initialDelaySeconds: 60
      volumes:
      - name: bin
        hostPath:
          path: ${cni_bin_dir}
      - name: net-conf
        hostPath:
          path: ${cni_conf_dir}
      - name: config-volume
        configMap:
          name: kuryr-config
      - name: proc
        hostPath:
          path: /proc
      - name: var-pci
        hostPath:
          path: /var/pci_address
EOF

    if [[ -n "$VAR_RUN_PATH" ]]; then
        cat >> "${output_dir}/cni_ds.yml" << EOF
      - name: openvswitch
        hostPath:
          path: ${VAR_RUN_PATH}
EOF
    fi
}

# install_openshift_binary
# Description: Fetches the configured binary release of OpenShift and
#              installs it in the system
function install_openshift_binary {
    mkdir -p "$OPENSHIFT_BIN"
    curl -L ${OPENSHIFT_BINARY_BASE_URL}/${OPENSHIFT_BINARY_VERSION}/CHECKSUM --silent | \
        awk -v "ver=${OPENSHIFT_BINARY_VERSION}" \
            -v "dest=${OPENSHIFT_BIN}/openshift.tar.gz" \
            -v "baseurl=${OPENSHIFT_BINARY_BASE_URL}" \
            '/server/ {system("curl -L " baseurl "/" ver "/" $2 " --retry 2 -o " dest)}'
    tar xzvf "${OPENSHIFT_BIN}/openshift.tar.gz" --strip 1 -C "$OPENSHIFT_BIN"

    # Make openshift run from its untarred directory
    cat << EOF | sudo tee /usr/local/bin/openshift
#!/bin/bash
cd ${OPENSHIFT_BIN}
exec ./openshift "\$@"
EOF
    sudo chmod a+x /usr/local/bin/openshift

    # For releases >= 3.11 we'll need hyperkube as well
    cat << EOF | sudo tee /usr/local/bin/hyperkube
#!/bin/bash
cd ${OPENSHIFT_BIN}
exec ./hyperkube "\$@"
EOF
    sudo chmod a+x /usr/local/bin/hyperkube

    # Make oc easily available
    cat << EOF | sudo tee /usr/local/bin/oc
#!/bin/bash
CURL_CA_BUNDLE=${OPENSHIFT_DATA_DIR}/master/ca.crt \
KUBECONFIG=${OPENSHIFT_DATA_DIR}/master/admin.kubeconfig \
${OPENSHIFT_BIN}/oc "\$@"
EOF
    sudo chmod a+x /usr/local/bin/oc

    # Make kubectl easily available
    cat << EOF | sudo tee /usr/local/bin/kubectl
#!/bin/bash
CURL_CA_BUNDLE=${OPENSHIFT_DATA_DIR}/master/ca.crt \
KUBECONFIG=${OPENSHIFT_DATA_DIR}/master/admin.kubeconfig \
${OPENSHIFT_BIN}/kubectl "\$@"
EOF
    sudo chmod a+x /usr/local/bin/kubectl
}

# run_openshift_master
# Description: Starts the openshift master
function run_openshift_master {
    local cmd
    local pod_subnet_cidr
    local service_subnet_cidr
    local portal_net

    sudo install -d -o "$STACK_USER" "$OPENSHIFT_DATA_DIR"

    pod_subnet_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
        -c cidr -f value)
    service_subnet_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
        -c cidr -f value)

    if is_service_enabled octavia; then
        portal_net=$(split_subnet "$service_subnet_cidr" | cut -f1)
    else
        portal_net="$service_subnet_cidr"
    fi

    # Generate master config
    "${OPENSHIFT_BIN}/openshift" start master \
        "--etcd=http://${SERVICE_HOST}:${ETCD_PORT}" \
        "--network-cidr=${pod_subnet_cidr}" \
        "--portal-net=${portal_net}" \
        "--listen=0.0.0.0:${OPENSHIFT_API_PORT}" \
        "--master=${OPENSHIFT_API_URL}" \
        "--write-config=${OPENSHIFT_DATA_DIR}/master"

    # Enable externalIPs
    sed -i 's/externalIPNetworkCIDRs: null/externalIPNetworkCIDRs: ["0.0.0.0\/0"]/' "${OPENSHIFT_DATA_DIR}/master/master-config.yaml"

    # Reconfigure Kuryr-Kubernetes to use the certs generated
    iniset "$KURYR_CONFIG" kubernetes ssl_client_crt_file "${OPENSHIFT_DATA_DIR}/master/admin.crt"
    iniset "$KURYR_CONFIG" kubernetes ssl_client_key_file "${OPENSHIFT_DATA_DIR}/master/admin.key"
    iniset "$KURYR_CONFIG" kubernetes ssl_ca_crt_file "${OPENSHIFT_DATA_DIR}/master/ca.crt"

    sudo chown "${STACK_USER}:${STACK_USER}" -R "$OPENSHIFT_DATA_DIR"

    # Generate kubelet kubeconfig
    "${OPENSHIFT_BIN}/oc" adm create-kubeconfig \
        "--client-key=${OPENSHIFT_DATA_DIR}/master/master.kubelet-client.key" \
        "--client-certificate=${OPENSHIFT_DATA_DIR}/master/master.kubelet-client.crt" \
        "--certificate-authority=${OPENSHIFT_DATA_DIR}/master/ca.crt" \
        "--master=${OPENSHIFT_API_URL}" \
        "--kubeconfig=${OPENSHIFT_DATA_DIR}/master/master.kubelet-client.kubeconfig"

    cmd="/usr/local/bin/openshift start master \
        --config=${OPENSHIFT_DATA_DIR}/master/master-config.yaml"

    wait_for "etcd" "http://${SERVICE_HOST}:${ETCD_PORT}/v2/machines"

    run_process openshift-master "$cmd" root root
}

# make_admin_cluster_admin
# Description: Gives the system:admin permissions over the cluster
function make_admin_cluster_admin {
    wait_for "OpenShift API Server" "$OPENSHIFT_API_URL" \
        "${OPENSHIFT_DATA_DIR}/master/ca.crt"
    /usr/local/bin/oc adm policy add-cluster-role-to-user cluster-admin admin \
        "--config=${OPENSHIFT_DATA_DIR}/master/openshift-master.kubeconfig"
    /usr/local/bin/oc adm policy add-cluster-role-to-user cluster-admin system:openshift-node-admin \
        "--config=${OPENSHIFT_DATA_DIR}/master/openshift-master.kubeconfig"
}

# run_openshift_node
# Description: Starts the openshift node
function run_openshift_node {
    local command

    # install required CNI loopback driver
    sudo mkdir -p "$CNI_BIN_DIR"
    curl -L "$OPENSHIFT_CNI_BINARY_URL" | sudo tar -C "$CNI_BIN_DIR" -xzvf - ./loopback

    # Since 3.11 we should run upstream kubelet through hyperkube.
    declare -r min_no_node_ver="v3.11.0"
    if [[ "$min_no_node_ver" == "$(echo -e "${OPENSHIFT_BINARY_VERSION}\n${min_no_node_ver}" | sort -V | head -n 1)" ]]; then
        # generate kubelet configuration and certs
        local name
        name=$(hostname)
        oc adm create-node-config --node-dir ${OPENSHIFT_DATA_DIR}/node \
            --node ${name} \
            --hostnames ${name} \
            --certificate-authority ${OPENSHIFT_DATA_DIR}/master/ca.crt \
            --signer-cert ${OPENSHIFT_DATA_DIR}/master/ca.crt \
            --signer-key=${OPENSHIFT_DATA_DIR}/master/ca.key \
            --signer-serial ${OPENSHIFT_DATA_DIR}/master/ca.serial.txt \
            --node-client-certificate-authority=${OPENSHIFT_DATA_DIR}/master/ca.crt

        command="/usr/local/bin/hyperkube kubelet \
            --network-plugin=cni \
            --address=0.0.0.0 \
            --port=10250 \
            --cgroup-driver $(docker info|awk '/Cgroup/ {print $NF}') \
            --fail-swap-on=false \
            --allow-privileged=true \
            --v=2 \
            --tls-cert-file=${OPENSHIFT_DATA_DIR}/node/server.crt \
            --tls-private-key-file=${OPENSHIFT_DATA_DIR}/node/server.key"
    else
        command="/usr/local/bin/openshift start node \
            --enable=kubelet,plugins \
            --network-plugin=cni \
            --listen=https://0.0.0.0:8442"
    fi
    command+=" --kubeconfig=${OPENSHIFT_DATA_DIR}/master/master.kubelet-client.kubeconfig"

    # Link master config necessary for bootstrapping
    # TODO: This needs to be generated so we don't depend on it on multinode
    mkdir -p "${OPENSHIFT_BIN}/openshift.local.config"
    ln -fs "${OPENSHIFT_DATA_DIR}/master" "${OPENSHIFT_BIN}/openshift.local.config/master"
    mkdir -p "${OPENSHIFT_DATA_DIR}/node"
    ln -fs "${OPENSHIFT_DATA_DIR}/node" "${OPENSHIFT_BIN}/openshift.local.config/node"

    # Link stack CNI to location expected by openshift node
    sudo mkdir -p /etc/cni
    sudo rm -fr /etc/cni/net.d
    sudo rm -fr /opt/cni/bin
    sudo ln -fs "${CNI_CONF_DIR}" /etc/cni/net.d
    sudo mkdir -p /opt/cni
    sudo ln -fs "${CNI_BIN_DIR}" /opt/cni/bin

    run_process openshift-node "$command" root root
}

# lb_state
# Description: Returns the state of the load balancer
# Params:
#   id - Id or name of the loadbalancer the state of which needs to be
#        retrieved.
function lb_state {
    local lb_id

    lb_id="$1"
    openstack loadbalancer show "$lb_id" | \
        awk '/provisioning_status/ {print $4}'
}

function wait_for_lb {
    local lb_name
    local curr_time
    local time_diff
    local start_time
    local timeout

    lb_name="$1"
    timeout=${2:-$KURYR_WAIT_TIMEOUT}

    echo -n "Waiting for LB:$lb_name"
    start_time=$(date +%s)

    while [[ "$(lb_state "$lb_name")" != "ACTIVE" ]]; do
        echo -n "Waiting till LB=$lb_name is ACTIVE."
        curr_time=$(date +%s)
        time_diff=$((curr_time - start_time))
        [[ $time_diff -le $timeout ]] || die "Timed out waiting for $lb_name"

        sleep 5
    done
}
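
# Example (name and timeout are illustrative): block for up to 10 minutes
# until the load balancer reports provisioning_status ACTIVE:
#
#   wait_for_lb default/kubernetes 600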

# create_load_balancer
# Description: Creates an OpenStack Load Balancer with either neutron LBaaS
#              or Octavia
# Params:
#   lb_name:       Name to give to the load balancer.
#   lb_vip_subnet: Id or name of the subnet where lb_vip should be
#                  allocated.
#   project_id:    Id of the project where the load balancer should be
#                  allocated.
#   lb_vip:        Virtual IP to give to the load balancer - optional.
function create_load_balancer {
    local lb_name
    local lb_vip_subnet
    local lb_params
    local project_id

    lb_name="$1"
    lb_vip_subnet="$2"
    project_id="$3"

    lb_params=" --name $lb_name "
    if [ -z "$4" ]; then
        echo -n "create_load_balancer LB=$lb_name, lb_vip not provided."
    else
        lb_params+=" --vip-address $4"
    fi

    lb_params+=" --project ${project_id} --vip-subnet-id $lb_vip_subnet"
    openstack loadbalancer create $lb_params
}
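
# Example (all values hypothetical): create an LB on the service subnet with
# an explicit VIP; omit the last argument to let Octavia pick one:
#
#   create_load_balancer test-lb "$service_subnet_id" "$project_id" 10.0.0.100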

# create_load_balancer_listener
# Description: Creates an OpenStack Load Balancer Listener for the specified
#              Load Balancer with either neutron LBaaS or Octavia
# Params:
#   name:          Name to give to the load balancer listener.
#   protocol:      Whether it is HTTP, HTTPS, TCP, etc.
#   port:          The TCP port number to listen to.
#   data_timeouts: Octavia's timeouts for client and server inactivity.
#   lb:            Id or name of the Load Balancer we want to add the Listener to.
#   project_id:    Id of the project where this listener belongs to.
function create_load_balancer_listener {
    local name
    local protocol
    local port
    local lb
    local data_timeouts
    local max_timeout
    local project_id

    name="$1"
    protocol="$2"
    port="$3"
    lb="$4"
    project_id="$5"
    data_timeouts="$6"

    max_timeout=1200
    # Octavia needs the LB to be active for the listener
    wait_for_lb $lb $max_timeout

    openstack loadbalancer listener create --name "$name" \
        --protocol "$protocol" \
        --protocol-port "$port" \
        --timeout-client-data "$data_timeouts" \
        --timeout-member-data "$data_timeouts" \
        "$lb"
}

# create_load_balancer_pool
# Description: Creates an OpenStack Load Balancer Pool for the specified
#              Load Balancer listener with either neutron LBaaS or Octavia
# Params:
#   name:       Name to give to the load balancer pool.
#   protocol:   Whether it is HTTP, HTTPS, TCP, etc.
#   algorithm:  Load Balancing algorithm to use.
#   listener:   Id or name of the Load Balancer Listener we want to add the
#               pool to.
#   project_id: Id of the project where this pool belongs to.
#   lb:         Id or name of the Load Balancer we want to add the pool to
#               (optional).
function create_load_balancer_pool {
    local name
    local protocol
    local algorithm
    local listener
    local lb
    local project_id

    name="$1"
    protocol="$2"
    algorithm="$3"
    listener="$4"
    project_id="$5"
    lb="$6"

    # We must wait for the LB to be active before we can put a Pool for it
    wait_for_lb $lb

    openstack loadbalancer pool create --name "$name" \
        --listener "$listener" \
        --protocol "$protocol" \
        --lb-algorithm "$algorithm"
}

# create_load_balancer_member
# Description: Creates an OpenStack load balancer pool member
# Params:
#   name:       Name to give to the load balancer pool member.
#   address:    IP address of the member.
#   port:       Port number the pool member is listening on.
#   pool:       Id or name of the Load Balancer pool this member belongs to.
#   subnet:     Id or name of the subnet the member address belongs to.
#   lb:         Id or name of the load balancer the member belongs to.
#   project_id: Id of the project where this pool belongs to.
function create_load_balancer_member {
    local name
    local address
    local port
    local pool
    local subnet
    local lb
    local project_id

    name="$1"
    address="$2"
    port="$3"
    pool="$4"
    subnet="$5"
    lb="$6"
    project_id="$7"

    # We must wait for the pool creation update before we can add members
    wait_for_lb $lb

    openstack loadbalancer member create --name "$name" \
        --address "$address" \
        --protocol-port "$port" \
        "$pool"
}

# split_subnet
# Description: Splits a subnet in two subnets that constitute its halves
# Params:
#   cidr: Subnet CIDR to split
# Returns: tab separated CIDRs of the two halves.
function split_subnet {
    # precondition: The passed cidr must be of a prefix <= 30
    python3 - <<EOF "$@"
import sys

from netaddr import IPNetwork
import six

n = IPNetwork(six.text_type(sys.argv[1]))
first, last = n.subnet(n.prefixlen+1)

print("%s\\t%s" % (first, last))
EOF
}
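
# Worked example: splitting a /24 yields its two /25 halves, tab separated:
#
#   split_subnet 10.0.0.0/24   # -> 10.0.0.0/25    10.0.0.128/25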

# get_loadbalancer_attribute
# Description: Get load balancer attribute
# Params:
#   lb_name: Load balancer name
#   lb_attr: attribute name
function get_loadbalancer_attribute {
    local lb_name
    local lb_attr

    lb_name="$1"
    lb_attr="$2"

    openstack loadbalancer show "$lb_name" -c "$lb_attr" -f value
}

# openshift_node_set_dns_config
# Description: Configures Openshift node's DNS section atomically
# Params:
#   node_conf_path:  path to the node config
#   upstream_dns_ip: IP of the upstream DNS
function openshift_node_set_dns_config {
    local openshift_dnsmasq_recursive_resolv
    local upstream_dns_ip

    openshift_dnsmasq_recursive_resolv="${OPENSHIFT_DATA_DIR}/node/resolv.conf"
    upstream_dns_ip="$2"

    cat > "$openshift_dnsmasq_recursive_resolv" << EOF
nameserver $upstream_dns_ip
EOF

    python3 - <<EOF "$@"
import os
import sys
import tempfile
import traceback

import yaml

if len(sys.argv) < 3:
    sys.exit(1)

node_conf_path = sys.argv[1]
conf_dir = os.path.dirname(node_conf_path)

def dns_configure_copy(conf):
    new_conf = conf.copy()
    # 127.0.0.1 is used by unbound in gates, let's use another localhost addr
    new_conf['dnsBindAddress'] = '127.0.0.11:53'
    new_conf['dnsDomain'] = 'cluster.local'
    new_conf['dnsIP'] = '0.0.0.0'
    new_conf['dnsRecursiveResolvConf'] = '${openshift_dnsmasq_recursive_resolv}'
    return new_conf

old_config = {}

while True:
    tp = tempfile.NamedTemporaryFile(dir=conf_dir, delete=False, mode='w')
    try:
        with open(node_conf_path) as node_conf:
            current_conf = yaml.load(node_conf.read())
        if current_conf == old_config:
            tp.write(yaml.dump(new_conf, default_flow_style=False))
            tp.flush()
            os.fsync(tp.fileno())
            tp.close()
            os.rename(tp.name, node_conf_path)
            break
        else:
            new_conf = dns_configure_copy(current_conf)
            old_config = current_conf
            tp.close()
            os.unlink(tp.name)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        tp.close()
        os.unlink(tp.name)
EOF
}

# run_openshift_dnsmasq
# Description: Configures and runs a dnsmasq instance to be run as the node
#              DNS server that will choose between openshift's DNS and the
#              upstream DNS depending on the domain
# Params:
#   upstream_dns_ip: IP of the upstream DNS
function run_openshift_dnsmasq {
    local dnsmasq_binary
    local cmd
    local upstream_dns_ip
    local openshift_dnsmasq_conf_path
    local search_domains

    upstream_dns_ip="$1"
    openshift_dnsmasq_conf_path="${OPENSHIFT_DATA_DIR}/node/node_dnsmasq.conf"

    install_package dnsmasq

    cat > "$openshift_dnsmasq_conf_path" << EOF
server=${upstream_dns_ip}
no-resolv
domain-needed
no-negcache
max-cache-ttl=1
# Enable dbus so openshift dns can use it to set cluster.local rules
enable-dbus
dns-forward-max=10000
cache-size=10000
bind-dynamic
# Do not bind to localhost addresses 127.0.0.1/8 (where skydns binds)
except-interface=lo
EOF

    # Open port 53 so pods can reach the DNS server
    sudo iptables -I INPUT 1 -p udp -m udp --dport 53 -m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT

    dnsmasq_binary="$(command -v dnsmasq)"
    cmd="${dnsmasq_binary} -k -C ${openshift_dnsmasq_conf_path}"
    run_process openshift-dnsmasq "$cmd" root root

    sudo cp /etc/resolv.conf /etc/resolv.conf.orig
    search_domains=$(awk '/search/ {for (i=2; i<NF; i++) printf $i " "; print $NF}' /etc/resolv.conf.orig)
    search_domains="cluster.local ${search_domains}"
    echo "search ${search_domains}" | sudo tee /etc/resolv.conf.openshift_devstack
    echo "options ndots:4" | sudo tee --append /etc/resolv.conf.openshift_devstack
    echo "nameserver ${HOST_IP}" | sudo tee --append /etc/resolv.conf.openshift_devstack
    grep "nameserver" /etc/resolv.conf.orig | sudo tee --append /etc/resolv.conf.openshift_devstack
    sudo mv /etc/resolv.conf.openshift_devstack /etc/resolv.conf
}

function reinstate_old_dns_config {
    sudo mv /etc/resolv.conf.orig /etc/resolv.conf
}

# run_openshift_dns
# Description: Starts openshift's DNS
function run_openshift_dns {
    local command
    command="/usr/local/bin/openshift start network \
        --enable=dns \
        --config=${OPENSHIFT_DATA_DIR}/node/node-config.yaml \
        --kubeconfig=${OPENSHIFT_DATA_DIR}/node/node.kubeconfig"
    run_process openshift-dns "$command" root root
}

# cleanup_kuryr_devstack_iptables
# Description: Finds all the iptables rules we set and deletes them
function cleanup_kuryr_devstack_iptables {
    local chains

    chains=( INPUT FORWARD OUTPUT )
    for chain in ${chains[@]}; do
        sudo iptables -n -L "$chain" -v --line-numbers | \
            awk -v chain="$chain" \
                '/kuryr-devstack/ {print "sudo iptables -D " chain " " $1}' | \
            tac | bash /dev/stdin
    done
}
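
# Note: the awk pipeline above prints one "sudo iptables -D <chain> <rulenum>"
# command per rule tagged with a kuryr-devstack comment; piping through tac
# deletes from the highest rule number down so earlier deletions do not shift
# the numbers of the rules still to be removed.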

# run_openshift_registry
# Description: Deploys Openshift's registry as a DeploymentConfig
function run_openshift_registry {
    local registry_yaml
    local registry_ip="$1"

    mkdir -p "${OPENSHIFT_DATA_DIR}/registry"
    registry_yaml=$(mktemp)

    oc adm registry \
        --config=${OPENSHIFT_DATA_DIR}/master/admin.kubeconfig \
        --service-account=registry \
        --mount-host=${OPENSHIFT_DATA_DIR}/registry \
        --tls-certificate=${OPENSHIFT_DATA_DIR}/master/registry.crt \
        --tls-key=${OPENSHIFT_DATA_DIR}/master/registry.key \
        -o yaml > $registry_yaml

    python3 - <<EOF "$registry_yaml" "$registry_ip"
import copy
import os
import sys
import tempfile
import traceback

import yaml

if len(sys.argv) < 3:
    sys.exit(1)

registry_conf_path = sys.argv[1]
registry_cluster_ip = sys.argv[2]
conf_dir = os.path.dirname(registry_conf_path)

def service_configure_registry_clusterIP(conf):
    new_conf = copy.deepcopy(conf)
    for object in new_conf['items']:
        if object['kind'] == 'Service':
            object['spec']['clusterIP'] = registry_cluster_ip
    return new_conf

old_conf = {}

while True:
    tp = tempfile.NamedTemporaryFile(dir=conf_dir, delete=False, mode='w')
    try:
        with open(registry_conf_path) as registry_conf:
            current_conf = yaml.load(registry_conf.read())
        if current_conf == old_conf:
            tp.write(yaml.dump(new_conf, default_flow_style=False))
            tp.flush()
            os.fsync(tp.fileno())
            tp.close()
            os.rename(tp.name, registry_conf_path)
            break
        else:
            new_conf = service_configure_registry_clusterIP(current_conf)
            old_conf = current_conf
            tp.close()
            os.unlink(tp.name)
    except Exception as e:
        traceback.print_exc(file=sys.stdout)
        tp.close()
        os.unlink(tp.name)
EOF

    oc adm policy add-scc-to-user privileged -z registry -n default
    oc create -f "$registry_yaml"
}

# oc_generate_server_certificates
# Description: Generates and CA signs openshift cert & key for server
# Params:
#   - name:      filename without extension of the cert and key
#   - hostnames: the comma separated hostnames to sign the cert for
function oc_generate_server_certificates {
    local name
    local cert_hostnames

    name="$1"
    cert_hostnames="$2"

    oc adm ca create-server-cert \
        --signer-cert="${OPENSHIFT_DATA_DIR}/master/ca.crt" \
        --signer-key="${OPENSHIFT_DATA_DIR}/master/ca.key" \
        --signer-serial="${OPENSHIFT_DATA_DIR}/master/ca.serial.txt" \
        --hostnames="$cert_hostnames" \
        --cert="${OPENSHIFT_DATA_DIR}/master/${name}.crt" \
        --key="${OPENSHIFT_DATA_DIR}/master/${name}.key"
}

# docker_install_ca_certs
# Description: Installs registry openshift_ca_certs to docker
# Params:
#   - registry_hostnames: the comma separated hostnames to give the CA for
function docker_install_ca_certs {
    local registry_hostnames
    local destdir

    # TODO(dulek): Support for CRI-O.
    registry_hostnames=(${1//,/ })
    for hostname in ${registry_hostnames[@]}; do
        destdir="/etc/docker/certs.d/${hostname}:5000"
        sudo install -d -o "$STACK_USER" "$destdir"
        sudo install -o "$STACK_USER" "${OPENSHIFT_DATA_DIR}/master/ca.crt" "${destdir}/"
    done
}

function _nth_cidr_ip {
    local cidr
    local position

    cidr="$1"
    position="$2"

    python3 - <<EOF "$cidr" "$position"
import sys

from netaddr import IPAddress, IPNetwork

cmdname, cidr, position = sys.argv
n = IPNetwork(cidr)
print("%s" % IPAddress(n.first + int(position)))
EOF
}
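
# Worked example: the Nth address counted up from the network address, e.g.
# the registry clusterIP picked below:
#
#   _nth_cidr_ip 10.0.0.0/24 2   # -> 10.0.0.2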

function configure_and_run_registry {
    local service_cidr
    local registry_ip
    local hostnames

    # TODO(dulek): Support for CRI-O.
    service_cidr=$(openstack --os-cloud devstack-admin \
        --os-region "$REGION_NAME" \
        subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
        -c cidr -f value)
    registry_ip=$(_nth_cidr_ip "$service_cidr" 2)
    hostnames="docker-registry.default.svc.cluster.local,docker-registry.default.svc,${registry_ip}"

    docker_install_ca_certs "$hostnames"
    oc_generate_server_certificates registry "$hostnames"
    run_openshift_registry "$registry_ip"
}