Browse Source

VirtualBMC support for tripleo-quickstart

This patch replaces the use of the pxe_ssh driver in Ironic in
favor of pxe_ipmitool + VirtualBMC*. The main motivation for this patch
is that the pxe_ssh driver is already marked as unsupported and will be
removed from Ironic upstream at the end of the Ocata release.

Older releases (Liberty, Mitaka and Newton) will continue to rely on
the pxe_ssh driver.

* VirtualBMC is a proxy that converts IPMI commands to libvirt calls

Partial-Bug: #1645733
Change-Id: Iaaa9fc1fd593cbce045456f4341c461a8eea82ad
tags/2.0.0
Lucas Alvares Gomes 2 years ago
parent
commit
c8c6e57ca2

+ 3
- 0
config/general_config/ceph.yml View File

@@ -4,12 +4,15 @@
4 4
 overcloud_nodes:
5 5
   - name: control_0
6 6
     flavor: control
7
+    virtualbmc_port: 6230
7 8
 
8 9
   - name: compute_0
9 10
     flavor: compute
11
+    virtualbmc_port: 6231
10 12
 
11 13
   - name: ceph_0
12 14
     flavor: ceph
15
+    virtualbmc_port: 6232
13 16
 
14 17
 # Tell tripleo how we want things done.
15 18
 extra_args: >-

+ 6
- 0
config/general_config/ha.yml View File

@@ -24,13 +24,19 @@ undercloud_generate_service_certificate: True
24 24
 overcloud_nodes:
25 25
   - name: control_0
26 26
     flavor: control
27
+    virtualbmc_port: 6230
28
+
27 29
   - name: control_1
28 30
     flavor: control
31
+    virtualbmc_port: 6231
32
+
29 33
   - name: control_2
30 34
     flavor: control
35
+    virtualbmc_port: 6232
31 36
 
32 37
   - name: compute_0
33 38
     flavor: compute
39
+    virtualbmc_port: 6233
34 40
 
35 41
 # We don't need introspection in a virtual environment (because we are
36 42
 # creating all the "hardware" we really know the necessary

+ 10
- 0
config/general_config/ha_big.yml View File

@@ -21,17 +21,27 @@ default_vcpu: 1
21 21
 overcloud_nodes:
22 22
   - name: control_0
23 23
     flavor: control
24
+    virtualbmc_port: 6230
25
+
24 26
   - name: control_1
25 27
     flavor: control
28
+    virtualbmc_port: 6231
29
+
26 30
   - name: control_2
27 31
     flavor: control
32
+    virtualbmc_port: 6232
28 33
 
29 34
   - name: compute_0
30 35
     flavor: compute
36
+    virtualbmc_port: 6233
37
+
31 38
   - name: compute_1
32 39
     flavor: compute
40
+    virtualbmc_port: 6234
41
+
33 42
   - name: compute_2
34 43
     flavor: compute
44
+    virtualbmc_port: 6235
35 45
 
36 46
 # We don't need introspection in a virtual environment (because we are
37 47
 # creating all the "hardware" we really know the necessary

+ 2
- 0
config/general_config/minimal.yml View File

@@ -7,9 +7,11 @@ step_introspect: true
7 7
 overcloud_nodes:
8 8
   - name: control_0
9 9
     flavor: control
10
+    virtualbmc_port: 6230
10 11
 
11 12
   - name: compute_0
12 13
     flavor: compute
14
+    virtualbmc_port: 6231
13 15
 
14 16
 # Tell tripleo how we want things done.
15 17
 extra_args: >-

+ 2
- 0
config/general_config/minimal_no_netiso.yml View File

@@ -6,9 +6,11 @@ step_introspect: true
6 6
 overcloud_nodes:
7 7
   - name: control_0
8 8
     flavor: control
9
+    virtualbmc_port: 6230
9 10
 
10 11
   - name: compute_0
11 12
     flavor: compute
13
+    virtualbmc_port: 6231
12 14
 
13 15
 network_isolation: false
14 16
 

+ 2
- 0
config/general_config/minimal_pacemaker.yml View File

@@ -5,9 +5,11 @@
5 5
 overcloud_nodes:
6 6
   - name: control_0
7 7
     flavor: control
8
+    virtualbmc_port: 6230
8 9
 
9 10
   - name: compute_0
10 11
     flavor: compute
12
+    virtualbmc_port: 6231
11 13
 
12 14
 # Tell tripleo how we want things done.
13 15
 extra_args: >-

+ 4
- 0
roles/common/defaults/main.yml View File

@@ -152,3 +152,7 @@ devmode: false
152 152
 
153 153
 # Tuned profile set while provisioning remote hosts to optimize for deployment
154 154
 tuned_profile: 'virtual-host'
155
+
156
+# This is the name of the user the `provision` role will create on the
157
+# remote host.
158
+non_root_user: stack

+ 12
- 4
roles/libvirt/setup/overcloud/templates/instackenv.json.j2 View File

@@ -6,8 +6,18 @@
6 6
   {% for node in overcloud_nodes %}
7 7
     {
8 8
       "name": "{{ node.name|replace('_', '-') }}",
9
-      "pm_password": {{ virt_power_key_pvt | to_nice_json }},
10
-      "pm_type": "pxe_ssh",
9
+      {% if release in ['liberty', 'mitaka', 'newton' ] %}
10
+        "pm_password": {{ virt_power_key_pvt | to_nice_json }},
11
+        "pm_type": "pxe_ssh",
12
+        "pm_user": "{{ ansible_user_id }}",
13
+        "pm_addr": "{{ host_ip }}",
14
+      {% else %}
15
+        "pm_password": "password",
16
+        "pm_type": "pxe_ipmitool",
17
+        "pm_user": "admin",
18
+        "pm_addr": "127.0.0.1",
19
+        "pm_port": "{{ node.virtualbmc_port }}",
20
+      {% endif %}
11 21
       "mac": [
12 22
         "{{ node_mac_map.get(node.name).get('overcloud') }}"
13 23
       ],
@@ -15,8 +25,6 @@
15 25
       "memory": "{{ flavors[node.flavor].memory }}",
16 26
       "disk": "{{ flavors[node.flavor].disk }}",
17 27
       "arch": "{{ libvirt_arch }}",
18
-      "pm_user": "{{ ansible_user_id }}",
19
-      "pm_addr": "{{ host_ip }}",
20 28
       "capabilities": "profile:{{ node.flavor }},boot_option:local"
21 29
     }
22 30
     {% if not loop.last %}

+ 21
- 1
roles/libvirt/setup/undercloud/tasks/main.yml View File

@@ -86,6 +86,14 @@
86 86
       src: "{{ undercloud_key }}.pub"
87 87
       dest: "{{ working_dir }}/id_rsa_undercloud.pub"
88 88
 
89
+  # Copy the virt host private key to `$HOME/.ssh/id_rsa_virt_power` for
90
+  # VirtualBMC be able to access the hypervisor where the VMs are located
91
+  - name: Copy virt host ssh private key to working dir
92
+    when: release not in ['liberty', 'mitaka', 'newton']
93
+    copy:
94
+      src: "{{ virt_power_key }}"
95
+      dest: "{{ working_dir }}/id_rsa_virt_power"
96
+
89 97
   # Copy the public key to `$HOME/.ssh/authorized_keys` for the `root`
90 98
   # and `stack` user on the undercloud.
91 99
   - name: Inject undercloud ssh public key to appliance
@@ -106,6 +114,19 @@
106 114
         owner: stack
107 115
         group: stack
108 116
 
117
+  # This copies the `id_rsa_virt_power` private key that we generated
118
+  # in the overcloud setup role to the undercloud host to be used by
119
+  # VirtualBMC+libvirt to access the virthost.
120
+  - name: Copy id_rsa_virt_power to appliance
121
+    when: release not in ['liberty', 'mitaka', 'newton']
122
+    command: >
123
+      virt-customize -a {{ working_dir }}/undercloud.qcow2
124
+      --upload '{{ working_dir }}/id_rsa_virt_power:/root/.ssh/id_rsa_virt_power'
125
+      --run-command 'chown root:root /root/.ssh/id_rsa_virt_power'
126
+      --run-command 'chmod 0600 /root/.ssh/id_rsa_virt_power'
127
+    environment:
128
+      LIBGUESTFS_BACKEND: direct
129
+
109 130
   - name: Create undercloud customize script
110 131
     template:
111 132
       src: "{{ undercloud_customize_script }}"
@@ -308,4 +329,3 @@
308 329
   template:
309 330
     src: ssh.config.j2
310 331
     dest: "{{ local_working_dir }}/ssh.config.ansible"
311
-

+ 0
- 4
roles/provision/defaults/main.yml View File

@@ -1,7 +1,3 @@
1
-# This is the name of the user the `provision` role will create on the
2
-# remote host.
3
-non_root_user: stack
4
-
5 1
 # The path to an ssh key (that we will generate) that can be used to
6 2
 # log in to the virt host.
7 3
 virt_host_key: "{{ local_working_dir }}/id_rsa_virt_host"

+ 9
- 0
roles/provision/remote/tasks/main.yml View File

@@ -40,6 +40,15 @@
40 40
     shell: /bin/bash
41 41
   become: true
42 42
 
43
+- name: Get the non-root user UID
44
+  command: "id {{ non_root_user }} -u"
45
+  register: non_root_user_uid_output
46
+  changed_when: false
47
+
48
+- name: Save the non-root user UID
49
+  set_fact:
50
+    non_root_user_uid: "{{ non_root_user_uid_output.stdout }}"
51
+
43 52
 # Install the public component of `virt_host_key` in the
44 53
 # `.ssh/authorized_keys` file for the non-root user.
45 54
 - name: Configure non-root user authorized_keys

+ 47
- 0
roles/tripleo/undercloud/tasks/post-install.yml View File

@@ -24,3 +24,50 @@
24 24
     dest: "{{ local_working_dir }}/stackrc"
25 25
     line: "export OS_PASSWORD={{ undercloud_admin_password.stdout }}"
26 26
     regexp: "OS_PASSWORD"
27
+
28
+- name: Install VirtualBMC package
29
+  when: release not in ['liberty', 'mitaka', 'newton']
30
+  package:
31
+    name: "python2-virtualbmc"
32
+    state: present
33
+    use: yum
34
+  become: true
35
+
36
+- name: Create the Virtual BMCs
37
+  when: release not in ['liberty', 'mitaka', 'newton']
38
+  command: >
39
+    vbmc add {{item.name}} --port {{item.virtualbmc_port}} --libvirt-uri "qemu+ssh://{{ non_root_user }}@{{ networks[0].address }}/session?socket=/run/user/{{ hostvars[groups['virthost'][0]].non_root_user_uid }}/libvirt/libvirt-sock&keyfile=/root/.ssh/id_rsa_virt_power&no_verify=1&no_tty=1"
40
+  with_items: "{{ overcloud_nodes }}"
41
+  become: true
42
+  become_user: root
43
+  changed_when: false
44
+
45
+# TODO(lucasagomes): The service file should be included in the
46
+#                    virtualbmc RPM package.
47
+- name: Create the VirtualBMC systemd service
48
+  when: release not in ['liberty', 'mitaka', 'newton']
49
+  copy:
50
+    mode: 0664
51
+    dest: "/usr/lib/systemd/system/virtualbmc.service"
52
+    content: |
53
+      [Unit]
54
+      Description=VirtualBMC service
55
+      After=network.target
56
+
57
+      [Service]
58
+      Type=oneshot
59
+      ExecStart=/bin/bash -c 'for bmc in $(ls /root/.vbmc/); do vbmc start $bmc; done'
60
+      ExecStop=/bin/bash -c 'for bmc in $(ls /root/.vbmc/); do vbmc stop $bmc; done'
61
+      RemainAfterExit=yes
62
+
63
+      [Install]
64
+      WantedBy=multi-user.target
65
+  become: true
66
+
67
+- name: Start the Virtual BMCs
68
+  when: release not in ['liberty', 'mitaka', 'newton']
69
+  service:
70
+    name: virtualbmc
71
+    state: started
72
+    enabled: true
73
+  become: true

Loading…
Cancel
Save