Adds support for clusterctl move of CAPI and BMO resources
from the ephemeral to the workload cluster.

Change-Id: Ib6d31282056468d5a153177dfb33ce4a55514ab3
Arvinderpal Wander 2020-05-01 12:35:35 -07:00 committed by Dmitry Ukov
parent 1e6c449a8c
commit 3edf72eeb6
12 changed files with 1335 additions and 54 deletions


@@ -44,6 +44,7 @@ func NewClusterCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {
 	}
 	clusterRootCmd.AddCommand(NewInitCommand(rootSettings))
+	clusterRootCmd.AddCommand(NewMoveCommand(rootSettings))
 	return clusterRootCmd
 }

cmd/cluster/move.go (new file, 60 lines)

@@ -0,0 +1,60 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cluster

import (
	"github.com/spf13/cobra"

	clusterctlcmd "opendev.org/airship/airshipctl/pkg/clusterctl/cmd"
	"opendev.org/airship/airshipctl/pkg/environment"
)

const (
	moveLong = `
Move Cluster API objects, provider specific objects and all dependencies to the target cluster.
Note: The destination cluster MUST have the required provider components installed.
`

	moveExample = `
Move Cluster API objects, provider specific objects and all dependencies to the target cluster.
airshipctl cluster move --target-context <context name>
`
)

// NewMoveCommand creates a command to move capi and bmo resources to the target cluster
func NewMoveCommand(rootSettings *environment.AirshipCTLSettings) *cobra.Command {
	var toKubeconfigContext string

	moveCmd := &cobra.Command{
		Use:     "move",
		Short:   "Move Cluster API objects, provider specific objects and all dependencies to the target cluster",
		Long:    moveLong[1:],
		Example: moveExample,
		RunE: func(cmd *cobra.Command, args []string) error {
			command, err := clusterctlcmd.NewCommand(rootSettings)
			if err != nil {
				return err
			}
			return command.Move(toKubeconfigContext)
		},
	}

	moveCmd.Flags().StringVar(&toKubeconfigContext, "target-context", "",
		"Context to be used within the kubeconfig file for the target cluster. If empty, current context will be used.")

	return moveCmd
}


@@ -7,6 +7,7 @@ Usage:
 Available Commands:
   help        Help about any command
   init        Deploy cluster-api provider components
+  move        Move Cluster API objects, provider specific objects and all dependencies to the target cluster

 Flags:
   -h, --help   help for cluster


@@ -26,4 +26,5 @@ such as getting status and deploying initial infrastructure.
 * [airshipctl](airshipctl.md) - A unified entrypoint to various airship components
 * [airshipctl cluster init](airshipctl_cluster_init.md) - Deploy cluster-api provider components
+* [airshipctl cluster move](airshipctl_cluster_move.md) - Move Cluster API objects, provider specific objects and all dependencies to the target cluster


@@ -0,0 +1,44 @@
## airshipctl cluster move

Move Cluster API objects, provider specific objects and all dependencies to the target cluster

### Synopsis

Move Cluster API objects, provider specific objects and all dependencies to the target cluster.
Note: The destination cluster MUST have the required provider components installed.

```
airshipctl cluster move [flags]
```

### Examples

```
Move Cluster API objects, provider specific objects and all dependencies to the target cluster.
airshipctl cluster move --target-context <context name>
```

### Options

```
  -h, --help                    help for move
      --target-context string   Context to be used within the kubeconfig file for the target cluster. If empty, current context will be used.
```

### Options inherited from parent commands

```
      --airshipconf string   Path to file for airshipctl configuration. (default "$HOME/.airship/config")
      --debug                enable verbose output
      --kubeconfig string    Path to kubeconfig associated with airshipctl configuration. (default "$HOME/.airship/kubeconfig")
```

### SEE ALSO

* [airshipctl cluster](airshipctl_cluster.md) - Manage Kubernetes clusters

go.mod (33 lines changed)

@@ -3,42 +3,41 @@ module opendev.org/airship/airshipctl

 go 1.13

 require (
-	github.com/Masterminds/goutils v1.1.0 // indirect
-	github.com/Masterminds/semver v1.5.0 // indirect
 	github.com/Masterminds/sprig v2.22.0+incompatible
-	github.com/Microsoft/go-winio v0.4.12 // indirect
 	github.com/chai2010/gettext-go v0.0.0-20170215093142-bf70f2a70fb1 // indirect
-	github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
+	github.com/docker/docker v1.4.2-0.20200203170920-46ec8731fbce
-	github.com/docker/go-connections v0.3.0 // indirect
 	github.com/docker/spdystream v0.0.0-20181023171402-6480d4af844c // indirect
 	github.com/elazarl/goproxy v0.0.0-20190421051319-9d40249d3c2f // indirect
 	github.com/elazarl/goproxy/ext v0.0.0-20190421051319-9d40249d3c2f // indirect
-	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 // indirect
 	github.com/go-git/go-billy/v5 v5.0.0
 	github.com/go-git/go-git-fixtures/v4 v4.0.1
 	github.com/go-git/go-git/v5 v5.0.0
 	github.com/gorilla/mux v1.7.4 // indirect
 	github.com/gregjones/httpcache v0.0.0-20190212212710-3befbb6ad0cc // indirect
 	github.com/huandu/xstrings v1.3.1 // indirect
-	github.com/mitchellh/copystructure v1.0.0 // indirect
+	github.com/metal3-io/baremetal-operator v0.0.0-20200501205115-2c0dc9997bfa
-	github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c // indirect
+	github.com/onsi/gomega v1.9.0
-	github.com/opencontainers/image-spec v1.0.1 // indirect
+	github.com/pkg/errors v0.9.1
 	github.com/spf13/cobra v0.0.6
 	github.com/stretchr/testify v1.4.0
-	k8s.io/api v0.17.3
+	k8s.io/api v0.17.4
-	k8s.io/apiextensions-apiserver v0.17.3
+	k8s.io/apiextensions-apiserver v0.17.4
-	k8s.io/apimachinery v0.17.3
+	k8s.io/apimachinery v0.17.4
-	k8s.io/cli-runtime v0.17.3
+	k8s.io/cli-runtime v0.17.4
-	k8s.io/client-go v11.0.0+incompatible
+	k8s.io/client-go v12.0.0+incompatible
-	k8s.io/kubectl v0.17.3
+	k8s.io/kubectl v0.17.4
 	opendev.org/airship/go-redfish v0.0.0-20200318103738-db034d1d753a
 	opendev.org/airship/go-redfish/client v0.0.0-20200318103738-db034d1d753a
 	sigs.k8s.io/cluster-api v0.3.5
+	sigs.k8s.io/controller-runtime v0.5.2
 	sigs.k8s.io/kustomize/api v0.3.1
 	sigs.k8s.io/yaml v1.2.0
 )

+replace github.com/russross/blackfriday => github.com/russross/blackfriday v1.5.2
+
+// Required by baremetal-operator:
 replace (
-	github.com/russross/blackfriday => github.com/russross/blackfriday v1.5.2
+	github.com/Azure/go-autorest => github.com/Azure/go-autorest v13.3.2+incompatible // Required by OLM
-	k8s.io/client-go => k8s.io/client-go v0.0.0-20191114101535-6c5935290e33
+	k8s.io/client-go => k8s.io/client-go v0.17.4
 )

go.sum (718 lines changed)

File diff suppressed because it is too large.


@@ -34,6 +34,7 @@ type Clusterctl struct {
 	Providers   []*Provider  `json:"providers,omitempty"`
 	InitOptions *InitOptions `json:"init-options,omitempty"`
+	MoveOptions *MoveOptions `json:"move-options,omitempty"`
 }

 // Provider is part of clusterctl config
@@ -79,3 +80,9 @@ func (c *Clusterctl) Provider(name string, providerType clusterctlv1.ProviderType
 	}
 	return nil
 }
+
+// MoveOptions carries the options supported by move.
+type MoveOptions struct {
+	// The namespace where the workload cluster is hosted. If unspecified, the current namespace of the source cluster context is used.
+	Namespace string `json:"namespace,omitempty"`
+}
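For illustration only, a minimal sketch (not part of this change) of how MoveOptions might be populated programmatically; the helper name and the "target-infra" namespace are assumptions, only the MoveOptions type and its Namespace field come from the diff above.

```go
// sampleMoveOptions is a hypothetical helper showing MoveOptions in use:
// setting Namespace pins the move to that namespace, while leaving it empty
// lets Move detect the namespace from the source cluster context at run time.
func sampleMoveOptions() *MoveOptions {
	return &MoveOptions{
		Namespace: "target-infra", // assumed example namespace
	}
}
```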


@@ -34,12 +34,14 @@ var _ Interface = &Client{}

 // Interface is abstraction to Clusterctl
 type Interface interface {
 	Init(kubeconfigPath, kubeconfigContext string) error
+	Move(fromKubeconfigPath, fromKubeconfigContext, toKubeconfigPath, toKubeconfigContext, namespace string) error
 }

 // Client Implements interface to Clusterctl
 type Client struct {
 	clusterctlClient clusterctlclient.Client
 	initOptions      clusterctlclient.InitOptions
+	moveOptions      clusterctlclient.MoveOptions
 }

 // NewClient returns instance of clusterctl client


@@ -0,0 +1,168 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client

import (
	"context"

	"opendev.org/airship/airshipctl/pkg/log"

	bmoapis "github.com/metal3-io/baremetal-operator/pkg/apis"
	bmh "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1"
	"github.com/pkg/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clusterctlclient "sigs.k8s.io/cluster-api/cmd/clusterctl/client"
	"sigs.k8s.io/cluster-api/cmd/clusterctl/client/cluster"
	"sigs.k8s.io/controller-runtime/pkg/client"
)

func init() {
	//nolint:errcheck
	bmoapis.AddToScheme(cluster.Scheme)
}
// Move implements interface to Clusterctl
func (c *Client) Move(fromKubeconfigPath, fromKubeconfigContext,
	toKubeconfigPath, toKubeconfigContext, namespace string) error {
	ctx := context.TODO()
	var err error

	// ephemeral cluster client
	pFrom := cluster.New(cluster.Kubeconfig{
		Path:    fromKubeconfigPath,
		Context: fromKubeconfigContext}, nil).Proxy()
	cFrom, err := pFrom.NewClient()
	if err != nil {
		return errors.Wrap(err, "failed to create ephemeral cluster client")
	}

	// target cluster client
	pTo := cluster.New(cluster.Kubeconfig{
		Path:    toKubeconfigPath,
		Context: toKubeconfigContext}, nil).Proxy()
	cTo, err := pTo.NewClient()
	if err != nil {
		return errors.Wrap(err, "failed to create target cluster client")
	}

	// If namespace is empty, try to detect it.
	if namespace == "" {
		var currentNamespace string
		currentNamespace, err = pFrom.CurrentNamespace()
		if err != nil {
			return err
		}
		namespace = currentNamespace
	}

	// Pause
	err = pauseUnpauseBMHs(ctx, cFrom, namespace, true)
	if err != nil {
		return errors.Wrap(err, "failed to pause BareMetalHost objects")
	}

	// clusterctl move
	c.moveOptions = clusterctlclient.MoveOptions{
		FromKubeconfig: clusterctlclient.Kubeconfig{Path: fromKubeconfigPath, Context: fromKubeconfigContext},
		ToKubeconfig:   clusterctlclient.Kubeconfig{Path: toKubeconfigPath, Context: toKubeconfigContext},
		Namespace:      namespace,
	}
	err = c.clusterctlClient.Move(c.moveOptions)
	if err != nil {
		return errors.Wrapf(err, "error during clusterctl move")
	}

	// Update BMH Status
	err = copyBMHStatus(ctx, cFrom, cTo, namespace)
	if err != nil {
		return errors.Wrap(err, "failed to copy BareMetalHost Status")
	}

	// Unpause
	err = pauseUnpauseBMHs(ctx, cFrom, namespace, false)
	if err != nil {
		return errors.Wrap(err, "failed to unpause BareMetalHost objects")
	}

	return err
}
// copyBMHStatus will copy the BareMetalHost Status field from the source
// cluster to the target cluster.
func copyBMHStatus(ctx context.Context, cFrom client.Client, cTo client.Client, namespace string) error {
	fromHosts, err := getBMHs(ctx, cFrom, namespace)
	if err != nil {
		return errors.Wrap(err, "failed to list BareMetalHost objects")
	}
	toHosts, err := getBMHs(ctx, cTo, namespace)
	if err != nil {
		return errors.Wrap(err, "failed to list BMH objects")
	}

	// Copy the Status field from old BMH to new BMH
	log.Debugf("Copying BareMetalHost status to target cluster")
	for _, toHost := range toHosts.Items {
		var found bool
		t := metav1.Now()
		for _, fromHost := range fromHosts.Items {
			if fromHost.Name == toHost.Name {
				toHost.Status = fromHost.Status
				found = true
				break
			}
		}
		if !found {
			return errors.Errorf("BMH with the same name %s/%s not found in the source cluster", toHost.Name, namespace)
		}
		toHost.Status.LastUpdated = &t
		err = cTo.Status().Update(ctx, &toHost)
		if err != nil {
			return errors.Wrap(err, "failed to update BareMetalHost status")
		}
	}
	return nil
}
// pauseUnpauseBMHs will add/remove the pause annotation from the
// BareMetalHost objects.
func pauseUnpauseBMHs(ctx context.Context, crClient client.Client, namespace string, pause bool) error {
	hosts, err := getBMHs(ctx, crClient, namespace)
	if err != nil {
		return errors.Wrap(err, "failed to list BMH objects")
	}
	for _, host := range hosts.Items {
		annotations := host.GetAnnotations()
		if annotations == nil {
			host.Annotations = map[string]string{}
		}
		if pause {
			log.Debugf("Pausing BareMetalHost object %s/%s", host.Name, namespace)
			host.Annotations[bmh.PausedAnnotation] = "true"
		} else {
			log.Debugf("Unpausing BareMetalHost object %s/%s", host.Name, namespace)
			delete(host.Annotations, bmh.PausedAnnotation)
		}
		if err := crClient.Update(ctx, &host); err != nil {
			return errors.Wrapf(err, "error updating BareMetalHost %q %s/%s",
				host.GroupVersionKind(), host.GetNamespace(), host.GetName())
		}
	}
	return nil
}
// getBMHs returns all BareMetalHost objects in the specified namespace.
// If the list call fails, for example because the BareMetalHost CRD is not
// installed, the error is returned to the caller.
func getBMHs(ctx context.Context, crClient client.Client, namespace string) (bmh.BareMetalHostList, error) {
	hosts := bmh.BareMetalHostList{}
	opts := &client.ListOptions{
		Namespace: namespace,
	}
	err := crClient.List(ctx, &hosts, opts)
	return hosts, err
}


@@ -0,0 +1,344 @@
/*
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package client

import (
	"context"
	"testing"

	. "github.com/onsi/gomega"

	bmoapis "github.com/metal3-io/baremetal-operator/pkg/apis"
	bmh "github.com/metal3-io/baremetal-operator/pkg/apis/metal3/v1alpha1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)
var bmh1 = &bmh.BareMetalHost{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "metal3.io/v1alpha1",
		Kind:       "BareMetalHost",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "bmh1",
		Namespace: "ns1",
	},
	Spec: bmh.BareMetalHostSpec{
		Online:         false,
		BootMACAddress: "00:2e:30:d7:11:19",
	},
	Status: bmh.BareMetalHostStatus{
		HardwareProfile: "bmh1-hw-profile",
	},
}

var bmh2 = &bmh.BareMetalHost{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "metal3.io/v1alpha1",
		Kind:       "BareMetalHost",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "bmh2",
		Namespace: "ns1",
	},
	Spec: bmh.BareMetalHostSpec{
		Online:         false,
		BootMACAddress: "01:23:45:67:89:ab",
	},
	Status: bmh.BareMetalHostStatus{
		HardwareProfile: "bmh2-hw-profile",
	},
}

func newClientWithBMHObject() client.Client {
	scheme := runtime.NewScheme()
	//nolint:errcheck
	bmoapis.AddToScheme(scheme)
	return fake.NewFakeClientWithScheme(scheme, bmh1)
}

func newClientWithTwoBMHObjects() client.Client {
	scheme := runtime.NewScheme()
	//nolint:errcheck
	bmoapis.AddToScheme(scheme)
	return fake.NewFakeClientWithScheme(scheme, bmh1, bmh2)
}

func newClientWithNoBMHObject() client.Client {
	scheme := runtime.NewScheme()
	//nolint:errcheck
	bmoapis.AddToScheme(scheme)
	return fake.NewFakeClientWithScheme(scheme)
}
func Test_move_getBMHs(t *testing.T) {
	type args struct {
		c         client.Client
		namespace string
	}
	type want struct {
		bmhList bmh.BareMetalHostList
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
		want    want
	}{
		{
			name: "returns a BareMetalHost object",
			args: args{
				c:         newClientWithBMHObject(),
				namespace: "ns1",
			},
			wantErr: false,
			want: want{
				bmhList: bmh.BareMetalHostList{
					Items: []bmh.BareMetalHost{*bmh1},
				},
			},
		},
		{
			name: "returns multiple BareMetalHost object",
			args: args{
				c:         newClientWithTwoBMHObjects(),
				namespace: "ns1",
			},
			wantErr: false,
			want: want{
				bmhList: bmh.BareMetalHostList{
					Items: []bmh.BareMetalHost{*bmh1, *bmh2},
				},
			},
		},
		{
			name: "returns an empty list of BareMetalHost objects",
			args: args{
				c:         newClientWithNoBMHObject(),
				namespace: "ns2",
			},
			wantErr: false,
			want: want{
				bmhList: bmh.BareMetalHostList{},
			},
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			bmhList, err := getBMHs(context.TODO(), tt.args.c, tt.args.namespace)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).NotTo(HaveOccurred())
			g.Expect(len(bmhList.Items)).To(BeEquivalentTo(len(tt.want.bmhList.Items)))
		})
	}
}
func Test_move_pauseUnpauseBMHs(t *testing.T) {
	type args struct {
		c         client.Client
		namespace string
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
	}{
		{
			name: "pause and unpause a single BareMetalHost object",
			args: args{
				c:         newClientWithBMHObject(),
				namespace: "ns1",
			},
			wantErr: false,
		},
		{
			name: "pause and unpause multiple BareMetalHost objects",
			args: args{
				c:         newClientWithTwoBMHObjects(),
				namespace: "ns1",
			},
			wantErr: false,
		},
		{
			name: "pause and unpause should do nothing when there is no BareMetalHost object present",
			args: args{
				c:         newClientWithNoBMHObject(),
				namespace: "ns2",
			},
			wantErr: false,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			err := pauseUnpauseBMHs(context.TODO(), tt.args.c, tt.args.namespace, true)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).NotTo(HaveOccurred())
			bmhList, err := getBMHs(context.TODO(), tt.args.c, tt.args.namespace)
			g.Expect(err).NotTo(HaveOccurred())
			for _, host := range bmhList.Items {
				g.Expect(host.Annotations[bmh.PausedAnnotation]).To(Equal("true"))
			}
			err = pauseUnpauseBMHs(context.TODO(), tt.args.c, tt.args.namespace, false)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).NotTo(HaveOccurred())
			bmhList, err = getBMHs(context.TODO(), tt.args.c, tt.args.namespace)
			g.Expect(err).NotTo(HaveOccurred())
			for _, host := range bmhList.Items {
				_, present := host.Annotations[bmh.PausedAnnotation]
				g.Expect(present).To(Equal(false))
			}
		})
	}
}
var bmh1NoStatus = &bmh.BareMetalHost{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "metal3.io/v1alpha1",
		Kind:       "BareMetalHost",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "bmh1",
		Namespace: "ns1",
	},
	Spec: bmh.BareMetalHostSpec{
		Online:         false,
		BootMACAddress: "00:2e:30:d7:11:19",
	},
}

var bmh2NoStatus = &bmh.BareMetalHost{
	TypeMeta: metav1.TypeMeta{
		APIVersion: "metal3.io/v1alpha1",
		Kind:       "BareMetalHost",
	},
	ObjectMeta: metav1.ObjectMeta{
		Name:      "bmh2",
		Namespace: "ns1",
	},
	Spec: bmh.BareMetalHostSpec{
		Online:         false,
		BootMACAddress: "01:23:45:67:89:ab",
	},
}

func newClientFromCluster() client.Client {
	scheme := runtime.NewScheme()
	//nolint:errcheck
	bmoapis.AddToScheme(scheme)
	return fake.NewFakeClientWithScheme(scheme, bmh1, bmh2)
}

func newClientToCluster() client.Client {
	scheme := runtime.NewScheme()
	//nolint:errcheck
	bmoapis.AddToScheme(scheme)
	return fake.NewFakeClientWithScheme(scheme, bmh1NoStatus, bmh2NoStatus)
}
func Test_move_copyBMHStatus(t *testing.T) {
	type args struct {
		cFrom     client.Client
		cTo       client.Client
		namespace string
	}
	type want struct {
		bmhList bmh.BareMetalHostList
	}
	tests := []struct {
		name    string
		args    args
		wantErr bool
		want    want
	}{
		{
			name: "copies the status field of multiple BareMetalHost objects",
			args: args{
				cFrom:     newClientFromCluster(),
				cTo:       newClientToCluster(),
				namespace: "ns1",
			},
			wantErr: false,
			want: want{
				bmhList: bmh.BareMetalHostList{
					Items: []bmh.BareMetalHost{*bmh1, *bmh2},
				},
			},
		},
		{
			name: "no copy occurs b/c no BareMetalHost objects are present",
			args: args{
				cFrom:     newClientWithNoBMHObject(),
				cTo:       newClientWithNoBMHObject(),
				namespace: "ns1",
			},
			wantErr: false,
			want: want{
				bmhList: bmh.BareMetalHostList{
					Items: []bmh.BareMetalHost{},
				},
			},
		},
		{
			name: "error should occur b/c BareMetalHost does not exist in the source cluster",
			args: args{
				cFrom:     newClientWithNoBMHObject(),
				cTo:       newClientToCluster(),
				namespace: "ns1",
			},
			wantErr: true,
		},
	}
	for _, tt := range tests {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			g := NewWithT(t)
			err := copyBMHStatus(context.TODO(), tt.args.cFrom, tt.args.cTo, tt.args.namespace)
			if tt.wantErr {
				g.Expect(err).To(HaveOccurred())
				return
			}
			g.Expect(err).NotTo(HaveOccurred())
			bmhList, err := getBMHs(context.TODO(), tt.args.cTo, tt.args.namespace)
			g.Expect(err).NotTo(HaveOccurred())
		NEXTHOST:
			for _, host := range bmhList.Items {
				for _, wantHost := range tt.want.bmhList.Items {
					if host.Name == wantHost.Name {
						g.Expect(host.Status.HardwareProfile).To(Equal(wantHost.Status.HardwareProfile))
						continue NEXTHOST
					}
				}
				t.Errorf("unexpected host %s", host.Name)
			}
		})
	}
}


@@ -55,6 +55,7 @@ func NewCommand(rs *environment.AirshipCTLSettings) (*Command, error) {
 		return nil, err
 	}
 	kubeConfigPath := rs.Config.KubeConfigPath()
 	return &Command{
 		kubeconfigPath: kubeConfigPath,
 		documentRoot:   root,
@@ -94,3 +95,12 @@ func getBundle(conf *config.Config) (document.Bundle, error) {
 	}
 	return document.NewBundleByPath(path)
 }
+
+// Move runs clusterctl move
+func (c *Command) Move(toKubeconfigContext string) error {
+	if c.options.MoveOptions != nil {
+		return c.client.Move(c.kubeconfigPath, c.kubeconfigContext,
+			c.kubeconfigPath, toKubeconfigContext, c.options.MoveOptions.Namespace)
+	}
+	return c.client.Move(c.kubeconfigPath, c.kubeconfigContext, c.kubeconfigPath, toKubeconfigContext, "")
+}
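A usage sketch, assuming the packages land as shown in this change: driving the new Move entrypoint programmatically rather than through the cobra command. The bare settings construction and the "target-cluster" context name are assumptions for illustration; NewCommand and Move come from the diff above.

```go
package main

import (
	"log"

	clusterctlcmd "opendev.org/airship/airshipctl/pkg/clusterctl/cmd"
	"opendev.org/airship/airshipctl/pkg/environment"
)

func main() {
	// In airshipctl the settings object is built and initialized by the root
	// command; constructing it bare here is a simplification for illustration.
	settings := &environment.AirshipCTLSettings{}

	command, err := clusterctlcmd.NewCommand(settings)
	if err != nil {
		log.Fatal(err)
	}
	// Move CAPI and BMO resources to the kubeconfig context "target-cluster";
	// an empty string would fall back to the current context, as the flag help describes.
	if err := command.Move("target-cluster"); err != nil {
		log.Fatal(err)
	}
}
```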