Support for v1/v2/autoprobe OpenStack Cinder block storage
Support for the Cinder v1/v2 API with the new gophercloud/gophercloud library. The API version is configurable and defaults to autodetection. Change-Id: I83de02beb08f50c15f8faa8a3f070d67fd64de41
This commit is contained in:
parent 3e8cd7b94e
commit e771dcdb90
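For context, the new bs-version key is read from the [BlockStorage] section of the cloud provider config, next to the existing trust-device-path option. A minimal illustrative snippet (the values are examples; "auto" is the default and enables the probing behaviour):

[BlockStorage]
; "auto" probes the Cinder endpoint and prefers v2 over v1;
; set "v1" or "v2" explicitly to pin the API and skip autodetection
bs-version = auto
trust-device-path = yes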
@@ -72,6 +72,7 @@ go_test(
         "//pkg/api/v1:go_default_library",
         "//pkg/cloudprovider:go_default_library",
         "//vendor:github.com/gophercloud/gophercloud",
+        "//vendor:github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions",
         "//vendor:github.com/gophercloud/gophercloud/openstack/compute/v2/servers",
         "//vendor:k8s.io/apimachinery/pkg/apis/meta/v1",
         "//vendor:k8s.io/apimachinery/pkg/types",
@@ -24,11 +24,13 @@ import (
     "io/ioutil"
     "net/http"
     "regexp"
+    "sort"
    "strings"
     "time"

     "github.com/gophercloud/gophercloud"
     "github.com/gophercloud/gophercloud/openstack"
+    apiversions_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions"
     "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
     "github.com/gophercloud/gophercloud/openstack/identity/v3/extensions/trusts"
     tokens3 "github.com/gophercloud/gophercloud/openstack/identity/v3/tokens"
@@ -89,7 +91,8 @@ type LoadBalancerOpts struct {
 }

 type BlockStorageOpts struct {
-    TrustDevicePath bool `gcfg:"trust-device-path"` // See Issue #33128
+    BSVersion       string `gcfg:"bs-version"`        // overrides autodetection. v1 or v2. Defaults to auto
+    TrustDevicePath bool   `gcfg:"trust-device-path"` // See Issue #33128
 }

 type RouterOpts struct {
@@ -173,6 +176,7 @@ func readConfig(config io.Reader) (Config, error) {
     var cfg Config

     // Set default values for config params
+    cfg.BlockStorage.BSVersion = "auto"
     cfg.BlockStorage.TrustDevicePath = false

     err := gcfg.ReadInto(&cfg, config)
@@ -535,3 +539,111 @@ func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {

     return r, true
 }
+
+// Implementation of sort interface for blockstorage version probing
+type APIVersionsByID []apiversions_v1.APIVersion
+
+func (apiVersions APIVersionsByID) Len() int {
+    return len(apiVersions)
+}
+
+func (apiVersions APIVersionsByID) Swap(i, j int) {
+    apiVersions[i], apiVersions[j] = apiVersions[j], apiVersions[i]
+}
+
+func (apiVersions APIVersionsByID) Less(i, j int) bool {
+    return apiVersions[i].ID > apiVersions[j].ID
+}
+
+func autoVersionSelector(apiVersion *apiversions_v1.APIVersion) string {
+    switch strings.ToLower(apiVersion.ID) {
+    case "v2.0":
+        return "v2"
+    case "v1.0":
+        return "v1"
+    default:
+        return ""
+    }
+}
+
+func doBsApiVersionAutodetect(availableApiVersions []apiversions_v1.APIVersion) string {
+    sort.Sort(APIVersionsByID(availableApiVersions))
+    for _, status := range []string{"CURRENT", "SUPPORTED"} {
+        for _, version := range availableApiVersions {
+            if strings.ToUpper(version.Status) == status {
+                if detectedApiVersion := autoVersionSelector(&version); detectedApiVersion != "" {
+                    glog.V(3).Infof("Blockstorage API version probing has found a suitable %s api version: %s", status, detectedApiVersion)
+                    return detectedApiVersion
+                }
+            }
+        }
+    }
+
+    return ""
+}
+
+func (os *OpenStack) volumeService(forceVersion string) (volumeService, error) {
+    bsVersion := ""
+    if forceVersion == "" {
+        bsVersion = os.bsOpts.BSVersion
+    } else {
+        bsVersion = forceVersion
+    }
+
+    switch bsVersion {
+    case "v1":
+        sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
+            Region: os.region,
+        })
+        if err != nil || sClient == nil {
+            glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
+            return nil, err
+        }
+        return &VolumesV1{sClient, os.bsOpts}, nil
+    case "v2":
+        sClient, err := openstack.NewBlockStorageV2(os.provider, gophercloud.EndpointOpts{
+            Region: os.region,
+        })
+        if err != nil || sClient == nil {
+            glog.Errorf("Unable to initialize cinder v2 client for region: %s", os.region)
+            return nil, err
+        }
+        return &VolumesV2{sClient, os.bsOpts}, nil
+    case "auto":
+        sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
+            Region: os.region,
+        })
+        if err != nil || sClient == nil {
+            glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
+            return nil, err
+        }
+        availableApiVersions := []apiversions_v1.APIVersion{}
+        err = apiversions_v1.List(sClient).EachPage(func(page pagination.Page) (bool, error) {
+            // returning false from this handler stops page iteration, error is propagated to the upper function
+            apiversions, err := apiversions_v1.ExtractAPIVersions(page)
+            if err != nil {
+                glog.Errorf("Unable to extract api versions from page: %v", err)
+                return false, err
+            }
+            availableApiVersions = append(availableApiVersions, apiversions...)
+            return true, nil
+        })
+
+        if err != nil {
+            glog.Errorf("Error when retrieving list of supported blockstorage api versions: %v", err)
+            return nil, err
+        }
+        if autodetectedVersion := doBsApiVersionAutodetect(availableApiVersions); autodetectedVersion != "" {
+            return os.volumeService(autodetectedVersion)
+        } else {
+            // Nothing suitable found, failed autodetection
+            return nil, errors.New("BS API version autodetection failed.")
+        }
+    default:
+        err_txt := fmt.Sprintf("Config error: unrecognised bs-version \"%v\"", os.bsOpts.BSVersion)
+        glog.Warningf(err_txt)
+        return nil, errors.New(err_txt)
+    }
+}
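For illustration, a minimal in-package sketch of the selection order implemented above: versions are sorted descending by ID, CURRENT is preferred over SUPPORTED, and IDs the selector does not recognise (such as v3.0) are skipped. The Example wrapper and the sample data are assumptions for demonstration, not part of this change:

// hypothetical file pkg/cloudprovider/providers/openstack/openstack_probe_example_test.go
package openstack

import (
    "fmt"

    apiversions_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions"
)

func ExampleDoBsApiVersionAutodetect() {
    available := []apiversions_v1.APIVersion{
        {ID: "v1.0", Status: "SUPPORTED"},
        {ID: "v2.0", Status: "CURRENT"},
        {ID: "v3.0", Status: "CURRENT"}, // unknown to autoVersionSelector, skipped
    }
    // v2.0 is the highest CURRENT version the selector understands
    fmt.Println(doBsApiVersionAutodetect(available))
    // Output: v2
}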
@@ -17,6 +17,10 @@ limitations under the License.
 package openstack

 import (
     "fmt"
+    "github.com/gophercloud/gophercloud"
+    "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/apiversions"
+    "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
     "os"
     "reflect"
+    "sort"
@@ -24,9 +28,6 @@ import (
     "testing"
     "time"

-    "github.com/gophercloud/gophercloud"
-    "github.com/gophercloud/gophercloud/openstack/compute/v2/servers"
-
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/rand"
     "k8s.io/kubernetes/pkg/api/v1"
@@ -81,7 +82,9 @@ func TestReadConfig(t *testing.T) {
 monitor-timeout = 30s
 monitor-max-retries = 3
 [BlockStorage]
+bs-version = auto
 trust-device-path = yes
+
 `))
     if err != nil {
         t.Fatalf("Should succeed when a valid config is provided: %s", err)
@@ -105,6 +108,9 @@ func TestReadConfig(t *testing.T) {
     if cfg.BlockStorage.TrustDevicePath != true {
         t.Errorf("incorrect bs.trustdevicepath: %v", cfg.BlockStorage.TrustDevicePath)
     }
+    if cfg.BlockStorage.BSVersion != "auto" {
+        t.Errorf("incorrect bs.bs-version: %v", cfg.BlockStorage.BSVersion)
+    }
 }

 func TestToAuthOptions(t *testing.T) {
@@ -385,3 +391,45 @@ func TestVolumes(t *testing.T) {
     t.Logf("Volume (%s) deleted\n", vol)

 }
+
+func TestCinderAutoDetectApiVersion(t *testing.T) {
+    updated := "" // not relevant to this test, can be set to any value
+    status_current := "CURRENT"
+    status_supported := "SUPpORTED" // mixed case on purpose, to test resistance against an api returning a different case
+    status_deprecated := "DEPRECATED"
+
+    var result_version, api_version [4]string
+
+    for ver := 0; ver <= 3; ver++ {
+        api_version[ver] = fmt.Sprintf("v%d.0", ver)
+        result_version[ver] = fmt.Sprintf("v%d", ver)
+    }
+    result_version[0] = ""
+    api_current_v1 := apiversions.APIVersion{ID: api_version[1], Status: status_current, Updated: updated}
+    api_current_v2 := apiversions.APIVersion{ID: api_version[2], Status: status_current, Updated: updated}
+    api_current_v3 := apiversions.APIVersion{ID: api_version[3], Status: status_current, Updated: updated}
+
+    api_supported_v1 := apiversions.APIVersion{ID: api_version[1], Status: status_supported, Updated: updated}
+    api_supported_v2 := apiversions.APIVersion{ID: api_version[2], Status: status_supported, Updated: updated}
+
+    api_deprecated_v1 := apiversions.APIVersion{ID: api_version[1], Status: status_deprecated, Updated: updated}
+    api_deprecated_v2 := apiversions.APIVersion{ID: api_version[2], Status: status_deprecated, Updated: updated}
+
+    var testCases = []struct {
+        test_case     []apiversions.APIVersion
+        wanted_result string
+    }{
+        {[]apiversions.APIVersion{api_current_v1}, result_version[1]},
+        {[]apiversions.APIVersion{api_current_v2}, result_version[2]},
+        {[]apiversions.APIVersion{api_supported_v1, api_current_v2}, result_version[2]}, // current is always selected
+        {[]apiversions.APIVersion{api_current_v1, api_supported_v2}, result_version[1]}, // current is always selected
+        {[]apiversions.APIVersion{api_current_v3, api_supported_v2, api_deprecated_v1}, result_version[2]}, // v3 is current but unknown to the selector, fall back to supported v2
+        {[]apiversions.APIVersion{api_current_v3, api_deprecated_v2, api_deprecated_v1}, result_version[0]}, // v3 is not supported
+    }
+
+    for _, suite := range testCases {
+        if autodetectedVersion := doBsApiVersionAutodetect(suite.test_case); autodetectedVersion != suite.wanted_result {
+            t.Fatalf("Autodetect for suite: %s, failed with result: '%s', wanted '%s'", suite.test_case, autodetectedVersion, suite.wanted_result)
+        }
+    }
+}
@@ -23,100 +23,96 @@ import (
     "path"
     "strings"

-    "k8s.io/kubernetes/pkg/volume"
+    k8s_volume "k8s.io/kubernetes/pkg/volume"

     "github.com/gophercloud/gophercloud"
     "github.com/gophercloud/gophercloud/openstack"
-    "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes"
+    volumes_v1 "github.com/gophercloud/gophercloud/openstack/blockstorage/v1/volumes"
+    volumes_v2 "github.com/gophercloud/gophercloud/openstack/blockstorage/v2/volumes"
     "github.com/gophercloud/gophercloud/openstack/compute/v2/extensions/volumeattach"
     "github.com/gophercloud/gophercloud/pagination"

     "github.com/golang/glog"
 )

-// Attaches given cinder volume to the compute running kubelet
-func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, error) {
-    disk, err := os.getVolume(diskName)
-    if err != nil {
-        return "", err
-    }
-    cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
-        Region: os.region,
-    })
-    if err != nil || cClient == nil {
-        glog.Errorf("Unable to initialize nova client for region: %s", os.region)
-        return "", err
-    }
-
-    if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
-        if instanceID == disk.Attachments[0]["server_id"] {
-            glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
-            return disk.ID, nil
-        }
-
-        glog.V(2).Infof("Disk %q is attached to a different compute (%q), detaching", diskName, disk.Attachments[0]["server_id"])
-        err = os.DetachDisk(fmt.Sprintf("%s", disk.Attachments[0]["server_id"]), diskName)
-        if err != nil {
-            return "", err
-        }
-    }
-
-    // add read only flag here if possible spothanis
-    _, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
-        VolumeID: disk.ID,
-    }).Extract()
-    if err != nil {
-        glog.Errorf("Failed to attach %s volume to %s compute: %v", diskName, instanceID, err)
-        return "", err
-    }
-    glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
-    return disk.ID, nil
+type volumeService interface {
+    createVolume(opts VolumeCreateOpts) (string, error)
+    getVolume(diskName string) (Volume, error)
+    deleteVolume(volumeName string) error
 }

-// Detaches given cinder volume from the compute running kubelet
-func (os *OpenStack) DetachDisk(instanceID string, partialDiskId string) error {
-    disk, err := os.getVolume(partialDiskId)
-    if err != nil {
-        return err
-    }
-    cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
-        Region: os.region,
-    })
-    if err != nil || cClient == nil {
-        glog.Errorf("Unable to initialize nova client for region: %s", os.region)
-        return err
-    }
-    if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
-        // This is a blocking call and effects kubelet's performance directly.
-        // We should consider kicking it out into a separate routine, if it is bad.
-        err = volumeattach.Delete(cClient, instanceID, disk.ID).ExtractErr()
-        if err != nil {
-            glog.Errorf("Failed to delete volume %s from compute %s attached %v", disk.ID, instanceID, err)
-            return err
-        }
-        glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, instanceID)
-    } else {
-        errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, instanceID)
-        glog.Errorf(errMsg)
-        return errors.New(errMsg)
-    }
-    return nil
+// Volumes implementation for v1
+type VolumesV1 struct {
+    blockstorage *gophercloud.ServiceClient
+    opts         BlockStorageOpts
 }

-// Takes a partial/full disk id or diskname
-func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) {
-    sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
-        Region: os.region,
-    })
+// Volumes implementation for v2
+type VolumesV2 struct {
+    blockstorage *gophercloud.ServiceClient
+    opts         BlockStorageOpts
+}

-    var volume volumes.Volume
-    if err != nil || sClient == nil {
-        glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
-        return volume, err
+type Volume struct {
+    // ID of the instance, to which this volume is attached. "" if not attached
+    AttachedServerId string
+    // Device file path
+    AttachedDevice string
+    // Unique identifier for the volume.
+    ID string
+    // Human-readable display name for the volume.
+    Name string
+    // Current status of the volume.
+    Status string
 }

-    err = volumes.List(sClient, nil).EachPage(func(page pagination.Page) (bool, error) {
-        vols, err := volumes.ExtractVolumes(page)
+type VolumeCreateOpts struct {
+    Size         int
+    Availability string
+    Name         string
+    VolumeType   string
+    Metadata     map[string]string
+}
+
+func (volumes *VolumesV1) createVolume(opts VolumeCreateOpts) (string, error) {
+
+    create_opts := volumes_v1.CreateOpts{
+        Name:         opts.Name,
+        Size:         opts.Size,
+        VolumeType:   opts.VolumeType,
+        Availability: opts.Availability,
+        Metadata:     opts.Metadata,
+    }
+
+    vol, err := volumes_v1.Create(volumes.blockstorage, create_opts).Extract()
+    if err != nil {
+        return "", err
+    }
+    return vol.ID, nil
+}
+
+func (volumes *VolumesV2) createVolume(opts VolumeCreateOpts) (string, error) {
+
+    create_opts := volumes_v2.CreateOpts{
+        Name:             opts.Name,
+        Size:             opts.Size,
+        VolumeType:       opts.VolumeType,
+        AvailabilityZone: opts.Availability,
+        Metadata:         opts.Metadata,
+    }
+
+    vol, err := volumes_v2.Create(volumes.blockstorage, create_opts).Extract()
+    if err != nil {
+        return "", err
+    }
+    return vol.ID, nil
+}
+
+func (volumes *VolumesV1) getVolume(diskName string) (Volume, error) {
+    var volume_v1 volumes_v1.Volume
+    var volume Volume
+    err := volumes_v1.List(volumes.blockstorage, nil).EachPage(func(page pagination.Page) (bool, error) {
+        vols, err := volumes_v1.ExtractVolumes(page)
         if err != nil {
             glog.Errorf("Failed to extract volumes: %v", err)
             return false, err
@@ -124,35 +120,177 @@ func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) {
             for _, v := range vols {
                 glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
                 if v.Name == diskName || strings.Contains(v.ID, diskName) {
-                    volume = v
+                    volume_v1 = v
                     return true, nil
                 }
             }
         }
         // if it reached here then no disk with the given name was found.
-        errmsg := fmt.Sprintf("Unable to find disk: %s in region %s", diskName, os.region)
+        errmsg := fmt.Sprintf("Unable to find disk: %s", diskName)
         return false, errors.New(errmsg)
     })
     if err != nil {
         glog.Errorf("Error occurred getting volume: %s", diskName)
         return volume, err
     }
-    return volume, err
+
+    volume.ID = volume_v1.ID
+    volume.Name = volume_v1.Name
+    volume.Status = volume_v1.Status
+
+    if len(volume_v1.Attachments) > 0 && volume_v1.Attachments[0]["server_id"] != nil {
+        volume.AttachedServerId = volume_v1.Attachments[0]["server_id"].(string)
+        volume.AttachedDevice = volume_v1.Attachments[0]["device"].(string)
+    }
+
+    return volume, nil
+}
+
+func (volumes *VolumesV2) getVolume(diskName string) (Volume, error) {
+    var volume_v2 volumes_v2.Volume
+    var volume Volume
+    err := volumes_v2.List(volumes.blockstorage, nil).EachPage(func(page pagination.Page) (bool, error) {
+        vols, err := volumes_v2.ExtractVolumes(page)
+        if err != nil {
+            glog.Errorf("Failed to extract volumes: %v", err)
+            return false, err
+        } else {
+            for _, v := range vols {
+                glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
+                if v.Name == diskName || strings.Contains(v.ID, diskName) {
+                    volume_v2 = v
+                    return true, nil
+                }
+            }
+        }
+        // if it reached here then no disk with the given name was found.
+        errmsg := fmt.Sprintf("Unable to find disk: %s", diskName)
+        return false, errors.New(errmsg)
+    })
+    if err != nil {
+        glog.Errorf("Error occurred getting volume: %s", diskName)
+        return volume, err
+    }
+
+    volume.ID = volume_v2.ID
+    volume.Name = volume_v2.Name
+    volume.Status = volume_v2.Status
+
+    if len(volume_v2.Attachments) > 0 {
+        volume.AttachedServerId = volume_v2.Attachments[0].ServerID
+        volume.AttachedDevice = volume_v2.Attachments[0].Device
+    }
+
+    return volume, nil
+}
+
+func (volumes *VolumesV1) deleteVolume(volumeName string) error {
+    err := volumes_v1.Delete(volumes.blockstorage, volumeName).ExtractErr()
+    if err != nil {
+        glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
+    }
+    return err
+}
+
+func (volumes *VolumesV2) deleteVolume(volumeName string) error {
+    err := volumes_v2.Delete(volumes.blockstorage, volumeName).ExtractErr()
+    if err != nil {
+        glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
+    }
+    return err
+}
+
+// Attaches given cinder volume to the compute running kubelet
+func (os *OpenStack) AttachDisk(instanceID string, diskName string) (string, error) {
+    volume, err := os.getVolume(diskName)
+    if err != nil {
+        return "", err
+    }
+    cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
+        Region: os.region,
+    })
+    if err != nil || cClient == nil {
+        glog.Errorf("Unable to initialize nova client for region: %s", os.region)
+        return "", err
+    }
+
+    if volume.AttachedServerId != "" {
+        if instanceID == volume.AttachedServerId {
+            glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, instanceID)
+            return volume.ID, nil
+        }
+        glog.V(2).Infof("Disk %q is attached to a different compute (%q), detaching", diskName, volume.AttachedServerId)
+        err = os.DetachDisk(volume.AttachedServerId, diskName)
+        if err != nil {
+            return "", err
+        }
+    }
+
+    // add read only flag here if possible spothanis
+    _, err = volumeattach.Create(cClient, instanceID, &volumeattach.CreateOpts{
+        VolumeID: volume.ID,
+    }).Extract()
+    if err != nil {
+        glog.Errorf("Failed to attach %s volume to %s compute: %v", diskName, instanceID, err)
+        return "", err
+    }
+    glog.V(2).Infof("Successfully attached %s volume to %s compute", diskName, instanceID)
+    return volume.ID, nil
+}
+
+// Detaches given cinder volume from the compute running kubelet
+func (os *OpenStack) DetachDisk(instanceID string, partialDiskId string) error {
+    volume, err := os.getVolume(partialDiskId)
+    if err != nil {
+        return err
+    }
+    cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
+        Region: os.region,
+    })
+    if err != nil || cClient == nil {
+        glog.Errorf("Unable to initialize nova client for region: %s", os.region)
+        return err
+    }
+    if volume.AttachedServerId != instanceID {
+        errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", volume.Name, instanceID)
+        glog.Errorf(errMsg)
+        return errors.New(errMsg)
+    } else {
+        // This is a blocking call and affects kubelet's performance directly.
+        // We should consider kicking it out into a separate routine, if it is bad.
+        err = volumeattach.Delete(cClient, instanceID, volume.ID).ExtractErr()
+        if err != nil {
+            glog.Errorf("Failed to delete volume %s from compute %s attached %v", volume.ID, instanceID, err)
+            return err
+        }
+        glog.V(2).Infof("Successfully detached volume: %s from compute: %s", volume.ID, instanceID)
+    }
+
+    return nil
+}
+
+// Takes a partial/full disk id or diskname
+func (os *OpenStack) getVolume(diskName string) (Volume, error) {
+    volumes, err := os.volumeService("")
+    if err != nil || volumes == nil {
+        glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
+        return Volume{}, err
+    }
+
+    return volumes.getVolume(diskName)
 }

 // Create a volume of given size (in GiB)
 func (os *OpenStack) CreateVolume(name string, size int, vtype, availability string, tags *map[string]string) (volumeName string, err error) {
-    sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
-        Region: os.region,
-    })
-
-    if err != nil || sClient == nil {
+    volumes, err := os.volumeService("")
+    if err != nil || volumes == nil {
         glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
         return "", err
     }

-    opts := volumes.CreateOpts{
+    opts := VolumeCreateOpts{
         Name:       name,
         Size:       size,
         VolumeType: vtype,
|
|||
if tags != nil {
|
||||
opts.Metadata = *tags
|
||||
}
|
||||
vol, err := volumes.Create(sClient, opts).Extract()
|
||||
volume_id, err := volumes.createVolume(opts)
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to create a %d GB volume: %v", size, err)
|
||||
return "", err
|
||||
}
|
||||
glog.Infof("Created volume %v", vol.ID)
|
||||
return vol.ID, err
|
||||
|
||||
glog.Infof("Created volume %v", volume_id)
|
||||
return volume_id, nil
|
||||
}
|
||||
|
||||
// GetDevicePath returns the path of an attached block storage volume, specified by its id.
|
||||
|
@@ -202,39 +342,38 @@ func (os *OpenStack) DeleteVolume(volumeName string) error {
     }
     if used {
         msg := fmt.Sprintf("Cannot delete the volume %q, it's still attached to a node", volumeName)
-        return volume.NewDeletedVolumeInUseError(msg)
+        return k8s_volume.NewDeletedVolumeInUseError(msg)
     }

-    sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
-        Region: os.region,
-    })
-
-    if err != nil || sClient == nil {
+    volumes, err := os.volumeService("")
+    if err != nil || volumes == nil {
         glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
         return err
     }
-    err = volumes.Delete(sClient, volumeName).ExtractErr()
+
+    err = volumes.deleteVolume(volumeName)
     if err != nil {
         glog.Errorf("Cannot delete volume %s: %v", volumeName, err)
     }
-    return err
+    return nil
 }

 // Get device path of attached volume to the compute running kubelet, as known by cinder
 func (os *OpenStack) GetAttachmentDiskPath(instanceID string, diskName string) (string, error) {
     // See issue #33128 - Cinder does not always tell you the right device path, as such
     // we must only use this value as a last resort.
-    disk, err := os.getVolume(diskName)
+    volume, err := os.getVolume(diskName)
     if err != nil {
         return "", err
     }
-    if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
-        if instanceID == disk.Attachments[0]["server_id"] {
+    if volume.AttachedServerId != "" {
+        if instanceID == volume.AttachedServerId {
             // Attachment[0]["device"] points to the device path
             // see http://developer.openstack.org/api-ref-blockstorage-v1.html
-            return disk.Attachments[0]["device"].(string), nil
+            return volume.AttachedDevice, nil
         } else {
-            errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
+            errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, volume.AttachedServerId)
             glog.Errorf(errMsg)
             return "", errors.New(errMsg)
         }
@@ -244,11 +383,12 @@ func (os *OpenStack) GetAttachmentDiskPath(instanceID string, diskName string) (

 // query if a volume is attached to a compute instance
 func (os *OpenStack) DiskIsAttached(diskName, instanceID string) (bool, error) {
-    disk, err := os.getVolume(diskName)
+    volume, err := os.getVolume(diskName)
     if err != nil {
         return false, err
     }
-    if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
+
+    if instanceID == volume.AttachedServerId {
         return true, nil
     }
     return false, nil
@@ -258,27 +398,19 @@ func (os *OpenStack) DiskIsAttached(diskName, instanceID string) (bool, error) {
 func (os *OpenStack) DisksAreAttached(diskNames []string, instanceID string) (map[string]bool, error) {
     attached := make(map[string]bool)
     for _, diskName := range diskNames {
-        attached[diskName] = false
-    }
-    for _, diskName := range diskNames {
-        disk, err := os.getVolume(diskName)
-        if err != nil {
-            continue
-        }
-        if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && instanceID == disk.Attachments[0]["server_id"] {
-            attached[diskName] = true
-        }
+        is_attached, _ := os.DiskIsAttached(diskName, instanceID)
+        attached[diskName] = is_attached
     }
     return attached, nil
 }

 // diskIsUsed returns true if a disk is attached to any node.
 func (os *OpenStack) diskIsUsed(diskName string) (bool, error) {
-    disk, err := os.getVolume(diskName)
+    volume, err := os.getVolume(diskName)
     if err != nil {
         return false, err
     }
-    if len(disk.Attachments) > 0 {
+    if volume.AttachedServerId != "" {
         return true, nil
     }
     return false, nil