Merge pull request #6689 from spothanis/cinder-vol-plugin

Volume Plugin for Cinder; Openstack Block Storage
This commit is contained in:
Prashanth B 2015-08-28 10:47:31 -07:00
commit 2fb61ca009
1 changed file with 158 additions and 0 deletions


@@ -22,12 +22,16 @@ import (
    "io"
    "net"
    "net/http"
    // The standard library "os" package is aliased because "os" is also the
    // receiver name on the OpenStack methods below.
    ossys "os"
    "regexp"
    "strings"
    "time"
    "code.google.com/p/gcfg"
    "github.com/rackspace/gophercloud"
    "github.com/rackspace/gophercloud/openstack"
    "github.com/rackspace/gophercloud/openstack/blockstorage/v1/volumes"
    "github.com/rackspace/gophercloud/openstack/compute/v2/extensions/volumeattach"
    "github.com/rackspace/gophercloud/openstack/compute/v2/flavors"
    "github.com/rackspace/gophercloud/openstack/compute/v2/servers"
    "github.com/rackspace/gophercloud/openstack/networking/v2/extensions/lbaas/members"
@@ -763,3 +767,157 @@ func (os *OpenStack) GetZone() (cloudprovider.Zone, error) {
func (os *OpenStack) Routes() (cloudprovider.Routes, bool) {
    return nil, false
}

// AttachDisk attaches the given Cinder volume to the compute instance running
// the kubelet and returns the volume's full ID.
func (os *OpenStack) AttachDisk(diskName string) (string, error) {
    disk, err := os.getVolume(diskName)
    if err != nil {
        return "", err
    }
    cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
        Region: os.region,
    })
    if err != nil || cClient == nil {
        glog.Errorf("Unable to initialize nova client for region: %s", os.region)
        return "", err
    }
    // Resolve the Nova server ID of the machine this kubelet runs on.
    computeID, err := os.getComputeIDbyHostname(cClient)
    if err != nil || len(computeID) == 0 {
        glog.Errorf("Unable to resolve this node's compute ID from its hostname")
        if err == nil {
            err = errors.New("empty compute ID returned for this node's hostname")
        }
        return "", err
    }
    if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil {
        if computeID == disk.Attachments[0]["server_id"] {
            glog.V(4).Infof("Disk: %q is already attached to compute: %q", diskName, computeID)
            return disk.ID, nil
        }
        errMsg := fmt.Sprintf("Disk %q is attached to a different compute: %q, should be detached before proceeding", diskName, disk.Attachments[0]["server_id"])
        glog.Error(errMsg)
        return "", errors.New(errMsg)
    }
    // TODO(spothanis): add a read-only flag here if possible.
    _, err = volumeattach.Create(cClient, computeID, &volumeattach.CreateOpts{
        VolumeID: disk.ID,
    }).Extract()
    if err != nil {
        glog.Errorf("Failed to attach volume %s to compute %s: %v", diskName, computeID, err)
        return "", err
    }
    glog.V(2).Infof("Successfully attached volume %s to compute %s", diskName, computeID)
    return disk.ID, nil
}

// DetachDisk detaches the given Cinder volume (identified by a full or
// partial volume ID) from the compute instance running the kubelet.
func (os *OpenStack) DetachDisk(partialDiskId string) error {
    disk, err := os.getVolume(partialDiskId)
    if err != nil {
        return err
    }
    cClient, err := openstack.NewComputeV2(os.provider, gophercloud.EndpointOpts{
        Region: os.region,
    })
    if err != nil || cClient == nil {
        glog.Errorf("Unable to initialize nova client for region: %s", os.region)
        return err
    }
    computeID, err := os.getComputeIDbyHostname(cClient)
    if err != nil || len(computeID) == 0 {
        glog.Errorf("Unable to resolve this node's compute ID while detaching disk")
        if err == nil {
            err = errors.New("empty compute ID returned for this node's hostname")
        }
        return err
    }
    if len(disk.Attachments) > 0 && disk.Attachments[0]["server_id"] != nil && computeID == disk.Attachments[0]["server_id"] {
        // This is a blocking call and affects the kubelet's performance directly.
        // We should consider kicking it out into a separate routine, if it turns out to be bad.
        err = volumeattach.Delete(cClient, computeID, disk.ID).ExtractErr()
        if err != nil {
            glog.Errorf("Failed to detach volume %s from compute %s: %v", disk.ID, computeID, err)
            return err
        }
        glog.V(2).Infof("Successfully detached volume: %s from compute: %s", disk.ID, computeID)
    } else {
        errMsg := fmt.Sprintf("Disk: %s has no attachments or is not attached to compute: %s", disk.Name, computeID)
        glog.Error(errMsg)
        return errors.New(errMsg)
    }
    return nil
}
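
The comment inside DetachDisk notes that the detach call blocks and could be moved off the kubelet's main path. A minimal sketch of that idea (hypothetical, not part of this commit): wrap the blocking call in a goroutine and hand the result back on a channel. The name detachDiskAsync is an assumption for illustration.

// detachDiskAsync is a hypothetical wrapper (not in this commit) that runs the
// blocking Cinder detach in its own goroutine and reports the result on a
// channel, so the kubelet's sync loop is not held up.
func (os *OpenStack) detachDiskAsync(partialDiskId string) <-chan error {
    // Buffered so the goroutine can exit even if nobody reads the result.
    errCh := make(chan error, 1)
    go func() {
        errCh <- os.DetachDisk(partialDiskId)
    }()
    return errCh
}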

// getVolume looks up a Cinder volume by exact name, or by full or partial volume ID.
func (os *OpenStack) getVolume(diskName string) (volumes.Volume, error) {
    var volume volumes.Volume
    sClient, err := openstack.NewBlockStorageV1(os.provider, gophercloud.EndpointOpts{
        Region: os.region,
    })
    if err != nil || sClient == nil {
        glog.Errorf("Unable to initialize cinder client for region: %s", os.region)
        return volume, err
    }
    // Page through all volumes in the region, matching on name or (partial) ID.
    err = volumes.List(sClient, nil).EachPage(func(page pagination.Page) (bool, error) {
        vols, err := volumes.ExtractVolumes(page)
        if err != nil {
            glog.Errorf("Failed to extract volumes: %v", err)
            return false, err
        }
        for _, v := range vols {
            glog.V(4).Infof("%s %s %v", v.ID, v.Name, v.Attachments)
            if v.Name == diskName || strings.Contains(v.ID, diskName) {
                volume = v
                // Found the volume; stop paging.
                return false, nil
            }
        }
        // Not on this page; keep paging rather than failing early.
        return true, nil
    })
    if err != nil {
        glog.Errorf("Error occurred getting volume: %s", diskName)
        return volume, err
    }
    if len(volume.ID) == 0 {
        // Paged through every volume without a match.
        return volume, fmt.Errorf("unable to find disk: %s in region %s", diskName, os.region)
    }
    return volume, nil
}
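
// getComputeIDbyHostname returns the Nova server ID of the instance this
// kubelet runs on, by matching the local hostname against each server's
// "hostname" metadata key or server name.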
func (os *OpenStack) getComputeIDbyHostname(cClient *gophercloud.ServiceClient) (string, error) {
    hostname, err := ossys.Hostname()
    if err != nil {
        glog.Errorf("Failed to get this node's hostname: %v", err)
        return "", err
    }
    i, ok := os.Instances()
    if !ok {
        glog.Errorf("Unable to get instances")
        return "", errors.New("unable to get instances")
    }
    // List every server in the region; "." is a match-everything name filter.
    srvs, err := i.List(".")
    if err != nil {
        glog.Errorf("Failed to list servers: %v", err)
        return "", err
    }
    if len(srvs) == 0 {
        glog.Errorf("Found no servers in the region")
        return "", errors.New("found no servers in the region")
    }
glog.V(4).Infof("found servers: %v", srvs)
for _, srvname := range srvs {
server, err := getServerByName(cClient, srvname)
if err != nil {
return "", err
} else {
if (server.Metadata["hostname"] != nil && server.Metadata["hostname"] == hostname) || (len(server.Name) > 0 && server.Name == hostname) {
glog.V(4).Infof("found server: %s with host :%s", server.Name, hostname)
return server.ID, nil
}
}
}
return "", fmt.Errorf("No server found matching hostname: %s", hostname)
}
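
For orientation, a minimal, hypothetical caller sketch follows (not part of this commit). The constructor name newOpenStack, the cfg value, and the volume name "my-cinder-volume" are assumptions for illustration; AttachDisk accepts a volume name or (partial) ID and returns the full volume ID, which can then be handed to DetachDisk.

// Hypothetical usage of the new attach/detach entry points.
os, err := newOpenStack(cfg) // assumed to return a configured *OpenStack
if err != nil {
    glog.Fatalf("Failed to build OpenStack provider: %v", err)
}
volumeID, err := os.AttachDisk("my-cinder-volume")
if err != nil {
    glog.Fatalf("Attach failed: %v", err)
}
glog.Infof("Attached Cinder volume %s to this compute instance", volumeID)
if err := os.DetachDisk(volumeID); err != nil {
    glog.Fatalf("Detach failed: %v", err)
}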