vendor files

Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/BUILD generated vendored Normal file

@@ -0,0 +1,70 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"attacher.go",
"doc.go",
"gce_pd.go",
"gce_util.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/gce_pd",
deps = [
"//pkg/cloudprovider:go_default_library",
"//pkg/cloudprovider/providers/gce:go_default_library",
"//pkg/util/mount:go_default_library",
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"attacher_test.go",
"gce_pd_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/gce_pd",
library = ":go_default_library",
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/testing:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/OWNERS generated vendored Normal file

@@ -0,0 +1,12 @@
approvers:
- saad-ali
- thockin
reviewers:
- saad-ali
- jsafrane
- jingxu97
- matchstick
- gnufied
- msau42
- verult
- davidz627

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher.go generated vendored Normal file

@@ -0,0 +1,277 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"fmt"
"os"
"path"
"path/filepath"
"strconv"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
type gcePersistentDiskAttacher struct {
host volume.VolumeHost
gceDisks gce.Disks
}
var _ volume.Attacher = &gcePersistentDiskAttacher{}
var _ volume.AttachableVolumePlugin = &gcePersistentDiskPlugin{}
func (plugin *gcePersistentDiskPlugin) NewAttacher() (volume.Attacher, error) {
gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &gcePersistentDiskAttacher{
host: plugin.host,
gceDisks: gceCloud,
}, nil
}
func (plugin *gcePersistentDiskPlugin) GetDeviceMountRefs(deviceMountPath string) ([]string, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
return mount.GetMountRefs(mounter, deviceMountPath)
}
// Attach checks with the GCE cloud provider if the specified volume is already
// attached to the node with the specified Name.
// If the volume is attached, it succeeds (returns nil).
// If it is not, Attach issues a call to the GCE cloud provider to attach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and
// detach operations.
func (attacher *gcePersistentDiskAttacher) Attach(spec *volume.Spec, nodeName types.NodeName) (string, error) {
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return "", err
}
pdName := volumeSource.PDName
attached, err := attacher.gceDisks.DiskIsAttached(pdName, nodeName)
if err != nil {
// Log error and continue with attach
glog.Errorf(
"Error checking if PD (%q) is already attached to current node (%q). Will continue and try attach anyway. err=%v",
pdName, nodeName, err)
}
if err == nil && attached {
// Volume is already attached to node.
glog.Infof("Attach operation is successful. PD %q is already attached to node %q.", pdName, nodeName)
} else {
if err := attacher.gceDisks.AttachDisk(pdName, nodeName, readOnly); err != nil {
glog.Errorf("Error attaching PD %q to node %q: %+v", pdName, nodeName, err)
return "", err
}
}
return path.Join(diskByIdPath, diskGooglePrefix+pdName), nil
}
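Attach is idempotent: whether the disk was already attached or a fresh AttachDisk call was issued, it returns the stable udev by-id symlink for the disk rather than a kernel device name. A minimal, self-contained sketch of how that path is assembled (the constants mirror those in gce_util.go; the disk name is a hypothetical example):

package main

import (
    "fmt"
    "path"
)

const (
    diskByIdPath     = "/dev/disk/by-id/" // mirrors the constant in gce_util.go
    diskGooglePrefix = "google-"
)

func main() {
    pdName := "my-data-disk" // hypothetical PD name
    // Attach returns this path whether or not AttachDisk was actually called.
    fmt.Println(path.Join(diskByIdPath, diskGooglePrefix+pdName))
    // Output: /dev/disk/by-id/google-my-data-disk
}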
func (attacher *gcePersistentDiskAttacher) VolumesAreAttached(specs []*volume.Spec, nodeName types.NodeName) (map[*volume.Spec]bool, error) {
volumesAttachedCheck := make(map[*volume.Spec]bool)
volumePdNameMap := make(map[string]*volume.Spec)
pdNameList := []string{}
for _, spec := range specs {
volumeSource, _, err := getVolumeSource(spec)
// If an error occurs, skip this volume and move on to the next one
if err != nil {
glog.Errorf("Error getting volume (%q) source : %v", spec.Name(), err)
continue
}
pdNameList = append(pdNameList, volumeSource.PDName)
volumesAttachedCheck[spec] = true
volumePdNameMap[volumeSource.PDName] = spec
}
attachedResult, err := attacher.gceDisks.DisksAreAttached(pdNameList, nodeName)
if err != nil {
// Log the error and return the result map (all volumes still marked attached) along with it
glog.Errorf(
"Error checking if PDs (%v) are already attached to current node (%q). err=%v",
pdNameList, nodeName, err)
return volumesAttachedCheck, err
}
for pdName, attached := range attachedResult {
if !attached {
spec := volumePdNameMap[pdName]
volumesAttachedCheck[spec] = false
glog.V(2).Infof("VolumesAreAttached: check volume %q (specName: %q) is no longer attached", pdName, spec.Name())
}
}
return volumesAttachedCheck, nil
}
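VolumesAreAttached assumes every requested volume is attached and only flips entries that the bulk DisksAreAttached call reports as detached. A small sketch of that bookkeeping, using plain strings in place of *volume.Spec keys (the disk names are hypothetical):

package main

import "fmt"

// markDetached starts with every disk marked attached and clears the flag for
// any disk the (simulated) bulk cloud check reports as detached.
func markDetached(requested []string, attachedResult map[string]bool) map[string]bool {
    status := make(map[string]bool, len(requested))
    for _, name := range requested {
        status[name] = true
    }
    for name, attached := range attachedResult {
        if !attached {
            status[name] = false
        }
    }
    return status
}

func main() {
    fmt.Println(markDetached(
        []string{"pd-a", "pd-b"},
        map[string]bool{"pd-a": true, "pd-b": false},
    ))
    // map[pd-a:true pd-b:false]
}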
func (attacher *gcePersistentDiskAttacher) WaitForAttach(spec *volume.Spec, devicePath string, _ *v1.Pod, timeout time.Duration) (string, error) {
ticker := time.NewTicker(checkSleepDuration)
defer ticker.Stop()
timer := time.NewTimer(timeout)
defer timer.Stop()
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
pdName := volumeSource.PDName
partition := ""
if volumeSource.Partition != 0 {
partition = strconv.Itoa(int(volumeSource.Partition))
}
sdBefore, err := filepath.Glob(diskSDPattern)
if err != nil {
glog.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
}
sdBeforeSet := sets.NewString(sdBefore...)
devicePaths := getDiskByIdPaths(pdName, partition)
for {
select {
case <-ticker.C:
glog.V(5).Infof("Checking GCE PD %q is attached.", pdName)
path, err := verifyDevicePath(devicePaths, sdBeforeSet)
if err != nil {
// Log error, if any, and continue checking periodically. See issue #11321
glog.Errorf("Error verifying GCE PD (%q) is attached: %v", pdName, err)
} else if path != "" {
// A device path has successfully been created for the PD
glog.Infof("Successfully found attached GCE PD %q.", pdName)
return path, nil
}
case <-timer.C:
return "", fmt.Errorf("Could not find attached GCE PD %q. Timeout waiting for mount paths to be created.", pdName)
}
}
}
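WaitForAttach is a classic ticker/timer poll: check the candidate device paths on every tick, log and keep polling on errors, and give up when the timer fires. A stripped-down sketch of the same select loop with a stand-in probe function (the probe and durations are assumptions, not part of the plugin's API):

package main

import (
    "errors"
    "fmt"
    "time"
)

// waitFor polls probe every interval until it reports success or timeout elapses,
// mirroring the ticker/timer select loop in WaitForAttach.
func waitFor(probe func() (bool, error), interval, timeout time.Duration) error {
    ticker := time.NewTicker(interval)
    defer ticker.Stop()
    timer := time.NewTimer(timeout)
    defer timer.Stop()

    for {
        select {
        case <-ticker.C:
            ok, err := probe()
            if err != nil {
                // Log and keep polling, as the attacher does.
                fmt.Println("probe error:", err)
            } else if ok {
                return nil
            }
        case <-timer.C:
            return errors.New("timed out waiting for device")
        }
    }
}

func main() {
    start := time.Now()
    err := waitFor(func() (bool, error) {
        // Hypothetical probe: succeed after two seconds.
        return time.Since(start) > 2*time.Second, nil
    }, 500*time.Millisecond, 5*time.Second)
    fmt.Println("result:", err)
}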
func (attacher *gcePersistentDiskAttacher) GetDeviceMountPath(
spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return makeGlobalPDName(attacher.host, volumeSource.PDName), nil
}
func (attacher *gcePersistentDiskAttacher) MountDevice(spec *volume.Spec, devicePath string, deviceMountPath string) error {
// Only mount the PD globally once.
mounter := attacher.host.GetMounter(gcePersistentDiskPluginName)
notMnt, err := mounter.IsLikelyNotMountPoint(deviceMountPath)
if err != nil {
if os.IsNotExist(err) {
if err := os.MkdirAll(deviceMountPath, 0750); err != nil {
return err
}
notMnt = true
} else {
return err
}
}
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return err
}
options := []string{}
if readOnly {
options = append(options, "ro")
}
if notMnt {
diskMounter := volumehelper.NewSafeFormatAndMountFromHost(gcePersistentDiskPluginName, attacher.host)
mountOptions := volume.MountOptionFromSpec(spec, options...)
err = diskMounter.FormatAndMount(devicePath, deviceMountPath, volumeSource.FSType, mountOptions)
if err != nil {
os.Remove(deviceMountPath)
return err
}
glog.V(4).Infof("formatting spec %v devicePath %v deviceMountPath %v fs %v with options %+v", spec.Name(), devicePath, deviceMountPath, volumeSource.FSType, options)
}
return nil
}
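MountDevice only formats and mounts when the global path is not yet a mount point, and it appends "ro" to the mount options for read-only volume sources before handing them to FormatAndMount. A tiny sketch of that option assembly (the spec-level options slice is a hypothetical input):

package main

import "fmt"

// buildMountOptions mirrors how MountDevice assembles options: start from the
// options carried on the spec and append "ro" when the source is read-only.
func buildMountOptions(specOptions []string, readOnly bool) []string {
    options := append([]string{}, specOptions...)
    if readOnly {
        options = append(options, "ro")
    }
    return options
}

func main() {
    fmt.Println(buildMountOptions([]string{"noatime"}, true)) // [noatime ro]
    fmt.Println(buildMountOptions(nil, false))                // []
}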
type gcePersistentDiskDetacher struct {
host volume.VolumeHost
gceDisks gce.Disks
}
var _ volume.Detacher = &gcePersistentDiskDetacher{}
func (plugin *gcePersistentDiskPlugin) NewDetacher() (volume.Detacher, error) {
gceCloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return nil, err
}
return &gcePersistentDiskDetacher{
host: plugin.host,
gceDisks: gceCloud,
}, nil
}
// Detach checks with the GCE cloud provider if the specified volume is already
// attached to the specified node. If the volume is not attached, it succeeds
// (returns nil). If it is attached, Detach issues a call to the GCE cloud
// provider to detach it.
// Callers are responsible for retrying on failure.
// Callers are responsible for thread safety between concurrent attach and detach
// operations.
func (detacher *gcePersistentDiskDetacher) Detach(volumeName string, nodeName types.NodeName) error {
pdName := path.Base(volumeName)
attached, err := detacher.gceDisks.DiskIsAttached(pdName, nodeName)
if err != nil {
// Log error and continue with detach
glog.Errorf(
"Error checking if PD (%q) is already attached to current node (%q). Will continue and try detach anyway. err=%v",
pdName, nodeName, err)
}
if err == nil && !attached {
// Volume is not attached to node. Success!
glog.Infof("Detach operation is successful. PD %q was not attached to node %q.", pdName, nodeName)
return nil
}
if err = detacher.gceDisks.DetachDisk(pdName, nodeName); err != nil {
glog.Errorf("Error detaching PD %q from node %q: %v", pdName, nodeName, err)
return err
}
return nil
}
func (detacher *gcePersistentDiskDetacher) UnmountDevice(deviceMountPath string) error {
return volumeutil.UnmountPath(deviceMountPath, detacher.host.GetMounter(gcePersistentDiskPluginName))
}

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/attacher_test.go generated vendored Normal file

@@ -0,0 +1,383 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"errors"
"fmt"
"testing"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/types"
)
func TestGetDeviceName_Volume(t *testing.T) {
plugin := newPlugin()
name := "my-pd-volume"
spec := createVolSpec(name, false)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != name {
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
}
}
func TestGetDeviceName_PersistentVolume(t *testing.T) {
plugin := newPlugin()
name := "my-pd-pv"
spec := createPVSpec(name, true)
deviceName, err := plugin.GetVolumeName(spec)
if err != nil {
t.Errorf("GetDeviceName error: %v", err)
}
if deviceName != name {
t.Errorf("GetDeviceName error: expected %s, got %s", name, deviceName)
}
}
// One testcase for TestAttachDetach table test below
type testcase struct {
name string
// For fake GCE:
attach attachCall
detach detachCall
diskIsAttached diskIsAttachedCall
t *testing.T
// Actual test to run
test func(test *testcase) error
// Expected return of the test
expectedReturn error
}
func TestAttachDetach(t *testing.T) {
diskName := "disk"
nodeName := types.NodeName("instance")
readOnly := false
spec := createVolSpec(diskName, readOnly)
attachError := errors.New("Fake attach error")
detachError := errors.New("Fake detach error")
diskCheckError := errors.New("Fake DiskIsAttached error")
tests := []testcase{
// Successful Attach call
{
name: "Attach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
attach: attachCall{diskName, nodeName, readOnly, nil},
test: func(testcase *testcase) error {
attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "/dev/disk/by-id/google-disk" {
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
}
return err
},
},
// Disk is already attached
{
name: "Attach_Positive_AlreadyAttached",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
test: func(testcase *testcase) error {
attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "/dev/disk/by-id/google-disk" {
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
}
return err
},
},
// DiskIsAttached fails and Attach succeeds
{
name: "Attach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
attach: attachCall{diskName, nodeName, readOnly, nil},
test: func(testcase *testcase) error {
attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "/dev/disk/by-id/google-disk" {
return fmt.Errorf("devicePath incorrect. Expected<\"/dev/disk/by-id/google-disk\"> Actual: <%q>", devicePath)
}
return err
},
},
// Attach call fails
{
name: "Attach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
attach: attachCall{diskName, nodeName, readOnly, attachError},
test: func(testcase *testcase) error {
attacher := newAttacher(testcase)
devicePath, err := attacher.Attach(spec, nodeName)
if devicePath != "" {
return fmt.Errorf("devicePath incorrect. Expected<\"\"> Actual: <%q>", devicePath)
}
return err
},
expectedReturn: attachError,
},
// Detach succeeds
{
name: "Detach_Positive",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, true, nil},
detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) error {
detacher := newDetacher(testcase)
return detacher.Detach(diskName, nodeName)
},
},
// Disk is already detached
{
name: "Detach_Positive_AlreadyDetached",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, nil},
test: func(testcase *testcase) error {
detacher := newDetacher(testcase)
return detacher.Detach(diskName, nodeName)
},
},
// Detach succeeds when DiskIsAttached fails
{
name: "Detach_Positive_CheckFails",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, nil},
test: func(testcase *testcase) error {
detacher := newDetacher(testcase)
return detacher.Detach(diskName, nodeName)
},
},
// Detach fails
{
name: "Detach_Negative",
diskIsAttached: diskIsAttachedCall{diskName, nodeName, false, diskCheckError},
detach: detachCall{diskName, nodeName, detachError},
test: func(testcase *testcase) error {
detacher := newDetacher(testcase)
return detacher.Detach(diskName, nodeName)
},
expectedReturn: detachError,
},
}
for _, testcase := range tests {
testcase.t = t
err := testcase.test(&testcase)
if err != testcase.expectedReturn {
t.Errorf("%s failed: expected err=%q, got %q", testcase.name, testcase.expectedReturn.Error(), err.Error())
}
t.Logf("Test %q succeeded", testcase.name)
}
}
// newPlugin creates a new gcePersistentDiskPlugin with a fake cloud; NewAttacher
// and NewDetacher won't work.
func newPlugin() *gcePersistentDiskPlugin {
host := volumetest.NewFakeVolumeHost(
"/tmp", /* rootDir */
nil, /* kubeClient */
nil, /* plugins */
)
plugins := ProbeVolumePlugins()
plugin := plugins[0]
plugin.Init(host)
return plugin.(*gcePersistentDiskPlugin)
}
func newAttacher(testcase *testcase) *gcePersistentDiskAttacher {
return &gcePersistentDiskAttacher{
host: nil,
gceDisks: testcase,
}
}
func newDetacher(testcase *testcase) *gcePersistentDiskDetacher {
return &gcePersistentDiskDetacher{
gceDisks: testcase,
}
}
func createVolSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
Volume: &v1.Volume{
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: name,
ReadOnly: readOnly,
},
},
},
}
}
func createPVSpec(name string, readOnly bool) *volume.Spec {
return &volume.Spec{
PersistentVolume: &v1.PersistentVolume{
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: name,
ReadOnly: readOnly,
},
},
},
},
}
}
// Fake GCE implementation
type attachCall struct {
diskName string
nodeName types.NodeName
readOnly bool
ret error
}
type detachCall struct {
devicePath string
nodeName types.NodeName
ret error
}
type diskIsAttachedCall struct {
diskName string
nodeName types.NodeName
isAttached bool
ret error
}
func (testcase *testcase) AttachDisk(diskName string, nodeName types.NodeName, readOnly bool) error {
expected := &testcase.attach
if expected.diskName == "" && expected.nodeName == "" {
// testcase.attach looks uninitialized, test did not expect to call
// AttachDisk
testcase.t.Errorf("Unexpected AttachDisk call!")
return errors.New("Unexpected AttachDisk call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected AttachDisk call: expected diskName %s, got %s", expected.diskName, diskName)
return errors.New("Unexpected AttachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected AttachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return errors.New("Unexpected AttachDisk call: wrong nodeName")
}
if expected.readOnly != readOnly {
testcase.t.Errorf("Unexpected AttachDisk call: expected readOnly %v, got %v", expected.readOnly, readOnly)
return errors.New("Unexpected AttachDisk call: wrong readOnly")
}
glog.V(4).Infof("AttachDisk call: %s, %s, %v, returning %v", diskName, nodeName, readOnly, expected.ret)
return expected.ret
}
func (testcase *testcase) DetachDisk(devicePath string, nodeName types.NodeName) error {
expected := &testcase.detach
if expected.devicePath == "" && expected.nodeName == "" {
// testcase.detach looks uninitialized, test did not expect to call
// DetachDisk
testcase.t.Errorf("Unexpected DetachDisk call!")
return errors.New("Unexpected DetachDisk call!")
}
if expected.devicePath != devicePath {
testcase.t.Errorf("Unexpected DetachDisk call: expected devicePath %s, got %s", expected.devicePath, devicePath)
return errors.New("Unexpected DetachDisk call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DetachDisk call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return errors.New("Unexpected DetachDisk call: wrong nodeName")
}
glog.V(4).Infof("DetachDisk call: %s, %s, returning %v", devicePath, nodeName, expected.ret)
return expected.ret
}
func (testcase *testcase) DiskIsAttached(diskName string, nodeName types.NodeName) (bool, error) {
expected := &testcase.diskIsAttached
if expected.diskName == "" && expected.nodeName == "" {
// testcase.diskIsAttached looks uninitialized, test did not expect to
// call DiskIsAttached
testcase.t.Errorf("Unexpected DiskIsAttached call!")
return false, errors.New("Unexpected DiskIsAttached call!")
}
if expected.diskName != diskName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected diskName %s, got %s", expected.diskName, diskName)
return false, errors.New("Unexpected DiskIsAttached call: wrong diskName")
}
if expected.nodeName != nodeName {
testcase.t.Errorf("Unexpected DiskIsAttached call: expected nodeName %s, got %s", expected.nodeName, nodeName)
return false, errors.New("Unexpected DiskIsAttached call: wrong nodeName")
}
glog.V(4).Infof("DiskIsAttached call: %s, %s, returning %v, %v", diskName, nodeName, expected.isAttached, expected.ret)
return expected.isAttached, expected.ret
}
func (testcase *testcase) DisksAreAttached(diskNames []string, nodeName types.NodeName) (map[string]bool, error) {
return nil, errors.New("Not implemented")
}
func (testcase *testcase) CreateDisk(name string, diskType string, zone string, sizeGb int64, tags map[string]string) error {
return errors.New("Not implemented")
}
func (testcase *testcase) CreateRegionalDisk(name string, diskType string, replicaZones sets.String, sizeGb int64, tags map[string]string) error {
return errors.New("Not implemented")
}
func (testcase *testcase) DeleteDisk(diskToDelete string) error {
return errors.New("Not implemented")
}
func (testcase *testcase) GetAutoLabelsForPD(name string, zone string) (map[string]string, error) {
return map[string]string{}, errors.New("Not implemented")
}
func (testcase *testcase) ResizeDisk(
diskName string,
oldSize resource.Quantity,
newSize resource.Quantity) (resource.Quantity, error) {
return oldSize, errors.New("Not implemented")
}

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/doc.go generated vendored Normal file

@@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package gce_pd contains the internal representation of GCE PersistentDisk
// volumes.
package gce_pd // import "k8s.io/kubernetes/pkg/volume/gce_pd"

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd.go generated vendored Normal file

@@ -0,0 +1,453 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"fmt"
"os"
"path"
"strconv"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/kubernetes/pkg/util/mount"
kstrings "k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)
// This is the primary entrypoint for volume plugins.
func ProbeVolumePlugins() []volume.VolumePlugin {
return []volume.VolumePlugin{&gcePersistentDiskPlugin{nil}}
}
type gcePersistentDiskPlugin struct {
host volume.VolumeHost
}
var _ volume.VolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.PersistentVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.DeletableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ProvisionableVolumePlugin = &gcePersistentDiskPlugin{}
var _ volume.ExpandableVolumePlugin = &gcePersistentDiskPlugin{}
const (
gcePersistentDiskPluginName = "kubernetes.io/gce-pd"
)
func getPath(uid types.UID, volName string, host volume.VolumeHost) string {
return host.GetPodVolumeDir(uid, kstrings.EscapeQualifiedNameForDisk(gcePersistentDiskPluginName), volName)
}
func (plugin *gcePersistentDiskPlugin) Init(host volume.VolumeHost) error {
plugin.host = host
return nil
}
func (plugin *gcePersistentDiskPlugin) GetPluginName() string {
return gcePersistentDiskPluginName
}
func (plugin *gcePersistentDiskPlugin) GetVolumeName(spec *volume.Spec) (string, error) {
volumeSource, _, err := getVolumeSource(spec)
if err != nil {
return "", err
}
return volumeSource.PDName, nil
}
func (plugin *gcePersistentDiskPlugin) CanSupport(spec *volume.Spec) bool {
return (spec.PersistentVolume != nil && spec.PersistentVolume.Spec.GCEPersistentDisk != nil) ||
(spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil)
}
func (plugin *gcePersistentDiskPlugin) RequiresRemount() bool {
return false
}
func (plugin *gcePersistentDiskPlugin) SupportsMountOption() bool {
return true
}
func (plugin *gcePersistentDiskPlugin) SupportsBulkVolumeVerification() bool {
return false
}
func (plugin *gcePersistentDiskPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode {
return []v1.PersistentVolumeAccessMode{
v1.ReadWriteOnce,
v1.ReadOnlyMany,
}
}
func (plugin *gcePersistentDiskPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newMounterInternal(spec, pod.UID, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func getVolumeSource(
spec *volume.Spec) (*v1.GCEPersistentDiskVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.GCEPersistentDisk != nil {
return spec.Volume.GCEPersistentDisk, spec.Volume.GCEPersistentDisk.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
spec.PersistentVolume.Spec.GCEPersistentDisk != nil {
return spec.PersistentVolume.Spec.GCEPersistentDisk, spec.ReadOnly, nil
}
return nil, false, fmt.Errorf("Spec does not reference a GCE volume type")
}
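getVolumeSource accepts either an inline pod volume or a PersistentVolume-backed spec; only the latter takes its read-only flag from the spec rather than from the volume source itself. A simplified sketch of the same two-branch lookup with stand-in types (gcePDSource and spec here are hypothetical, not the real v1/volume types):

package main

import (
    "errors"
    "fmt"
)

// Hypothetical, simplified stand-ins for v1.GCEPersistentDiskVolumeSource and volume.Spec.
type gcePDSource struct {
    PDName   string
    ReadOnly bool
}

type spec struct {
    inline     *gcePDSource // set when the pod embeds the volume directly
    persistent *gcePDSource // set when the volume comes from a PersistentVolume
    pvReadOnly bool         // read-only flag set via the persistent-claim binding
}

// source mirrors getVolumeSource: inline volumes carry their own ReadOnly flag,
// PersistentVolumes take it from the spec-level flag.
func source(s *spec) (*gcePDSource, bool, error) {
    if s.inline != nil {
        return s.inline, s.inline.ReadOnly, nil
    }
    if s.persistent != nil {
        return s.persistent, s.pvReadOnly, nil
    }
    return nil, false, errors.New("spec does not reference a GCE volume type")
}

func main() {
    src, ro, err := source(&spec{persistent: &gcePDSource{PDName: "pd-1"}, pvReadOnly: true})
    fmt.Println(src.PDName, ro, err) // pd-1 true <nil>
}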
func (plugin *gcePersistentDiskPlugin) newMounterInternal(spec *volume.Spec, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Mounter, error) {
// GCEPDs used directly in a pod have a ReadOnly flag set by the pod author.
// GCEPDs used as a PersistentVolume get the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
volumeSource, readOnly, err := getVolumeSource(spec)
if err != nil {
return nil, err
}
pdName := volumeSource.PDName
partition := ""
if volumeSource.Partition != 0 {
partition = strconv.Itoa(int(volumeSource.Partition))
}
return &gcePersistentDiskMounter{
gcePersistentDisk: &gcePersistentDisk{
podUID: podUID,
volName: spec.Name(),
pdName: pdName,
partition: partition,
mounter: mounter,
manager: manager,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, spec.Name(), plugin.host)),
},
readOnly: readOnly}, nil
}
func (plugin *gcePersistentDiskPlugin) NewUnmounter(volName string, podUID types.UID) (volume.Unmounter, error) {
// Inject real implementations here, test through the internal function.
return plugin.newUnmounterInternal(volName, podUID, &GCEDiskUtil{}, plugin.host.GetMounter(plugin.GetPluginName()))
}
func (plugin *gcePersistentDiskPlugin) newUnmounterInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Unmounter, error) {
return &gcePersistentDiskUnmounter{&gcePersistentDisk{
podUID: podUID,
volName: volName,
manager: manager,
mounter: mounter,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(getPath(podUID, volName, plugin.host)),
}}, nil
}
func (plugin *gcePersistentDiskPlugin) NewDeleter(spec *volume.Spec) (volume.Deleter, error) {
return plugin.newDeleterInternal(spec, &GCEDiskUtil{})
}
func (plugin *gcePersistentDiskPlugin) newDeleterInternal(spec *volume.Spec, manager pdManager) (volume.Deleter, error) {
if spec.PersistentVolume != nil && spec.PersistentVolume.Spec.GCEPersistentDisk == nil {
return nil, fmt.Errorf("spec.PersistentVolumeSource.GCEPersistentDisk is nil")
}
return &gcePersistentDiskDeleter{
gcePersistentDisk: &gcePersistentDisk{
volName: spec.Name(),
pdName: spec.PersistentVolume.Spec.GCEPersistentDisk.PDName,
manager: manager,
plugin: plugin,
}}, nil
}
func (plugin *gcePersistentDiskPlugin) NewProvisioner(options volume.VolumeOptions) (volume.Provisioner, error) {
return plugin.newProvisionerInternal(options, &GCEDiskUtil{})
}
func (plugin *gcePersistentDiskPlugin) newProvisionerInternal(options volume.VolumeOptions, manager pdManager) (volume.Provisioner, error) {
return &gcePersistentDiskProvisioner{
gcePersistentDisk: &gcePersistentDisk{
manager: manager,
plugin: plugin,
},
options: options,
}, nil
}
func (plugin *gcePersistentDiskPlugin) RequiresFSResize() bool {
return true
}
func (plugin *gcePersistentDiskPlugin) ExpandVolumeDevice(
spec *volume.Spec,
newSize resource.Quantity,
oldSize resource.Quantity) (resource.Quantity, error) {
cloud, err := getCloudProvider(plugin.host.GetCloudProvider())
if err != nil {
return oldSize, err
}
pdName := spec.PersistentVolume.Spec.GCEPersistentDisk.PDName
updatedQuantity, err := cloud.ResizeDisk(pdName, oldSize, newSize)
if err != nil {
return oldSize, err
}
return updatedQuantity, nil
}
func (plugin *gcePersistentDiskPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {
mounter := plugin.host.GetMounter(plugin.GetPluginName())
pluginDir := plugin.host.GetPluginDir(plugin.GetPluginName())
sourceName, err := mounter.GetDeviceNameFromMount(mountPath, pluginDir)
if err != nil {
return nil, err
}
gceVolume := &v1.Volume{
Name: volumeName,
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: sourceName,
},
},
}
return volume.NewSpecFromVolume(gceVolume), nil
}
// Abstract interface to PD operations.
type pdManager interface {
// Creates a volume
CreateVolume(provisioner *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error)
// Deletes a volume
DeleteVolume(deleter *gcePersistentDiskDeleter) error
}
// gcePersistentDisk volumes are disk resources provided by Google Compute Engine
// that are attached to the kubelet's host machine and exposed to the pod.
type gcePersistentDisk struct {
volName string
podUID types.UID
// Unique identifier of the PD, used to find the disk resource in the provider.
pdName string
// Specifies the partition to mount
partition string
// Utility interface to provision and delete disks
manager pdManager
// Mounter interface that provides system calls to mount the global path to the pod local path.
mounter mount.Interface
plugin *gcePersistentDiskPlugin
volume.MetricsProvider
}
type gcePersistentDiskMounter struct {
*gcePersistentDisk
// Specifies whether the disk will be mounted as read-only.
readOnly bool
}
var _ volume.Mounter = &gcePersistentDiskMounter{}
func (b *gcePersistentDiskMounter) GetAttributes() volume.Attributes {
return volume.Attributes{
ReadOnly: b.readOnly,
Managed: !b.readOnly,
SupportsSELinux: true,
}
}
// Checks prior to mount operations to verify that the required components (binaries, etc.)
// to mount the volume are available on the underlying node.
// If not, it returns an error
func (b *gcePersistentDiskMounter) CanMount() error {
return nil
}
// SetUp bind mounts the disk global mount to the volume path.
func (b *gcePersistentDiskMounter) SetUp(fsGroup *int64) error {
return b.SetUpAt(b.GetPath(), fsGroup)
}
// SetUpAt bind mounts the disk global mount to the given volume path.
func (b *gcePersistentDiskMounter) SetUpAt(dir string, fsGroup *int64) error {
// TODO: handle failed mounts here.
notMnt, err := b.mounter.IsLikelyNotMountPoint(dir)
glog.V(4).Infof("GCE PersistentDisk set up: Dir (%s) PD name (%q) Mounted (%t) Error (%v), ReadOnly (%t)", dir, b.pdName, !notMnt, err, b.readOnly)
if err != nil && !os.IsNotExist(err) {
glog.Errorf("cannot validate mount point: %s %v", dir, err)
return err
}
if !notMnt {
return nil
}
if err := os.MkdirAll(dir, 0750); err != nil {
glog.Errorf("mkdir failed on disk %s (%v)", dir, err)
return err
}
// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
options := []string{"bind"}
if b.readOnly {
options = append(options, "ro")
}
globalPDPath := makeGlobalPDName(b.plugin.host, b.pdName)
glog.V(4).Infof("attempting to mount %s", dir)
err = b.mounter.Mount(globalPDPath, dir, "", options)
if err != nil {
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
if mntErr = b.mounter.Unmount(dir); mntErr != nil {
glog.Errorf("Failed to unmount: %v", mntErr)
return err
}
notMnt, mntErr := b.mounter.IsLikelyNotMountPoint(dir)
if mntErr != nil {
glog.Errorf("IsLikelyNotMountPoint check failed: %v", mntErr)
return err
}
if !notMnt {
// This is very odd, we don't expect it. We'll try again next sync loop.
glog.Errorf("%s is still mounted, despite call to unmount(). Will try again next sync loop.", dir)
return err
}
}
os.Remove(dir)
glog.Errorf("Mount of disk %s failed: %v", dir, err)
return err
}
if !b.readOnly {
volume.SetVolumeOwnership(b, fsGroup)
}
glog.V(4).Infof("Successfully mounted %s", dir)
return nil
}
func makeGlobalPDName(host volume.VolumeHost, devName string) string {
return path.Join(host.GetPluginDir(gcePersistentDiskPluginName), mount.MountsInGlobalPDPath, devName)
}
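makeGlobalPDName puts the per-disk global mount under the plugin's directory, so every pod using the same PD bind-mounts from one place. A sketch of the resulting layout; the kubelet plugin directory and the value of mount.MountsInGlobalPDPath ("mounts") are assumptions for illustration, not taken from this diff:

package main

import (
    "fmt"
    "path"
)

func main() {
    // Assumed values for illustration only.
    pluginDir := "/var/lib/kubelet/plugins/kubernetes.io/gce-pd"
    mountsInGlobalPDPath := "mounts" // assumed value of mount.MountsInGlobalPDPath
    pdName := "my-data-disk"         // hypothetical disk name

    fmt.Println(path.Join(pluginDir, mountsInGlobalPDPath, pdName))
    // /var/lib/kubelet/plugins/kubernetes.io/gce-pd/mounts/my-data-disk
}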
func (b *gcePersistentDiskMounter) GetPath() string {
return getPath(b.podUID, b.volName, b.plugin.host)
}
type gcePersistentDiskUnmounter struct {
*gcePersistentDisk
}
var _ volume.Unmounter = &gcePersistentDiskUnmounter{}
func (c *gcePersistentDiskUnmounter) GetPath() string {
return getPath(c.podUID, c.volName, c.plugin.host)
}
// Unmounts the bind mount, and detaches the disk only if the PD
// resource was the last reference to that disk on the kubelet.
func (c *gcePersistentDiskUnmounter) TearDown() error {
return c.TearDownAt(c.GetPath())
}
// TearDownAt unmounts the bind mount
func (c *gcePersistentDiskUnmounter) TearDownAt(dir string) error {
return util.UnmountPath(dir, c.mounter)
}
type gcePersistentDiskDeleter struct {
*gcePersistentDisk
}
var _ volume.Deleter = &gcePersistentDiskDeleter{}
func (d *gcePersistentDiskDeleter) GetPath() string {
return getPath(d.podUID, d.volName, d.plugin.host)
}
func (d *gcePersistentDiskDeleter) Delete() error {
return d.manager.DeleteVolume(d)
}
type gcePersistentDiskProvisioner struct {
*gcePersistentDisk
options volume.VolumeOptions
}
var _ volume.Provisioner = &gcePersistentDiskProvisioner{}
func (c *gcePersistentDiskProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(c.plugin.GetAccessModes(), c.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", c.options.PVC.Spec.AccessModes, c.plugin.GetAccessModes())
}
volumeID, sizeGB, labels, fstype, err := c.manager.CreateVolume(c)
if err != nil {
return nil, err
}
if fstype == "" {
fstype = "ext4"
}
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: c.options.PVName,
Labels: map[string]string{},
Annotations: map[string]string{
volumehelper.VolumeDynamicallyCreatedByKey: "gce-pd-dynamic-provisioner",
},
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeReclaimPolicy: c.options.PersistentVolumeReclaimPolicy,
AccessModes: c.options.PVC.Spec.AccessModes,
Capacity: v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGB)),
},
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: volumeID,
Partition: 0,
ReadOnly: false,
FSType: fstype,
},
},
MountOptions: c.options.MountOptions,
},
}
if len(c.options.PVC.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = c.plugin.GetAccessModes()
}
if len(labels) != 0 {
if pv.Labels == nil {
pv.Labels = make(map[string]string)
}
for k, v := range labels {
pv.Labels[k] = v
}
}
return pv, nil
}

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_pd_test.go generated vendored Normal file

@@ -0,0 +1,258 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"fmt"
"os"
"path"
"testing"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
func TestCanSupport(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if plug.GetPluginName() != "kubernetes.io/gce-pd" {
t.Errorf("Wrong name: %s", plug.GetPluginName())
}
if !plug.CanSupport(&volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}}}) {
t.Errorf("Expected true")
}
if !plug.CanSupport(&volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{}}}}}) {
t.Errorf("Expected true")
}
}
func TestGetAccessModes(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/gce-pd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
if !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadWriteOnce) || !volumetest.ContainsAccessMode(plug.GetAccessModes(), v1.ReadOnlyMany) {
t.Errorf("Expected two AccessModeTypes: %s and %s", v1.ReadWriteOnce, v1.ReadOnlyMany)
}
}
type fakePDManager struct {
}
func (fake *fakePDManager) CreateVolume(c *gcePersistentDiskProvisioner) (volumeID string, volumeSizeGB int, labels map[string]string, fstype string, err error) {
labels = make(map[string]string)
labels["fakepdmanager"] = "yes"
return "test-gce-volume-name", 100, labels, "", nil
}
func (fake *fakePDManager) DeleteVolume(cd *gcePersistentDiskDeleter) error {
if cd.pdName != "test-gce-volume-name" {
return fmt.Errorf("Deleter got unexpected volume name: %s", cd.pdName)
}
return nil
}
func TestPlugin(t *testing.T) {
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
if err != nil {
t.Errorf("Can't find the plugin by name")
}
spec := &v1.Volume{
Name: "vol1",
VolumeSource: v1.VolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
PDName: "pd",
FSType: "ext4",
},
},
}
fakeManager := &fakePDManager{}
fakeMounter := &mount.FakeMounter{}
mounter, err := plug.(*gcePersistentDiskPlugin).newMounterInternal(volume.NewSpecFromVolume(spec), types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Mounter: %v", err)
}
if mounter == nil {
t.Errorf("Got a nil Mounter")
}
volPath := path.Join(tmpDir, "pods/poduid/volumes/kubernetes.io~gce-pd/vol1")
path := mounter.GetPath()
if path != volPath {
t.Errorf("Got unexpected path: %s", path)
}
if err := mounter.SetUp(nil); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
t.Errorf("SetUp() failed, volume path not created: %s", path)
} else {
t.Errorf("SetUp() failed: %v", err)
}
}
fakeManager = &fakePDManager{}
unmounter, err := plug.(*gcePersistentDiskPlugin).newUnmounterInternal("vol1", types.UID("poduid"), fakeManager, fakeMounter)
if err != nil {
t.Errorf("Failed to make a new Unmounter: %v", err)
}
if unmounter == nil {
t.Errorf("Got a nil Unmounter")
}
if err := unmounter.TearDown(); err != nil {
t.Errorf("Expected success, got: %v", err)
}
if _, err := os.Stat(path); err == nil {
t.Errorf("TearDown() failed, volume path still exists: %s", path)
} else if !os.IsNotExist(err) {
t.Errorf("TearDown() failed: %v", err)
}
// Test Provisioner
options := volume.VolumeOptions{
PVC: volumetest.CreateTestPVC("100Mi", []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}),
PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimDelete,
}
provisioner, err := plug.(*gcePersistentDiskPlugin).newProvisionerInternal(options, &fakePDManager{})
if err != nil {
t.Errorf("Error creating new provisioner:%v", err)
}
persistentSpec, err := provisioner.Provision()
if err != nil {
t.Errorf("Provision() failed: %v", err)
}
if persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName != "test-gce-volume-name" {
t.Errorf("Provision() returned unexpected volume ID: %s", persistentSpec.Spec.PersistentVolumeSource.GCEPersistentDisk.PDName)
}
cap := persistentSpec.Spec.Capacity[v1.ResourceStorage]
size := cap.Value()
if size != 100*1024*1024*1024 {
t.Errorf("Provision() returned unexpected volume size: %v", size)
}
if persistentSpec.Labels["fakepdmanager"] != "yes" {
t.Errorf("Provision() returned unexpected labels: %v", persistentSpec.Labels)
}
// Test Deleter
volSpec := &volume.Spec{
PersistentVolume: persistentSpec,
}
deleter, err := plug.(*gcePersistentDiskPlugin).newDeleterInternal(volSpec, &fakePDManager{})
if err != nil {
t.Errorf("Error creating new deleter:%v", err)
}
err = deleter.Delete()
if err != nil {
t.Errorf("Deleter() failed: %v", err)
}
}
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
pv := &v1.PersistentVolume{
ObjectMeta: metav1.ObjectMeta{
Name: "pvA",
},
Spec: v1.PersistentVolumeSpec{
PersistentVolumeSource: v1.PersistentVolumeSource{
GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{},
},
ClaimRef: &v1.ObjectReference{
Name: "claimA",
},
},
}
claim := &v1.PersistentVolumeClaim{
ObjectMeta: metav1.ObjectMeta{
Name: "claimA",
Namespace: "nsA",
},
Spec: v1.PersistentVolumeClaimSpec{
VolumeName: "pvA",
},
Status: v1.PersistentVolumeClaimStatus{
Phase: v1.ClaimBound,
},
}
client := fake.NewSimpleClientset(pv, claim)
tmpDir, err := utiltesting.MkTmpdir("gcepdTest")
if err != nil {
t.Fatalf("can't make a temp dir: %v", err)
}
defer os.RemoveAll(tmpDir)
plugMgr := volume.VolumePluginMgr{}
plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, client, nil))
plug, _ := plugMgr.FindPluginByName(gcePersistentDiskPluginName)
// readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
spec := volume.NewSpecFromPersistentVolume(pv, true)
pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
if mounter == nil {
t.Fatalf("Got a nil Mounter")
}
if !mounter.GetAttributes().ReadOnly {
t.Errorf("Expected true for mounter.IsReadOnly")
}
}

vendor/k8s.io/kubernetes/pkg/volume/gce_pd/gce_util.go generated vendored Normal file

@@ -0,0 +1,359 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package gce_pd
import (
"fmt"
"path"
"path/filepath"
"strings"
"time"
"github.com/golang/glog"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/cloudprovider"
gcecloud "k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/volume"
volumeutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/utils/exec"
)
const (
diskByIdPath = "/dev/disk/by-id/"
diskGooglePrefix = "google-"
diskScsiGooglePrefix = "scsi-0Google_PersistentDisk_"
diskPartitionSuffix = "-part"
diskSDPath = "/dev/sd"
diskSDPattern = "/dev/sd*"
regionalPDZonesAuto = "auto" // "replica-zones: auto" means Kubernetes will select zones for RePD
maxChecks = 60
maxRetries = 10
checkSleepDuration = time.Second
maxRegionalPDZones = 2
)
// These variables are modified only in unit tests and should be constant
// otherwise.
var (
errorSleepDuration time.Duration = 5 * time.Second
)
type GCEDiskUtil struct{}
func (util *GCEDiskUtil) DeleteVolume(d *gcePersistentDiskDeleter) error {
cloud, err := getCloudProvider(d.gcePersistentDisk.plugin.host.GetCloudProvider())
if err != nil {
return err
}
if err = cloud.DeleteDisk(d.pdName); err != nil {
glog.V(2).Infof("Error deleting GCE PD volume %s: %v", d.pdName, err)
// GCE cloud provider returns volume.deletedVolumeInUseError when
// necessary, no handling needed here.
return err
}
glog.V(2).Infof("Successfully deleted GCE PD volume %s", d.pdName)
return nil
}
// CreateVolume creates a GCE PD.
// Returns: gcePDName, volumeSizeGB, labels, fsType, error
func (gceutil *GCEDiskUtil) CreateVolume(c *gcePersistentDiskProvisioner) (string, int, map[string]string, string, error) {
cloud, err := getCloudProvider(c.gcePersistentDisk.plugin.host.GetCloudProvider())
if err != nil {
return "", 0, nil, "", err
}
name := volume.GenerateVolumeName(c.options.ClusterName, c.options.PVName, 63) // GCE PD name can have up to 63 characters
capacity := c.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
requestBytes := capacity.Value()
// GCE works with gigabytes, convert to GiB with rounding up
requestGB := volume.RoundUpSize(requestBytes, 1024*1024*1024)
// Apply Parameters (case-insensitive). We leave validation of
// the values to the cloud provider.
diskType := ""
configuredZone := ""
configuredZones := ""
configuredReplicaZones := ""
zonePresent := false
zonesPresent := false
replicaZonesPresent := false
fstype := ""
for k, v := range c.options.Parameters {
switch strings.ToLower(k) {
case "type":
diskType = v
case "zone":
zonePresent = true
configuredZone = v
case "zones":
zonesPresent = true
configuredZones = v
case "replica-zones":
replicaZonesPresent = true
configuredReplicaZones = v
case volume.VolumeParameterFSType:
fstype = v
default:
return "", 0, nil, "", fmt.Errorf("invalid option %q for volume plugin %s", k, c.plugin.GetPluginName())
}
}
if ((zonePresent || zonesPresent) && replicaZonesPresent) ||
(zonePresent && zonesPresent) {
// 011, 101, 111, 110
return "", 0, nil, "", fmt.Errorf("a combination of zone, zones, and replica-zones StorageClass parameters must not be used at the same time")
}
// TODO: implement PVC.Selector parsing
if c.options.PVC.Spec.Selector != nil {
return "", 0, nil, "", fmt.Errorf("claim.Spec.Selector is not supported for dynamic provisioning on GCE")
}
if !zonePresent && !zonesPresent && replicaZonesPresent {
// 001 - "replica-zones" specified
replicaZones, err := volumeutil.ZonesToSet(configuredReplicaZones)
if err != nil {
return "", 0, nil, "", err
}
err = createRegionalPD(
name,
c.options.PVC.Name,
diskType,
replicaZones,
requestGB,
c.options.CloudTags,
cloud)
if err != nil {
glog.V(2).Infof("Error creating regional GCE PD volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created Regional GCE PD volume %s", name)
} else {
var zones sets.String
if !zonePresent && !zonesPresent {
// 000 - none of "zone", "zones", or "replica-zones" specified
// Pick a zone randomly selected from all active zones where
// Kubernetes cluster has a node.
zones, err = cloud.GetAllCurrentZones()
if err != nil {
glog.V(2).Infof("error getting zone information from GCE: %v", err)
return "", 0, nil, "", err
}
} else if !zonePresent && zonesPresent {
// 010 - "zones" specified
// Pick a zone randomly selected from specified set.
if zones, err = volumeutil.ZonesToSet(configuredZones); err != nil {
return "", 0, nil, "", err
}
} else if zonePresent && !zonesPresent {
// 100 - "zone" specified
// Use specified zone
if err := volume.ValidateZone(configuredZone); err != nil {
return "", 0, nil, "", err
}
zones = make(sets.String)
zones.Insert(configuredZone)
}
zone := volume.ChooseZoneForVolume(zones, c.options.PVC.Name)
if err := cloud.CreateDisk(
name,
diskType,
zone,
int64(requestGB),
*c.options.CloudTags); err != nil {
glog.V(2).Infof("Error creating single-zone GCE PD volume: %v", err)
return "", 0, nil, "", err
}
glog.V(2).Infof("Successfully created single-zone GCE PD volume %s", name)
}
labels, err := cloud.GetAutoLabelsForPD(name, "" /* zone */)
if err != nil {
// We don't really want to leak the volume here...
glog.Errorf("error getting labels for volume %q: %v", name, err)
}
return name, int(requestGB), labels, fstype, nil
}
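CreateVolume treats the "zone", "zones" and "replica-zones" StorageClass parameters as mutually exclusive (the 011/101/111/110 cases above) and falls back to picking a zone automatically when none is given. A small sketch of just that validation step (the boolean flags correspond to whether each parameter was present):

package main

import (
    "errors"
    "fmt"
)

// validateZoneParams mirrors the CreateVolume check: specifying more than one
// of zone / zones / replica-zones is rejected.
func validateZoneParams(zonePresent, zonesPresent, replicaZonesPresent bool) error {
    if ((zonePresent || zonesPresent) && replicaZonesPresent) ||
        (zonePresent && zonesPresent) {
        return errors.New("a combination of zone, zones, and replica-zones must not be used at the same time")
    }
    return nil
}

func main() {
    fmt.Println(validateZoneParams(true, false, false))  // <nil>: single zone is fine
    fmt.Println(validateZoneParams(false, false, false)) // <nil>: a zone is chosen automatically
    fmt.Println(validateZoneParams(true, false, true))   // error: zone + replica-zones
}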
// Creates a Regional PD
func createRegionalPD(
diskName string,
pvcName string,
diskType string,
replicaZones sets.String,
requestGB int64,
cloudTags *map[string]string,
cloud *gcecloud.GCECloud) error {
autoZoneSelection := false
if replicaZones.Len() != maxRegionalPDZones {
replicaZonesList := replicaZones.UnsortedList()
if replicaZones.Len() == 1 && replicaZonesList[0] == regionalPDZonesAuto {
// User requested automatic zone selection.
autoZoneSelection = true
} else {
return fmt.Errorf(
"replica-zones specifies %d zones. It must specify %d zones or the keyword \"auto\" to let Kubernetes select zones.",
replicaZones.Len(),
maxRegionalPDZones)
}
}
selectedReplicaZones := replicaZones
if autoZoneSelection {
selectedReplicaZones = volume.ChooseZonesForVolume(
replicaZones, pvcName, maxRegionalPDZones)
}
if err := cloud.CreateRegionalDisk(
diskName,
diskType,
selectedReplicaZones,
int64(requestGB),
*cloudTags); err != nil {
return err
}
return nil
}
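createRegionalPD accepts either exactly maxRegionalPDZones (two) explicit zones or the single keyword "auto", in which case zones are chosen for the PVC. A standalone sketch of that decision (resolveReplicaZones is a hypothetical helper name, using a string slice instead of sets.String):

package main

import "fmt"

const maxRegionalPDZones = 2 // mirrors the constant above

// resolveReplicaZones mirrors createRegionalPD's handling of replica-zones:
// exactly two zones are used as-is, the single keyword "auto" requests
// automatic selection, anything else is an error.
func resolveReplicaZones(zones []string) (auto bool, err error) {
    if len(zones) == maxRegionalPDZones {
        return false, nil
    }
    if len(zones) == 1 && zones[0] == "auto" {
        return true, nil
    }
    return false, fmt.Errorf("replica-zones specifies %d zones; it must specify %d zones or the keyword \"auto\"", len(zones), maxRegionalPDZones)
}

func main() {
    fmt.Println(resolveReplicaZones([]string{"us-central1-a", "us-central1-b"})) // false <nil>
    fmt.Println(resolveReplicaZones([]string{"auto"}))                           // true <nil>
    fmt.Println(resolveReplicaZones([]string{"us-central1-a"}))                  // false, error
}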
// Returns the first path that exists, or empty string if none exist.
func verifyDevicePath(devicePaths []string, sdBeforeSet sets.String) (string, error) {
if err := udevadmChangeToNewDrives(sdBeforeSet); err != nil {
// udevadm errors should not block disk detachment, log and continue
glog.Errorf("udevadmChangeToNewDrives failed with: %v", err)
}
for _, path := range devicePaths {
if pathExists, err := volumeutil.PathExists(path); err != nil {
return "", fmt.Errorf("Error checking if path exists: %v", err)
} else if pathExists {
return path, nil
}
}
return "", nil
}
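verifyDevicePath walks the candidate by-id paths and returns the first one that exists; a missing path is not an error, only a failed existence check is. A stdlib-only sketch of the same scan (volumeutil.PathExists is replaced by os.Stat here, and the example paths are hypothetical):

package main

import (
    "fmt"
    "os"
)

// firstExisting mirrors verifyDevicePath's scan: return the first path that
// exists, or "" if none do. Stat errors other than "not exist" are returned.
func firstExisting(paths []string) (string, error) {
    for _, p := range paths {
        if _, err := os.Stat(p); err == nil {
            return p, nil
        } else if !os.IsNotExist(err) {
            return "", fmt.Errorf("error checking if path exists: %v", err)
        }
    }
    return "", nil
}

func main() {
    p, err := firstExisting([]string{"/dev/disk/by-id/google-example", "/dev/null"})
    fmt.Println(p, err) // /dev/null <nil> on most Linux systems
}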
// Returns true only after all of the given device paths have been removed.
func verifyAllPathsRemoved(devicePaths []string) (bool, error) {
allPathsRemoved := true
for _, path := range devicePaths {
if err := udevadmChangeToDrive(path); err != nil {
// udevadm errors should not block disk detachment, log and continue
glog.Errorf("%v", err)
}
if exists, err := volumeutil.PathExists(path); err != nil {
return false, fmt.Errorf("Error checking if path exists: %v", err)
} else {
allPathsRemoved = allPathsRemoved && !exists
}
}
return allPathsRemoved, nil
}
// Returns list of all /dev/disk/by-id/* paths for given PD.
func getDiskByIdPaths(pdName string, partition string) []string {
devicePaths := []string{
path.Join(diskByIdPath, diskGooglePrefix+pdName),
path.Join(diskByIdPath, diskScsiGooglePrefix+pdName),
}
if partition != "" {
for i, path := range devicePaths {
devicePaths[i] = path + diskPartitionSuffix + partition
}
}
return devicePaths
}
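getDiskByIdPaths produces both by-id symlink flavors GCE may create for a disk (the "google-" form and the SCSI form), with the partition suffix appended when a partition was requested. A runnable sketch with a hypothetical disk name and partition:

package main

import (
    "fmt"
    "path"
)

const (
    diskByIdPath         = "/dev/disk/by-id/"
    diskGooglePrefix     = "google-"
    diskScsiGooglePrefix = "scsi-0Google_PersistentDisk_"
    diskPartitionSuffix  = "-part"
)

// byIdPaths reproduces getDiskByIdPaths: both symlink flavors, with an optional
// partition suffix appended to each.
func byIdPaths(pdName, partition string) []string {
    paths := []string{
        path.Join(diskByIdPath, diskGooglePrefix+pdName),
        path.Join(diskByIdPath, diskScsiGooglePrefix+pdName),
    }
    if partition != "" {
        for i, p := range paths {
            paths[i] = p + diskPartitionSuffix + partition
        }
    }
    return paths
}

func main() {
    fmt.Println(byIdPaths("my-data-disk", "1"))
    // [/dev/disk/by-id/google-my-data-disk-part1 /dev/disk/by-id/scsi-0Google_PersistentDisk_my-data-disk-part1]
}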
// Return cloud provider
func getCloudProvider(cloudProvider cloudprovider.Interface) (*gcecloud.GCECloud, error) {
var err error
for numRetries := 0; numRetries < maxRetries; numRetries++ {
gceCloudProvider, ok := cloudProvider.(*gcecloud.GCECloud)
if !ok || gceCloudProvider == nil {
// Retry on error. See issue #11321
glog.Errorf("Failed to get GCE Cloud Provider. plugin.host.GetCloudProvider returned %v instead", cloudProvider)
time.Sleep(errorSleepDuration)
continue
}
return gceCloudProvider, nil
}
return nil, fmt.Errorf("Failed to get GCE GCECloudProvider with error %v", err)
}
// Triggers the application of udev rules by calling "udevadm trigger
// --action=change" for newly created "/dev/sd*" drives (exist only in
// after set). This is workaround for Issue #7972. Once the underlying
// issue has been resolved, this may be removed.
func udevadmChangeToNewDrives(sdBeforeSet sets.String) error {
sdAfter, err := filepath.Glob(diskSDPattern)
if err != nil {
return fmt.Errorf("Error filepath.Glob(\"%s\"): %v\r\n", diskSDPattern, err)
}
for _, sd := range sdAfter {
if !sdBeforeSet.Has(sd) {
return udevadmChangeToDrive(sd)
}
}
return nil
}
// Calls "udevadm trigger --action=change" on the specified drive.
// drivePath must be the block device path to trigger on, in the format "/dev/sd*", or a symlink to it.
// This is a workaround for Issue #7972. Once the underlying issue has been resolved, this may be removed.
func udevadmChangeToDrive(drivePath string) error {
glog.V(5).Infof("udevadmChangeToDrive: drive=%q", drivePath)
// Evaluate symlink, if any
drive, err := filepath.EvalSymlinks(drivePath)
if err != nil {
return fmt.Errorf("udevadmChangeToDrive: filepath.EvalSymlinks(%q) failed with %v.", drivePath, err)
}
glog.V(5).Infof("udevadmChangeToDrive: symlink path is %q", drive)
// Check to make sure input is "/dev/sd*"
if !strings.Contains(drive, diskSDPath) {
return fmt.Errorf("udevadmChangeToDrive: expected input in the form \"%s\" but drive is %q.", diskSDPattern, drive)
}
// Call "udevadm trigger --action=change --property-match=DEVNAME=/dev/sd..."
_, err = exec.New().Command(
"udevadm",
"trigger",
"--action=change",
fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
if err != nil {
return fmt.Errorf("udevadmChangeToDrive: udevadm trigger failed for drive %q with %v.", drive, err)
}
return nil
}
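The trigger itself is a single udevadm invocation matched on the device name. A stdlib-only sketch of the same call, using os/exec instead of k8s.io/utils/exec (the device path is hypothetical, and the command only succeeds on a Linux host with udevadm installed):

package main

import (
    "fmt"
    "os/exec"
)

// triggerUdevChange issues the same command as udevadmChangeToDrive, but via
// the standard library os/exec package.
func triggerUdevChange(drive string) error {
    out, err := exec.Command(
        "udevadm",
        "trigger",
        "--action=change",
        fmt.Sprintf("--property-match=DEVNAME=%s", drive)).CombinedOutput()
    if err != nil {
        return fmt.Errorf("udevadm trigger failed for %q: %v, output: %s", drive, err, out)
    }
    return nil
}

func main() {
    // Hypothetical device; adjust to an actual /dev/sd* path when trying this out.
    if err := triggerUdevChange("/dev/sdb"); err != nil {
        fmt.Println(err)
    }
}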