vendor updates
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD (generated, vendored, 5 changed lines)
@@ -21,7 +21,6 @@ go_library(
"//pkg/util/strings:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/util/volumehelper:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library",
"//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
@@ -32,6 +31,7 @@ go_library(
"//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
@@ -42,8 +42,7 @@ go_test(
"glusterfs_minmax_test.go",
"glusterfs_test.go",
],
importpath = "k8s.io/kubernetes/pkg/volume/glusterfs",
library = ":go_default_library",
embed = [":go_default_library"],
deps = [
"//pkg/util/mount:go_default_library",
"//pkg/volume:go_default_library",
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs.go (generated, vendored, 329 changed lines)
@@ -36,13 +36,13 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/uuid"
clientset "k8s.io/client-go/kubernetes"
v1helper "k8s.io/kubernetes/pkg/apis/core/v1/helper"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/util/strings"
"k8s.io/kubernetes/pkg/volume"
volutil "k8s.io/kubernetes/pkg/volume/util"
"k8s.io/kubernetes/pkg/volume/util/volumehelper"
)

// ProbeVolumePlugins is the primary entrypoint for volume plugins.
@@ -74,6 +74,7 @@ const (
gciLinuxGlusterMountBinaryPath = "/sbin/mount.glusterfs"
defaultGidMin = 2000
defaultGidMax = math.MaxInt32

// absoluteGidMin/Max are currently the same as the
// default values, but they play a different role and
// could take a different value. Only thing we need is:
@@ -84,6 +85,7 @@ const (
heketiAnn = "heketi-dynamic-provisioner"
glusterTypeAnn = "gluster.org/type"
glusterDescAnn = "Gluster-Internal: Dynamically provisioned PV"
heketiVolIDAnn = "gluster.kubernetes.io/heketi-volume-id"
)

func (plugin *glusterfsPlugin) Init(host volume.VolumeHost) error {
@@ -141,9 +143,13 @@ func (plugin *glusterfsPlugin) GetAccessModes() []v1.PersistentVolumeAccessMode
}

func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volume.VolumeOptions) (volume.Mounter, error) {
source, _ := plugin.getGlusterVolumeSource(spec)
source, _, err := getVolumeSource(spec)
if err != nil {
glog.Errorf("failed to get gluster volumesource: %v", err)
return nil, err
}
epName := source.EndpointsName
// PVC/POD is in same ns.
// PVC/POD is in same namespace.
podNs := pod.Namespace
kubeClient := plugin.host.GetKubeClient()
if kubeClient == nil {
@@ -151,35 +157,27 @@ func (plugin *glusterfsPlugin) NewMounter(spec *volume.Spec, pod *v1.Pod, _ volu
}
ep, err := kubeClient.CoreV1().Endpoints(podNs).Get(epName, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get endpoints %s[%v]", epName, err)
glog.Errorf("failed to get endpoint %s: %v", epName, err)
return nil, err
}
glog.V(1).Infof("glusterfs pv endpoint %v", ep)
glog.V(4).Infof("glusterfs pv endpoint %v", ep)
return plugin.newMounterInternal(spec, ep, pod, plugin.host.GetMounter(plugin.GetPluginName()))
}

func (plugin *glusterfsPlugin) getGlusterVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool) {
// Glusterfs volumes used directly in a pod have a ReadOnly flag set by the pod author.
// Glusterfs volumes used as a PersistentVolume gets the ReadOnly flag indirectly through the persistent-claim volume used to mount the PV
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly
}
return spec.PersistentVolume.Spec.Glusterfs, spec.ReadOnly
}

func (plugin *glusterfsPlugin) newMounterInternal(spec *volume.Spec, ep *v1.Endpoints, pod *v1.Pod, mounter mount.Interface) (volume.Mounter, error) {
source, readOnly := plugin.getGlusterVolumeSource(spec)
source, readOnly, _ := getVolumeSource(spec)
return &glusterfsMounter{
glusterfs: &glusterfs{
volName: spec.Name(),
mounter: mounter,
pod: pod,
plugin: plugin,
volName: spec.Name(),
mounter: mounter,
pod: pod,
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(pod.UID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), spec.Name())),
},
hosts: ep,
path: source.Path,
readOnly: readOnly,
mountOptions: volume.MountOptionFromSpec(spec),
mountOptions: volutil.MountOptionFromSpec(spec),
}, nil
}
@@ -189,16 +187,17 @@ func (plugin *glusterfsPlugin) NewUnmounter(volName string, podUID types.UID) (v

func (plugin *glusterfsPlugin) newUnmounterInternal(volName string, podUID types.UID, mounter mount.Interface) (volume.Unmounter, error) {
return &glusterfsUnmounter{&glusterfs{
volName: volName,
mounter: mounter,
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
plugin: plugin,
volName: volName,
mounter: mounter,
pod: &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: podUID}},
plugin: plugin,
MetricsProvider: volume.NewMetricsStatFS(plugin.host.GetPodVolumeDir(podUID, strings.EscapeQualifiedNameForDisk(glusterfsPluginName), volName)),
}}, nil
}

func (plugin *glusterfsPlugin) ConstructVolumeSpec(volumeName, mountPath string) (*volume.Spec, error) {

// To reconstrcut volume spec we need endpoint where fetching endpoint from mount
// To reconstruct volume spec we need endpoint where fetching endpoint from mount
// string looks to be impossible, so returning error.

return nil, fmt.Errorf("impossible to reconstruct glusterfs volume spec from volume mountpath")
@@ -210,7 +209,7 @@ type glusterfs struct {
pod *v1.Pod
mounter mount.Interface
plugin *glusterfsPlugin
volume.MetricsNil
volume.MetricsProvider
}

type glusterfsMounter struct {
@@ -238,7 +237,7 @@ func (b *glusterfsMounter) CanMount() error {
exe := b.plugin.host.GetExec(b.plugin.GetPluginName())
switch runtime.GOOS {
case "linux":
if _, err := exe.Run("/bin/ls", gciLinuxGlusterMountBinaryPath); err != nil {
if _, err := exe.Run("test", "-x", gciLinuxGlusterMountBinaryPath); err != nil {
return fmt.Errorf("Required binary %s is missing", gciLinuxGlusterMountBinaryPath)
}
}
@@ -259,8 +258,9 @@ func (b *glusterfsMounter) SetUpAt(dir string, fsGroup *int64) error {
if !notMnt {
return nil
}

os.MkdirAll(dir, 0750)
if err := os.MkdirAll(dir, 0750); err != nil {
return err
}
err = b.setUpAtInternal(dir)
if err == nil {
return nil
@@ -300,7 +300,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

p := path.Join(b.glusterfs.plugin.host.GetPluginDir(glusterfsPluginName), b.glusterfs.volName)
if err := os.MkdirAll(p, 0750); err != nil {
return fmt.Errorf("Error creating directory %v: %v", p, err)
return fmt.Errorf("failed to create directory %v: %v", p, err)
}

// adding log-level ERROR to remove noise
@@ -312,7 +312,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

var addrlist []string
if b.hosts == nil {
return fmt.Errorf("glusterfs: endpoint is nil")
return fmt.Errorf("glusterfs endpoint is nil in mounter")
}
addr := sets.String{}
if b.hosts.Subsets != nil {
@@ -327,18 +327,18 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

}
options = append(options, "backup-volfile-servers="+dstrings.Join(addrlist[:], ":"))
mountOptions := volume.JoinMountOptions(b.mountOptions, options)
mountOptions := volutil.JoinMountOptions(b.mountOptions, options)

// with `backup-volfile-servers` mount option in place, it is not required to
// iterate over all the servers in the addrlist. A mount attempt with this option
// will fetch all the servers mentioned in the backup-volfile-servers list.
// Refer backup-volfile-servers @ https://access.redhat.com/documentation/en-US/Red_Hat_Storage/3/html/Administration_Guide/sect-Native_Client.html
// Refer to backup-volfile-servers @ http://docs.gluster.org/en/latest/Administrator%20Guide/Setting%20Up%20Clients/

if (len(addrlist) > 0) && (addrlist[0] != "") {
ip := addrlist[0]
errs = b.mounter.Mount(ip+":"+b.path, dir, "glusterfs", mountOptions)
if errs == nil {
glog.Infof("glusterfs: successfully mounted %s", dir)
glog.Infof("successfully mounted directory %s", dir)
return nil
}

@@ -374,8 +374,7 @@ func (b *glusterfsMounter) setUpAtInternal(dir string) error {

}

func getVolumeSource(
spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool, error) {
func getVolumeSource(spec *volume.Spec) (*v1.GlusterfsVolumeSource, bool, error) {
if spec.Volume != nil && spec.Volume.Glusterfs != nil {
return spec.Volume.Glusterfs, spec.Volume.Glusterfs.ReadOnly, nil
} else if spec.PersistentVolume != nil &&
@@ -402,17 +401,18 @@ func (plugin *glusterfsPlugin) newProvisionerInternal(options volume.VolumeOptio
}

type provisionerConfig struct {
url string
user string
userKey string
secretNamespace string
secretName string
secretValue string
clusterID string
gidMin int
gidMax int
volumeType gapi.VolumeDurabilityInfo
volumeOptions []string
url string
user string
userKey string
secretNamespace string
secretName string
secretValue string
clusterID string
gidMin int
gidMax int
volumeType gapi.VolumeDurabilityInfo
volumeOptions []string
volumeNamePrefix string
}

type glusterfsVolumeProvisioner struct {
@@ -424,11 +424,11 @@ type glusterfsVolumeProvisioner struct {
func convertGid(gidString string) (int, error) {
gid64, err := strconv.ParseInt(gidString, 10, 32)
if err != nil {
return 0, fmt.Errorf("failed to parse gid %v ", gidString)
return 0, fmt.Errorf("failed to parse gid %v: %v", gidString, err)
}

if gid64 < 0 {
return 0, fmt.Errorf("negative GIDs are not allowed: %v", gidString)
return 0, fmt.Errorf("negative GIDs %v are not allowed", gidString)
}

// ParseInt returns a int64, but since we parsed only
@@ -441,7 +441,7 @@ func convertVolumeParam(volumeString string) (int, error) {

count, err := strconv.Atoi(volumeString)
if err != nil {
return 0, fmt.Errorf("failed to parse %q", volumeString)
return 0, fmt.Errorf("failed to parse volumestring %q: %v", volumeString, err)
}

if count < 0 {
@@ -490,7 +490,7 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll
}
pvList, err := kubeClient.CoreV1().PersistentVolumes().List(metav1.ListOptions{LabelSelector: labels.Everything().String()})
if err != nil {
glog.Errorf("failed to get existing persistent volumes")
glog.Error("failed to get existing persistent volumes")
return err
}

@@ -501,24 +501,24 @@ func (plugin *glusterfsPlugin) collectGids(className string, gidTable *MinMaxAll

pvName := pv.ObjectMeta.Name

gidStr, ok := pv.Annotations[volumehelper.VolumeGidAnnotationKey]
gidStr, ok := pv.Annotations[volutil.VolumeGidAnnotationKey]

if !ok {
glog.Warningf("no GID found in pv '%v'", pvName)
glog.Warningf("no GID found in pv %v", pvName)
continue
}

gid, err := convertGid(gidStr)
if err != nil {
glog.Error(err)
glog.Errorf("failed to parse gid %s: %v", gidStr, err)
continue
}

_, err = gidTable.Allocate(gid)
if err == ErrConflict {
glog.Warningf("GID %v found in pv %v was already allocated", gid)
glog.Warningf("GID %v found in pv %v was already allocated", gid, pvName)
} else if err != nil {
glog.Errorf("failed to store gid %v found in pv '%v': %v", gid, pvName, err)
glog.Errorf("failed to store gid %v found in pv %v: %v", gid, pvName, err)
return err
}
}
@@ -563,7 +563,6 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
}

// if in the meantime a table appeared, use it

plugin.gidTableLock.Lock()
defer plugin.gidTableLock.Unlock()

@@ -583,7 +582,7 @@ func (plugin *glusterfsPlugin) getGidTable(className string, min int, max int) (
}

func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
gidStr, ok := d.spec.Annotations[volumehelper.VolumeGidAnnotationKey]
gidStr, ok := d.spec.Annotations[volutil.VolumeGidAnnotationKey]

if !ok {
return 0, false, nil
@@ -595,9 +594,14 @@ func (d *glusterfsVolumeDeleter) getGid() (int, bool, error) {
}

func (d *glusterfsVolumeDeleter) Delete() error {
glog.V(2).Infof("delete volume: %s ", d.glusterfsMounter.path)
glog.V(2).Infof("delete volume %s", d.glusterfsMounter.path)

volumeName := d.glusterfsMounter.path
volumeID := dstrings.TrimPrefix(volumeName, volPrefix)
volumeID, err := getVolumeID(d.spec, volumeName)
if err != nil {
return fmt.Errorf("failed to get volumeID: %v", err)
}

class, err := volutil.GetClassForVolume(d.plugin.host.GetKubeClient(), d.spec)
if err != nil {
return err
@@ -609,7 +613,7 @@ func (d *glusterfsVolumeDeleter) Delete() error {
}
d.provisionerConfig = *cfg

glog.V(4).Infof("deleting volume %q with configuration %+v", volumeID, d.provisionerConfig)
glog.V(4).Infof("deleting volume %q", volumeID)

gid, exists, err := d.getGid()
if err != nil {
@@ -628,17 +632,17 @@ func (d *glusterfsVolumeDeleter) Delete() error {

cli := gcli.NewClient(d.url, d.user, d.secretValue)
if cli == nil {
glog.Errorf("failed to create glusterfs rest client")
return fmt.Errorf("failed to create glusterfs rest client, REST server authentication failed")
glog.Errorf("failed to create glusterfs REST client")
return fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}
err = cli.VolumeDelete(volumeID)
if err != nil {
glog.Errorf("error when deleting the volume :%v", err)
glog.Errorf("failed to delete volume %s: %v", volumeName, err)
return err
}
glog.V(2).Infof("volume %s deleted successfully", volumeName)

//Deleter takes endpoint and endpointnamespace from pv spec.
//Deleter takes endpoint and namespace from pv spec.
pvSpec := d.spec.Spec
var dynamicEndpoint, dynamicNamespace string
if pvSpec.ClaimRef == nil {
@@ -653,18 +657,18 @@ func (d *glusterfsVolumeDeleter) Delete() error {
if pvSpec.Glusterfs.EndpointsName != "" {
dynamicEndpoint = pvSpec.Glusterfs.EndpointsName
}
glog.V(3).Infof("dynamic namespace and endpoint : [%v/%v]", dynamicNamespace, dynamicEndpoint)
glog.V(3).Infof("dynamic namespace and endpoint %v/%v", dynamicNamespace, dynamicEndpoint)
err = d.deleteEndpointService(dynamicNamespace, dynamicEndpoint)
if err != nil {
glog.Errorf("error when deleting endpoint/service :%v", err)
glog.Errorf("failed to delete endpoint/service %v/%v: %v", dynamicNamespace, dynamicEndpoint, err)
} else {
glog.V(1).Infof("endpoint: %v and service: %v deleted successfully ", dynamicNamespace, dynamicEndpoint)
glog.V(1).Infof("endpoint %v/%v is deleted successfully ", dynamicNamespace, dynamicEndpoint)
}
return nil
}

func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
if !volume.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
if !volutil.AccessModesContainedInAll(p.plugin.GetAccessModes(), p.options.PVC.Spec.AccessModes) {
return nil, fmt.Errorf("invalid AccessModes %v: only AccessModes %v are supported", p.options.PVC.Spec.AccessModes, p.plugin.GetAccessModes())
}
@@ -693,21 +697,23 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
return nil, fmt.Errorf("failed to reserve GID from table: %v", err)
}

glog.V(2).Infof("Allocated GID [%d] for PVC %s", gid, p.options.PVC.Name)
glog.V(2).Infof("Allocated GID %d for PVC %s", gid, p.options.PVC.Name)

glusterfs, sizeGB, err := p.CreateVolume(gid)
glusterfs, sizeGiB, volID, err := p.CreateVolume(gid)
if err != nil {
if releaseErr := gidTable.Release(gid); releaseErr != nil {
glog.Errorf("error when releasing GID in storageclass: %s", scName)
glog.Errorf("error when releasing GID in storageclass %s: %v", scName, releaseErr)
}

glog.Errorf("create volume error: %v.", err)
return nil, fmt.Errorf("create volume error: %v", err)
glog.Errorf("failed to create volume: %v", err)
return nil, fmt.Errorf("failed to create volume: %v", err)
}
mode := v1.PersistentVolumeFilesystem
pv := new(v1.PersistentVolume)
pv.Spec.PersistentVolumeSource.Glusterfs = glusterfs
pv.Spec.PersistentVolumeReclaimPolicy = p.options.PersistentVolumeReclaimPolicy
pv.Spec.AccessModes = p.options.PVC.Spec.AccessModes
pv.Spec.VolumeMode = &mode
if len(pv.Spec.AccessModes) == 0 {
pv.Spec.AccessModes = p.plugin.GetAccessModes()
}
@@ -716,74 +722,83 @@ func (p *glusterfsVolumeProvisioner) Provision() (*v1.PersistentVolume, error) {
gidStr := strconv.FormatInt(int64(gid), 10)

pv.Annotations = map[string]string{
volumehelper.VolumeGidAnnotationKey: gidStr,
volumehelper.VolumeDynamicallyCreatedByKey: heketiAnn,
glusterTypeAnn: "file",
"Description": glusterDescAnn,
v1.MountOptionAnnotation: "auto_unmount",
volutil.VolumeGidAnnotationKey: gidStr,
volutil.VolumeDynamicallyCreatedByKey: heketiAnn,
glusterTypeAnn: "file",
"Description": glusterDescAnn,
v1.MountOptionAnnotation: "auto_unmount",
heketiVolIDAnn: volID,
}

pv.Spec.Capacity = v1.ResourceList{
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dG", sizeGB)),
v1.ResourceName(v1.ResourceStorage): resource.MustParse(fmt.Sprintf("%dGi", sizeGiB)),
}
return pv, nil
}

func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, err error) {
func (p *glusterfsVolumeProvisioner) CreateVolume(gid int) (r *v1.GlusterfsVolumeSource, size int, volID string, err error) {
var clusterIDs []string
customVolumeName := ""
capacity := p.options.PVC.Spec.Resources.Requests[v1.ResourceName(v1.ResourceStorage)]
volSizeBytes := capacity.Value()
// Glusterfs creates volumes in units of GBs
sz := int(volume.RoundUpSize(volSizeBytes, 1000*1000*1000))
glog.V(2).Infof("create volume of size: %d bytes and configuration %+v", volSizeBytes, p.provisionerConfig)

// GlusterFS/heketi creates volumes in units of GiB.
sz := int(volutil.RoundUpToGiB(capacity))
glog.V(2).Infof("create volume of size %dGiB", sz)

if p.url == "" {
glog.Errorf("REST server endpoint is empty")
return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST URL is empty")
}
cli := gcli.NewClient(p.url, p.user, p.secretValue)
if cli == nil {
glog.Errorf("failed to create glusterfs rest client")
return nil, 0, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
glog.Errorf("failed to create glusterfs REST client")
return nil, 0, "", fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}
if p.provisionerConfig.clusterID != "" {
clusterIDs = dstrings.Split(p.clusterID, ",")
glog.V(4).Infof("provided clusterIDs: %v", clusterIDs)
glog.V(4).Infof("provided clusterIDs %v", clusterIDs)
}

if p.provisionerConfig.volumeNamePrefix != "" {
customVolumeName = fmt.Sprintf("%s_%s_%s_%s", p.provisionerConfig.volumeNamePrefix, p.options.PVC.Namespace, p.options.PVC.Name, uuid.NewUUID())
}
gid64 := int64(gid)
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
volumeReq := &gapi.VolumeCreateRequest{Size: sz, Name: customVolumeName, Clusters: clusterIDs, Gid: gid64, Durability: p.volumeType, GlusterVolumeOptions: p.volumeOptions}
volume, err := cli.VolumeCreate(volumeReq)
if err != nil {
glog.Errorf("error creating volume %v ", err)
return nil, 0, fmt.Errorf("error creating volume %v", err)
glog.Errorf("failed to create volume: %v", err)
return nil, 0, "", fmt.Errorf("failed to create volume: %v", err)
}
glog.V(1).Infof("volume with size: %d and name: %s created", volume.Size, volume.Name)
glog.V(1).Infof("volume with size %d and name %s created", volume.Size, volume.Name)
volID = volume.Id
dynamicHostIps, err := getClusterNodes(cli, volume.Cluster)
if err != nil {
glog.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
return nil, 0, fmt.Errorf("error [%v] when getting cluster nodes for volume %s", err, volume)
glog.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
return nil, 0, "", fmt.Errorf("failed to get cluster nodes for volume %s: %v", volume, err)
}

// The 'endpointname' is created in form of 'glusterfs-dynamic-<claimname>'.
// createEndpointService() checks for this 'endpoint' existence in PVC's namespace and
// If not found, it create an endpoint and svc using the IPs we dynamically picked at time
// If not found, it create an endpoint and service using the IPs we dynamically picked at time
// of volume creation.
epServiceName := dynamicEpSvcPrefix + p.options.PVC.Name
epNamespace := p.options.PVC.Namespace
endpoint, service, err := p.createEndpointService(epNamespace, epServiceName, dynamicHostIps, p.options.PVC.Name)
if err != nil {
glog.Errorf("failed to create endpoint/service: %v", err)
glog.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
deleteErr := cli.VolumeDelete(volume.Id)
if deleteErr != nil {
glog.Errorf("error when deleting the volume :%v , manual deletion required", deleteErr)
glog.Errorf("failed to delete volume: %v, manual deletion of the volume required", deleteErr)
}
return nil, 0, fmt.Errorf("failed to create endpoint/service %v", err)
return nil, 0, "", fmt.Errorf("failed to create endpoint/service %v/%v: %v", epNamespace, epServiceName, err)
}
glog.V(3).Infof("dynamic ep %v and svc : %v ", endpoint, service)
glog.V(3).Infof("dynamic endpoint %v and service %v ", endpoint, service)
return &v1.GlusterfsVolumeSource{
EndpointsName: endpoint.Name,
Path: volume.Name,
ReadOnly: false,
}, sz, nil
}, sz, volID, nil
}

func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epServiceName string, hostips []string, pvcname string) (endpoint *v1.Endpoints, service *v1.Service, err error) {
@@ -811,12 +826,12 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
}
_, err = kubeClient.CoreV1().Endpoints(namespace).Create(endpoint)
if err != nil && errors.IsAlreadyExists(err) {
glog.V(1).Infof("endpoint [%s] already exist in namespace [%s]", endpoint, namespace)
glog.V(1).Infof("endpoint %s already exist in namespace %s", endpoint, namespace)
err = nil
}
if err != nil {
glog.Errorf("failed to create endpoint: %v", err)
return nil, nil, fmt.Errorf("error creating endpoint: %v", err)
return nil, nil, fmt.Errorf("failed to create endpoint: %v", err)
}
service = &v1.Service{
ObjectMeta: metav1.ObjectMeta{
@@ -831,7 +846,7 @@ func (p *glusterfsVolumeProvisioner) createEndpointService(namespace string, epS
{Protocol: "TCP", Port: 1}}}}
_, err = kubeClient.CoreV1().Services(namespace).Create(service)
if err != nil && errors.IsAlreadyExists(err) {
glog.V(1).Infof("service [%s] already exist in namespace [%s]", service, namespace)
glog.V(1).Infof("service %s already exist in namespace %s", service, namespace)
err = nil
}
if err != nil {
@@ -848,10 +863,10 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
}
err = kubeClient.CoreV1().Services(namespace).Delete(epServiceName, nil)
if err != nil {
glog.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
return fmt.Errorf("error deleting service %s/%s: %v", namespace, epServiceName, err)
glog.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
return fmt.Errorf("failed to delete service %s/%s: %v", namespace, epServiceName, err)
}
glog.V(1).Infof("service/endpoint %s/%s deleted successfully", namespace, epServiceName)
glog.V(1).Infof("service/endpoint: %s/%s deleted successfully", namespace, epServiceName)
return nil
}

@@ -859,7 +874,7 @@ func (d *glusterfsVolumeDeleter) deleteEndpointService(namespace string, epServi
func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (string, error) {
secretMap, err := volutil.GetSecretForPV(namespace, secretName, glusterfsPluginName, kubeClient)
if err != nil {
glog.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
glog.Errorf("failed to get secret: %s/%s: %v", namespace, secretName, err)
return "", fmt.Errorf("failed to get secret %s/%s: %v", namespace, secretName, err)
}
if len(secretMap) == 0 {
@@ -872,12 +887,12 @@ func parseSecret(namespace, secretName string, kubeClient clientset.Interface) (
}
secret = v
}

// If not found, the last secret in the map wins as done before
return secret, nil
}

// getClusterNodes() returns the cluster nodes of a given cluster

func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string, err error) {
clusterinfo, err := cli.ClusterInfo(cluster)
if err != nil {
@@ -889,15 +904,15 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
// of the cluster on which provisioned volume belongs to, as there can be multiple
// clusters.
for _, node := range clusterinfo.Nodes {
nodei, err := cli.NodeInfo(string(node))
nodeInfo, err := cli.NodeInfo(string(node))
if err != nil {
glog.Errorf(" failed to get hostip: %v", err)
return nil, fmt.Errorf("failed to get hostip: %v", err)
glog.Errorf("failed to get host ipaddress: %v", err)
return nil, fmt.Errorf("failed to get host ipaddress: %v", err)
}
ipaddr := dstrings.Join(nodei.NodeAddRequest.Hostnames.Storage, "")
ipaddr := dstrings.Join(nodeInfo.NodeAddRequest.Hostnames.Storage, "")
dynamicHostIps = append(dynamicHostIps, ipaddr)
}
glog.V(3).Infof("hostlist :%v", dynamicHostIps)
glog.V(3).Infof("host list :%v", dynamicHostIps)
if len(dynamicHostIps) == 0 {
glog.Errorf("no hosts found: %v", err)
return nil, fmt.Errorf("no hosts found: %v", err)
@@ -905,7 +920,7 @@ func getClusterNodes(cli *gcli.Client, cluster string) (dynamicHostIps []string,
return dynamicHostIps, nil
}

// parseClassParameters parses StorageClass.Parameters
// parseClassParameters parses StorageClass parameters.
func parseClassParameters(params map[string]string, kubeClient clientset.Interface) (*provisionerConfig, error) {
var cfg provisionerConfig
var err error
@@ -915,6 +930,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
authEnabled := true
parseVolumeType := ""
parseVolumeOptions := ""
parseVolumeNamePrefix := ""

for k, v := range params {
switch dstrings.ToLower(k) {
@@ -937,7 +953,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
case "gidmin":
parseGidMin, err := convertGid(v)
if err != nil {
return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
return nil, fmt.Errorf("invalid gidMin value %q for volume plugin %s", k, glusterfsPluginName)
}
if parseGidMin < absoluteGidMin {
return nil, fmt.Errorf("gidMin must be >= %v", absoluteGidMin)
@@ -949,7 +965,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
case "gidmax":
parseGidMax, err := convertGid(v)
if err != nil {
return nil, fmt.Errorf("invalid value %q for volume plugin %s", k, glusterfsPluginName)
return nil, fmt.Errorf("invalid gidMax value %q for volume plugin %s", k, glusterfsPluginName)
}
if parseGidMax < absoluteGidMin {
return nil, fmt.Errorf("gidMax must be >= %v", absoluteGidMin)
@@ -965,7 +981,10 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
if len(v) != 0 {
parseVolumeOptions = v
}

case "volumenameprefix":
if len(v) != 0 {
parseVolumeNamePrefix = v
}
default:
return nil, fmt.Errorf("invalid option %q for volume plugin %s", k, glusterfsPluginName)
}
@@ -985,7 +1004,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
if len(parseVolumeTypeInfo) >= 2 {
newReplicaCount, err := convertVolumeParam(parseVolumeTypeInfo[1])
if err != nil {
return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", err, parseVolumeTypeInfo[1], "volumetype", glusterfsPluginName)
return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
}
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityReplicate, Replicate: gapi.ReplicaDurability{Replica: newReplicaCount}}
} else {
@@ -995,11 +1014,11 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
if len(parseVolumeTypeInfo) >= 3 {
newDisperseData, err := convertVolumeParam(parseVolumeTypeInfo[1])
if err != nil {
return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", parseVolumeTypeInfo[1], err, "volumetype", glusterfsPluginName)
return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[1], err)
}
newDisperseRedundancy, err := convertVolumeParam(parseVolumeTypeInfo[2])
if err != nil {
return nil, fmt.Errorf("error [%v] when parsing value %q of option '%s' for volume plugin %s", err, parseVolumeTypeInfo[2], "volumetype", glusterfsPluginName)
return nil, fmt.Errorf("error parsing volumeType %q: %s", parseVolumeTypeInfo[2], err)
}
cfg.volumeType = gapi.VolumeDurabilityInfo{Type: gapi.DurabilityEC, Disperse: gapi.DisperseDurability{Data: newDisperseData, Redundancy: newDisperseRedundancy}}
} else {
@@ -1020,6 +1039,7 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
}

if len(cfg.secretName) != 0 || len(cfg.secretNamespace) != 0 {

// secretName + Namespace has precedence over userKey
if len(cfg.secretName) != 0 && len(cfg.secretNamespace) != 0 {
cfg.secretValue, err = parseSecret(cfg.secretNamespace, cfg.secretName, kubeClient)
@@ -1040,23 +1060,52 @@ func parseClassParameters(params map[string]string, kubeClient clientset.Interfa
if len(parseVolumeOptions) != 0 {
volOptions := dstrings.Split(parseVolumeOptions, ",")
if len(volOptions) == 0 {
return nil, fmt.Errorf("StorageClass for provisioner %q must have valid ( for eg, 'client.ssl on') volume option", glusterfsPluginName)
return nil, fmt.Errorf("StorageClass for provisioner %q must have valid (for e.g., 'client.ssl on') volume option", glusterfsPluginName)
}
cfg.volumeOptions = volOptions

}

if len(parseVolumeNamePrefix) != 0 {
if dstrings.Contains(parseVolumeNamePrefix, "_") {
return nil, fmt.Errorf("Storageclass parameter 'volumenameprefix' should not contain '_' in its value")
}
cfg.volumeNamePrefix = parseVolumeNamePrefix
}
return &cfg, nil
}
// getVolumeID returns volumeID from the PV or volumename.
func getVolumeID(pv *v1.PersistentVolume, volumeName string) (string, error) {
volumeID := ""

// Get volID from pvspec if available, else fill it from volumename.
if pv != nil {
if pv.Annotations[heketiVolIDAnn] != "" {
volumeID = pv.Annotations[heketiVolIDAnn]
} else {
volumeID = dstrings.TrimPrefix(volumeName, volPrefix)
}
} else {
return volumeID, fmt.Errorf("provided PV spec is nil")
}
if volumeID == "" {
return volumeID, fmt.Errorf("volume ID is empty")
}
return volumeID, nil
}

func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize resource.Quantity, oldSize resource.Quantity) (resource.Quantity, error) {
pvSpec := spec.PersistentVolume.Spec
glog.V(2).Infof("Request to expand volume: %s ", pvSpec.Glusterfs.Path)
volumeName := pvSpec.Glusterfs.Path
glog.V(2).Infof("Received request to expand volume %s", volumeName)
volumeID, err := getVolumeID(spec.PersistentVolume, volumeName)

// Fetch the volume for expansion.
volumeID := dstrings.TrimPrefix(volumeName, volPrefix)
if err != nil {
return oldSize, fmt.Errorf("failed to get volumeID for volume %s: %v", volumeName, err)
}

//Get details of SC.
//Get details of StorageClass.
class, err := volutil.GetClassForVolume(plugin.host.GetKubeClient(), spec.PersistentVolume)
if err != nil {
return oldSize, err
@@ -1066,30 +1115,44 @@ func (plugin *glusterfsPlugin) ExpandVolumeDevice(spec *volume.Spec, newSize res
return oldSize, err
}

glog.V(4).Infof("Expanding volume %q with configuration %+v", volumeID, cfg)
glog.V(4).Infof("expanding volume: %q with configuration: %+v", volumeID, cfg)

//Create REST server connection
cli := gcli.NewClient(cfg.url, cfg.user, cfg.secretValue)
if cli == nil {
glog.Errorf("failed to create glusterfs rest client")
return oldSize, fmt.Errorf("failed to create glusterfs rest client, REST server authentication failed")
glog.Errorf("failed to create glusterfs REST client")
return oldSize, fmt.Errorf("failed to create glusterfs REST client, REST server authentication failed")
}

// Find out delta size
expansionSize := (newSize.Value() - oldSize.Value())
expansionSizeGB := int(volume.RoundUpSize(expansionSize, 1000*1000*1000))
expansionSizeGiB := int(volutil.RoundUpSize(expansionSize, volutil.GIB))

// Find out requested Size
requestGiB := volutil.RoundUpToGiB(newSize)

//Check the existing volume size
currentVolumeInfo, err := cli.VolumeInfo(volumeID)
if err != nil {
glog.Errorf("error when fetching details of volume %s: %v", volumeName, err)
return oldSize, err
}

if int64(currentVolumeInfo.Size) >= requestGiB {
return newSize, nil
}

// Make volume expansion request
volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGB}
volumeExpandReq := &gapi.VolumeExpandRequest{Size: expansionSizeGiB}

// Expand the volume
volumeInfoRes, err := cli.VolumeExpand(volumeID, volumeExpandReq)
if err != nil {
glog.Errorf("error when expanding the volume :%v", err)
glog.Errorf("failed to expand volume %s: %v", volumeName, err)
return oldSize, err
}

glog.V(2).Infof("volume %s expanded to new size %d successfully", volumeName, volumeInfoRes.Size)
newVolumeSize := resource.MustParse(fmt.Sprintf("%dG", volumeInfoRes.Size))
newVolumeSize := resource.MustParse(fmt.Sprintf("%dGi", volumeInfoRes.Size))
return newVolumeSize, nil
}
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/glusterfs_util.go (generated, vendored, 8 changed lines)
@@ -27,24 +27,24 @@ import (
// readGlusterLog will take the last 2 lines of the log file
// on failure of gluster SetUp and return those so kubelet can
// properly expose them
// return nil on any failure
// return error on any failure
func readGlusterLog(path string, podName string) error {

var line1 string
var line2 string
linecount := 0

glog.Infof("glusterfs: failure, now attempting to read the gluster log for pod %s", podName)
glog.Infof("failure, now attempting to read the gluster log for pod %s", podName)

// Check and make sure path exists
if len(path) == 0 {
return fmt.Errorf("glusterfs: log file does not exist for pod: %s", podName)
return fmt.Errorf("log file does not exist for pod %s", podName)
}

// open the log file
file, err := os.Open(path)
if err != nil {
return fmt.Errorf("glusterfs: could not open log file for pod: %s", podName)
return fmt.Errorf("could not open log file for pod %s", podName)
}
defer file.Close()