vendor files

This commit is contained in:
Serguei Bezverkhi
2018-01-09 13:57:14 -05:00
parent 558bc6c02a
commit 7b24313bd6
16547 changed files with 4527373 additions and 0 deletions

73
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/BUILD generated vendored Normal file
View File

@ -0,0 +1,73 @@
# Bazel build definitions for the glusterfs volume plugin
# (auto-generated/managed file in the vendored kubernetes tree).
package(default_visibility = ["//visibility:public"])

load(
    "@io_bazel_rules_go//go:def.bzl",
    "go_library",
    "go_test",
)

# Library target containing the glusterfs plugin implementation.
go_library(
    name = "go_default_library",
    srcs = [
        "doc.go",
        "glusterfs.go",
        "glusterfs_minmax.go",
        "glusterfs_util.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/glusterfs",
    deps = [
        "//pkg/apis/core/v1/helper:go_default_library",
        "//pkg/util/mount:go_default_library",
        "//pkg/util/strings:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/util:go_default_library",
        "//pkg/volume/util/volumehelper:go_default_library",
        "//vendor/github.com/golang/glog:go_default_library",
        "//vendor/github.com/heketi/heketi/client/api/go-client:go_default_library",
        "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/errors:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/api/resource:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/labels:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes:go_default_library",
    ],
)

# Unit tests compiled into the same package as the library.
go_test(
    name = "go_default_test",
    srcs = [
        "glusterfs_minmax_test.go",
        "glusterfs_test.go",
    ],
    importpath = "k8s.io/kubernetes/pkg/volume/glusterfs",
    library = ":go_default_library",
    deps = [
        "//pkg/util/mount:go_default_library",
        "//pkg/volume:go_default_library",
        "//pkg/volume/testing:go_default_library",
        "//vendor/github.com/heketi/heketi/pkg/glusterfs/api:go_default_library",
        "//vendor/k8s.io/api/core/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/runtime:go_default_library",
        "//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
        "//vendor/k8s.io/client-go/kubernetes/fake:go_default_library",
        "//vendor/k8s.io/client-go/testing:go_default_library",
        "//vendor/k8s.io/client-go/util/testing:go_default_library",
    ],
)

# All files of this package, consumed by the aggregate source tree targets.
filegroup(
    name = "package-srcs",
    srcs = glob(["**"]),
    tags = ["automanaged"],
    visibility = ["//visibility:private"],
)

filegroup(
    name = "all-srcs",
    srcs = [":package-srcs"],
    tags = ["automanaged"],
)

12
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/OWNERS generated vendored Normal file
View File

@ -0,0 +1,12 @@
approvers:
- rootfs
- saad-ali
- jingxu97
- humblec
reviewers:
- saad-ali
- jsafrane
- rootfs
- humblec
- jingxu97
- msau42

19
vendor/k8s.io/kubernetes/pkg/volume/glusterfs/doc.go generated vendored Normal file
View File

@ -0,0 +1,19 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package glusterfs contains the internal representation of glusterfs
// volumes.
package glusterfs // import "k8s.io/kubernetes/pkg/volume/glusterfs"

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,193 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
//
// This implementation is space-efficient for a sparse
// allocation over a big range. Could be optimized
// for high absolute allocation number with a bitmap.
//
package glusterfs
import (
"errors"
"sync"
)
var (
    // ErrConflict is returned when a value is already in use.
    ErrConflict = errors.New("number already allocated")

    // ErrInvalidRange is returned for an invalid range, e.g. min > max.
    ErrInvalidRange = errors.New("invalid range")

    // ErrOutOfRange is returned when a value lies outside the pool range.
    ErrOutOfRange = errors.New("out of range")

    // ErrRangeFull is returned when the pool has no free values left.
    ErrRangeFull = errors.New("range full")

    // ErrInternal is returned when no free item is found even though the
    // free counter claims space is available.
    ErrInternal = errors.New("internal error")
)

// MinMaxAllocator hands out unique integers from an adjustable [min, max]
// range, tracking allocations in a map keyed by value. It is space-efficient
// for sparse allocation over a large range.
type MinMaxAllocator struct {
    lock sync.Mutex
    min  int
    max  int
    free int
    used map[int]bool
}

var _ Rangeable = &MinMaxAllocator{}

// Rangeable is an Interface that can adjust its min/max range.
// Rangeable should be threadsafe
type Rangeable interface {
    Allocate(int) (bool, error)
    AllocateNext() (int, bool, error)
    Release(int) error
    Has(int) bool
    Free() int
    SetRange(min, max int) error
}

// NewMinMaxAllocator returns a new allocator covering [min, max], or
// ErrInvalidRange when min > max.
func NewMinMaxAllocator(min, max int) (*MinMaxAllocator, error) {
    if min > max {
        return nil, ErrInvalidRange
    }
    a := &MinMaxAllocator{
        min:  min,
        max:  max,
        free: max - min + 1,
        used: make(map[int]bool),
    }
    return a, nil
}

// SetRange redefines the pool as [min, max]. Values allocated outside the
// new range stay recorded (and may still be released) but no longer count
// against the free total.
func (m *MinMaxAllocator) SetRange(min, max int) error {
    if min > max {
        return ErrInvalidRange
    }

    m.lock.Lock()
    defer m.lock.Unlock()

    // Unchanged range: nothing to recompute.
    if min == m.min && max == m.max {
        return nil
    }

    m.min, m.max = min, max

    // Count only the allocations that fall inside the new range.
    inUse := 0
    for v := range m.used {
        if m.inRange(v) {
            inUse++
        }
    }
    m.free = max - min + 1 - inUse

    return nil
}

// Allocate marks the given value as used, failing when it is outside the
// range or already taken.
func (m *MinMaxAllocator) Allocate(i int) (bool, error) {
    m.lock.Lock()
    defer m.lock.Unlock()

    switch {
    case !m.inRange(i):
        return false, ErrOutOfRange
    case m.has(i):
        return false, ErrConflict
    }

    m.used[i] = true
    m.free--
    return true, nil
}

// AllocateNext reserves and returns the smallest free value in the range.
func (m *MinMaxAllocator) AllocateNext() (int, bool, error) {
    m.lock.Lock()
    defer m.lock.Unlock()

    // Fast path: bail out immediately when the pool is exhausted.
    if m.free <= 0 {
        return 0, false, ErrRangeFull
    }

    // Linear scan from the bottom of the range for the first free slot.
    for v := m.min; v <= m.max; v++ {
        if m.has(v) {
            continue
        }
        m.used[v] = true
        m.free--
        return v, true, nil
    }

    // Inconsistent state: the free counter claimed space was available.
    return 0, false, ErrInternal
}

// Release frees the given value; releasing an unallocated value is a no-op.
func (m *MinMaxAllocator) Release(i int) error {
    m.lock.Lock()
    defer m.lock.Unlock()

    if !m.has(i) {
        return nil
    }

    delete(m.used, i)
    // Only values inside the current range contribute to the free count.
    if m.inRange(i) {
        m.free++
    }

    return nil
}

// has reports whether i is allocated; callers must hold the lock.
func (m *MinMaxAllocator) has(i int) bool {
    return m.used[i]
}

// Has reports whether the given value is currently allocated.
func (m *MinMaxAllocator) Has(i int) bool {
    m.lock.Lock()
    defer m.lock.Unlock()

    return m.has(i)
}

// Free returns the number of values still available in the pool.
func (m *MinMaxAllocator) Free() int {
    m.lock.Lock()
    defer m.lock.Unlock()

    return m.free
}

// inRange reports whether i lies within [min, max]; callers must hold the lock.
func (m *MinMaxAllocator) inRange(i int) bool {
    return m.min <= i && i <= m.max
}

View File

@ -0,0 +1,226 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"testing"
)
// TestNewFree verifies that a fresh allocator reports the full range as free.
func TestNewFree(t *testing.T) {
    const (
        min = 1
        max = 10
    )
    m, err := NewMinMaxAllocator(min, max)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    want := max - min + 1
    if got := m.Free(); got != want {
        t.Errorf("expect to get %d free, but got %d", want, got)
    }
}
// TestNewInvalidRange checks that min > max is rejected at construction time.
func TestNewInvalidRange(t *testing.T) {
    _, err := NewMinMaxAllocator(10, 1)
    if err != ErrInvalidRange {
        t.Errorf("expect to get Error '%v', got '%v'", ErrInvalidRange, err)
    }
}
// TestSetRange exercises shrinking and shifting the pool while a value is
// allocated, checking the free count after every adjustment.
func TestSetRange(t *testing.T) {
    m, err := NewMinMaxAllocator(1, 10)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    // An inverted range must be rejected.
    if err = m.SetRange(10, 1); err != ErrInvalidRange {
        t.Errorf("expected to get error '%v', got '%v'", ErrInvalidRange, err)
    }

    if err = m.SetRange(1, 2); err != nil {
        t.Errorf("error setting range: '%v'", err)
    }
    if got := m.Free(); got != 2 {
        t.Errorf("expect to get %d free, but got %d", 2, got)
    }

    if ok, _ := m.Allocate(1); !ok {
        t.Errorf("error allocate offset %v", 1)
    }
    if got := m.Free(); got != 1 {
        t.Errorf("expect to get 1 free, but got %d", got)
    }

    // Shrink to exactly the allocated value: nothing remains free.
    if err = m.SetRange(1, 1); err != nil {
        t.Errorf("error setting range: '%v'", err)
    }
    if got := m.Free(); got != 0 {
        t.Errorf("expect to get 0 free, but got %d", got)
    }

    // Shift past the allocated value: one slot opens up again.
    if err = m.SetRange(2, 2); err != nil {
        t.Errorf("error setting range: '%v'", err)
    }
    if got := m.Free(); got != 1 {
        t.Errorf("expect to get 1 free, but got %d", got)
    }
}
// TestAllocateNext checks that AllocateNext hands out a tracked value and
// decrements the free count.
func TestAllocateNext(t *testing.T) {
    const (
        min = 1
        max = 10
    )
    m, err := NewMinMaxAllocator(min, max)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    el, ok, _ := m.AllocateNext()
    if !ok {
        t.Fatalf("unexpected error")
    }
    if !m.Has(el) {
        t.Errorf("expect element %v allocated", el)
    }

    want := (max - min + 1) - 1
    if got := m.Free(); got != want {
        t.Errorf("expect to get %d free, but got %d", want, got)
    }
}
// TestAllocateMax fills the pool completely, then verifies that a further
// AllocateNext fails and that no free slots remain.
//
// Fix: the loop now runs max-min+1 times (the pool size) instead of max
// times; the two only coincide when min == 1, so the original bound would
// break if the range constants were changed.
func TestAllocateMax(t *testing.T) {
    min := 1
    max := 10
    m, err := NewMinMaxAllocator(min, max)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    for i := 0; i < max-min+1; i++ {
        if _, ok, _ := m.AllocateNext(); !ok {
            t.Fatalf("unexpected error")
        }
    }

    // The pool is full; the next allocation must be refused.
    if _, ok, _ := m.AllocateNext(); ok {
        t.Errorf("unexpected success")
    }

    if f := m.Free(); f != 0 {
        t.Errorf("expect to get %d free, but got %d", 0, f)
    }
}
// TestAllocate verifies that a specific value can be reserved and that the
// free count reflects the reservation.
func TestAllocate(t *testing.T) {
    const (
        min    = 1
        max    = 10
        offset = 3
    )
    m, err := NewMinMaxAllocator(min, max)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    if ok, err := m.Allocate(offset); !ok {
        t.Errorf("error allocate offset %v: %v", offset, err)
    }
    if !m.Has(offset) {
        t.Errorf("expect element %v allocated", offset)
    }

    want := (max - min + 1) - 1
    if got := m.Free(); got != want {
        t.Errorf("expect to get %d free, but got %d", want, got)
    }
}
// TestAllocateConflict verifies that reserving the same value twice fails
// with ErrConflict.
func TestAllocateConflict(t *testing.T) {
    m, err := NewMinMaxAllocator(1, 10)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    offset := 3
    if ok, err := m.Allocate(offset); !ok {
        t.Errorf("error allocate offset %v: %v", offset, err)
    }

    // The second allocation of the same value must be refused.
    ok, err := m.Allocate(offset)
    if ok {
        t.Errorf("unexpected success")
    }
    if err != ErrConflict {
        t.Errorf("expected error '%v', got '%v'", ErrConflict, err)
    }
}
// TestAllocateOutOfRange verifies that a value beyond max fails with
// ErrOutOfRange.
func TestAllocateOutOfRange(t *testing.T) {
    m, err := NewMinMaxAllocator(1, 10)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    offset := 11 // one past max
    ok, err := m.Allocate(offset)
    if ok {
        t.Errorf("unexpected success")
    }
    if err != ErrOutOfRange {
        t.Errorf("expected error '%v', got '%v'", ErrOutOfRange, err)
    }
}
// TestRelease verifies that a released value is no longer reported as
// allocated.
func TestRelease(t *testing.T) {
    m, err := NewMinMaxAllocator(1, 10)
    if err != nil {
        t.Errorf("error creating new allocator: '%v'", err)
    }

    offset := 3
    if ok, err := m.Allocate(offset); !ok {
        t.Errorf("error allocate offset %v: %v", offset, err)
    }
    if !m.Has(offset) {
        t.Errorf("expect offset %v allocated", offset)
    }

    if err = m.Release(offset); err != nil {
        t.Errorf("unexpected error: %v", err)
    }
    if m.Has(offset) {
        t.Errorf("expect offset %v not allocated", offset)
    }
}

View File

@ -0,0 +1,567 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"fmt"
"os"
"reflect"
"testing"
gapi "github.com/heketi/heketi/pkg/glusterfs/api"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes/fake"
core "k8s.io/client-go/testing"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume"
volumetest "k8s.io/kubernetes/pkg/volume/testing"
)
// TestCanSupport verifies plugin lookup by name and that the plugin rejects
// specs that carry no glusterfs volume source.
func TestCanSupport(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    if name := plug.GetPluginName(); name != "kubernetes.io/glusterfs" {
        t.Errorf("Wrong name: %s", name)
    }

    // Neither an empty PV source nor an empty pod volume source is supported.
    pvSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{PersistentVolumeSource: v1.PersistentVolumeSource{}}}}
    if plug.CanSupport(pvSpec) {
        t.Errorf("Expected false")
    }
    volSpec := &volume.Spec{Volume: &v1.Volume{VolumeSource: v1.VolumeSource{}}}
    if plug.CanSupport(volSpec) {
        t.Errorf("Expected false")
    }
}
// TestGetAccessModes checks that the persistent plugin advertises the
// ReadWriteOnce, ReadOnlyMany and ReadWriteMany access modes.
func TestGetAccessModes(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)

    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))

    plug, err := plugMgr.FindPersistentPluginByName("kubernetes.io/glusterfs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }

    modes := plug.GetAccessModes()
    supportsAll := volumetest.ContainsAccessMode(modes, v1.ReadWriteOnce) &&
        volumetest.ContainsAccessMode(modes, v1.ReadOnlyMany) &&
        volumetest.ContainsAccessMode(modes, v1.ReadWriteMany)
    if !supportsAll {
        t.Errorf("Expected three AccessModeTypes: %s, %s, and %s", v1.ReadWriteOnce, v1.ReadOnlyMany, v1.ReadWriteMany)
    }
}
// doTestPlugin exercises the full mount/unmount lifecycle of a glusterfs
// volume spec using fake mounters: it checks the computed volume path, that
// SetUp creates it on disk, and that TearDown removes it.
//
// Fix: GetPath is only called after the mounter has been checked for
// error/nil; the original dereferenced a potentially nil mounter first,
// turning a constructor failure into a panic. The nil checks now stop the
// test with Fatal since nothing further can run without the object.
func doTestPlugin(t *testing.T, spec *volume.Spec) {
    tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, nil, nil))
    plug, err := plugMgr.FindPluginByName("kubernetes.io/glusterfs")
    if err != nil {
        t.Errorf("Can't find the plugin by name")
    }
    ep := &v1.Endpoints{ObjectMeta: metav1.ObjectMeta{Name: "foo"}, Subsets: []v1.EndpointSubset{{
        Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}}}}}
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{UID: types.UID("poduid")}}
    mounter, err := plug.(*glusterfsPlugin).newMounterInternal(spec, ep, pod, &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Failed to make a new Mounter: %v", err)
    }
    if mounter == nil {
        // Stop here: calling GetPath/SetUp on a nil mounter would panic.
        t.Fatal("Got a nil Mounter")
    }
    volumePath := mounter.GetPath()
    expectedPath := fmt.Sprintf("%s/pods/poduid/volumes/kubernetes.io~glusterfs/vol1", tmpDir)
    if volumePath != expectedPath {
        t.Errorf("Unexpected path, expected %q, got: %q", expectedPath, volumePath)
    }
    if err := mounter.SetUp(nil); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(volumePath); err != nil {
        if os.IsNotExist(err) {
            t.Errorf("SetUp() failed, volume path not created: %s", volumePath)
        } else {
            t.Errorf("SetUp() failed: %v", err)
        }
    }
    unmounter, err := plug.(*glusterfsPlugin).newUnmounterInternal("vol1", types.UID("poduid"), &mount.FakeMounter{})
    if err != nil {
        t.Errorf("Failed to make a new Unmounter: %v", err)
    }
    if unmounter == nil {
        // Stop here: calling TearDown on a nil unmounter would panic.
        t.Fatal("Got a nil Unmounter")
    }
    if err := unmounter.TearDown(); err != nil {
        t.Errorf("Expected success, got: %v", err)
    }
    if _, err := os.Stat(volumePath); err == nil {
        t.Errorf("TearDown() failed, volume path still exists: %s", volumePath)
    } else if !os.IsNotExist(err) {
        t.Errorf("TearDown() failed: %v", err)
    }
}
// TestPluginVolume runs the mount/unmount lifecycle for a direct (in-pod)
// glusterfs volume source.
func TestPluginVolume(t *testing.T) {
    vol := &v1.Volume{
        Name: "vol1",
        VolumeSource: v1.VolumeSource{
            Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
        },
    }
    doTestPlugin(t, volume.NewSpecFromVolume(vol))
}
// TestPluginPersistentVolume runs the mount/unmount lifecycle for a
// PersistentVolume-backed glusterfs source.
func TestPluginPersistentVolume(t *testing.T) {
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "vol1",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
            },
        },
    }
    doTestPlugin(t, volume.NewSpecFromPersistentVolume(pv, false))
}
// TestPersistentClaimReadOnlyFlag verifies that the readOnly flag set on the
// spec is propagated to the mounter's attributes.
func TestPersistentClaimReadOnlyFlag(t *testing.T) {
    tmpDir, err := utiltesting.MkTmpdir("glusterfs_test")
    if err != nil {
        t.Fatalf("error creating temp dir: %v", err)
    }
    defer os.RemoveAll(tmpDir)

    // A bound PV/PVC pair plus the endpoints object the plugin resolves.
    pv := &v1.PersistentVolume{
        ObjectMeta: metav1.ObjectMeta{
            Name: "pvA",
        },
        Spec: v1.PersistentVolumeSpec{
            PersistentVolumeSource: v1.PersistentVolumeSource{
                Glusterfs: &v1.GlusterfsVolumeSource{EndpointsName: "ep", Path: "vol", ReadOnly: false},
            },
            ClaimRef: &v1.ObjectReference{
                Name: "claimA",
            },
        },
    }
    claim := &v1.PersistentVolumeClaim{
        ObjectMeta: metav1.ObjectMeta{
            Name:      "claimA",
            Namespace: "nsA",
        },
        Spec: v1.PersistentVolumeClaimSpec{
            VolumeName: "pvA",
        },
        Status: v1.PersistentVolumeClaimStatus{
            Phase: v1.ClaimBound,
        },
    }
    ep := &v1.Endpoints{
        ObjectMeta: metav1.ObjectMeta{
            Namespace: "nsA",
            Name:      "ep",
        },
        Subsets: []v1.EndpointSubset{{
            Addresses: []v1.EndpointAddress{{IP: "127.0.0.1"}},
            Ports:     []v1.EndpointPort{{Name: "foo", Port: 80, Protocol: v1.ProtocolTCP}},
        }},
    }

    fakeClient := fake.NewSimpleClientset(pv, claim, ep)
    plugMgr := volume.VolumePluginMgr{}
    plugMgr.InitPlugins(ProbeVolumePlugins(), nil /* prober */, volumetest.NewFakeVolumeHost(tmpDir, fakeClient, nil))
    plug, _ := plugMgr.FindPluginByName(glusterfsPluginName)

    // readOnly bool is supplied by persistent-claim volume source when its mounter creates other volumes
    spec := volume.NewSpecFromPersistentVolume(pv, true)
    pod := &v1.Pod{ObjectMeta: metav1.ObjectMeta{Namespace: "nsA", UID: types.UID("poduid")}}
    mounter, _ := plug.NewMounter(spec, pod, volume.VolumeOptions{})
    if !mounter.GetAttributes().ReadOnly {
        t.Errorf("Expected true for mounter.IsReadOnly")
    }
}
// TestParseClassParameters validates parseClassParameters against a table of
// StorageClass parameter maps: authentication via restuserkey vs. secret,
// gidMin/gidMax range validation, and volumetype parsing.
func TestParseClassParameters(t *testing.T) {
    // Fake secret handed back by the reactor for the secret-based cases.
    secret := v1.Secret{
        Type: "kubernetes.io/glusterfs",
        Data: map[string][]byte{
            "data": []byte("mypassword"),
        },
    }
    tests := []struct {
        name         string
        parameters   map[string]string
        secret       *v1.Secret
        expectError  bool
        expectConfig *provisionerConfig
    }{
        {
            "password",
            map[string]string{
                "resturl":     "https://localhost:8080",
                "restuser":    "admin",
                "restuserkey": "password",
            },
            nil,   // secret
            false, // expect error
            &provisionerConfig{
                url:         "https://localhost:8080",
                user:        "admin",
                userKey:     "password",
                secretValue: "password",
                gidMin:      2000,
                gidMax:      2147483647,
                volumeType:  gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "secret",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restuser":        "admin",
                "secretname":      "mysecret",
                "secretnamespace": "default",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:             "https://localhost:8080",
                user:            "admin",
                secretName:      "mysecret",
                secretNamespace: "default",
                secretValue:     "mypassword",
                gidMin:          2000,
                gidMax:          2147483647,
                volumeType:      gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "no authentication",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:        "https://localhost:8080",
                gidMin:     2000,
                gidMax:     2147483647,
                volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "missing secret",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "secretname":      "mysecret",
                "secretnamespace": "default",
            },
            nil,  // secret
            true, // expect error
            nil,
        },
        {
            "secret with no namespace",
            map[string]string{
                "resturl":    "https://localhost:8080",
                "secretname": "mysecret",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "missing url",
            map[string]string{
                "restuser":    "admin",
                "restuserkey": "password",
            },
            nil,  // secret
            true, // expect error
            nil,
        },
        {
            "unknown parameter",
            map[string]string{
                "unknown":     "yes",
                "resturl":     "https://localhost:8080",
                "restuser":    "admin",
                "restuserkey": "password",
            },
            nil,  // secret
            true, // expect error
            nil,
        },
        {
            "invalid gidMin #1",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "0",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid gidMin #2",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "1999",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            // NOTE(review): identical inputs to "invalid gidMin #2"; #3 was
            // presumably meant to cover a different boundary value — confirm.
            "invalid gidMin #3",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "1999",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid gidMax #1",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMax":          "0",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid gidMax #2",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMax":          "1999",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            // NOTE(review): identical inputs to "invalid gidMax #2"; #3 was
            // presumably meant to cover a different boundary value — confirm.
            "invalid gidMax #3",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMax":          "1999",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid gidMin:gidMax",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "5001",
                "gidMax":          "5000",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "valid gidMin",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "4000",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:        "https://localhost:8080",
                gidMin:     4000,
                gidMax:     2147483647,
                volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "valid gidMax",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMax":          "5000",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:        "https://localhost:8080",
                gidMin:     2000,
                gidMax:     5000,
                volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "valid gidMin:gidMax",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "4000",
                "gidMax":          "5000",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:        "https://localhost:8080",
                gidMin:     4000,
                gidMax:     5000,
                volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 3}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "valid volumetype: replicate",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "4000",
                "gidMax":          "5000",
                "volumetype":      "replicate:4",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:        "https://localhost:8080",
                gidMin:     4000,
                gidMax:     5000,
                volumeType: gapi.VolumeDurabilityInfo{Type: "replicate", Replicate: gapi.ReplicaDurability{Replica: 4}, Disperse: gapi.DisperseDurability{Data: 0, Redundancy: 0}},
            },
        },
        {
            "valid volumetype: disperse",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "gidMin":          "4000",
                "gidMax":          "5000",
                "volumetype":      "disperse:4:2",
            },
            &secret,
            false, // expect error
            &provisionerConfig{
                url:        "https://localhost:8080",
                gidMin:     4000,
                gidMax:     5000,
                volumeType: gapi.VolumeDurabilityInfo{Type: "disperse", Replicate: gapi.ReplicaDurability{Replica: 0}, Disperse: gapi.DisperseDurability{Data: 4, Redundancy: 2}},
            },
        },
        {
            "invalid volumetype (disperse) parameter",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "volumetype":      "disperse:4:asd",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid volumetype (replicate) parameter",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "volumetype":      "replicate:asd",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid volumetype: unknown volumetype",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "volumetype":      "dispersereplicate:4:2",
            },
            &secret,
            true, // expect error
            nil,
        },
        {
            "invalid volumetype : negative value",
            map[string]string{
                "resturl":         "https://localhost:8080",
                "restauthenabled": "false",
                "volumetype":      "replicate:-1000",
            },
            &secret,
            true, // expect error
            nil,
        },
    }
    for _, test := range tests {
        client := &fake.Clientset{}
        // The reactor serves the per-case secret; the `test` loop variable is
        // captured by the closure but only used synchronously within this
        // iteration (parseClassParameters is called below), so it is safe.
        client.AddReactor("get", "secrets", func(action core.Action) (handled bool, ret runtime.Object, err error) {
            if test.secret != nil {
                return true, test.secret, nil
            }
            return true, nil, fmt.Errorf("Test %s did not set a secret", test.name)
        })
        cfg, err := parseClassParameters(test.parameters, client)
        if err != nil && !test.expectError {
            t.Errorf("Test %s got unexpected error %v", test.name, err)
        }
        if err == nil && test.expectError {
            t.Errorf("test %s expected error and got none", test.name)
        }
        if test.expectConfig != nil {
            if !reflect.DeepEqual(cfg, test.expectConfig) {
                t.Errorf("Test %s returned unexpected data, expected: %+v, got: %+v", test.name, test.expectConfig, cfg)
            }
        }
    }
}

View File

@ -0,0 +1,71 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package glusterfs
import (
"bufio"
"fmt"
"os"
"github.com/golang/glog"
)
// readGlusterLog will take the last 2 lines of the log file
// on failure of gluster SetUp and return those so kubelet can
// properly expose them
// return nil on any failure
// readGlusterLog collects the last two lines of the glusterfs client log at
// path so that kubelet can surface the underlying gluster error after a
// failed SetUp. The lines are returned packed into an error value; nil is
// returned when the log exists but is empty.
//
// Fix: the bufio.Scanner error is now checked after the scan loop; the
// original silently returned a truncated (or empty) result when scanning
// failed, e.g. on a line longer than the scanner's buffer.
func readGlusterLog(path string, podName string) error {
    var line1 string
    var line2 string
    linecount := 0

    glog.Infof("glusterfs: failure, now attempting to read the gluster log for pod %s", podName)

    // An empty path means there is no log file to read for this pod.
    if len(path) == 0 {
        return fmt.Errorf("glusterfs: log file does not exist for pod: %s", podName)
    }

    file, err := os.Open(path)
    if err != nil {
        return fmt.Errorf("glusterfs: could not open log file for pod: %s", podName)
    }
    defer file.Close()

    // Scan the whole file and keep only the last two lines. The log is pod
    // specific and expected to be small, so a full scan is acceptable and
    // avoids guessing at byte offsets with Seek.
    fscan := bufio.NewScanner(file)
    for fscan.Scan() {
        if linecount > 0 {
            line1 = line2
        }
        line2 = "\n" + fscan.Text()
        linecount++
    }
    // Surface scanner failures instead of silently returning a truncated
    // result.
    if scanErr := fscan.Err(); scanErr != nil {
        return fmt.Errorf("glusterfs: could not read log file for pod %s: %v", podName, scanErr)
    }

    if linecount > 0 {
        return fmt.Errorf("%v", line1+line2+"\n")
    }

    return nil
}