vendor cleanup: remove unused, non-go, and test files

Madhu Rajanna
2019-01-16 00:05:52 +05:30
parent 52cf4aa902
commit b10ba188e7
15,421 changed files with 17 additions and 4,208,853 deletions


@@ -1,207 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"convert.go",
"doc.go",
"docker_checkpoint.go",
"docker_container.go",
"docker_image.go",
"docker_legacy_service.go",
"docker_logs.go",
"docker_sandbox.go",
"docker_service.go",
"docker_streaming.go",
"exec.go",
"helpers.go",
"naming.go",
"security_context.go",
"selinux_util.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"docker_image_linux.go",
"docker_sandbox_others.go",
"docker_stats_linux.go",
"helpers_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"docker_image_unsupported.go",
"docker_sandbox_others.go",
"docker_stats_unsupported.go",
"helpers_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"docker_image_windows.go",
"docker_sandbox_windows.go",
"docker_stats_windows.go",
"helpers_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim",
visibility = ["//visibility:public"],
deps = [
"//pkg/credentialprovider:go_default_library",
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/checkpointmanager/checksum:go_default_library",
"//pkg/kubelet/checkpointmanager/errors:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/cm:go_default_library",
"//pkg/kubelet/dockershim/libdocker:go_default_library",
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/cni:go_default_library",
"//pkg/kubelet/dockershim/network/hostport:go_default_library",
"//pkg/kubelet/dockershim/network/kubenet:go_default_library",
"//pkg/kubelet/kuberuntime:go_default_library",
"//pkg/kubelet/leaky:go_default_library",
"//pkg/kubelet/qos:go_default_library",
"//pkg/kubelet/server/streaming:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/cache:go_default_library",
"//pkg/kubelet/util/ioutils:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//pkg/util/parsers:go_default_library",
"//vendor/github.com/armon/circbuf:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
"//vendor/github.com/docker/docker/api/types:go_default_library",
"//vendor/github.com/docker/docker/api/types/container:go_default_library",
"//vendor/github.com/docker/docker/api/types/filters:go_default_library",
"//vendor/github.com/docker/docker/api/types/strslice:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library",
"//vendor/github.com/docker/go-connections/nat:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/kubelet/apis:go_default_library",
"//pkg/kubelet/winstats:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = [
"convert_test.go",
"docker_checkpoint_test.go",
"docker_container_test.go",
"docker_image_test.go",
"docker_sandbox_test.go",
"docker_service_test.go",
"helpers_test.go",
"naming_test.go",
"security_context_test.go",
"selinux_util_test.go",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"helpers_linux_test.go",
],
"//conditions:default": [],
}),
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/checkpointmanager:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/dockershim/libdocker:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/testing:go_default_library",
"//pkg/kubelet/types:go_default_library",
"//pkg/kubelet/util/cache:go_default_library",
"//pkg/security/apparmor:go_default_library",
"//vendor/github.com/blang/semver:go_default_library",
"//vendor/github.com/docker/docker/api/types:go_default_library",
"//vendor/github.com/docker/docker/api/types/container:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library",
"//vendor/github.com/docker/go-connections/nat:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/require:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:linux": [
"//vendor/k8s.io/api/core/v1:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/dockershim/cm:all-srcs",
"//pkg/kubelet/dockershim/libdocker:all-srcs",
"//pkg/kubelet/dockershim/metrics:all-srcs",
"//pkg/kubelet/dockershim/network:all-srcs",
"//pkg/kubelet/dockershim/remote:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
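
The per-platform srcs entries in the select above pair one-to-one with Go build tags in the listed files. As a rough sketch (a hypothetical file body, not the vendored source), an "unsupported" variant carries a negated tag so that it compiles exactly on the platforms that lack a dedicated implementation:

// +build !linux,!windows

package dockershim

// Placeholder so the sketch is a complete, compilable file; the real
// *_unsupported.go files hold stub implementations for these platforms.
const statsSupported = false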


@@ -1,105 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = [
"container_manager.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"container_manager_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"container_manager_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"container_manager_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/cm",
deps = select({
"@io_bazel_rules_go//go/platform:android": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/kubelet/cm:go_default_library",
"//pkg/kubelet/dockershim/libdocker:go_default_library",
"//pkg/kubelet/qos:go_default_library",
"//pkg/util/version:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/cgroups/fs:go_default_library",
"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/kubelet/dockershim/libdocker:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@@ -1,21 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
type ContainerManager interface {
Start() error
}
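
The per-platform files that follow each satisfy this one-method interface. A toy example (all names made up) shows how little the contract demands:

package main

import "fmt"

// ContainerManager mirrors the interface above.
type ContainerManager interface {
	Start() error
}

// noopManager stands in for the windows no-op implementation below.
type noopManager struct{}

func (m *noopManager) Start() error { return nil }

func main() {
	var cm ContainerManager = &noopManager{}
	fmt.Println(cm.Start()) // <nil>
}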


@@ -1,148 +0,0 @@
// +build linux

/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"fmt"
"io/ioutil"
"regexp"
"strconv"
"time"
"github.com/golang/glog"
"github.com/opencontainers/runc/libcontainer/cgroups/fs"
"github.com/opencontainers/runc/libcontainer/configs"
"k8s.io/apimachinery/pkg/util/wait"
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
"k8s.io/kubernetes/pkg/kubelet/qos"
utilversion "k8s.io/kubernetes/pkg/util/version"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
const (
// The percent of the machine memory capacity.
dockerMemoryLimitThresholdPercent = kubecm.DockerMemoryLimitThresholdPercent
// The minimum memory limit allocated to docker container.
minDockerMemoryLimit = kubecm.MinDockerMemoryLimit
// The Docker OOM score adjustment.
dockerOOMScoreAdj = qos.DockerOOMScoreAdj
)
var (
memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)
)
func NewContainerManager(cgroupsName string, client libdocker.Interface) ContainerManager {
return &containerManager{
cgroupsName: cgroupsName,
client: client,
}
}
type containerManager struct {
// Docker client.
client libdocker.Interface
// Name of the cgroups.
cgroupsName string
// Manager for the cgroups.
cgroupsManager *fs.Manager
}
func (m *containerManager) Start() error {
// TODO: check if the required cgroups are mounted.
if len(m.cgroupsName) != 0 {
manager, err := createCgroupManager(m.cgroupsName)
if err != nil {
return err
}
m.cgroupsManager = manager
}
go wait.Until(m.doWork, 5*time.Minute, wait.NeverStop)
return nil
}
func (m *containerManager) doWork() {
v, err := m.client.Version()
if err != nil {
glog.Errorf("Unable to get docker version: %v", err)
return
}
version, err := utilversion.ParseGeneric(v.APIVersion)
if err != nil {
glog.Errorf("Unable to parse docker version %q: %v", v.APIVersion, err)
return
}
// EnsureDockerInContainer does two things.
// 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil.
// 2. Ensure processes have the OOM score applied.
if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil {
glog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err)
}
}
func createCgroupManager(name string) (*fs.Manager, error) {
var memoryLimit uint64
memoryCapacity, err := getMemoryCapacity()
if err != nil {
glog.Errorf("Failed to get the memory capacity on machine: %v", err)
} else {
memoryLimit = memoryCapacity * dockerMemoryLimitThresholdPercent / 100
}
// Fall back to the minimum limit if the capacity is unknown or the computed limit is too small.
if err != nil || memoryLimit < minDockerMemoryLimit {
memoryLimit = minDockerMemoryLimit
}
glog.V(2).Infof("Configure resource-only container %q with memory limit: %d", name, memoryLimit)
allowAllDevices := true
cm := &fs.Manager{
Cgroups: &configs.Cgroup{
Parent: "/",
Name: name,
Resources: &configs.Resources{
Memory: int64(memoryLimit),
MemorySwap: -1,
AllowAllDevices: &allowAllDevices,
},
},
}
return cm, nil
}
// getMemoryCapacity returns the memory capacity on the machine in bytes.
func getMemoryCapacity() (uint64, error) {
out, err := ioutil.ReadFile("/proc/meminfo")
if err != nil {
return 0, err
}
return parseCapacity(out, memoryCapacityRegexp)
}
// parseCapacity matches a Regexp in a []byte, returning the resulting value in bytes.
// Assumes that the value matched by the Regexp is in KB.
func parseCapacity(b []byte, r *regexp.Regexp) (uint64, error) {
matches := r.FindSubmatch(b)
if len(matches) != 2 {
return 0, fmt.Errorf("failed to match regexp in output: %q", string(b))
}
m, err := strconv.ParseUint(string(matches[1]), 10, 64)
if err != nil {
return 0, err
}
// Convert to bytes.
return m * 1024, nil
}
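
To see parseCapacity's kB-to-bytes behavior concretely, here is a self-contained sketch against a made-up /proc/meminfo line; it reuses the same regexp and parsing logic as above:

package main

import (
	"fmt"
	"regexp"
	"strconv"
)

var memoryCapacityRegexp = regexp.MustCompile(`MemTotal:\s*([0-9]+) kB`)

// parseCapacity is the same logic as above: match the kB value, convert to bytes.
func parseCapacity(b []byte, r *regexp.Regexp) (uint64, error) {
	matches := r.FindSubmatch(b)
	if len(matches) != 2 {
		return 0, fmt.Errorf("failed to match regexp in output: %q", string(b))
	}
	m, err := strconv.ParseUint(string(matches[1]), 10, 64)
	if err != nil {
		return 0, err
	}
	return m * 1024, nil
}

func main() {
	// A sample meminfo line: 16384 kB parses to 16384*1024 = 16777216 bytes.
	n, err := parseCapacity([]byte("MemTotal:       16384 kB"), memoryCapacityRegexp)
	fmt.Println(n, err) // 16777216 <nil>
}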


@@ -1,36 +0,0 @@
// +build !linux,!windows

/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"fmt"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
type unsupportedContainerManager struct {
}
func NewContainerManager(_ string, _ libdocker.Interface) ContainerManager {
return &unsupportedContainerManager{}
}
func (m *unsupportedContainerManager) Start() error {
return fmt.Errorf("Container Manager is unsupported in this build")
}


@@ -1,35 +0,0 @@
// +build windows

/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cm
import (
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
// no-op
type containerManager struct {
}
func NewContainerManager(_ string, _ libdocker.Interface) ContainerManager {
return &containerManager{}
}
func (m *containerManager) Start() error {
return nil
}


@@ -1,178 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"strings"
"time"
dockertypes "github.com/docker/docker/api/types"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
// This file contains helper functions to convert docker API types to runtime
// API types, or vice versa.
func imageToRuntimeAPIImage(image *dockertypes.ImageSummary) (*runtimeapi.Image, error) {
if image == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
}
size := uint64(image.VirtualSize)
return &runtimeapi.Image{
Id: image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
Size_: size,
}, nil
}
func imageInspectToRuntimeAPIImage(image *dockertypes.ImageInspect) (*runtimeapi.Image, error) {
if image == nil || image.Config == nil {
return nil, fmt.Errorf("unable to convert a nil pointer to a runtime API image")
}
size := uint64(image.VirtualSize)
runtimeImage := &runtimeapi.Image{
Id: image.ID,
RepoTags: image.RepoTags,
RepoDigests: image.RepoDigests,
Size_: size,
}
uid, username := getUserFromImageUser(image.Config.User)
if uid != nil {
runtimeImage.Uid = &runtimeapi.Int64Value{Value: *uid}
}
runtimeImage.Username = username
return runtimeImage, nil
}
func toPullableImageID(id string, image *dockertypes.ImageInspect) string {
// Default to the image ID, but if RepoDigests is not empty, use
// the first digest instead.
imageID := DockerImageIDPrefix + id
if len(image.RepoDigests) > 0 {
imageID = DockerPullableImageIDPrefix + image.RepoDigests[0]
}
return imageID
}
func toRuntimeAPIContainer(c *dockertypes.Container) (*runtimeapi.Container, error) {
state := toRuntimeAPIContainerState(c.Status)
if len(c.Names) == 0 {
return nil, fmt.Errorf("unexpected empty container name: %+v", c)
}
metadata, err := parseContainerName(c.Names[0])
if err != nil {
return nil, err
}
labels, annotations := extractLabels(c.Labels)
sandboxID := c.Labels[sandboxIDLabelKey]
// The timestamp in dockertypes.Container is in seconds.
createdAt := c.Created * int64(time.Second)
return &runtimeapi.Container{
Id: c.ID,
PodSandboxId: sandboxID,
Metadata: metadata,
Image: &runtimeapi.ImageSpec{Image: c.Image},
ImageRef: c.ImageID,
State: state,
CreatedAt: createdAt,
Labels: labels,
Annotations: annotations,
}, nil
}
func toDockerContainerStatus(state runtimeapi.ContainerState) string {
switch state {
case runtimeapi.ContainerState_CONTAINER_CREATED:
return "created"
case runtimeapi.ContainerState_CONTAINER_RUNNING:
return "running"
case runtimeapi.ContainerState_CONTAINER_EXITED:
return "exited"
case runtimeapi.ContainerState_CONTAINER_UNKNOWN:
fallthrough
default:
return "unknown"
}
}
func toRuntimeAPIContainerState(state string) runtimeapi.ContainerState {
// Parse the state string in dockertypes.Container. This could break when
// we upgrade docker.
switch {
case strings.HasPrefix(state, libdocker.StatusRunningPrefix):
return runtimeapi.ContainerState_CONTAINER_RUNNING
case strings.HasPrefix(state, libdocker.StatusExitedPrefix):
return runtimeapi.ContainerState_CONTAINER_EXITED
case strings.HasPrefix(state, libdocker.StatusCreatedPrefix):
return runtimeapi.ContainerState_CONTAINER_CREATED
default:
return runtimeapi.ContainerState_CONTAINER_UNKNOWN
}
}
func toRuntimeAPISandboxState(state string) runtimeapi.PodSandboxState {
// Parse the state string in dockertypes.Container. This could break when
// we upgrade docker.
switch {
case strings.HasPrefix(state, libdocker.StatusRunningPrefix):
return runtimeapi.PodSandboxState_SANDBOX_READY
default:
return runtimeapi.PodSandboxState_SANDBOX_NOTREADY
}
}
func containerToRuntimeAPISandbox(c *dockertypes.Container) (*runtimeapi.PodSandbox, error) {
state := toRuntimeAPISandboxState(c.Status)
if len(c.Names) == 0 {
return nil, fmt.Errorf("unexpected empty sandbox name: %+v", c)
}
metadata, err := parseSandboxName(c.Names[0])
if err != nil {
return nil, err
}
labels, annotations := extractLabels(c.Labels)
// The timestamp in dockertypes.Container is in seconds.
createdAt := c.Created * int64(time.Second)
return &runtimeapi.PodSandbox{
Id: c.ID,
Metadata: metadata,
State: state,
CreatedAt: createdAt,
Labels: labels,
Annotations: annotations,
}, nil
}
func checkpointToRuntimeAPISandbox(id string, checkpoint DockershimCheckpoint) *runtimeapi.PodSandbox {
state := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
_, name, namespace, _, _ := checkpoint.GetData()
return &runtimeapi.PodSandbox{
Id: id,
Metadata: &runtimeapi.PodSandboxMetadata{
Name: name,
Namespace: namespace,
},
State: state,
}
}
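
One subtle point above is the createdAt conversion: dockertypes.Container.Created is a Unix timestamp in seconds, while the runtime API expects nanoseconds, so the code multiplies by int64(time.Second), which is 1e9. A standalone sketch with a sample value:

package main

import (
	"fmt"
	"time"
)

func main() {
	var created int64 = 1547571952 // seconds, as docker reports it (sample value)
	createdAt := created * int64(time.Second)
	fmt.Println(createdAt)               // 1547571952000000000 (nanoseconds)
	fmt.Println(time.Unix(0, createdAt)) // the same instant as a time.Time
}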


@@ -1,71 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"testing"
dockertypes "github.com/docker/docker/api/types"
"github.com/stretchr/testify/assert"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func TestConvertDockerStatusToRuntimeAPIState(t *testing.T) {
testCases := []struct {
input string
expected runtimeapi.ContainerState
}{
{input: "Up 5 hours", expected: runtimeapi.ContainerState_CONTAINER_RUNNING},
{input: "Exited (0) 2 hours ago", expected: runtimeapi.ContainerState_CONTAINER_EXITED},
{input: "Created", expected: runtimeapi.ContainerState_CONTAINER_CREATED},
{input: "Random string", expected: runtimeapi.ContainerState_CONTAINER_UNKNOWN},
}
for _, test := range testCases {
actual := toRuntimeAPIContainerState(test.input)
assert.Equal(t, test.expected, actual)
}
}
func TestConvertToPullableImageID(t *testing.T) {
testCases := []struct {
id string
image *dockertypes.ImageInspect
expected string
}{
{
id: "image-1",
image: &dockertypes.ImageInspect{
RepoDigests: []string{"digest-1"},
},
expected: DockerPullableImageIDPrefix + "digest-1",
},
{
id: "image-2",
image: &dockertypes.ImageInspect{
RepoDigests: []string{},
},
expected: DockerImageIDPrefix + "image-2",
},
}
for _, test := range testCases {
actual := toPullableImageID(test.id, test.image)
assert.Equal(t, test.expected, actual)
}
}


@@ -1,18 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Docker integration using pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go
package dockershim


@@ -1,95 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"encoding/json"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/checksum"
)
const (
// default directory to store pod sandbox checkpoint files
sandboxCheckpointDir = "sandbox"
protocolTCP = Protocol("tcp")
protocolUDP = Protocol("udp")
schemaVersion = "v1"
)
type DockershimCheckpoint interface {
checkpointmanager.Checkpoint
GetData() (string, string, string, []*PortMapping, bool)
}
type Protocol string
// PortMapping is the port mapping configurations of a sandbox.
type PortMapping struct {
// Protocol of the port mapping.
Protocol *Protocol `json:"protocol,omitempty"`
// Port number within the container.
ContainerPort *int32 `json:"container_port,omitempty"`
// Port number on the host.
HostPort *int32 `json:"host_port,omitempty"`
}
// CheckpointData contains all types of data that can be stored in the checkpoint.
type CheckpointData struct {
PortMappings []*PortMapping `json:"port_mappings,omitempty"`
HostNetwork bool `json:"host_network,omitempty"`
}
// PodSandboxCheckpoint is the checkpoint structure for a sandbox
type PodSandboxCheckpoint struct {
// Version of the pod sandbox checkpoint schema.
Version string `json:"version"`
// Pod name of the sandbox. Same as the pod name in the PodSpec.
Name string `json:"name"`
// Pod namespace of the sandbox. Same as the pod namespace in the PodSpec.
Namespace string `json:"namespace"`
// Data to checkpoint for pod sandbox.
Data *CheckpointData `json:"data,omitempty"`
// Checksum is calculated with the fnv hash of the checkpoint object with the checksum field set to zero.
Checksum checksum.Checksum `json:"checksum"`
}
func NewPodSandboxCheckpoint(namespace, name string, data *CheckpointData) DockershimCheckpoint {
return &PodSandboxCheckpoint{
Version: schemaVersion,
Namespace: namespace,
Name: name,
Data: data,
}
}
func (cp *PodSandboxCheckpoint) MarshalCheckpoint() ([]byte, error) {
cp.Checksum = checksum.New(*cp.Data)
return json.Marshal(*cp)
}
func (cp *PodSandboxCheckpoint) UnmarshalCheckpoint(blob []byte) error {
return json.Unmarshal(blob, cp)
}
func (cp *PodSandboxCheckpoint) VerifyChecksum() error {
return cp.Checksum.Verify(*cp.Data)
}
func (cp *PodSandboxCheckpoint) GetData() (string, string, string, []*PortMapping, bool) {
return cp.Version, cp.Name, cp.Namespace, cp.Data.PortMappings, cp.Data.HostNetwork
}
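
To make the serialized checkpoint shape concrete, the following standalone sketch marshals local mirrors of the types above (the Checksum field is left out, since computing it needs the vendored checksum package):

package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the checkpoint types above, for illustration only.
type Protocol string

type PortMapping struct {
	Protocol      *Protocol `json:"protocol,omitempty"`
	ContainerPort *int32    `json:"container_port,omitempty"`
	HostPort      *int32    `json:"host_port,omitempty"`
}

type CheckpointData struct {
	PortMappings []*PortMapping `json:"port_mappings,omitempty"`
	HostNetwork  bool           `json:"host_network,omitempty"`
}

type PodSandboxCheckpoint struct {
	Version   string          `json:"version"`
	Name      string          `json:"name"`
	Namespace string          `json:"namespace"`
	Data      *CheckpointData `json:"data,omitempty"`
}

func main() {
	tcp, port := Protocol("tcp"), int32(8080)
	cp := PodSandboxCheckpoint{
		Version:   "v1",
		Name:      "sandbox1",
		Namespace: "ns1",
		Data: &CheckpointData{
			PortMappings: []*PortMapping{{Protocol: &tcp, ContainerPort: &port}},
			HostNetwork:  true,
		},
	}
	b, _ := json.Marshal(cp)
	fmt.Println(string(b))
	// {"version":"v1","name":"sandbox1","namespace":"ns1","data":{"port_mappings":[{"protocol":"tcp","container_port":8080}],"host_network":true}}
}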


@@ -1,33 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"testing"
"github.com/stretchr/testify/assert"
)
func TestPodSandboxCheckpoint(t *testing.T) {
data := &CheckpointData{HostNetwork: true}
checkpoint := NewPodSandboxCheckpoint("ns1", "sandbox1", data)
version, name, namespace, _, hostNetwork := checkpoint.GetData()
assert.Equal(t, schemaVersion, version)
assert.Equal(t, "ns1", namespace)
assert.Equal(t, "sandbox1", name)
assert.Equal(t, true, hostNetwork)
}


@@ -1,429 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
"os"
"path/filepath"
"time"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerfilters "github.com/docker/docker/api/types/filters"
dockerstrslice "github.com/docker/docker/api/types/strslice"
"github.com/golang/glog"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
// ListContainers lists all containers matching the filter.
func (ds *dockerService) ListContainers(_ context.Context, r *runtimeapi.ListContainersRequest) (*runtimeapi.ListContainersResponse, error) {
filter := r.GetFilter()
opts := dockertypes.ContainerListOptions{All: true}
opts.Filters = dockerfilters.NewArgs()
f := newDockerFilter(&opts.Filters)
// Add filter to get *only* (non-sandbox) containers.
f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer)
if filter != nil {
if filter.Id != "" {
f.Add("id", filter.Id)
}
if filter.State != nil {
f.Add("status", toDockerContainerStatus(filter.GetState().State))
}
if filter.PodSandboxId != "" {
f.AddLabel(sandboxIDLabelKey, filter.PodSandboxId)
}
if filter.LabelSelector != nil {
for k, v := range filter.LabelSelector {
f.AddLabel(k, v)
}
}
}
containers, err := ds.client.ListContainers(opts)
if err != nil {
return nil, err
}
// Convert docker containers to runtime API containers.
result := []*runtimeapi.Container{}
for i := range containers {
c := containers[i]
converted, err := toRuntimeAPIContainer(&c)
if err != nil {
glog.V(4).Infof("Unable to convert docker to runtime API container: %v", err)
continue
}
result = append(result, converted)
}
return &runtimeapi.ListContainersResponse{Containers: result}, nil
}
// CreateContainer creates a new container in the given PodSandbox
// Docker cannot store the log at an arbitrary location (yet), so we create a
// symlink at LogPath, linking to the actual path of the log.
// TODO: check if the default values returned by the runtime API are ok.
func (ds *dockerService) CreateContainer(_ context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
podSandboxID := r.PodSandboxId
config := r.GetConfig()
sandboxConfig := r.GetSandboxConfig()
if config == nil {
return nil, fmt.Errorf("container config is nil")
}
if sandboxConfig == nil {
return nil, fmt.Errorf("sandbox config is nil for container %q", config.Metadata.Name)
}
labels := makeLabels(config.GetLabels(), config.GetAnnotations())
// Apply the container type label.
labels[containerTypeLabelKey] = containerTypeLabelContainer
// Write the container log path in the labels.
labels[containerLogPathLabelKey] = filepath.Join(sandboxConfig.LogDirectory, config.LogPath)
// Write the sandbox ID in the labels.
labels[sandboxIDLabelKey] = podSandboxID
apiVersion, err := ds.getDockerAPIVersion()
if err != nil {
return nil, fmt.Errorf("unable to get the docker API version: %v", err)
}
image := ""
if iSpec := config.GetImage(); iSpec != nil {
image = iSpec.Image
}
createConfig := dockertypes.ContainerCreateConfig{
Name: makeContainerName(sandboxConfig, config),
Config: &dockercontainer.Config{
// TODO: set User.
Entrypoint: dockerstrslice.StrSlice(config.Command),
Cmd: dockerstrslice.StrSlice(config.Args),
Env: generateEnvList(config.GetEnvs()),
Image: image,
WorkingDir: config.WorkingDir,
Labels: labels,
// Interactive containers:
OpenStdin: config.Stdin,
StdinOnce: config.StdinOnce,
Tty: config.Tty,
// Disable Docker's health check until we officially support it
// (https://github.com/kubernetes/kubernetes/issues/25829).
Healthcheck: &dockercontainer.HealthConfig{
Test: []string{"NONE"},
},
},
HostConfig: &dockercontainer.HostConfig{
Binds: generateMountBindings(config.GetMounts()),
},
}
hc := createConfig.HostConfig
err = ds.updateCreateConfig(&createConfig, config, sandboxConfig, podSandboxID, securityOptSeparator, apiVersion)
if err != nil {
return nil, fmt.Errorf("failed to update container create config: %v", err)
}
// Set devices for container.
devices := make([]dockercontainer.DeviceMapping, len(config.Devices))
for i, device := range config.Devices {
devices[i] = dockercontainer.DeviceMapping{
PathOnHost: device.HostPath,
PathInContainer: device.ContainerPath,
CgroupPermissions: device.Permissions,
}
}
hc.Resources.Devices = devices
securityOpts, err := ds.getSecurityOpts(config.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator)
if err != nil {
return nil, fmt.Errorf("failed to generate security options for container %q: %v", config.Metadata.Name, err)
}
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
createResp, err := ds.client.CreateContainer(createConfig)
if err != nil {
createResp, err = recoverFromCreationConflictIfNeeded(ds.client, createConfig, err)
}
if createResp != nil {
return &runtimeapi.CreateContainerResponse{ContainerId: createResp.ID}, nil
}
return nil, err
}
// getContainerLogPath returns the container log path specified by kubelet and the real
// path where docker stores the container log.
func (ds *dockerService) getContainerLogPath(containerID string) (string, string, error) {
info, err := ds.client.InspectContainer(containerID)
if err != nil {
return "", "", fmt.Errorf("failed to inspect container %q: %v", containerID, err)
}
return info.Config.Labels[containerLogPathLabelKey], info.LogPath, nil
}
// createContainerLogSymlink creates the symlink for docker container log.
func (ds *dockerService) createContainerLogSymlink(containerID string) error {
path, realPath, err := ds.getContainerLogPath(containerID)
if err != nil {
return fmt.Errorf("failed to get container %q log path: %v", containerID, err)
}
if path == "" {
glog.V(5).Infof("Container %s log path isn't specified, will not create the symlink", containerID)
return nil
}
if realPath != "" {
// Only create the symlink when the container log path is specified and the log file exists.
// Delete a possibly existing file first.
if err = ds.os.Remove(path); err == nil {
glog.Warningf("Deleted previously existing symlink file: %q", path)
}
if err = ds.os.Symlink(realPath, path); err != nil {
return fmt.Errorf("failed to create symbolic link %q to the container log file %q for container %q: %v",
path, realPath, containerID, err)
}
} else {
supported, err := ds.IsCRISupportedLogDriver()
if err != nil {
glog.Warningf("Failed to check supported logging driver by CRI: %v", err)
return nil
}
if supported {
glog.Warningf("Cannot create symbolic link because container log file doesn't exist!")
} else {
glog.V(5).Infof("Unsupported logging driver by CRI")
}
}
return nil
}
// removeContainerLogSymlink removes the symlink for docker container log.
func (ds *dockerService) removeContainerLogSymlink(containerID string) error {
path, _, err := ds.getContainerLogPath(containerID)
if err != nil {
return fmt.Errorf("failed to get container %q log path: %v", containerID, err)
}
if path != "" {
// Only remove the symlink when container log path is specified.
err := ds.os.Remove(path)
if err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to remove container %q log symlink %q: %v", containerID, path, err)
}
}
return nil
}
// StartContainer starts the container.
func (ds *dockerService) StartContainer(_ context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
err := ds.client.StartContainer(r.ContainerId)
// Create container log symlink for all containers (including failed ones).
if linkError := ds.createContainerLogSymlink(r.ContainerId); linkError != nil {
// Do not stop the container if we failed to create the symlink because:
// 1. This is not a critical failure.
// 2. We don't have enough information to properly stop the container here.
// Kubelet will surface this error to the user via an event.
return nil, linkError
}
if err != nil {
err = transformStartContainerError(err)
return nil, fmt.Errorf("failed to start container %q: %v", r.ContainerId, err)
}
return &runtimeapi.StartContainerResponse{}, nil
}
// StopContainer stops a running container with a grace period (i.e., timeout).
func (ds *dockerService) StopContainer(_ context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
err := ds.client.StopContainer(r.ContainerId, time.Duration(r.Timeout)*time.Second)
if err != nil {
return nil, err
}
return &runtimeapi.StopContainerResponse{}, nil
}
// RemoveContainer removes the container.
func (ds *dockerService) RemoveContainer(_ context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
// Ideally, the log lifecycle should be independent of the container lifecycle.
// However, docker removes the container log after the container is removed;
// we can't prevent that now, so we also clean up the symlink here.
err := ds.removeContainerLogSymlink(r.ContainerId)
if err != nil {
return nil, err
}
err = ds.client.RemoveContainer(r.ContainerId, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
if err != nil {
return nil, fmt.Errorf("failed to remove container %q: %v", r.ContainerId, err)
}
return &runtimeapi.RemoveContainerResponse{}, nil
}
func getContainerTimestamps(r *dockertypes.ContainerJSON) (time.Time, time.Time, time.Time, error) {
var createdAt, startedAt, finishedAt time.Time
var err error
createdAt, err = libdocker.ParseDockerTimestamp(r.Created)
if err != nil {
return createdAt, startedAt, finishedAt, err
}
startedAt, err = libdocker.ParseDockerTimestamp(r.State.StartedAt)
if err != nil {
return createdAt, startedAt, finishedAt, err
}
finishedAt, err = libdocker.ParseDockerTimestamp(r.State.FinishedAt)
if err != nil {
return createdAt, startedAt, finishedAt, err
}
return createdAt, startedAt, finishedAt, nil
}
// ContainerStatus inspects the docker container and returns the status.
func (ds *dockerService) ContainerStatus(_ context.Context, req *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
containerID := req.ContainerId
r, err := ds.client.InspectContainer(containerID)
if err != nil {
return nil, err
}
// Parse the timestamps.
createdAt, startedAt, finishedAt, err := getContainerTimestamps(r)
if err != nil {
return nil, fmt.Errorf("failed to parse timestamp for container %q: %v", containerID, err)
}
// Convert the image id to a pullable id.
ir, err := ds.client.InspectImageByID(r.Image)
if err != nil {
return nil, fmt.Errorf("unable to inspect docker image %q while inspecting docker container %q: %v", r.Image, containerID, err)
}
imageID := toPullableImageID(r.Image, ir)
// Convert the mounts.
mounts := make([]*runtimeapi.Mount, 0, len(r.Mounts))
for i := range r.Mounts {
m := r.Mounts[i]
readonly := !m.RW
mounts = append(mounts, &runtimeapi.Mount{
HostPath: m.Source,
ContainerPath: m.Destination,
Readonly: readonly,
// Note: Can't set SeLinuxRelabel
})
}
// Interpret container states.
var state runtimeapi.ContainerState
var reason, message string
if r.State.Running {
// Container is running.
state = runtimeapi.ContainerState_CONTAINER_RUNNING
} else {
// Container is *not* running. We need to get more details.
// * Case 1: container has run and exited with non-zero finishedAt
// time.
// * Case 2: container has failed to start; it has a zero finishedAt
// time, but a non-zero exit code.
// * Case 3: container has been created, but not started (yet).
if !finishedAt.IsZero() { // Case 1
state = runtimeapi.ContainerState_CONTAINER_EXITED
switch {
case r.State.OOMKilled:
// TODO: consider exposing OOMKilled via the runtimeAPI.
// Note: if an application handles OOMKilled gracefully, the
// exit code could be zero.
reason = "OOMKilled"
case r.State.ExitCode == 0:
reason = "Completed"
default:
reason = "Error"
}
} else if r.State.ExitCode != 0 { // Case 2
state = runtimeapi.ContainerState_CONTAINER_EXITED
// Adjust finishedAt and startedAt time to createdAt time to avoid
// confusion.
finishedAt, startedAt = createdAt, createdAt
reason = "ContainerCannotRun"
} else { // Case 3
state = runtimeapi.ContainerState_CONTAINER_CREATED
}
message = r.State.Error
}
// Convert to unix timestamps.
ct, st, ft := createdAt.UnixNano(), startedAt.UnixNano(), finishedAt.UnixNano()
exitCode := int32(r.State.ExitCode)
metadata, err := parseContainerName(r.Name)
if err != nil {
return nil, err
}
labels, annotations := extractLabels(r.Config.Labels)
imageName := r.Config.Image
if len(ir.RepoTags) > 0 {
imageName = ir.RepoTags[0]
}
status := &runtimeapi.ContainerStatus{
Id: r.ID,
Metadata: metadata,
Image: &runtimeapi.ImageSpec{Image: imageName},
ImageRef: imageID,
Mounts: mounts,
ExitCode: exitCode,
State: state,
CreatedAt: ct,
StartedAt: st,
FinishedAt: ft,
Reason: reason,
Message: message,
Labels: labels,
Annotations: annotations,
LogPath: r.Config.Labels[containerLogPathLabelKey],
}
return &runtimeapi.ContainerStatusResponse{Status: status}, nil
}
func (ds *dockerService) UpdateContainerResources(_ context.Context, r *runtimeapi.UpdateContainerResourcesRequest) (*runtimeapi.UpdateContainerResourcesResponse, error) {
resources := r.Linux
updateConfig := dockercontainer.UpdateConfig{
Resources: dockercontainer.Resources{
CPUPeriod: resources.CpuPeriod,
CPUQuota: resources.CpuQuota,
CPUShares: resources.CpuShares,
Memory: resources.MemoryLimitInBytes,
CpusetCpus: resources.CpusetCpus,
CpusetMems: resources.CpusetMems,
},
}
err := ds.client.UpdateContainerResources(r.ContainerId, updateConfig)
if err != nil {
return nil, fmt.Errorf("failed to update container %q: %v", r.ContainerId, err)
}
return &runtimeapi.UpdateContainerResourcesResponse{}, nil
}
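
The symlink handling in createContainerLogSymlink and removeContainerLogSymlink reduces to remove-then-symlink, ignoring not-exist errors on cleanup. A self-contained sketch with throwaway temp paths (all names made up):

package main

import (
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"
)

func main() {
	dir, err := ioutil.TempDir("", "logdemo")
	if err != nil {
		panic(err)
	}
	defer os.RemoveAll(dir)

	realPath := filepath.Join(dir, "container.log") // where docker writes the log
	linkPath := filepath.Join(dir, "kubelet.log")   // the kubelet-specified log path
	if err := ioutil.WriteFile(realPath, []byte("log line\n"), 0644); err != nil {
		panic(err)
	}

	// Delete a possibly existing file first, then link to the real log,
	// mirroring createContainerLogSymlink.
	os.Remove(linkPath)
	if err := os.Symlink(realPath, linkPath); err != nil {
		panic(err)
	}
	b, _ := ioutil.ReadFile(linkPath)
	fmt.Print(string(b)) // reads through the symlink: "log line"

	// Cleanup mirrors removeContainerLogSymlink: ignore not-exist errors.
	if err := os.Remove(linkPath); err != nil && !os.IsNotExist(err) {
		panic(err)
	}
}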


@@ -1,310 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
"path/filepath"
"strings"
"testing"
"time"
dockertypes "github.com/docker/docker/api/types"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
)
// A helper to create a basic config.
func makeContainerConfig(sConfig *runtimeapi.PodSandboxConfig, name, image string, attempt uint32, labels, annotations map[string]string) *runtimeapi.ContainerConfig {
return &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: name,
Attempt: attempt,
},
Image: &runtimeapi.ImageSpec{Image: image},
Labels: labels,
Annotations: annotations,
}
}
func getTestCTX() context.Context {
return context.Background()
}
// TestListContainers creates several containers and then lists them to check
// whether the correct metadata, states, and labels are returned.
func TestListContainers(t *testing.T) {
ds, _, fakeClock := newTestDockerService()
podName, namespace := "foo", "bar"
containerName, image := "sidecar", "logger"
configs := []*runtimeapi.ContainerConfig{}
sConfigs := []*runtimeapi.PodSandboxConfig{}
for i := 0; i < 3; i++ {
s := makeSandboxConfig(fmt.Sprintf("%s%d", podName, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0)
labels := map[string]string{"abc.xyz": fmt.Sprintf("label%d", i)}
annotations := map[string]string{"foo.bar.baz": fmt.Sprintf("annotation%d", i)}
c := makeContainerConfig(s, fmt.Sprintf("%s%d", containerName, i),
fmt.Sprintf("%s:v%d", image, i), uint32(i), labels, annotations)
sConfigs = append(sConfigs, s)
configs = append(configs, c)
}
expected := []*runtimeapi.Container{}
state := runtimeapi.ContainerState_CONTAINER_RUNNING
var createdAt int64 = fakeClock.Now().UnixNano()
for i := range configs {
// We don't care about the sandbox id; pass a bogus one.
sandboxID := fmt.Sprintf("sandboxid%d", i)
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxID, Config: configs[i], SandboxConfig: sConfigs[i]}
createResp, err := ds.CreateContainer(getTestCTX(), req)
require.NoError(t, err)
id := createResp.ContainerId
_, err = ds.StartContainer(getTestCTX(), &runtimeapi.StartContainerRequest{ContainerId: id})
require.NoError(t, err)
imageRef := "" // FakeDockerClient doesn't populate ImageRef yet.
// Prepend to the expected list because ListContainers returns
// the most recent containers first.
expected = append([]*runtimeapi.Container{{
Metadata: configs[i].Metadata,
Id: id,
PodSandboxId: sandboxID,
State: state,
CreatedAt: createdAt,
Image: configs[i].Image,
ImageRef: imageRef,
Labels: configs[i].Labels,
Annotations: configs[i].Annotations,
}}, expected...)
}
listResp, err := ds.ListContainers(getTestCTX(), &runtimeapi.ListContainersRequest{})
require.NoError(t, err)
assert.Len(t, listResp.Containers, len(expected))
assert.Equal(t, expected, listResp.Containers)
}
// TestContainerStatus tests the basic lifecycle operations and verifies that
// the status returned reflects the operations performed.
func TestContainerStatus(t *testing.T) {
ds, fDocker, fClock := newTestDockerService()
sConfig := makeSandboxConfig("foo", "bar", "1", 0)
labels := map[string]string{"abc.xyz": "foo"}
annotations := map[string]string{"foo.bar.baz": "abc"}
imageName := "iamimage"
config := makeContainerConfig(sConfig, "pause", imageName, 0, labels, annotations)
var defaultTime time.Time
dt := defaultTime.UnixNano()
ct, st, ft := dt, dt, dt
state := runtimeapi.ContainerState_CONTAINER_CREATED
imageRef := DockerImageIDPrefix + imageName
// The following variables are not set in FakeDockerClient.
exitCode := int32(0)
var reason, message string
expected := &runtimeapi.ContainerStatus{
State: state,
CreatedAt: ct,
StartedAt: st,
FinishedAt: ft,
Metadata: config.Metadata,
Image: config.Image,
ImageRef: imageRef,
ExitCode: exitCode,
Reason: reason,
Message: message,
Mounts: []*runtimeapi.Mount{},
Labels: config.Labels,
Annotations: config.Annotations,
}
fDocker.InjectImages([]dockertypes.ImageSummary{{ID: imageName}})
// Create the container.
fClock.SetTime(time.Now().Add(-1 * time.Hour))
expected.CreatedAt = fClock.Now().UnixNano()
const sandboxId = "sandboxid"
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxId, Config: config, SandboxConfig: sConfig}
createResp, err := ds.CreateContainer(getTestCTX(), req)
require.NoError(t, err)
id := createResp.ContainerId
// Check internal labels
c, err := fDocker.InspectContainer(id)
require.NoError(t, err)
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer)
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxId)
// Set the id manually since we don't know the id until it's created.
expected.Id = id
assert.NoError(t, err)
resp, err := ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
require.NoError(t, err)
assert.Equal(t, expected, resp.Status)
// Advance the clock and start the container.
fClock.SetTime(time.Now())
expected.StartedAt = fClock.Now().UnixNano()
expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
_, err = ds.StartContainer(getTestCTX(), &runtimeapi.StartContainerRequest{ContainerId: id})
require.NoError(t, err)
resp, err = ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
require.NoError(t, err)
assert.Equal(t, expected, resp.Status)
// Advance the clock and stop the container.
fClock.SetTime(time.Now().Add(1 * time.Hour))
expected.FinishedAt = fClock.Now().UnixNano()
expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
expected.Reason = "Completed"
_, err = ds.StopContainer(getTestCTX(), &runtimeapi.StopContainerRequest{ContainerId: id, Timeout: int64(0)})
assert.NoError(t, err)
resp, err = ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
require.NoError(t, err)
assert.Equal(t, expected, resp.Status)
// Remove the container.
_, err = ds.RemoveContainer(getTestCTX(), &runtimeapi.RemoveContainerRequest{ContainerId: id})
require.NoError(t, err)
resp, err = ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
assert.Error(t, err, fmt.Sprintf("status of container: %+v", resp))
}
// TestContainerLogPath tests the container log creation logic.
func TestContainerLogPath(t *testing.T) {
ds, fDocker, _ := newTestDockerService()
podLogPath := "/pod/1"
containerLogPath := "0"
kubeletContainerLogPath := filepath.Join(podLogPath, containerLogPath)
sConfig := makeSandboxConfig("foo", "bar", "1", 0)
sConfig.LogDirectory = podLogPath
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, nil, nil)
config.LogPath = containerLogPath
const sandboxId = "sandboxid"
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxId, Config: config, SandboxConfig: sConfig}
createResp, err := ds.CreateContainer(getTestCTX(), req)
require.NoError(t, err)
id := createResp.ContainerId
// Check internal container log label
c, err := fDocker.InspectContainer(id)
assert.NoError(t, err)
assert.Equal(t, c.Config.Labels[containerLogPathLabelKey], kubeletContainerLogPath)
// Set docker container log path
dockerContainerLogPath := "/docker/container/log"
c.LogPath = dockerContainerLogPath
// Verify container log symlink creation
fakeOS := ds.os.(*containertest.FakeOS)
fakeOS.SymlinkFn = func(oldname, newname string) error {
assert.Equal(t, dockerContainerLogPath, oldname)
assert.Equal(t, kubeletContainerLogPath, newname)
return nil
}
_, err = ds.StartContainer(getTestCTX(), &runtimeapi.StartContainerRequest{ContainerId: id})
require.NoError(t, err)
_, err = ds.StopContainer(getTestCTX(), &runtimeapi.StopContainerRequest{ContainerId: id, Timeout: int64(0)})
require.NoError(t, err)
// Verify container log symlink deletion
// The symlink is also tentatively deleted at startup.
_, err = ds.RemoveContainer(getTestCTX(), &runtimeapi.RemoveContainerRequest{ContainerId: id})
require.NoError(t, err)
assert.Equal(t, []string{kubeletContainerLogPath, kubeletContainerLogPath}, fakeOS.Removes)
}
// TestContainerCreationConflict tests the logic that works around the docker
// container creation naming-conflict bug.
func TestContainerCreationConflict(t *testing.T) {
sConfig := makeSandboxConfig("foo", "bar", "1", 0)
config := makeContainerConfig(sConfig, "pause", "iamimage", 0, map[string]string{}, map[string]string{})
containerName := makeContainerName(sConfig, config)
const sandboxId = "sandboxid"
const containerId = "containerid"
conflictError := fmt.Errorf("Error response from daemon: Conflict. The name \"/%s\" is already in use by container %s. You have to remove (or rename) that container to be able to reuse that name.",
containerName, containerId)
noContainerError := fmt.Errorf("Error response from daemon: No such container: %s", containerId)
randomError := fmt.Errorf("random error")
for desc, test := range map[string]struct {
createError error
removeError error
expectError error
expectCalls []string
expectFields int
}{
"no create error": {
expectCalls: []string{"create"},
expectFields: 6,
},
"random create error": {
createError: randomError,
expectError: randomError,
expectCalls: []string{"create"},
},
"conflict create error with successful remove": {
createError: conflictError,
expectError: conflictError,
expectCalls: []string{"create", "remove"},
},
"conflict create error with random remove error": {
createError: conflictError,
removeError: randomError,
expectError: conflictError,
expectCalls: []string{"create", "remove"},
},
"conflict create error with no such container remove error": {
createError: conflictError,
removeError: noContainerError,
expectCalls: []string{"create", "remove", "create"},
expectFields: 7,
},
} {
t.Logf("TestCase: %s", desc)
ds, fDocker, _ := newTestDockerService()
if test.createError != nil {
fDocker.InjectError("create", test.createError)
}
if test.removeError != nil {
fDocker.InjectError("remove", test.removeError)
}
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxId, Config: config, SandboxConfig: sConfig}
createResp, err := ds.CreateContainer(getTestCTX(), req)
require.Equal(t, test.expectError, err)
assert.NoError(t, fDocker.AssertCalls(test.expectCalls))
if err == nil {
c, err := fDocker.InspectContainer(createResp.ContainerId)
assert.NoError(t, err)
assert.Len(t, strings.Split(c.Name, nameDelimiter), test.expectFields)
}
}
}


@@ -1,175 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
"net/http"
dockertypes "github.com/docker/docker/api/types"
dockerfilters "github.com/docker/docker/api/types/filters"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/golang/glog"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
// This file implements methods in ImageManagerService.
// ListImages lists existing images.
func (ds *dockerService) ListImages(_ context.Context, r *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
filter := r.GetFilter()
opts := dockertypes.ImageListOptions{}
if filter != nil {
if filter.GetImage().GetImage() != "" {
opts.Filters = dockerfilters.NewArgs()
opts.Filters.Add("reference", filter.GetImage().GetImage())
}
}
images, err := ds.client.ListImages(opts)
if err != nil {
return nil, err
}
result := make([]*runtimeapi.Image, 0, len(images))
for _, i := range images {
apiImage, err := imageToRuntimeAPIImage(&i)
if err != nil {
glog.V(5).Infof("Failed to convert docker API image %+v to runtime API image: %v", i, err)
continue
}
result = append(result, apiImage)
}
return &runtimeapi.ListImagesResponse{Images: result}, nil
}
// ImageStatus returns the status of the image; it returns nil if the image isn't present.
func (ds *dockerService) ImageStatus(_ context.Context, r *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
image := r.GetImage()
imageInspect, err := ds.client.InspectImageByRef(image.Image)
if err != nil {
if libdocker.IsImageNotFoundError(err) {
return &runtimeapi.ImageStatusResponse{}, nil
}
return nil, err
}
imageStatus, err := imageInspectToRuntimeAPIImage(imageInspect)
if err != nil {
return nil, err
}
res := runtimeapi.ImageStatusResponse{Image: imageStatus}
if r.GetVerbose() {
res.Info = imageInspect.Config.Labels
}
return &res, nil
}
// PullImage pulls an image with authentication config.
func (ds *dockerService) PullImage(_ context.Context, r *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
image := r.GetImage()
auth := r.GetAuth()
authConfig := dockertypes.AuthConfig{}
if auth != nil {
authConfig.Username = auth.Username
authConfig.Password = auth.Password
authConfig.ServerAddress = auth.ServerAddress
authConfig.IdentityToken = auth.IdentityToken
authConfig.RegistryToken = auth.RegistryToken
}
err := ds.client.PullImage(image.Image,
authConfig,
dockertypes.ImagePullOptions{},
)
if err != nil {
return nil, filterHTTPError(err, image.Image)
}
imageRef, err := getImageRef(ds.client, image.Image)
if err != nil {
return nil, err
}
return &runtimeapi.PullImageResponse{ImageRef: imageRef}, nil
}
// RemoveImage removes the image.
func (ds *dockerService) RemoveImage(_ context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
image := r.GetImage()
// If the image has multiple tags, we need to remove all the tags
// TODO: We assume image.Image is image ID here, which is true in the current implementation
// of kubelet, but we should still clarify this in CRI.
imageInspect, err := ds.client.InspectImageByID(image.Image)
if err == nil && imageInspect != nil && len(imageInspect.RepoTags) > 1 {
for _, tag := range imageInspect.RepoTags {
if _, err := ds.client.RemoveImage(tag, dockertypes.ImageRemoveOptions{PruneChildren: true}); err != nil && !libdocker.IsImageNotFoundError(err) {
return nil, err
}
}
return &runtimeapi.RemoveImageResponse{}, nil
}
// dockerclient.InspectImageByID doesn't work with digests or repo tags;
// it is safe to continue with the removal below, which covers those cases.
if err != nil && !libdocker.IsImageNotFoundError(err) {
return nil, err
}
_, err = ds.client.RemoveImage(image.Image, dockertypes.ImageRemoveOptions{PruneChildren: true})
if err != nil && !libdocker.IsImageNotFoundError(err) {
return nil, err
}
return &runtimeapi.RemoveImageResponse{}, nil
}
// getImageRef returns the image digest if one exists, or else the image ID.
func getImageRef(client libdocker.Interface, image string) (string, error) {
img, err := client.InspectImageByRef(image)
if err != nil {
return "", err
}
if img == nil {
return "", fmt.Errorf("unable to inspect image %s", image)
}
// Return the digest if one exists.
if len(img.RepoDigests) > 0 {
return img.RepoDigests[0], nil
}
return img.ID, nil
}
func filterHTTPError(err error, image string) error {
// docker/docker/pull/11314 prints detailed error info for docker pull.
// When the registry returns a 502, docker emits verbose HTML output including an inline SVG,
// which makes the output of `kubectl get pods` much harder to parse.
// Convert such verbose output into a concise error.
jerr, ok := err.(*jsonmessage.JSONError)
if ok && (jerr.Code == http.StatusBadGateway ||
jerr.Code == http.StatusServiceUnavailable ||
jerr.Code == http.StatusGatewayTimeout) {
return fmt.Errorf("RegistryUnavailable: %v", err)
}
return err
}
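// Illustrative sketch: a 502 from the registry arrives as a
// *jsonmessage.JSONError and is collapsed by filterHTTPError above into a
// concise error; the HTML message body here is hypothetical.
func exampleFilterHTTPError() error {
	verbose := &jsonmessage.JSONError{
		Code:    http.StatusBadGateway,
		Message: "<html>... inline svg ...</html>",
	}
	// Returns: RegistryUnavailable: <html>... inline svg ...</html>
	return filterHTTPError(verbose, "ubuntu")
}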

View File

@ -1,31 +0,0 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// ImageFsInfo returns information about the filesystem that is used to store images.
func (ds *dockerService) ImageFsInfo(_ context.Context, r *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
return nil, fmt.Errorf("not implemented")
}

View File

@ -1,75 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"testing"
dockertypes "github.com/docker/docker/api/types"
"github.com/docker/docker/pkg/jsonmessage"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
func TestRemoveImage(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
id := "1111"
fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{{ID: id, RepoTags: []string{"foo"}}})
ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: id}})
fakeDocker.AssertCallDetails(libdocker.NewCalledDetail("inspect_image", nil),
libdocker.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
}
func TestRemoveImageWithMultipleTags(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
id := "1111"
fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{{ID: id, RepoTags: []string{"foo", "bar"}}})
ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: id}})
fakeDocker.AssertCallDetails(libdocker.NewCalledDetail("inspect_image", nil),
libdocker.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
libdocker.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}))
}
func TestPullWithJSONError(t *testing.T) {
ds, fakeDocker, _ := newTestDockerService()
tests := map[string]struct {
image *runtimeapi.ImageSpec
err error
expectedError string
}{
"Json error": {
&runtimeapi.ImageSpec{Image: "ubuntu"},
&jsonmessage.JSONError{Code: 50, Message: "Json error"},
"Json error",
},
"Bad gateway": {
&runtimeapi.ImageSpec{Image: "ubuntu"},
&jsonmessage.JSONError{Code: 502, Message: "<!doctype html>\n<html class=\"no-js\" lang=\"\">\n <head>\n </head>\n <body>\n <h1>Oops, there was an error!</h1>\n <p>We have been contacted of this error, feel free to check out <a href=\"http://status.docker.com/\">status.docker.com</a>\n to see if there is a bigger issue.</p>\n\n </body>\n</html>"},
"RegistryUnavailable",
},
}
for key, test := range tests {
fakeDocker.InjectError("pull", test.err)
_, err := ds.PullImage(getTestCTX(), &runtimeapi.PullImageRequest{Image: test.image, Auth: &runtimeapi.AuthConfig{}})
require.Error(t, err, fmt.Sprintf("TestCase [%s]", key))
assert.Contains(t, err.Error(), test.expectedError)
}
}

View File

@ -1,31 +0,0 @@
// +build !linux,!windows
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// ImageFsInfo returns information about the filesystem that is used to store images.
func (ds *dockerService) ImageFsInfo(_ context.Context, r *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
return nil, fmt.Errorf("not implemented")
}

View File

@ -1,58 +0,0 @@
// +build windows
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"time"
"github.com/golang/glog"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/winstats"
)
// ImageFsInfo returns information about the filesystem that is used to store images.
func (ds *dockerService) ImageFsInfo(_ context.Context, _ *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
info, err := ds.client.Info()
if err != nil {
glog.Errorf("Failed to get docker info: %v", err)
return nil, err
}
statsClient := &winstats.StatsClient{}
fsinfo, err := statsClient.GetDirFsInfo(info.DockerRootDir)
if err != nil {
glog.Errorf("Failed to get dir fsInfo for %q: %v", info.DockerRootDir, err)
return nil, err
}
filesystems := []*runtimeapi.FilesystemUsage{
{
Timestamp: time.Now().UnixNano(),
UsedBytes: &runtimeapi.UInt64Value{Value: fsinfo.Usage},
InodesUsed: &runtimeapi.UInt64Value{Value: 0},
FsId: &runtimeapi.FilesystemIdentifier{
Mountpoint: info.DockerRootDir,
},
},
}
return &runtimeapi.ImageFsInfoResponse{ImageFilesystems: filesystems}, nil
}

View File

@ -1,123 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"io"
"strconv"
"time"
"github.com/armon/circbuf"
dockertypes "github.com/docker/docker/api/types"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubetypes "k8s.io/apimachinery/pkg/types"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
// DockerLegacyService interface embeds some legacy methods for backward compatibility.
// This file/interface will be removed in the near future. Do not modify or add
// more functions.
type DockerLegacyService interface {
// GetContainerLogs gets logs for a specific container.
GetContainerLogs(*v1.Pod, kubecontainer.ContainerID, *v1.PodLogOptions, io.Writer, io.Writer) error
// IsCRISupportedLogDriver checks whether the logging driver used by docker is
// supported by native CRI integration.
// TODO(resouer): remove this when deprecating unsupported log driver
IsCRISupportedLogDriver() (bool, error)
kuberuntime.LegacyLogProvider
}
// GetContainerLogs gets container logs directly from the docker daemon.
func (d *dockerService) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
container, err := d.client.InspectContainer(containerID.ID)
if err != nil {
return err
}
var since int64
if logOptions.SinceSeconds != nil {
t := metav1.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second)
since = t.Unix()
}
if logOptions.SinceTime != nil {
since = logOptions.SinceTime.Unix()
}
opts := dockertypes.ContainerLogsOptions{
ShowStdout: true,
ShowStderr: true,
Since: strconv.FormatInt(since, 10),
Timestamps: logOptions.Timestamps,
Follow: logOptions.Follow,
}
if logOptions.TailLines != nil {
opts.Tail = strconv.FormatInt(*logOptions.TailLines, 10)
}
sopts := libdocker.StreamOptions{
OutputStream: stdout,
ErrorStream: stderr,
RawTerminal: container.Config.Tty,
}
return d.client.Logs(containerID.ID, opts, sopts)
}
// GetContainerLogTail attempts to read up to MaxContainerTerminationMessageLogLength
// from the end of the log when docker is configured with a log driver other than json-file.
// It reads up to MaxContainerTerminationMessageLogLines lines.
func (d *dockerService) GetContainerLogTail(uid kubetypes.UID, name, namespace string, containerId kubecontainer.ContainerID) (string, error) {
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
// Although this is not a fully specified pod, dockerLegacyService.GetContainerLogs() currently ignores its pod param completely.
pod := &v1.Pod{
ObjectMeta: metav1.ObjectMeta{
UID: uid,
Name: name,
Namespace: namespace,
},
}
err := d.GetContainerLogs(pod, containerId, &v1.PodLogOptions{TailLines: &value}, buf, buf)
if err != nil {
return "", err
}
return buf.String(), nil
}
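// Illustrative sketch: circbuf.Buffer is an io.Writer that silently keeps
// only the last N bytes written, which is what caps the tail read above.
func exampleTailBuffer() string {
	buf, _ := circbuf.NewBuffer(8) // keep only the last 8 bytes
	buf.Write([]byte("hello world"))
	return buf.String() // "lo world"
}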
// criSupportedLogDrivers are log drivers supported by native CRI integration.
var criSupportedLogDrivers = []string{"json-file"}
// IsCRISupportedLogDriver checks whether the logging driver used by docker is
// supported by native CRI integration.
func (d *dockerService) IsCRISupportedLogDriver() (bool, error) {
info, err := d.client.Info()
if err != nil {
return false, fmt.Errorf("failed to get docker info: %v", err)
}
for _, driver := range criSupportedLogDrivers {
if info.LoggingDriver == driver {
return true, nil
}
}
return false, nil
}

View File

@ -1,29 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// ReopenContainerLog reopens the container log file.
func (ds *dockerService) ReopenContainerLog(_ context.Context, _ *runtimeapi.ReopenContainerLogRequest) (*runtimeapi.ReopenContainerLogResponse, error) {
return nil, fmt.Errorf("docker does not support reopening container log files")
}

View File

@ -1,712 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
"os"
"strings"
"time"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerfilters "github.com/docker/docker/api/types/filters"
"github.com/golang/glog"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/types"
)
const (
defaultSandboxImage = "k8s.gcr.io/pause:3.1"
// Various default sandbox resource requests/limits.
defaultSandboxCPUshares int64 = 2
// Name of the underlying container runtime
runtimeName = "docker"
)
var (
// Termination grace period
defaultSandboxGracePeriod = time.Duration(10) * time.Second
)
// Returns whether the sandbox network is ready, and whether the sandbox is known
func (ds *dockerService) getNetworkReady(podSandboxID string) (bool, bool) {
ds.networkReadyLock.Lock()
defer ds.networkReadyLock.Unlock()
ready, ok := ds.networkReady[podSandboxID]
return ready, ok
}
func (ds *dockerService) setNetworkReady(podSandboxID string, ready bool) {
ds.networkReadyLock.Lock()
defer ds.networkReadyLock.Unlock()
ds.networkReady[podSandboxID] = ready
}
func (ds *dockerService) clearNetworkReady(podSandboxID string) {
ds.networkReadyLock.Lock()
defer ds.networkReadyLock.Unlock()
delete(ds.networkReady, podSandboxID)
}
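// Illustrative sketch: the (ready, known) pair returned above encodes three
// states that callers branch on.
//
//	ready, known := ds.getNetworkReady(podSandboxID)
//	switch {
//	case !known: // sandbox has never been seen, e.g. created before a restart
//	case ready:  // networking was set up successfully
//	default:     // networking setup failed or was torn down
//	}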
// RunPodSandbox creates and starts a pod-level sandbox. Runtimes should ensure
// the sandbox is in ready state.
// For docker, PodSandbox is implemented by a container holding the network
// namespace for the pod.
// Note: docker doesn't use LogDirectory (yet).
func (ds *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
config := r.GetConfig()
// Step 1: Pull the image for the sandbox.
image := defaultSandboxImage
podSandboxImage := ds.podSandboxImage
if len(podSandboxImage) != 0 {
image = podSandboxImage
}
// NOTE: To use a custom sandbox image in a private repository, users need to configure the nodes with credentials properly.
// see: http://kubernetes.io/docs/user-guide/images/#configuring-nodes-to-authenticate-to-a-private-repository
// Only pull sandbox image when it's not present - v1.PullIfNotPresent.
if err := ensureSandboxImageExists(ds.client, image); err != nil {
return nil, err
}
// Step 2: Create the sandbox container.
createConfig, err := ds.makeSandboxDockerConfig(config, image)
if err != nil {
return nil, fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.Name, err)
}
createResp, err := ds.client.CreateContainer(*createConfig)
if err != nil {
createResp, err = recoverFromCreationConflictIfNeeded(ds.client, *createConfig, err)
}
if err != nil || createResp == nil {
return nil, fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.Name, err)
}
resp := &runtimeapi.RunPodSandboxResponse{PodSandboxId: createResp.ID}
ds.setNetworkReady(createResp.ID, false)
defer func(e *error) {
// Set networking ready depending on the error return of
// the parent function
if *e == nil {
ds.setNetworkReady(createResp.ID, true)
}
}(&err)
// Step 3: Create Sandbox Checkpoint.
if err = ds.checkpointManager.CreateCheckpoint(createResp.ID, constructPodSandboxCheckpoint(config)); err != nil {
return nil, err
}
// Step 4: Start the sandbox container.
// Assume kubelet's garbage collector will remove the sandbox later if
// startContainer failed.
err = ds.client.StartContainer(createResp.ID)
if err != nil {
return nil, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.Name, err)
}
// Rewrite the resolv.conf file generated by docker.
// NOTE: cluster DNS settings are no longer passed to the docker API at all
// (not only for pods with host networking): the resolver conf is overwritten
// after sandbox creation to override docker's behaviour. This resolv.conf
// file is shared by all containers of the same pod, and needs to be modified
// only once per pod.
if dnsConfig := config.GetDnsConfig(); dnsConfig != nil {
containerInfo, err := ds.client.InspectContainer(createResp.ID)
if err != nil {
return nil, fmt.Errorf("failed to inspect sandbox container for pod %q: %v", config.Metadata.Name, err)
}
if err := rewriteResolvFile(containerInfo.ResolvConfPath, dnsConfig.Servers, dnsConfig.Searches, dnsConfig.Options); err != nil {
return nil, fmt.Errorf("rewrite resolv.conf failed for pod %q: %v", config.Metadata.Name, err)
}
}
// Do not invoke network plugins if in hostNetwork mode.
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
return resp, nil
}
// Step 5: Setup networking for the sandbox.
// All pod networking is setup by a CNI plugin discovered at startup time.
// This plugin assigns the pod ip, sets up routes inside the sandbox,
// creates interfaces etc. In theory, its jurisdiction ends with pod
// sandbox networking, but it might insert iptables rules or open ports
// on the host as well, to satisfy parts of the pod spec that aren't
// recognized by the CNI standard yet.
cID := kubecontainer.BuildContainerID(runtimeName, createResp.ID)
err = ds.network.SetUpPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID, config.Annotations)
if err != nil {
errList := []error{fmt.Errorf("failed to set up sandbox container %q network for pod %q: %v", createResp.ID, config.Metadata.Name, err)}
// Ensure network resources are cleaned up even if the plugin
// succeeded but an error happened between that success and here.
err = ds.network.TearDownPod(config.GetMetadata().Namespace, config.GetMetadata().Name, cID)
if err != nil {
errList = append(errList, fmt.Errorf("failed to clean up sandbox container %q network for pod %q: %v", createResp.ID, config.Metadata.Name, err))
}
err = ds.client.StopContainer(createResp.ID, defaultSandboxGracePeriod)
if err != nil {
errList = append(errList, fmt.Errorf("failed to stop sandbox container %q for pod %q: %v", createResp.ID, config.Metadata.Name, err))
}
return resp, utilerrors.NewAggregate(errList)
}
return resp, nil
}
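// Illustrative sketch of the deferred error-pointer pattern used in
// RunPodSandbox above: the deferred closure sees the final value of err
// through the pointer, so the readiness flag is flipped only if every later
// step succeeded. Names here are hypothetical.
func exampleDeferredReadiness(step func() error) (ready bool, err error) {
	defer func(e *error) {
		if *e == nil {
			ready = true
		}
	}(&err)
	err = step()
	return ready, err
}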
// StopPodSandbox stops the sandbox. If there are any running containers in the
// sandbox, they should be force terminated.
// TODO: This function blocks sandbox teardown on networking teardown. Is it
// better to cut our losses, assuming an out-of-band GC routine will clean up
// after us?
func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
var namespace, name string
var hostNetwork bool
podSandboxID := r.PodSandboxId
resp := &runtimeapi.StopPodSandboxResponse{}
// Try to retrieve minimal sandbox information from docker daemon or sandbox checkpoint.
inspectResult, metadata, statusErr := ds.getPodSandboxDetails(podSandboxID)
if statusErr == nil {
namespace = metadata.Namespace
name = metadata.Name
hostNetwork = (networkNamespaceMode(inspectResult) == runtimeapi.NamespaceMode_NODE)
} else {
checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
checkpointErr := ds.checkpointManager.GetCheckpoint(podSandboxID, checkpoint)
// Proceed if neither the sandbox container nor the checkpoint could be found. This means the
// following actions will have only the sandbox ID, without the pod namespace and name information.
// Return an error if any unexpected error is encountered.
if checkpointErr != nil {
if checkpointErr != errors.ErrCheckpointNotFound {
err := ds.checkpointManager.RemoveCheckpoint(podSandboxID)
if err != nil {
glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, err)
}
}
if libdocker.IsContainerNotFoundError(statusErr) {
glog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+
"Proceed without further sandbox information.", podSandboxID)
} else {
return nil, utilerrors.NewAggregate([]error{
fmt.Errorf("failed to get checkpoint for sandbox %q: %v", podSandboxID, checkpointErr),
fmt.Errorf("failed to get sandbox status: %v", statusErr)})
}
} else {
_, name, namespace, _, hostNetwork = checkpoint.GetData()
}
}
// WARNING: The following operations make these assumptions:
// 1. kubelet will retry on any error returned by StopPodSandbox.
// 2. tearing down the network and stopping the sandbox container can succeed in any order.
// This depends on the implementation details of the network plugin and proper error handling.
// For kubenet, if tearing down the network failed and the sandbox container is stopped, kubelet
// will retry. On retry, kubenet will not be able to retrieve the network namespace of the sandbox
// since it is stopped. With an empty network namespace, the CNI bridge plugin will conduct a
// best-effort clean up and will not return an error.
errList := []error{}
ready, ok := ds.getNetworkReady(podSandboxID)
if !hostNetwork && (ready || !ok) {
// Only tear down the pod network if we haven't done so already
cID := kubecontainer.BuildContainerID(runtimeName, podSandboxID)
err := ds.network.TearDownPod(namespace, name, cID)
if err == nil {
ds.setNetworkReady(podSandboxID, false)
} else {
errList = append(errList, err)
}
}
if err := ds.client.StopContainer(podSandboxID, defaultSandboxGracePeriod); err != nil {
// Do not return error if the container does not exist
if !libdocker.IsContainerNotFoundError(err) {
glog.Errorf("Failed to stop sandbox %q: %v", podSandboxID, err)
errList = append(errList, err)
} else {
// remove the checkpoint for any sandbox that is not found in the runtime
ds.checkpointManager.RemoveCheckpoint(podSandboxID)
}
}
if len(errList) == 0 {
return resp, nil
}
// TODO: Stop all running containers in the sandbox.
return nil, utilerrors.NewAggregate(errList)
}
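// Illustrative sketch: utilerrors.NewAggregate collapses the failures
// accumulated above into a single error, and returns nil for an empty list.
func exampleAggregate() {
	errList := []error{
		fmt.Errorf("failed to tear down network"),
		fmt.Errorf("failed to stop container"),
	}
	err := utilerrors.NewAggregate(errList)
	_ = err // err.Error() == "[failed to tear down network, failed to stop container]"
	_ = utilerrors.NewAggregate(nil) // empty input yields nil
}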
// RemovePodSandbox removes the sandbox. If there are running containers in the
// sandbox, they should be forcibly removed.
func (ds *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
podSandboxID := r.PodSandboxId
var errs []error
opts := dockertypes.ContainerListOptions{All: true}
opts.Filters = dockerfilters.NewArgs()
f := newDockerFilter(&opts.Filters)
f.AddLabel(sandboxIDLabelKey, podSandboxID)
containers, err := ds.client.ListContainers(opts)
if err != nil {
errs = append(errs, err)
}
// Remove all containers in the sandbox.
for i := range containers {
if _, err := ds.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ContainerId: containers[i].ID}); err != nil && !libdocker.IsContainerNotFoundError(err) {
errs = append(errs, err)
}
}
// Remove the sandbox container.
err = ds.client.RemoveContainer(podSandboxID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
if err == nil || libdocker.IsContainerNotFoundError(err) {
// Only clear network ready when the sandbox has actually been
// removed from docker or doesn't exist
ds.clearNetworkReady(podSandboxID)
} else {
errs = append(errs, err)
}
// Remove the checkpoint of the sandbox.
if err := ds.checkpointManager.RemoveCheckpoint(podSandboxID); err != nil {
errs = append(errs, err)
}
if len(errs) == 0 {
return &runtimeapi.RemovePodSandboxResponse{}, nil
}
return nil, utilerrors.NewAggregate(errs)
}
// getIPFromPlugin interrogates the network plugin for an IP.
func (ds *dockerService) getIPFromPlugin(sandbox *dockertypes.ContainerJSON) (string, error) {
metadata, err := parseSandboxName(sandbox.Name)
if err != nil {
return "", err
}
msg := fmt.Sprintf("Couldn't find network status for %s/%s through plugin", metadata.Namespace, metadata.Name)
cID := kubecontainer.BuildContainerID(runtimeName, sandbox.ID)
networkStatus, err := ds.network.GetPodNetworkStatus(metadata.Namespace, metadata.Name, cID)
if err != nil {
return "", err
}
if networkStatus == nil {
return "", fmt.Errorf("%v: invalid network status for", msg)
}
return networkStatus.IP.String(), nil
}
// getIP returns the IP given the output of `docker inspect` on a pod sandbox,
// first interrogating any registered plugins, then simply trusting the IP
// in the sandbox itself. We look for an IPv4 address before IPv6.
func (ds *dockerService) getIP(podSandboxID string, sandbox *dockertypes.ContainerJSON) string {
if sandbox.NetworkSettings == nil {
return ""
}
if networkNamespaceMode(sandbox) == runtimeapi.NamespaceMode_NODE {
// For sandboxes using host network, the shim is not responsible for
// reporting the IP.
return ""
}
// Don't bother getting IP if the pod is known and networking isn't ready
ready, ok := ds.getNetworkReady(podSandboxID)
if ok && !ready {
return ""
}
ip, err := ds.getIPFromPlugin(sandbox)
if err == nil {
return ip
}
// TODO: trusting the docker ip is not a great idea. However docker uses
// eth0 by default and so does CNI, so if we find a docker IP here, we
// conclude that the plugin must have failed setup, or forgotten its ip.
// This is not a sensible assumption for plugins across the board, but if
// a plugin doesn't want this behavior, it can throw an error.
if sandbox.NetworkSettings.IPAddress != "" {
return sandbox.NetworkSettings.IPAddress
}
if sandbox.NetworkSettings.GlobalIPv6Address != "" {
return sandbox.NetworkSettings.GlobalIPv6Address
}
// If all else fails, warn but don't return an error, as pod status
// should generally not return anything except fatal errors
// FIXME: handle network errors by restarting the pod somehow?
glog.Warningf("failed to read pod IP from plugin/docker: %v", err)
return ""
}
// getPodSandboxDetails returns the inspect-container response and the sandbox metadata.
func (ds *dockerService) getPodSandboxDetails(podSandboxID string) (*dockertypes.ContainerJSON, *runtimeapi.PodSandboxMetadata, error) {
resp, err := ds.client.InspectContainer(podSandboxID)
if err != nil {
return nil, nil, err
}
metadata, err := parseSandboxName(resp.Name)
if err != nil {
return nil, nil, err
}
return resp, metadata, nil
}
// PodSandboxStatus returns the status of the PodSandbox.
func (ds *dockerService) PodSandboxStatus(ctx context.Context, req *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
podSandboxID := req.PodSandboxId
r, metadata, err := ds.getPodSandboxDetails(podSandboxID)
if err != nil {
return nil, err
}
// Parse the timestamps.
createdAt, _, _, err := getContainerTimestamps(r)
if err != nil {
return nil, fmt.Errorf("failed to parse timestamp for container %q: %v", podSandboxID, err)
}
ct := createdAt.UnixNano()
// Translate container to sandbox state.
state := runtimeapi.PodSandboxState_SANDBOX_NOTREADY
if r.State.Running {
state = runtimeapi.PodSandboxState_SANDBOX_READY
}
var IP string
// TODO: Remove this when sandbox is available on windows
// This is a workaround for windows, where sandbox is not in use, and pod IP is determined through containers belonging to the Pod.
if IP = ds.determinePodIPBySandboxID(podSandboxID); IP == "" {
IP = ds.getIP(podSandboxID, r)
}
labels, annotations := extractLabels(r.Config.Labels)
status := &runtimeapi.PodSandboxStatus{
Id: r.ID,
State: state,
CreatedAt: ct,
Metadata: metadata,
Labels: labels,
Annotations: annotations,
Network: &runtimeapi.PodSandboxNetworkStatus{
Ip: IP,
},
Linux: &runtimeapi.LinuxPodSandboxStatus{
Namespaces: &runtimeapi.Namespace{
Options: &runtimeapi.NamespaceOption{
Network: networkNamespaceMode(r),
Pid: pidNamespaceMode(r),
Ipc: ipcNamespaceMode(r),
},
},
},
}
return &runtimeapi.PodSandboxStatusResponse{Status: status}, nil
}
// ListPodSandbox returns a list of sandboxes.
func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
filter := r.GetFilter()
// By default, list all containers whether they are running or not.
opts := dockertypes.ContainerListOptions{All: true}
filterOutReadySandboxes := false
opts.Filters = dockerfilters.NewArgs()
f := newDockerFilter(&opts.Filters)
// Add filter to select only sandbox containers.
f.AddLabel(containerTypeLabelKey, containerTypeLabelSandbox)
if filter != nil {
if filter.Id != "" {
f.Add("id", filter.Id)
}
if filter.State != nil {
if filter.GetState().State == runtimeapi.PodSandboxState_SANDBOX_READY {
// Only list running containers.
opts.All = false
} else {
// runtimeapi.PodSandboxState_SANDBOX_NOTREADY can mean the
// container is in any of the non-running state (e.g., created,
// exited). We can't tell docker to filter out running
// containers directly, so we'll need to filter them out
// ourselves after getting the results.
filterOutReadySandboxes = true
}
}
if filter.LabelSelector != nil {
for k, v := range filter.LabelSelector {
f.AddLabel(k, v)
}
}
}
// Make sure we get the list of checkpoints first so that we don't include
// new PodSandboxes that are being created right now.
var err error
checkpoints := []string{}
if filter == nil {
checkpoints, err = ds.checkpointManager.ListCheckpoints()
if err != nil {
glog.Errorf("Failed to list checkpoints: %v", err)
}
}
containers, err := ds.client.ListContainers(opts)
if err != nil {
return nil, err
}
// Convert docker containers to runtime api sandboxes.
result := []*runtimeapi.PodSandbox{}
// using map as set
sandboxIDs := make(map[string]bool)
for i := range containers {
c := containers[i]
converted, err := containerToRuntimeAPISandbox(&c)
if err != nil {
glog.V(4).Infof("Unable to convert docker to runtime API sandbox %+v: %v", c, err)
continue
}
if filterOutReadySandboxes && converted.State == runtimeapi.PodSandboxState_SANDBOX_READY {
continue
}
sandboxIDs[converted.Id] = true
result = append(result, converted)
}
// Include sandboxes that could only be found via their checkpoints if no filter is applied.
// These PodSandboxes will only include the PodSandboxID, Name, and Namespace,
// and will be in the PodSandboxState_SANDBOX_NOTREADY state.
for _, id := range checkpoints {
if _, ok := sandboxIDs[id]; ok {
continue
}
checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
err := ds.checkpointManager.GetCheckpoint(id, checkpoint)
if err != nil {
glog.Errorf("Failed to retrieve checkpoint for sandbox %q: %v", id, err)
if err == errors.ErrCorruptCheckpoint {
err = ds.checkpointManager.RemoveCheckpoint(id)
if err != nil {
glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", id, err)
}
}
continue
}
result = append(result, checkpointToRuntimeAPISandbox(id, checkpoint))
}
return &runtimeapi.ListPodSandboxResponse{Items: result}, nil
}
func (ds *dockerService) applySandboxResources(hc *dockercontainer.HostConfig, lc *runtimeapi.LinuxPodSandboxConfig) error {
hc.Resources = dockercontainer.Resources{
MemorySwap: DefaultMemorySwap(),
CPUShares: defaultSandboxCPUshares,
// Use docker's default cpu quota/period.
}
if lc != nil {
// Apply Cgroup options.
cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent)
if err != nil {
return err
}
hc.CgroupParent = cgroupParent
}
return nil
}
// makeSandboxDockerConfig returns dockertypes.ContainerCreateConfig based on runtimeapi.PodSandboxConfig.
func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig, image string) (*dockertypes.ContainerCreateConfig, error) {
// Merge annotations and labels because docker supports only labels.
labels := makeLabels(c.GetLabels(), c.GetAnnotations())
// Apply a label to distinguish sandboxes from regular containers.
labels[containerTypeLabelKey] = containerTypeLabelSandbox
// Apply a container name label for the infra container. This is used in summary v1.
// TODO(random-liu): Deprecate this label once container metrics are obtained directly from CRI.
labels[types.KubernetesContainerNameLabel] = sandboxContainerName
hc := &dockercontainer.HostConfig{}
createConfig := &dockertypes.ContainerCreateConfig{
Name: makeSandboxName(c),
Config: &dockercontainer.Config{
Hostname: c.Hostname,
// TODO: Handle environment variables.
Image: image,
Labels: labels,
},
HostConfig: hc,
}
// Apply platform-specific options.
if err := ds.applySandboxPlatformOptions(hc, c, createConfig, image, securityOptSeparator); err != nil {
return nil, err
}
// Set port mappings.
exposedPorts, portBindings := makePortsAndBindings(c.GetPortMappings())
createConfig.Config.ExposedPorts = exposedPorts
hc.PortBindings = portBindings
// TODO: Get rid of the dependency on kubelet internal package.
hc.OomScoreAdj = qos.PodInfraOOMAdj
// Apply resource options.
if err := ds.applySandboxResources(hc, c.GetLinux()); err != nil {
return nil, err
}
// Set security options.
securityOpts, err := ds.getSecurityOpts(c.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator)
if err != nil {
return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.Name, err)
}
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
applyExperimentalCreateConfig(createConfig, c.Annotations)
return createConfig, nil
}
// networkNamespaceMode returns the network runtimeapi.NamespaceMode for this container.
// Supports: POD, NODE
func networkNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
if container != nil && container.HostConfig != nil && string(container.HostConfig.NetworkMode) == namespaceModeHost {
return runtimeapi.NamespaceMode_NODE
}
return runtimeapi.NamespaceMode_POD
}
// pidNamespaceMode returns the PID runtimeapi.NamespaceMode for this container.
// Supports: CONTAINER, NODE
// TODO(verb): add support for POD PID namespace sharing
func pidNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
if container != nil && container.HostConfig != nil && string(container.HostConfig.PidMode) == namespaceModeHost {
return runtimeapi.NamespaceMode_NODE
}
return runtimeapi.NamespaceMode_CONTAINER
}
// ipcNamespaceMode returns the IPC runtimeapi.NamespaceMode for this container.
// Supports: POD, NODE
func ipcNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
if container != nil && container.HostConfig != nil && string(container.HostConfig.IpcMode) == namespaceModeHost {
return runtimeapi.NamespaceMode_NODE
}
return runtimeapi.NamespaceMode_POD
}
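// Illustrative sketch: a sandbox created with docker host networking maps to
// NamespaceMode_NODE, while unset PID/IPC modes fall back to their defaults.
//
//	c := &dockertypes.ContainerJSON{
//		ContainerJSONBase: &dockertypes.ContainerJSONBase{
//			HostConfig: &dockercontainer.HostConfig{NetworkMode: namespaceModeHost},
//		},
//	}
//	networkNamespaceMode(c) // NamespaceMode_NODE
//	pidNamespaceMode(c)     // NamespaceMode_CONTAINER
//	ipcNamespaceMode(c)     // NamespaceMode_POD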
func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) checkpointmanager.Checkpoint {
data := CheckpointData{}
for _, pm := range config.GetPortMappings() {
proto := toCheckpointProtocol(pm.Protocol)
data.PortMappings = append(data.PortMappings, &PortMapping{
HostPort: &pm.HostPort,
ContainerPort: &pm.ContainerPort,
Protocol: &proto,
})
}
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
data.HostNetwork = true
}
return NewPodSandboxCheckpoint(config.Metadata.Namespace, config.Metadata.Name, &data)
}
func toCheckpointProtocol(protocol runtimeapi.Protocol) Protocol {
switch protocol {
case runtimeapi.Protocol_TCP:
return protocolTCP
case runtimeapi.Protocol_UDP:
return protocolUDP
}
glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
return protocolTCP
}
// rewriteResolvFile rewrites resolv.conf file generated by docker.
func rewriteResolvFile(resolvFilePath string, dns []string, dnsSearch []string, dnsOptions []string) error {
if len(resolvFilePath) == 0 {
glog.Errorf("ResolvConfPath is empty.")
return nil
}
if _, err := os.Stat(resolvFilePath); os.IsNotExist(err) {
return fmt.Errorf("ResolvConfPath %q does not exist", resolvFilePath)
}
var resolvFileContent []string
for _, srv := range dns {
resolvFileContent = append(resolvFileContent, "nameserver "+srv)
}
if len(dnsSearch) > 0 {
resolvFileContent = append(resolvFileContent, "search "+strings.Join(dnsSearch, " "))
}
if len(dnsOptions) > 0 {
resolvFileContent = append(resolvFileContent, "options "+strings.Join(dnsOptions, " "))
}
if len(resolvFileContent) > 0 {
resolvFileContentStr := strings.Join(resolvFileContent, "\n")
resolvFileContentStr += "\n"
glog.V(4).Infof("Will attempt to re-write config file %s with: \n%s", resolvFilePath, resolvFileContent)
if err := rewriteFile(resolvFilePath, resolvFileContentStr); err != nil {
glog.Errorf("resolv.conf could not be updated: %v", err)
return err
}
}
return nil
}
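// Illustrative sketch: with hypothetical cluster DNS settings
//
//	dns        = ["10.96.0.10"]
//	dnsSearch  = ["default.svc.cluster.local", "svc.cluster.local", "cluster.local"]
//	dnsOptions = ["ndots:5"]
//
// rewriteResolvFile above would write:
//
//	nameserver 10.96.0.10
//	search default.svc.cluster.local svc.cluster.local cluster.local
//	options ndots:5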
func rewriteFile(filePath, stringToWrite string) error {
f, err := os.OpenFile(filePath, os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
return err
}
defer f.Close()
_, err = f.WriteString(stringToWrite)
return err
}

View File

@ -1,42 +0,0 @@
// +build !windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// applySandboxPlatformOptions applies platform specific options to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxPlatformOptions(hc *dockercontainer.HostConfig, config *runtimeapi.PodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error {
lc := config.GetLinux()
if lc == nil {
return nil
}
// Apply security context.
if err := applySandboxSecurityContext(lc, createConfig.Config, hc, ds.network, separator); err != nil {
return err
}
// Set sysctls.
hc.Sysctls = lc.Sysctls
return nil
}

View File

@ -1,306 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"errors"
"fmt"
"math/rand"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
"k8s.io/kubernetes/pkg/kubelet/types"
)
// A helper to create a basic config.
func makeSandboxConfig(name, namespace, uid string, attempt uint32) *runtimeapi.PodSandboxConfig {
return makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid, attempt, map[string]string{}, map[string]string{})
}
func makeSandboxConfigWithLabelsAndAnnotations(name, namespace, uid string, attempt uint32, labels, annotations map[string]string) *runtimeapi.PodSandboxConfig {
return &runtimeapi.PodSandboxConfig{
Metadata: &runtimeapi.PodSandboxMetadata{
Name: name,
Namespace: namespace,
Uid: uid,
Attempt: attempt,
},
Labels: labels,
Annotations: annotations,
}
}
// TestListSandboxes creates several sandboxes and then lists them to check
// whether the correct metadata, states, and labels are returned.
func TestListSandboxes(t *testing.T) {
ds, _, fakeClock := newTestDockerService()
name, namespace := "foo", "bar"
configs := []*runtimeapi.PodSandboxConfig{}
for i := 0; i < 3; i++ {
c := makeSandboxConfigWithLabelsAndAnnotations(fmt.Sprintf("%s%d", name, i),
fmt.Sprintf("%s%d", namespace, i), fmt.Sprintf("%d", i), 0,
map[string]string{"label": fmt.Sprintf("foo%d", i)},
map[string]string{"annotation": fmt.Sprintf("bar%d", i)},
)
configs = append(configs, c)
}
expected := []*runtimeapi.PodSandbox{}
state := runtimeapi.PodSandboxState_SANDBOX_READY
var createdAt int64 = fakeClock.Now().UnixNano()
for i := range configs {
runResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: configs[i]})
require.NoError(t, err)
// Prepend to the expected list because ListPodSandbox returns
// the most recent sandbox first.
expected = append([]*runtimeapi.PodSandbox{{
Metadata: configs[i].Metadata,
Id: runResp.PodSandboxId,
State: state,
CreatedAt: createdAt,
Labels: configs[i].Labels,
Annotations: configs[i].Annotations,
}}, expected...)
}
listResp, err := ds.ListPodSandbox(getTestCTX(), &runtimeapi.ListPodSandboxRequest{})
require.NoError(t, err)
assert.Len(t, listResp.Items, len(expected))
assert.Equal(t, expected, listResp.Items)
}
// TestSandboxStatus tests the basic lifecycle operations and verifies that
// the status returned reflects the operations performed.
func TestSandboxStatus(t *testing.T) {
ds, fDocker, fClock := newTestDockerService()
labels := map[string]string{"label": "foobar1"}
annotations := map[string]string{"annotation": "abc"}
config := makeSandboxConfigWithLabelsAndAnnotations("foo", "bar", "1", 0, labels, annotations)
r := rand.New(rand.NewSource(0)).Uint32()
podIP := fmt.Sprintf("10.%d.%d.%d", byte(r>>16), byte(r>>8), byte(r))
state := runtimeapi.PodSandboxState_SANDBOX_READY
ct := int64(0)
expected := &runtimeapi.PodSandboxStatus{
State: state,
CreatedAt: ct,
Metadata: config.Metadata,
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: podIP},
Linux: &runtimeapi.LinuxPodSandboxStatus{
Namespaces: &runtimeapi.Namespace{
Options: &runtimeapi.NamespaceOption{
Pid: runtimeapi.NamespaceMode_CONTAINER,
},
},
},
Labels: labels,
Annotations: annotations,
}
// Create the sandbox.
fClock.SetTime(time.Now())
expected.CreatedAt = fClock.Now().UnixNano()
runResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: config})
require.NoError(t, err)
id := runResp.PodSandboxId
// Check internal labels
c, err := fDocker.InspectContainer(id)
assert.NoError(t, err)
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelSandbox)
assert.Equal(t, c.Config.Labels[types.KubernetesContainerNameLabel], sandboxContainerName)
expected.Id = id // ID is only known after the creation.
statusResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: id})
require.NoError(t, err)
assert.Equal(t, expected, statusResp.Status)
// Stop the sandbox.
expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
_, err = ds.StopPodSandbox(getTestCTX(), &runtimeapi.StopPodSandboxRequest{PodSandboxId: id})
require.NoError(t, err)
// IP not valid after sandbox stop
expected.Network.Ip = ""
statusResp, err = ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: id})
require.NoError(t, err)
assert.Equal(t, expected, statusResp.Status)
// Remove the container.
_, err = ds.RemovePodSandbox(getTestCTX(), &runtimeapi.RemovePodSandboxRequest{PodSandboxId: id})
require.NoError(t, err)
statusResp, err = ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: id})
assert.Error(t, err, fmt.Sprintf("status of sandbox: %+v", statusResp))
}
// TestSandboxStatusAfterRestart tests that retrieving sandbox status returns
// an IP address even if RunPodSandbox() was not yet called for this pod, as
// would happen on kubelet restart
func TestSandboxStatusAfterRestart(t *testing.T) {
ds, _, fClock := newTestDockerService()
config := makeSandboxConfig("foo", "bar", "1", 0)
r := rand.New(rand.NewSource(0)).Uint32()
podIP := fmt.Sprintf("10.%d.%d.%d", byte(r>>16), byte(r>>8), byte(r))
state := runtimeapi.PodSandboxState_SANDBOX_READY
ct := int64(0)
expected := &runtimeapi.PodSandboxStatus{
State: state,
CreatedAt: ct,
Metadata: config.Metadata,
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: podIP},
Linux: &runtimeapi.LinuxPodSandboxStatus{
Namespaces: &runtimeapi.Namespace{
Options: &runtimeapi.NamespaceOption{
Pid: runtimeapi.NamespaceMode_CONTAINER,
},
},
},
Labels: map[string]string{},
Annotations: map[string]string{},
}
// Create the sandbox.
fClock.SetTime(time.Now())
expected.CreatedAt = fClock.Now().UnixNano()
createConfig, err := ds.makeSandboxDockerConfig(config, defaultSandboxImage)
assert.NoError(t, err)
createResp, err := ds.client.CreateContainer(*createConfig)
assert.NoError(t, err)
err = ds.client.StartContainer(createResp.ID)
assert.NoError(t, err)
// Check status without RunPodSandbox() having set up networking
expected.Id = createResp.ID // ID is only known after the creation.
statusResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: createResp.ID})
require.NoError(t, err)
assert.Equal(t, expected, statusResp.Status)
}
// TestNetworkPluginInvocation checks that the right SetUpPod and TearDownPod
// calls are made when we run/stop a sandbox.
func TestNetworkPluginInvocation(t *testing.T) {
ds, _, _ := newTestDockerService()
mockPlugin := newTestNetworkPlugin(t)
ds.network = network.NewPluginManager(mockPlugin)
defer mockPlugin.Finish()
name := "foo0"
ns := "bar0"
c := makeSandboxConfigWithLabelsAndAnnotations(
name, ns, "0", 0,
map[string]string{"label": name},
map[string]string{"annotation": ns},
)
cID := kubecontainer.ContainerID{Type: runtimeName, ID: libdocker.GetFakeContainerID(fmt.Sprintf("/%v", makeSandboxName(c)))}
mockPlugin.EXPECT().Name().Return("mockNetworkPlugin").AnyTimes()
setup := mockPlugin.EXPECT().SetUpPod(ns, name, cID)
mockPlugin.EXPECT().TearDownPod(ns, name, cID).After(setup)
_, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: c})
require.NoError(t, err)
_, err = ds.StopPodSandbox(getTestCTX(), &runtimeapi.StopPodSandboxRequest{PodSandboxId: cID.ID})
require.NoError(t, err)
}
// TestHostNetworkPluginInvocation checks that *no* SetUp/TearDown calls happen
// for host network sandboxes.
func TestHostNetworkPluginInvocation(t *testing.T) {
ds, _, _ := newTestDockerService()
mockPlugin := newTestNetworkPlugin(t)
ds.network = network.NewPluginManager(mockPlugin)
defer mockPlugin.Finish()
name := "foo0"
ns := "bar0"
c := makeSandboxConfigWithLabelsAndAnnotations(
name, ns, "0", 0,
map[string]string{"label": name},
map[string]string{"annotation": ns},
)
c.Linux = &runtimeapi.LinuxPodSandboxConfig{
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
NamespaceOptions: &runtimeapi.NamespaceOption{
Network: runtimeapi.NamespaceMode_NODE,
},
},
}
cID := kubecontainer.ContainerID{Type: runtimeName, ID: libdocker.GetFakeContainerID(fmt.Sprintf("/%v", makeSandboxName(c)))}
// No calls to network plugin are expected
_, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: c})
require.NoError(t, err)
_, err = ds.StopPodSandbox(getTestCTX(), &runtimeapi.StopPodSandboxRequest{PodSandboxId: cID.ID})
require.NoError(t, err)
}
// TestSetUpPodFailure checks that the sandbox is reported as not ready when it
// hits a SetUpPod failure.
func TestSetUpPodFailure(t *testing.T) {
ds, _, _ := newTestDockerService()
mockPlugin := newTestNetworkPlugin(t)
ds.network = network.NewPluginManager(mockPlugin)
defer mockPlugin.Finish()
name := "foo0"
ns := "bar0"
c := makeSandboxConfigWithLabelsAndAnnotations(
name, ns, "0", 0,
map[string]string{"label": name},
map[string]string{"annotation": ns},
)
cID := kubecontainer.ContainerID{Type: runtimeName, ID: libdocker.GetFakeContainerID(fmt.Sprintf("/%v", makeSandboxName(c)))}
mockPlugin.EXPECT().Name().Return("mockNetworkPlugin").AnyTimes()
mockPlugin.EXPECT().SetUpPod(ns, name, cID).Return(errors.New("setup pod error")).AnyTimes()
// If SetUpPod() fails, we expect TearDownPod() to immediately follow
mockPlugin.EXPECT().TearDownPod(ns, name, cID)
// Even though the network plugin returns no error here, dockershim should still report the sandbox as not ready.
mockPlugin.EXPECT().GetPodNetworkStatus(ns, name, cID).Return(&network.PodNetworkStatus{IP: net.IP("127.0.0.01")}, nil).AnyTimes()
t.Logf("RunPodSandbox should return error")
_, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: c})
assert.Error(t, err)
t.Logf("PodSandboxStatus should be not ready")
statusResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: cID.ID})
require.NoError(t, err)
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, statusResp.Status.State)
t.Logf("ListPodSandbox should also show not ready")
listResp, err := ds.ListPodSandbox(getTestCTX(), &runtimeapi.ListPodSandboxRequest{})
require.NoError(t, err)
var sandbox *runtimeapi.PodSandbox
for _, s := range listResp.Items {
if s.Id == cID.ID {
sandbox = s
break
}
}
assert.NotNil(t, sandbox)
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, sandbox.State)
}

View File

@ -1,39 +0,0 @@
// +build windows
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// applySandboxPlatformOptions applies platform specific options to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
func (ds *dockerService) applySandboxPlatformOptions(hc *dockercontainer.HostConfig, config *runtimeapi.PodSandboxConfig, createConfig *dockertypes.ContainerCreateConfig, image string, separator rune) error {
dnsConfig := config.GetDnsConfig()
if dnsConfig == nil {
return nil
}
// Setup DNS.
hc.DNS = dnsConfig.GetServers()
hc.DNSSearch = dnsConfig.GetSearches()
hc.DNSOptions = dnsConfig.GetOptions()
return nil
}

View File

@ -1,553 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
"net/http"
"path"
"path/filepath"
"sync"
"time"
"github.com/blang/semver"
dockertypes "github.com/docker/docker/api/types"
"github.com/golang/glog"
"k8s.io/api/core/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager/errors"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/cm"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
"k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
)
const (
dockerRuntimeName = "docker"
kubeAPIVersion = "0.1.0"
// String used to detect docker host mode for various namespaces (e.g.
// networking). Must match the value returned by docker inspect -f
// '{{.HostConfig.NetworkMode}}'.
namespaceModeHost = "host"
dockerNetNSFmt = "/proc/%v/ns/net"
// Internal docker labels used to identify whether a container is a sandbox
// or a regular container.
// TODO: This is not backward compatible with older containers. We will
// need to add filtering based on names.
containerTypeLabelKey = "io.kubernetes.docker.type"
containerTypeLabelSandbox = "podsandbox"
containerTypeLabelContainer = "container"
containerLogPathLabelKey = "io.kubernetes.container.logpath"
sandboxIDLabelKey = "io.kubernetes.sandbox.id"
// The expiration time of version cache.
versionCacheTTL = 60 * time.Second
defaultCgroupDriver = "cgroupfs"
// TODO: https://github.com/kubernetes/kubernetes/pull/31169 provides experimental
// defaulting of host user namespace that may be enabled when the docker daemon
// is using remapped UIDs.
// Dockershim should provide detection support for a remapping environment.
// This should be included in the feature proposal. Defaulting may still occur according
// to kubelet behavior and system settings, in addition to any API flags that may be introduced.
)
// CRIService includes all methods necessary for a CRI server.
type CRIService interface {
runtimeapi.RuntimeServiceServer
runtimeapi.ImageServiceServer
Start() error
}
// DockerService is an interface that embeds the new RuntimeService and
// ImageService interfaces.
type DockerService interface {
CRIService
// For serving streaming calls.
http.Handler
// For supporting legacy features.
DockerLegacyService
}
// NetworkPluginSettings is the subset of kubelet runtime args we pass
// to the container runtime shim so it can probe for network plugins.
// In the future we will feed these directly to a standalone container
// runtime process.
type NetworkPluginSettings struct {
// HairpinMode is best described by comments surrounding the kubelet arg
HairpinMode kubeletconfig.HairpinMode
// NonMasqueradeCIDR is the range of ips which should *not* be included
// in any MASQUERADE rules applied by the plugin
NonMasqueradeCIDR string
// PluginName is the name of the plugin the runtime shim probes for.
PluginName string
// PluginBinDirString is a comma-delimited list of directories in which
// the binaries for the plugin with PluginName may be found.
PluginBinDirString string
// PluginBinDirs is an array of directories in which the binaries for
// the plugin with PluginName may be found. The admin is responsible for
// provisioning these binaries before-hand.
PluginBinDirs []string
// PluginConfDir is the directory in which the admin places a CNI conf.
// Depending on the plugin, this may be an optional field, e.g. kubenet
// generates its own plugin conf.
PluginConfDir string
// MTU is the desired MTU for network devices created by the plugin.
MTU int
}
// namespaceGetter is a wrapper around the dockerService that implements
// the network.NamespaceGetter interface.
type namespaceGetter struct {
ds *dockerService
}
func (n *namespaceGetter) GetNetNS(containerID string) (string, error) {
return n.ds.GetNetNS(containerID)
}
// portMappingGetter is a wrapper around the dockerService that implements
// the network.PortMappingGetter interface.
type portMappingGetter struct {
ds *dockerService
}
func (p *portMappingGetter) GetPodPortMappings(containerID string) ([]*hostport.PortMapping, error) {
return p.ds.GetPodPortMappings(containerID)
}
// dockerNetworkHost implements network.Host by wrapping the legacy host passed in by the kubelet
// and dockerServices which implements the rest of the network host interfaces.
// The legacy host methods are slated for deletion.
type dockerNetworkHost struct {
*namespaceGetter
*portMappingGetter
}
var internalLabelKeys []string = []string{containerTypeLabelKey, containerLogPathLabelKey, sandboxIDLabelKey}
// ClientConfig holds the parameters used to initialize a docker client.
type ClientConfig struct {
DockerEndpoint string
RuntimeRequestTimeout time.Duration
ImagePullProgressDeadline time.Duration
// Configuration for fake docker client
EnableSleep bool
WithTraceDisabled bool
}
// NewDockerClientFromConfig creates a docker client from the given config and
// returns nil if a nil config is given.
func NewDockerClientFromConfig(config *ClientConfig) libdocker.Interface {
if config != nil {
// Create docker client.
client := libdocker.ConnectToDockerOrDie(
config.DockerEndpoint,
config.RuntimeRequestTimeout,
config.ImagePullProgressDeadline,
config.WithTraceDisabled,
config.EnableSleep,
)
return client
}
return nil
}
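// Illustrative usage (a sketch, not part of the original file; the endpoint
// and timeout values below are assumptions):
//
//	cfg := &ClientConfig{
//		DockerEndpoint:            "unix:///var/run/docker.sock",
//		RuntimeRequestTimeout:     2 * time.Minute,
//		ImagePullProgressDeadline: time.Minute,
//	}
//	client := NewDockerClientFromConfig(cfg) // a nil cfg yields a nil client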
// NOTE: Anything passed to DockerService should be eventually handled in another way when we switch to running the shim as a different process.
func NewDockerService(config *ClientConfig, podSandboxImage string, streamingConfig *streaming.Config,
pluginSettings *NetworkPluginSettings, cgroupsName string, kubeCgroupDriver string, dockershimRootDir string,
disableSharedPID, startLocalStreamingServer bool) (DockerService, error) {
client := NewDockerClientFromConfig(config)
c := libdocker.NewInstrumentedInterface(client)
checkpointManager, err := checkpointmanager.NewCheckpointManager(filepath.Join(dockershimRootDir, sandboxCheckpointDir))
if err != nil {
return nil, err
}
ds := &dockerService{
client: c,
os: kubecontainer.RealOS{},
podSandboxImage: podSandboxImage,
streamingRuntime: &streamingRuntime{
client: client,
execHandler: &NativeExecHandler{},
},
containerManager: cm.NewContainerManager(cgroupsName, client),
checkpointManager: checkpointManager,
disableSharedPID: disableSharedPID,
startLocalStreamingServer: startLocalStreamingServer,
networkReady: make(map[string]bool),
}
// check docker version compatibility.
if err = ds.checkVersionCompatibility(); err != nil {
return nil, err
}
// create streaming server if configured.
if streamingConfig != nil {
var err error
ds.streamingServer, err = streaming.NewServer(*streamingConfig, ds.streamingRuntime)
if err != nil {
return nil, err
}
}
// Determine the hairpin mode.
if err := effectiveHairpinMode(pluginSettings); err != nil {
// This is a non-recoverable error. Returning it up the callstack will just
// lead to retries of the same failure, so just fail hard.
return nil, err
}
glog.Infof("Hairpin mode set to %q", pluginSettings.HairpinMode)
// dockershim currently only supports CNI plugins.
pluginSettings.PluginBinDirs = cni.SplitDirs(pluginSettings.PluginBinDirString)
cniPlugins := cni.ProbeNetworkPlugins(pluginSettings.PluginConfDir, pluginSettings.PluginBinDirs)
cniPlugins = append(cniPlugins, kubenet.NewPlugin(pluginSettings.PluginBinDirs))
netHost := &dockerNetworkHost{
&namespaceGetter{ds},
&portMappingGetter{ds},
}
plug, err := network.InitNetworkPlugin(cniPlugins, pluginSettings.PluginName, netHost, pluginSettings.HairpinMode, pluginSettings.NonMasqueradeCIDR, pluginSettings.MTU)
if err != nil {
return nil, fmt.Errorf("didn't find compatible CNI plugin with given settings %+v: %v", pluginSettings, err)
}
ds.network = network.NewPluginManager(plug)
glog.Infof("Docker cri networking managed by %v", plug.Name())
// NOTE: cgroup driver is only detectable in docker 1.11+
cgroupDriver := defaultCgroupDriver
dockerInfo, err := ds.client.Info()
glog.Infof("Docker Info: %+v", dockerInfo)
if err != nil {
glog.Errorf("Failed to execute Info() call to the Docker client: %v", err)
glog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else if len(dockerInfo.CgroupDriver) == 0 {
glog.Warningf("No cgroup driver is set in Docker")
glog.Warningf("Falling back to use the default driver: %q", cgroupDriver)
} else {
cgroupDriver = dockerInfo.CgroupDriver
}
if len(kubeCgroupDriver) != 0 && kubeCgroupDriver != cgroupDriver {
return nil, fmt.Errorf("misconfiguration: kubelet cgroup driver: %q is different from docker cgroup driver: %q", kubeCgroupDriver, cgroupDriver)
}
glog.Infof("Setting cgroupDriver to %s", cgroupDriver)
ds.cgroupDriver = cgroupDriver
ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return ds.getDockerVersion()
},
versionCacheTTL,
)
// Register prometheus metrics.
metrics.Register()
return ds, nil
}
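// Hedged construction sketch (not from the original source; every argument
// value below is an assumption chosen for illustration):
//
//	ds, err := NewDockerService(cfg, "k8s.gcr.io/pause:3.1", nil, pluginSettings,
//		"/docker-daemon", "cgroupfs", "/var/lib/dockershim",
//		false /* disableSharedPID */, false /* startLocalStreamingServer */)
//	if err == nil {
//		err = ds.Start()
//	}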
type dockerService struct {
client libdocker.Interface
os kubecontainer.OSInterface
podSandboxImage string
streamingRuntime *streamingRuntime
streamingServer streaming.Server
network *network.PluginManager
// Map of podSandboxID :: network-is-ready
networkReady map[string]bool
networkReadyLock sync.Mutex
containerManager cm.ContainerManager
// cgroup driver used by Docker runtime.
cgroupDriver string
checkpointManager checkpointmanager.CheckpointManager
// caches the version of the runtime.
// To be compatible with multiple docker versions, we need to perform
// version checking for some operations. Use this cache to avoid querying
// the docker daemon every time we need to do such checks.
versionCache *cache.ObjectCache
// This option provides an escape hatch to override the new default behavior for Docker under
// the CRI to use a shared PID namespace for all pods. It is temporary and will be removed.
// See proposals/pod-pid-namespace.md for details.
// TODO: Remove once the escape hatch is no longer used (https://issues.k8s.io/41938)
disableSharedPID bool
// startLocalStreamingServer indicates whether dockershim should start a
// streaming server on localhost.
startLocalStreamingServer bool
}
// TODO: handle context.
// Version returns the runtime name, runtime version and runtime API version
func (ds *dockerService) Version(_ context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
v, err := ds.getDockerVersion()
if err != nil {
return nil, err
}
return &runtimeapi.VersionResponse{
Version: kubeAPIVersion,
RuntimeName: dockerRuntimeName,
RuntimeVersion: v.Version,
RuntimeApiVersion: v.APIVersion,
}, nil
}
// getDockerVersion gets the version information from docker.
func (ds *dockerService) getDockerVersion() (*dockertypes.Version, error) {
v, err := ds.client.Version()
if err != nil {
return nil, fmt.Errorf("failed to get docker version: %v", err)
}
// Docker API version (e.g., 1.23) is not semver compatible. Add a ".0"
// suffix to remedy this.
v.APIVersion = fmt.Sprintf("%s.0", v.APIVersion)
return v, nil
}
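// For example (illustrative): docker may report APIVersion "1.23", which
// semver.Parse rejects; after the fix-up, semver.Parse("1.23.0") succeeds.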
// UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates.
func (ds *dockerService) UpdateRuntimeConfig(_ context.Context, r *runtimeapi.UpdateRuntimeConfigRequest) (*runtimeapi.UpdateRuntimeConfigResponse, error) {
runtimeConfig := r.GetRuntimeConfig()
if runtimeConfig == nil {
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
}
glog.Infof("docker cri received runtime config %+v", runtimeConfig)
if ds.network != nil && runtimeConfig.NetworkConfig.PodCidr != "" {
event := make(map[string]interface{})
event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr
ds.network.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, event)
}
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
}
// GetNetNS returns the network namespace of the given containerID. The ID
// supplied is typically the ID of a pod sandbox. This getter doesn't try
// to map non-sandbox IDs to their respective sandboxes.
func (ds *dockerService) GetNetNS(podSandboxID string) (string, error) {
r, err := ds.client.InspectContainer(podSandboxID)
if err != nil {
return "", err
}
return getNetworkNamespace(r)
}
// GetPodPortMappings returns the port mappings of the given podSandbox ID.
func (ds *dockerService) GetPodPortMappings(podSandboxID string) ([]*hostport.PortMapping, error) {
// TODO: get portmappings from docker labels for backward compatibility
checkpoint := NewPodSandboxCheckpoint("", "", &CheckpointData{})
err := ds.checkpointManager.GetCheckpoint(podSandboxID, checkpoint)
// Return empty portMappings if checkpoint is not found
if err != nil {
if err == errors.ErrCheckpointNotFound {
return nil, nil
}
errRem := ds.checkpointManager.RemoveCheckpoint(podSandboxID)
if errRem != nil {
glog.Errorf("Failed to delete corrupt checkpoint for sandbox %q: %v", podSandboxID, errRem)
}
return nil, err
}
_, _, _, checkpointedPortMappings, _ := checkpoint.GetData()
portMappings := make([]*hostport.PortMapping, 0, len(checkpointedPortMappings))
for _, pm := range checkpointedPortMappings {
proto := toAPIProtocol(*pm.Protocol)
portMappings = append(portMappings, &hostport.PortMapping{
HostPort: *pm.HostPort,
ContainerPort: *pm.ContainerPort,
Protocol: proto,
})
}
return portMappings, nil
}
// Start initializes and starts components in dockerService.
func (ds *dockerService) Start() error {
// Start the local streaming server, if configured.
if ds.startLocalStreamingServer {
go func() {
if err := ds.streamingServer.Start(true); err != nil {
glog.Fatalf("Streaming server stopped unexpectedly: %v", err)
}
}()
}
return ds.containerManager.Start()
}
// Status returns the status of the runtime.
func (ds *dockerService) Status(_ context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
runtimeReady := &runtimeapi.RuntimeCondition{
Type: runtimeapi.RuntimeReady,
Status: true,
}
networkReady := &runtimeapi.RuntimeCondition{
Type: runtimeapi.NetworkReady,
Status: true,
}
conditions := []*runtimeapi.RuntimeCondition{runtimeReady, networkReady}
if _, err := ds.client.Version(); err != nil {
runtimeReady.Status = false
runtimeReady.Reason = "DockerDaemonNotReady"
runtimeReady.Message = fmt.Sprintf("docker: failed to get docker version: %v", err)
}
if err := ds.network.Status(); err != nil {
networkReady.Status = false
networkReady.Reason = "NetworkPluginNotReady"
networkReady.Message = fmt.Sprintf("docker: network plugin is not ready: %v", err)
}
status := &runtimeapi.RuntimeStatus{Conditions: conditions}
return &runtimeapi.StatusResponse{Status: status}, nil
}
func (ds *dockerService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
if ds.streamingServer != nil {
ds.streamingServer.ServeHTTP(w, r)
} else {
http.NotFound(w, r)
}
}
// GenerateExpectedCgroupParent returns the cgroup parent in the syntax expected by the cgroup driver.
func (ds *dockerService) GenerateExpectedCgroupParent(cgroupParent string) (string, error) {
if cgroupParent != "" {
// if docker uses the systemd cgroup driver, it expects *.slice style names for cgroup parent.
// if we configured kubelet to use --cgroup-driver=cgroupfs, and docker is configured to use systemd driver
// docker will fail to launch the container because the name we provide will not be a valid slice.
// this is a very good thing.
if ds.cgroupDriver == "systemd" {
// Pass only the last component of the cgroup path to systemd.
cgroupParent = path.Base(cgroupParent)
}
}
glog.V(3).Infof("Setting cgroup parent to: %q", cgroupParent)
return cgroupParent, nil
}
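// Behavior sketch (assumed input, not from the original file):
//
//	ds.cgroupDriver = "systemd"
//	parent, _ := ds.GenerateExpectedCgroupParent("/kubepods.slice/kubepods-pod123.slice")
//	// parent == "kubepods-pod123.slice"; with cgroupfs the path is returned unchanged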
// checkVersionCompatibility verifies whether docker is in a compatible version.
func (ds *dockerService) checkVersionCompatibility() error {
apiVersion, err := ds.getDockerAPIVersion()
if err != nil {
return err
}
minAPIVersion, err := semver.Parse(libdocker.MinimumDockerAPIVersion)
if err != nil {
return err
}
// Verify the docker version.
result := apiVersion.Compare(minAPIVersion)
if result < 0 {
return fmt.Errorf("docker API version is older than %s", libdocker.MinimumDockerAPIVersion)
}
return nil
}
// getDockerAPIVersion gets the semver-compatible docker api version.
func (ds *dockerService) getDockerAPIVersion() (*semver.Version, error) {
var dv *dockertypes.Version
var err error
if ds.versionCache != nil {
dv, err = ds.getDockerVersionFromCache()
} else {
dv, err = ds.getDockerVersion()
}
if err != nil {
return nil, err
}
apiVersion, err := semver.Parse(dv.APIVersion)
if err != nil {
return nil, err
}
return &apiVersion, nil
}
func (ds *dockerService) getDockerVersionFromCache() (*dockertypes.Version, error) {
// We only store one key in the cache.
const dummyKey = "version"
value, err := ds.versionCache.Get(dummyKey)
if err != nil {
return nil, err
}
dv, ok := value.(*dockertypes.Version)
if !ok {
return nil, fmt.Errorf("Converted to *dockertype.Version error")
}
return dv, nil
}
func toAPIProtocol(protocol Protocol) v1.Protocol {
switch protocol {
case protocolTCP:
return v1.ProtocolTCP
case protocolUDP:
return v1.ProtocolUDP
}
glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
return v1.ProtocolTCP
}
// effectiveHairpinMode determines the effective hairpin mode given the
// configured mode, and whether cbr0 should be configured.
func effectiveHairpinMode(s *NetworkPluginSettings) error {
// The hairpin mode setting doesn't matter if:
// - We're not using a bridge network. This is hard to check because we might
// be using a plugin.
// - It's set to hairpin-veth for a container runtime that doesn't know how
// to set the hairpin flag on the veth's of containers. Currently the
// docker runtime is the only one that understands this.
// - It's set to "none".
if s.HairpinMode == kubeletconfig.PromiscuousBridge || s.HairpinMode == kubeletconfig.HairpinVeth {
if s.HairpinMode == kubeletconfig.PromiscuousBridge && s.PluginName != "kubenet" {
// This is not a valid combination, since promiscuous-bridge only works on kubenet. Users might be using the
// default values (from before the hairpin-mode flag existed) and we
// should keep the old behavior.
glog.Warningf("Hairpin mode set to %q but kubenet is not enabled, falling back to %q", s.HairpinMode, kubeletconfig.HairpinVeth)
s.HairpinMode = kubeletconfig.HairpinVeth
return nil
}
} else if s.HairpinMode != kubeletconfig.HairpinNone {
return fmt.Errorf("unknown value: %q", s.HairpinMode)
}
return nil
}
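// Behavior sketch (assumed settings, not from the original file):
//
//	s := &NetworkPluginSettings{PluginName: "cni", HairpinMode: kubeletconfig.PromiscuousBridge}
//	_ = effectiveHairpinMode(s)
//	// s.HairpinMode == kubeletconfig.HairpinVeth (downgraded with a warning,
//	// since promiscuous-bridge only works with kubenet)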

View File

@ -1,173 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"errors"
"math/rand"
"testing"
"time"
"github.com/blang/semver"
dockertypes "github.com/docker/docker/api/types"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/apimachinery/pkg/util/clock"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/checkpointmanager"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/kubernetes/pkg/kubelet/util/cache"
)
// newTestNetworkPlugin returns a mock plugin that implements network.NetworkPlugin
func newTestNetworkPlugin(t *testing.T) *nettest.MockNetworkPlugin {
ctrl := gomock.NewController(t)
return nettest.NewMockNetworkPlugin(ctrl)
}
type mockCheckpointManager struct {
checkpoint map[string]*PodSandboxCheckpoint
}
func (ckm *mockCheckpointManager) CreateCheckpoint(checkpointKey string, checkpoint checkpointmanager.Checkpoint) error {
ckm.checkpoint[checkpointKey] = checkpoint.(*PodSandboxCheckpoint)
return nil
}
func (ckm *mockCheckpointManager) GetCheckpoint(checkpointKey string, checkpoint checkpointmanager.Checkpoint) error {
*(checkpoint.(*PodSandboxCheckpoint)) = *(ckm.checkpoint[checkpointKey])
return nil
}
func (ckm *mockCheckpointManager) RemoveCheckpoint(checkpointKey string) error {
_, ok := ckm.checkpoint[checkpointKey]
if ok {
delete(ckm.checkpoint, checkpointKey)
}
return nil
}
func (ckm *mockCheckpointManager) ListCheckpoints() ([]string, error) {
var keys []string
for key := range ckm.checkpoint {
keys = append(keys, key)
}
return keys, nil
}
func newMockCheckpointManager() checkpointmanager.CheckpointManager {
return &mockCheckpointManager{checkpoint: make(map[string]*PodSandboxCheckpoint)}
}
func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
fakeClock := clock.NewFakeClock(time.Time{})
c := libdocker.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23").WithRandSource(rand.NewSource(0))
pm := network.NewPluginManager(&network.NoopNetworkPlugin{})
ckm := newMockCheckpointManager()
return &dockerService{
client: c,
os: &containertest.FakeOS{},
network: pm,
checkpointManager: ckm,
networkReady: make(map[string]bool),
}, c, fakeClock
}
func newTestDockerServiceWithVersionCache() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
ds, c, fakeClock := newTestDockerService()
ds.versionCache = cache.NewObjectCache(
func() (interface{}, error) {
return ds.getDockerVersion()
},
time.Hour*10,
)
return ds, c, fakeClock
}
// TestStatus tests the runtime status logic.
func TestStatus(t *testing.T) {
ds, fDocker, _ := newTestDockerService()
assertStatus := func(expected map[string]bool, status *runtimeapi.RuntimeStatus) {
conditions := status.GetConditions()
assert.Equal(t, len(expected), len(conditions))
for k, v := range expected {
for _, c := range conditions {
if k == c.Type {
assert.Equal(t, v, c.Status)
}
}
}
}
// Should report ready status if version returns no error.
statusResp, err := ds.Status(getTestCTX(), &runtimeapi.StatusRequest{})
require.NoError(t, err)
assertStatus(map[string]bool{
runtimeapi.RuntimeReady: true,
runtimeapi.NetworkReady: true,
}, statusResp.Status)
// Should not report ready status if version returns error.
fDocker.InjectError("version", errors.New("test error"))
statusResp, err = ds.Status(getTestCTX(), &runtimeapi.StatusRequest{})
assert.NoError(t, err)
assertStatus(map[string]bool{
runtimeapi.RuntimeReady: false,
runtimeapi.NetworkReady: true,
}, statusResp.Status)
// Should not report ready status if network plugin returns error.
mockPlugin := newTestNetworkPlugin(t)
ds.network = network.NewPluginManager(mockPlugin)
defer mockPlugin.Finish()
mockPlugin.EXPECT().Status().Return(errors.New("network error"))
statusResp, err = ds.Status(getTestCTX(), &runtimeapi.StatusRequest{})
assert.NoError(t, err)
assertStatus(map[string]bool{
runtimeapi.RuntimeReady: true,
runtimeapi.NetworkReady: false,
}, statusResp.Status)
}
func TestVersion(t *testing.T) {
ds, _, _ := newTestDockerService()
expectedVersion := &dockertypes.Version{Version: "1.11.2", APIVersion: "1.23.0"}
v, err := ds.getDockerVersion()
require.NoError(t, err)
assert.Equal(t, expectedVersion, v)
expectedAPIVersion := &semver.Version{Major: 1, Minor: 23, Patch: 0}
apiVersion, err := ds.getDockerAPIVersion()
require.NoError(t, err)
assert.Equal(t, expectedAPIVersion, apiVersion)
}
func TestAPIVersionWithCache(t *testing.T) {
ds, _, _ := newTestDockerServiceWithVersionCache()
expected := &semver.Version{Major: 1, Minor: 23, Patch: 0}
version, err := ds.getDockerAPIVersion()
require.NoError(t, err)
assert.Equal(t, expected, version)
}

View File

@ -1,36 +0,0 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// ContainerStats returns stats for a container stats request based on container id.
func (ds *dockerService) ContainerStats(_ context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}
// ListContainerStats returns stats for a list container stats request based on a filter.
func (ds *dockerService) ListContainerStats(_ context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}

View File

@ -1,36 +0,0 @@
// +build !linux,!windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"fmt"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// ContainerStats returns stats for a container stats request based on container id.
func (ds *dockerService) ContainerStats(_ context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}
// ListContainerStats returns stats for a list container stats request based on a filter.
func (ds *dockerService) ListContainerStats(_ context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
return nil, fmt.Errorf("not implemented")
}

View File

@ -1,114 +0,0 @@
// +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"context"
"time"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// ContainerStats returns stats for a container stats request based on container id.
func (ds *dockerService) ContainerStats(_ context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
stats, err := ds.getContainerStats(r.ContainerId)
if err != nil {
return nil, err
}
return &runtimeapi.ContainerStatsResponse{Stats: stats}, nil
}
// ListContainerStats returns stats for a list container stats request based on a filter.
func (ds *dockerService) ListContainerStats(ctx context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
containerStatsFilter := r.GetFilter()
filter := &runtimeapi.ContainerFilter{}
if containerStatsFilter != nil {
filter.Id = containerStatsFilter.Id
filter.PodSandboxId = containerStatsFilter.PodSandboxId
filter.LabelSelector = containerStatsFilter.LabelSelector
}
listResp, err := ds.ListContainers(ctx, &runtimeapi.ListContainersRequest{Filter: filter})
if err != nil {
return nil, err
}
var stats []*runtimeapi.ContainerStats
for _, container := range listResp.Containers {
containerStats, err := ds.getContainerStats(container.Id)
if err != nil {
return nil, err
}
stats = append(stats, containerStats)
}
return &runtimeapi.ListContainerStatsResponse{Stats: stats}, nil
}
func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
info, err := ds.client.Info()
if err != nil {
return nil, err
}
statsJSON, err := ds.client.GetContainerStats(containerID)
if err != nil {
return nil, err
}
containerJSON, err := ds.client.InspectContainerWithSize(containerID)
if err != nil {
return nil, err
}
statusResp, err := ds.ContainerStatus(context.Background(), &runtimeapi.ContainerStatusRequest{ContainerId: containerID})
if err != nil {
return nil, err
}
status := statusResp.GetStatus()
dockerStats := statsJSON.Stats
timestamp := time.Now().UnixNano()
containerStats := &runtimeapi.ContainerStats{
Attributes: &runtimeapi.ContainerAttributes{
Id: containerID,
Metadata: status.Metadata,
Labels: status.Labels,
Annotations: status.Annotations,
},
Cpu: &runtimeapi.CpuUsage{
Timestamp: timestamp,
// Have to multiply cpu usage by 100 since docker stats units are in 100's of nanoseconds for Windows
// see https://github.com/moby/moby/blob/v1.13.1/api/types/stats.go#L22
UsageCoreNanoSeconds: &runtimeapi.UInt64Value{Value: dockerStats.CPUStats.CPUUsage.TotalUsage * 100},
},
Memory: &runtimeapi.MemoryUsage{
Timestamp: timestamp,
WorkingSetBytes: &runtimeapi.UInt64Value{Value: dockerStats.MemoryStats.PrivateWorkingSet},
},
WritableLayer: &runtimeapi.FilesystemUsage{
Timestamp: timestamp,
FsId: &runtimeapi.FilesystemIdentifier{Mountpoint: info.DockerRootDir},
UsedBytes: &runtimeapi.UInt64Value{Value: uint64(*containerJSON.SizeRw)},
},
}
return containerStats, nil
}

View File

@ -1,233 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"bytes"
"context"
"fmt"
"io"
"math"
"os/exec"
"strings"
"time"
dockertypes "github.com/docker/docker/api/types"
"github.com/golang/glog"
"k8s.io/client-go/tools/remotecommand"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
utilexec "k8s.io/utils/exec"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
type streamingRuntime struct {
client libdocker.Interface
execHandler ExecHandler
}
var _ streaming.Runtime = &streamingRuntime{}
func (r *streamingRuntime) Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
return r.exec(containerID, cmd, in, out, err, tty, resize, 0)
}
// Internal version of Exec adds a timeout.
func (r *streamingRuntime) exec(containerID string, cmd []string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
container, err := checkContainerStatus(r.client, containerID)
if err != nil {
return err
}
return r.execHandler.ExecInContainer(r.client, container, cmd, in, out, errw, tty, resize, timeout)
}
func (r *streamingRuntime) Attach(containerID string, in io.Reader, out, errw io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
_, err := checkContainerStatus(r.client, containerID)
if err != nil {
return err
}
return attachContainer(r.client, containerID, in, out, errw, tty, resize)
}
func (r *streamingRuntime) PortForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error {
if port < 0 || port > math.MaxUint16 {
return fmt.Errorf("invalid port %d", port)
}
return portForward(r.client, podSandboxID, port, stream)
}
// ExecSync executes a command in the container, and returns the stdout output.
// If command exits with a non-zero exit code, an error is returned.
func (ds *dockerService) ExecSync(_ context.Context, req *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
timeout := time.Duration(req.Timeout) * time.Second
var stdoutBuffer, stderrBuffer bytes.Buffer
err := ds.streamingRuntime.exec(req.ContainerId, req.Cmd,
nil, // in
ioutils.WriteCloserWrapper(&stdoutBuffer),
ioutils.WriteCloserWrapper(&stderrBuffer),
false, // tty
nil, // resize
timeout)
var exitCode int32
if err != nil {
exitError, ok := err.(utilexec.ExitError)
if !ok {
return nil, err
}
exitCode = int32(exitError.ExitStatus())
}
return &runtimeapi.ExecSyncResponse{
Stdout: stdoutBuffer.Bytes(),
Stderr: stderrBuffer.Bytes(),
ExitCode: exitCode,
}, nil
}
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
func (ds *dockerService) Exec(_ context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
if ds.streamingServer == nil {
return nil, streaming.ErrorStreamingDisabled("exec")
}
_, err := checkContainerStatus(ds.client, req.ContainerId)
if err != nil {
return nil, err
}
return ds.streamingServer.GetExec(req)
}
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
func (ds *dockerService) Attach(_ context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
if ds.streamingServer == nil {
return nil, streaming.ErrorStreamingDisabled("attach")
}
_, err := checkContainerStatus(ds.client, req.ContainerId)
if err != nil {
return nil, err
}
return ds.streamingServer.GetAttach(req)
}
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
func (ds *dockerService) PortForward(_ context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
if ds.streamingServer == nil {
return nil, streaming.ErrorStreamingDisabled("port forward")
}
_, err := checkContainerStatus(ds.client, req.PodSandboxId)
if err != nil {
return nil, err
}
// TODO(tallclair): Verify that ports are exposed.
return ds.streamingServer.GetPortForward(req)
}
func checkContainerStatus(client libdocker.Interface, containerID string) (*dockertypes.ContainerJSON, error) {
container, err := client.InspectContainer(containerID)
if err != nil {
return nil, err
}
if !container.State.Running {
return nil, fmt.Errorf("container not running (%s)", container.ID)
}
return container, nil
}
func attachContainer(client libdocker.Interface, containerID string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error {
// Have to start this before the call to client.AttachToContainer because client.AttachToContainer is a blocking
// call :-( Otherwise, resize events don't get processed and the terminal never resizes.
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
client.ResizeContainerTTY(containerID, uint(size.Height), uint(size.Width))
})
// TODO(random-liu): Do we really use the *Logs* field here?
opts := dockertypes.ContainerAttachOptions{
Stream: true,
Stdin: stdin != nil,
Stdout: stdout != nil,
Stderr: stderr != nil,
}
sopts := libdocker.StreamOptions{
InputStream: stdin,
OutputStream: stdout,
ErrorStream: stderr,
RawTerminal: tty,
}
return client.AttachToContainer(containerID, opts, sopts)
}
func portForward(client libdocker.Interface, podSandboxID string, port int32, stream io.ReadWriteCloser) error {
container, err := client.InspectContainer(podSandboxID)
if err != nil {
return err
}
if !container.State.Running {
return fmt.Errorf("container not running (%s)", container.ID)
}
containerPid := container.State.Pid
socatPath, lookupErr := exec.LookPath("socat")
if lookupErr != nil {
return fmt.Errorf("unable to do port forwarding: socat not found.")
}
args := []string{"-t", fmt.Sprintf("%d", containerPid), "-n", socatPath, "-", fmt.Sprintf("TCP4:localhost:%d", port)}
nsenterPath, lookupErr := exec.LookPath("nsenter")
if lookupErr != nil {
return fmt.Errorf("unable to do port forwarding: nsenter not found.")
}
commandString := fmt.Sprintf("%s %s", nsenterPath, strings.Join(args, " "))
glog.V(4).Infof("executing port forwarding command: %s", commandString)
command := exec.Command(nsenterPath, args...)
command.Stdout = stream
stderr := new(bytes.Buffer)
command.Stderr = stderr
// If we use Stdin, command.Run() won't return until the goroutine that's copying
// from stream finishes. Unfortunately, if you have a client like telnet connected
// via port forwarding, as long as the user's telnet client is connected to the user's
// local listener that port forwarding sets up, the telnet session never exits. This
// means that even if socat has finished running, command.Run() won't ever return
// (because the client still has the connection and stream open).
//
// The work around is to use StdinPipe(), as Wait() (called by Run()) closes the pipe
// when the command (socat) exits.
inPipe, err := command.StdinPipe()
if err != nil {
return fmt.Errorf("unable to do port forwarding: error creating stdin pipe: %v", err)
}
go func() {
io.Copy(inPipe, stream)
inPipe.Close()
}()
if err := command.Run(); err != nil {
return fmt.Errorf("%v: %s", err, stderr.String())
}
return nil
}

View File

@ -1,135 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"io"
"time"
dockertypes "github.com/docker/docker/api/types"
"github.com/golang/glog"
"k8s.io/client-go/tools/remotecommand"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
)
// ExecHandler knows how to execute a command in a running Docker container.
type ExecHandler interface {
ExecInContainer(client libdocker.Interface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error
}
type dockerExitError struct {
Inspect *dockertypes.ContainerExecInspect
}
func (d *dockerExitError) String() string {
return d.Error()
}
func (d *dockerExitError) Error() string {
return fmt.Sprintf("Error executing in Docker Container: %d", d.Inspect.ExitCode)
}
func (d *dockerExitError) Exited() bool {
return !d.Inspect.Running
}
func (d *dockerExitError) ExitStatus() int {
return d.Inspect.ExitCode
}
// NativeExecHandler executes commands in Docker containers using Docker's exec API.
type NativeExecHandler struct{}
func (*NativeExecHandler) ExecInContainer(client libdocker.Interface, container *dockertypes.ContainerJSON, cmd []string, stdin io.Reader, stdout, stderr io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize, timeout time.Duration) error {
done := make(chan struct{})
defer close(done)
createOpts := dockertypes.ExecConfig{
Cmd: cmd,
AttachStdin: stdin != nil,
AttachStdout: stdout != nil,
AttachStderr: stderr != nil,
Tty: tty,
}
execObj, err := client.CreateExec(container.ID, createOpts)
if err != nil {
return fmt.Errorf("failed to exec in container - Exec setup failed - %v", err)
}
// Have to start this before the call to client.StartExec because client.StartExec is a blocking
// call :-( Otherwise, resize events don't get processed and the terminal never resizes.
//
// We also have to delay attempting to send a terminal resize request to docker until after the
// exec has started; otherwise, the initial resize request will fail.
execStarted := make(chan struct{})
go func() {
select {
case <-execStarted:
// client.StartExec has started the exec, so we can start resizing
case <-done:
// ExecInContainer has returned, so short-circuit
return
}
kubecontainer.HandleResizing(resize, func(size remotecommand.TerminalSize) {
client.ResizeExecTTY(execObj.ID, uint(size.Height), uint(size.Width))
})
}()
startOpts := dockertypes.ExecStartCheck{Detach: false, Tty: tty}
streamOpts := libdocker.StreamOptions{
InputStream: stdin,
OutputStream: stdout,
ErrorStream: stderr,
RawTerminal: tty,
ExecStarted: execStarted,
}
err = client.StartExec(execObj.ID, startOpts, streamOpts)
if err != nil {
return err
}
ticker := time.NewTicker(2 * time.Second)
defer ticker.Stop()
count := 0
for {
inspect, err2 := client.InspectExec(execObj.ID)
if err2 != nil {
return err2
}
if !inspect.Running {
if inspect.ExitCode != 0 {
err = &dockerExitError{inspect}
}
break
}
count++
if count == 5 {
glog.Errorf("Exec session %s in container %s terminated but process still running!", execObj.ID, container.ID)
break
}
<-ticker.C
}
return err
}

View File

@ -1,393 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"regexp"
"strconv"
"strings"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerfilters "github.com/docker/docker/api/types/filters"
dockernat "github.com/docker/go-connections/nat"
"github.com/golang/glog"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/kubernetes/pkg/credentialprovider"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
"k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/security/apparmor"
"k8s.io/kubernetes/pkg/util/parsers"
)
const (
annotationPrefix = "annotation."
securityOptSeparator = '='
)
var (
conflictRE = regexp.MustCompile(`Conflict. (?:.)+ is already in use by container ([0-9a-z]+)`)
// This is hacky, but extremely common:
// if a container is started but the executable file is not found, runc gives a message that matches this pattern.
startRE = regexp.MustCompile(`\\\\\\\"(.*)\\\\\\\": executable file not found`)
defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined", ""}}
)
// generateEnvList converts KeyValue list to a list of strings, in the form of
// '<key>=<value>', which can be understood by docker.
func generateEnvList(envs []*runtimeapi.KeyValue) (result []string) {
for _, env := range envs {
result = append(result, fmt.Sprintf("%s=%s", env.Key, env.Value))
}
return
}
// makeLabels converts annotations to labels and merge them with the given
// labels. This is necessary because docker does not support annotations;
// we *fake* annotations using labels. Note that docker labels are not
// updatable.
func makeLabels(labels, annotations map[string]string) map[string]string {
merged := make(map[string]string)
for k, v := range labels {
merged[k] = v
}
for k, v := range annotations {
// Assume there won't be a conflict.
merged[fmt.Sprintf("%s%s", annotationPrefix, k)] = v
}
return merged
}
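// Round-trip sketch (hypothetical keys and values):
//
//	merged := makeLabels(map[string]string{"app": "web"},
//		map[string]string{"owner": "team-a"})
//	// merged == {"app": "web", "annotation.owner": "team-a"}
//	labels, annotations := extractLabels(merged)
//	// labels == {"app": "web"}, annotations == {"owner": "team-a"}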
// extractLabels converts raw docker labels to the CRI labels and annotations.
// It also filters out internal labels used by this shim.
func extractLabels(input map[string]string) (map[string]string, map[string]string) {
labels := make(map[string]string)
annotations := make(map[string]string)
for k, v := range input {
// Check if the key is used internally by the shim.
internal := false
for _, internalKey := range internalLabelKeys {
if k == internalKey {
internal = true
break
}
}
if internal {
continue
}
// Delete the container name label for the sandbox. It is added by the shim
// and should not be exposed via CRI.
if k == types.KubernetesContainerNameLabel &&
input[containerTypeLabelKey] == containerTypeLabelSandbox {
continue
}
// Check if the label should be treated as an annotation.
if strings.HasPrefix(k, annotationPrefix) {
annotations[strings.TrimPrefix(k, annotationPrefix)] = v
continue
}
labels[k] = v
}
return labels, annotations
}
// generateMountBindings converts the mount list to a list of strings that
// can be understood by docker.
// '<HostPath>:<ContainerPath>[:options]', where 'options'
// is a comma-separated list of the following strings:
// 'ro', if the path is read only
// 'Z', if the volume requires SELinux relabeling
// propagation mode such as 'rslave'
func generateMountBindings(mounts []*runtimeapi.Mount) []string {
result := make([]string, 0, len(mounts))
for _, m := range mounts {
bind := fmt.Sprintf("%s:%s", m.HostPath, m.ContainerPath)
var attrs []string
if m.Readonly {
attrs = append(attrs, "ro")
}
// Only request relabeling if the pod provides an SELinux context. If the pod
// does not provide an SELinux context relabeling will label the volume with
// the container's randomly allocated MCS label. This would restrict access
// to the volume to the container which mounts it first.
if m.SelinuxRelabel {
attrs = append(attrs, "Z")
}
switch m.Propagation {
case runtimeapi.MountPropagation_PROPAGATION_PRIVATE:
// noop, private is default
case runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL:
attrs = append(attrs, "rshared")
case runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER:
attrs = append(attrs, "rslave")
default:
glog.Warningf("unknown propagation mode for hostPath %q", m.HostPath)
// Falls back to "private"
}
if len(attrs) > 0 {
bind = fmt.Sprintf("%s:%s", bind, strings.Join(attrs, ","))
}
result = append(result, bind)
}
return result
}
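// Example of the produced format (assumed mount): a read-only mount of
// host path /data at container path /var/data, with SELinux relabeling and
// HOST_TO_CONTAINER propagation, becomes
//
//	"/data:/var/data:ro,Z,rslave"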
func makePortsAndBindings(pm []*runtimeapi.PortMapping) (dockernat.PortSet, map[dockernat.Port][]dockernat.PortBinding) {
exposedPorts := dockernat.PortSet{}
portBindings := map[dockernat.Port][]dockernat.PortBinding{}
for _, port := range pm {
exteriorPort := port.HostPort
if exteriorPort == 0 {
// No need to do port binding when HostPort is not specified
continue
}
interiorPort := port.ContainerPort
// Some of this port stuff is under-documented voodoo.
// See http://stackoverflow.com/questions/20428302/binding-a-port-to-a-host-interface-using-the-rest-api
var protocol string
switch port.Protocol {
case runtimeapi.Protocol_UDP:
protocol = "/udp"
case runtimeapi.Protocol_TCP:
protocol = "/tcp"
default:
glog.Warningf("Unknown protocol %q: defaulting to TCP", port.Protocol)
protocol = "/tcp"
}
dockerPort := dockernat.Port(strconv.Itoa(int(interiorPort)) + protocol)
exposedPorts[dockerPort] = struct{}{}
hostBinding := dockernat.PortBinding{
HostPort: strconv.Itoa(int(exteriorPort)),
HostIP: port.HostIp,
}
// Allow multiple host ports to bind to the same docker port
if existedBindings, ok := portBindings[dockerPort]; ok {
// If a docker port already maps to a host port, just append the new host port
portBindings[dockerPort] = append(existedBindings, hostBinding)
} else {
// Otherwise, it's a fresh new port binding
portBindings[dockerPort] = []dockernat.PortBinding{
hostBinding,
}
}
}
return exposedPorts, portBindings
}
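// Sketch of the mapping (assumed values): a PortMapping with HostPort 8080,
// ContainerPort 80 and Protocol TCP yields
//
//	exposedPorts["80/tcp"] = struct{}{}
//	portBindings["80/tcp"] = []dockernat.PortBinding{{HostIP: "", HostPort: "8080"}}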
// getApparmorSecurityOpts gets apparmor options from container config.
func getApparmorSecurityOpts(sc *runtimeapi.LinuxContainerSecurityContext, separator rune) ([]string, error) {
if sc == nil || sc.ApparmorProfile == "" {
return nil, nil
}
appArmorOpts, err := getAppArmorOpts(sc.ApparmorProfile)
if err != nil {
return nil, err
}
fmtOpts := fmtDockerOpts(appArmorOpts, separator)
return fmtOpts, nil
}
// dockerFilter wraps around dockerfilters.Args and provides methods to modify
// the filter easily.
type dockerFilter struct {
args *dockerfilters.Args
}
func newDockerFilter(args *dockerfilters.Args) *dockerFilter {
return &dockerFilter{args: args}
}
func (f *dockerFilter) Add(key, value string) {
f.args.Add(key, value)
}
func (f *dockerFilter) AddLabel(key, value string) {
f.Add("label", fmt.Sprintf("%s=%s", key, value))
}
// parseUserFromImageUser splits the user out of a user:group string.
func parseUserFromImageUser(id string) string {
if id == "" {
return id
}
// split instances where the id may contain user:group
if strings.Contains(id, ":") {
return strings.Split(id, ":")[0]
}
// no group, just return the id
return id
}
// getUserFromImageUser gets uid or user name of the image user.
// If user is numeric, it will be treated as a uid; otherwise, it is treated as a user name.
func getUserFromImageUser(imageUser string) (*int64, string) {
user := parseUserFromImageUser(imageUser)
// return both nil if user is not specified in the image.
if user == "" {
return nil, ""
}
// user could be either uid or user name. Try to interpret as numeric uid.
uid, err := strconv.ParseInt(user, 10, 64)
if err != nil {
// If user is non-numeric, assume it's a user name.
return nil, user
}
// If user is a numeric uid.
return &uid, ""
}
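// Illustrative results (hypothetical image users):
//
//	getUserFromImageUser("1000:1000") // -> uid 1000, ""
//	getUserFromImageUser("nginx")     // -> nil, "nginx"
//	getUserFromImageUser("")          // -> nil, ""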
// See #33189. If the previous attempt to create a sandbox container name FOO
// failed due to "device or resource busy", it is possible that docker did
// not clean up properly and has inconsistent internal state. Docker would
// not report the existence of FOO, but would complain if user wants to
// create a new container named FOO. To work around this, we parse the error
// message to identify failure caused by naming conflict, and try to remove
// the old container FOO.
// See #40443. Sometimes even removal may fail with "no such container" error.
// In that case we have to create the container with a randomized name.
// TODO(random-liu): Remove this work around after docker 1.11 is deprecated.
// TODO(#33189): Monitor the tests to see if the fix is sufficient.
func recoverFromCreationConflictIfNeeded(client libdocker.Interface, createConfig dockertypes.ContainerCreateConfig, err error) (*dockercontainer.ContainerCreateCreatedBody, error) {
matches := conflictRE.FindStringSubmatch(err.Error())
if len(matches) != 2 {
return nil, err
}
id := matches[1]
glog.Warningf("Unable to create pod sandbox due to conflict. Attempting to remove sandbox %q", id)
if rmErr := client.RemoveContainer(id, dockertypes.ContainerRemoveOptions{RemoveVolumes: true}); rmErr == nil {
glog.V(2).Infof("Successfully removed conflicting container %q", id)
return nil, err
} else {
glog.Errorf("Failed to remove the conflicting container %q: %v", id, rmErr)
// Return if the error is not container not found error.
if !libdocker.IsContainerNotFoundError(rmErr) {
return nil, err
}
}
// randomize the name to avoid conflict.
createConfig.Name = randomizeName(createConfig.Name)
glog.V(2).Infof("Create the container with randomized name %s", createConfig.Name)
return client.CreateContainer(createConfig)
}
// transformStartContainerError does regex parsing on returned error
// for where container runtimes are giving less than ideal error messages.
func transformStartContainerError(err error) error {
if err == nil {
return nil
}
matches := startRE.FindStringSubmatch(err.Error())
if len(matches) > 0 {
return fmt.Errorf("executable not found in $PATH")
}
return err
}
// ensureSandboxImageExists pulls the sandbox image when it's not present.
func ensureSandboxImageExists(client libdocker.Interface, image string) error {
_, err := client.InspectImageByRef(image)
if err == nil {
return nil
}
if !libdocker.IsImageNotFoundError(err) {
return fmt.Errorf("failed to inspect sandbox image %q: %v", image, err)
}
repoToPull, _, _, err := parsers.ParseImageName(image)
if err != nil {
return err
}
keyring := credentialprovider.NewDockerKeyring()
creds, withCredentials := keyring.Lookup(repoToPull)
if !withCredentials {
glog.V(3).Infof("Pulling image %q without credentials", image)
err := client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{})
if err != nil {
return fmt.Errorf("failed pulling image %q: %v", image, err)
}
return nil
}
var pullErrs []error
for _, currentCreds := range creds {
authConfig := credentialprovider.LazyProvide(currentCreds)
err := client.PullImage(image, authConfig, dockertypes.ImagePullOptions{})
// If there was no error, return success
if err == nil {
return nil
}
pullErrs = append(pullErrs, err)
}
return utilerrors.NewAggregate(pullErrs)
}
func getAppArmorOpts(profile string) ([]dockerOpt, error) {
if profile == "" || profile == apparmor.ProfileRuntimeDefault {
// Docker applies the default profile by default.
return nil, nil
}
// Return unconfined profile explicitly
if profile == apparmor.ProfileNameUnconfined {
return []dockerOpt{{"apparmor", apparmor.ProfileNameUnconfined, ""}}, nil
}
// Assume validation has already happened.
profileName := strings.TrimPrefix(profile, apparmor.ProfileNamePrefix)
return []dockerOpt{{"apparmor", profileName, ""}}, nil
}
// fmtDockerOpts formats the docker security options using the given separator.
func fmtDockerOpts(opts []dockerOpt, sep rune) []string {
fmtOpts := make([]string, len(opts))
for i, opt := range opts {
fmtOpts[i] = fmt.Sprintf("%s%c%s", opt.key, sep, opt.value)
}
return fmtOpts
}
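// For instance (illustrative):
//
//	fmtDockerOpts([]dockerOpt{{"seccomp", "unconfined", ""}}, '=')
//	// -> []string{"seccomp=unconfined"}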
type dockerOpt struct {
// The key-value pair passed to docker.
key, value string
// The alternative value to use in log/event messages.
msg string
}
// Expose key/value from dockerOpt.
func (d dockerOpt) GetKV() (string, string) {
return d.key, d.value
}

View File

@ -1,153 +0,0 @@
// +build linux
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"bytes"
"crypto/md5"
"encoding/json"
"fmt"
"io/ioutil"
"path/filepath"
"strings"
"github.com/blang/semver"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
"k8s.io/api/core/v1"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func DefaultMemorySwap() int64 {
return 0
}
func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
// Apply seccomp options.
seccompSecurityOpts, err := getSeccompSecurityOpts(seccompProfile, separator)
if err != nil {
return nil, fmt.Errorf("failed to generate seccomp security options for container: %v", err)
}
return seccompSecurityOpts, nil
}
func getSeccompDockerOpts(seccompProfile string) ([]dockerOpt, error) {
if seccompProfile == "" || seccompProfile == "unconfined" {
// return the default early
return defaultSeccompOpt, nil
}
if seccompProfile == v1.SeccompProfileRuntimeDefault || seccompProfile == v1.DeprecatedSeccompProfileDockerDefault {
// return nil so docker will load the default seccomp profile
return nil, nil
}
if !strings.HasPrefix(seccompProfile, "localhost/") {
return nil, fmt.Errorf("unknown seccomp profile option: %s", seccompProfile)
}
// get the full path of seccomp profile when prefixed with 'localhost/'.
fname := strings.TrimPrefix(seccompProfile, "localhost/")
if !filepath.IsAbs(fname) {
return nil, fmt.Errorf("seccomp profile path must be absolute, but got relative path %q", fname)
}
file, err := ioutil.ReadFile(filepath.FromSlash(fname))
if err != nil {
return nil, fmt.Errorf("cannot load seccomp profile %q: %v", fname, err)
}
b := bytes.NewBuffer(nil)
if err := json.Compact(b, file); err != nil {
return nil, err
}
// Rather than the full profile, just put the filename & md5sum in the event log.
msg := fmt.Sprintf("%s(md5:%x)", fname, md5.Sum(file))
return []dockerOpt{{"seccomp", b.String(), msg}}, nil
}
// getSeccompSecurityOpts gets container seccomp options from container seccomp profile.
// It is an experimental feature and may be promoted to official runtime api in the future.
func getSeccompSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
seccompOpts, err := getSeccompDockerOpts(seccompProfile)
if err != nil {
return nil, err
}
return fmtDockerOpts(seccompOpts, separator), nil
}
func (ds *dockerService) updateCreateConfig(
createConfig *dockertypes.ContainerCreateConfig,
config *runtimeapi.ContainerConfig,
sandboxConfig *runtimeapi.PodSandboxConfig,
podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error {
// Apply Linux-specific options if applicable.
if lc := config.GetLinux(); lc != nil {
// TODO: Check if the units are correct.
// TODO: Can we assume the defaults are sane?
rOpts := lc.GetResources()
if rOpts != nil {
createConfig.HostConfig.Resources = dockercontainer.Resources{
// Memory and MemorySwap are set to the same value, this prevents containers from using any swap.
Memory: rOpts.MemoryLimitInBytes,
MemorySwap: rOpts.MemoryLimitInBytes,
CPUShares: rOpts.CpuShares,
CPUQuota: rOpts.CpuQuota,
CPUPeriod: rOpts.CpuPeriod,
}
createConfig.HostConfig.OomScoreAdj = int(rOpts.OomScoreAdj)
}
// Note: ShmSize is handled in kube_docker_client.go
// Apply security context.
if err := applyContainerSecurityContext(lc, podSandboxID, createConfig.Config, createConfig.HostConfig, securityOptSep); err != nil {
return fmt.Errorf("failed to apply container security context for container %q: %v", config.Metadata.Name, err)
}
modifyContainerPIDNamespaceOverrides(ds.disableSharedPID, apiVersion, createConfig.HostConfig, podSandboxID)
}
// Apply cgroupsParent derived from the sandbox config.
if lc := sandboxConfig.GetLinux(); lc != nil {
// Apply Cgroup options.
cgroupParent, err := ds.GenerateExpectedCgroupParent(lc.CgroupParent)
if err != nil {
return fmt.Errorf("failed to generate cgroup parent in expected syntax for container %q: %v", config.Metadata.Name, err)
}
createConfig.HostConfig.CgroupParent = cgroupParent
}
return nil
}
func (ds *dockerService) determinePodIPBySandboxID(uid string) string {
return ""
}
func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
if c.State.Pid == 0 {
// Docker reports pid 0 for an exited container.
return "", fmt.Errorf("cannot find network namespace for the terminated container %q", c.ID)
}
return fmt.Sprintf(dockerNetNSFmt, c.State.Pid), nil
}
// applyExperimentalCreateConfig applies experimental configuration from sandbox annotations.
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
}

View File

@ -1,108 +0,0 @@
// +build linux
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"k8s.io/api/core/v1"
)
func TestGetSeccompSecurityOpts(t *testing.T) {
tests := []struct {
msg string
seccompProfile string
expectedOpts []string
}{{
msg: "No security annotations",
seccompProfile: "",
expectedOpts: []string{"seccomp=unconfined"},
}, {
msg: "Seccomp unconfined",
seccompProfile: "unconfined",
expectedOpts: []string{"seccomp=unconfined"},
}, {
msg: "Seccomp default",
seccompProfile: v1.SeccompProfileRuntimeDefault,
expectedOpts: nil,
}, {
msg: "Seccomp deprecated default",
seccompProfile: v1.DeprecatedSeccompProfileDockerDefault,
expectedOpts: nil,
}}
for i, test := range tests {
opts, err := getSeccompSecurityOpts(test.seccompProfile, '=')
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
for _, opt := range test.expectedOpts {
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
}
}
}
func TestLoadSeccompLocalhostProfiles(t *testing.T) {
tmpdir, err := ioutil.TempDir("", "seccomp-local-profile-test")
require.NoError(t, err)
defer os.RemoveAll(tmpdir)
testProfile := `{"foo": "bar"}`
err = ioutil.WriteFile(filepath.Join(tmpdir, "test"), []byte(testProfile), 0644)
require.NoError(t, err)
tests := []struct {
msg string
seccompProfile string
expectedOpts []string
expectErr bool
}{{
msg: "Seccomp localhost/test profile should return correct seccomp profiles",
seccompProfile: "localhost/" + filepath.Join(tmpdir, "test"),
expectedOpts: []string{`seccomp={"foo":"bar"}`},
expectErr: false,
}, {
msg: "Non-existent profile should return error",
seccompProfile: "localhost/" + filepath.Join(tmpdir, "fixtures/non-existent"),
expectedOpts: nil,
expectErr: true,
}, {
msg: "Relative profile path should return error",
seccompProfile: "localhost/fixtures/test",
expectedOpts: nil,
expectErr: true,
}}
for i, test := range tests {
opts, err := getSeccompSecurityOpts(test.seccompProfile, '=')
if test.expectErr {
assert.Error(t, err, fmt.Sprintf("TestCase[%d]: %s", i, test.msg))
continue
}
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
for _, opt := range test.expectedOpts {
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
}
}
}

View File

@ -1,346 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
dockertypes "github.com/docker/docker/api/types"
dockernat "github.com/docker/go-connections/nat"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
"k8s.io/kubernetes/pkg/security/apparmor"
)
func TestLabelsAndAnnotationsRoundTrip(t *testing.T) {
expectedLabels := map[string]string{"foo.123.abc": "baz", "bar.456.xyz": "qwe"}
expectedAnnotations := map[string]string{"uio.ert": "dfs", "jkl": "asd"}
// Merge labels and annotations into docker labels.
dockerLabels := makeLabels(expectedLabels, expectedAnnotations)
// Extract labels and annotations from docker labels.
actualLabels, actualAnnotations := extractLabels(dockerLabels)
assert.Equal(t, expectedLabels, actualLabels)
assert.Equal(t, expectedAnnotations, actualAnnotations)
}
// TestGetApparmorSecurityOpts tests the logic of generating container apparmor options from sandbox annotations.
func TestGetApparmorSecurityOpts(t *testing.T) {
makeConfig := func(profile string) *runtimeapi.LinuxContainerSecurityContext {
return &runtimeapi.LinuxContainerSecurityContext{
ApparmorProfile: profile,
}
}
tests := []struct {
msg string
config *runtimeapi.LinuxContainerSecurityContext
expectedOpts []string
}{{
msg: "No AppArmor options",
config: makeConfig(""),
expectedOpts: nil,
}, {
msg: "AppArmor runtime/default",
config: makeConfig("runtime/default"),
expectedOpts: []string{},
}, {
msg: "AppArmor local profile",
config: makeConfig(apparmor.ProfileNamePrefix + "foo"),
expectedOpts: []string{"apparmor=foo"},
}}
for i, test := range tests {
opts, err := getApparmorSecurityOpts(test.config, '=')
assert.NoError(t, err, "TestCase[%d]: %s", i, test.msg)
assert.Len(t, opts, len(test.expectedOpts), "TestCase[%d]: %s", i, test.msg)
for _, opt := range test.expectedOpts {
assert.Contains(t, opts, opt, "TestCase[%d]: %s", i, test.msg)
}
}
}
// TestGetUserFromImageUser tests the logic of getting image uid or user name of image user.
func TestGetUserFromImageUser(t *testing.T) {
newI64 := func(i int64) *int64 { return &i }
for c, test := range map[string]struct {
user string
uid *int64
name string
}{
"no gid": {
user: "0",
uid: newI64(0),
},
"uid/gid": {
user: "0:1",
uid: newI64(0),
},
"empty user": {
user: "",
},
"multiple spearators": {
user: "1:2:3",
uid: newI64(1),
},
"root username": {
user: "root:root",
name: "root",
},
"username": {
user: "test:test",
name: "test",
},
} {
t.Logf("TestCase - %q", c)
actualUID, actualName := getUserFromImageUser(test.user)
assert.Equal(t, test.uid, actualUID)
assert.Equal(t, test.name, actualName)
}
}
func TestParsingCreationConflictError(t *testing.T) {
// Expected error message from docker.
msg := "Conflict. The name \"/k8s_POD_pfpod_e2e-tests-port-forwarding-dlxt2_81a3469e-99e1-11e6-89f2-42010af00002_0\" is already in use by container 24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e. You have to remove (or rename) that container to be able to reuse that name."
matches := conflictRE.FindStringSubmatch(msg)
require.Len(t, matches, 2)
require.Equal(t, matches[1], "24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e")
}
// writeDockerConfig will write a config file into a temporary dir, and return that dir.
// Caller is responsible for deleting the dir and its contents.
func writeDockerConfig(cfg string) (string, error) {
tmpdir, err := ioutil.TempDir("", "dockershim=helpers_test.go=")
if err != nil {
return "", err
}
dir := filepath.Join(tmpdir, ".docker")
if err := os.Mkdir(dir, 0755); err != nil {
return "", err
}
return tmpdir, ioutil.WriteFile(filepath.Join(dir, "config.json"), []byte(cfg), 0644)
}
func TestEnsureSandboxImageExists(t *testing.T) {
sandboxImage := "gcr.io/test/image"
authConfig := dockertypes.AuthConfig{Username: "user", Password: "pass"}
for desc, test := range map[string]struct {
injectImage bool
imgNeedsAuth bool
injectErr error
calls []string
err bool
configJSON string
}{
"should not pull image when it already exists": {
injectImage: true,
injectErr: nil,
calls: []string{"inspect_image"},
},
"should pull image when it doesn't exist": {
injectImage: false,
injectErr: libdocker.ImageNotFoundError{ID: "image_id"},
calls: []string{"inspect_image", "pull"},
},
"should return error when inspect image fails": {
injectImage: false,
injectErr: fmt.Errorf("arbitrary error"),
calls: []string{"inspect_image"},
err: true,
},
"should return error when image pull needs private auth, but none provided": {
injectImage: true,
imgNeedsAuth: true,
injectErr: libdocker.ImageNotFoundError{ID: "image_id"},
calls: []string{"inspect_image", "pull"},
err: true,
},
} {
t.Logf("TestCase: %q", desc)
_, fakeDocker, _ := newTestDockerService()
if test.injectImage {
images := []dockertypes.ImageSummary{{ID: sandboxImage}}
fakeDocker.InjectImages(images)
if test.imgNeedsAuth {
fakeDocker.MakeImagesPrivate(images, authConfig)
}
}
fakeDocker.InjectError("inspect_image", test.injectErr)
err := ensureSandboxImageExists(fakeDocker, sandboxImage)
assert.NoError(t, fakeDocker.AssertCalls(test.calls))
assert.Equal(t, test.err, err != nil)
}
}
func TestMakePortsAndBindings(t *testing.T) {
for desc, test := range map[string]struct {
pm []*runtimeapi.PortMapping
exposedPorts dockernat.PortSet
portmappings map[dockernat.Port][]dockernat.PortBinding
}{
"no port mapping": {
pm: nil,
exposedPorts: map[dockernat.Port]struct{}{},
portmappings: map[dockernat.Port][]dockernat.PortBinding{},
},
"tcp port mapping": {
pm: []*runtimeapi.PortMapping{
{
Protocol: runtimeapi.Protocol_TCP,
ContainerPort: 80,
HostPort: 80,
},
},
exposedPorts: map[dockernat.Port]struct{}{
"80/tcp": {},
},
portmappings: map[dockernat.Port][]dockernat.PortBinding{
"80/tcp": {
{
HostPort: "80",
},
},
},
},
"udp port mapping": {
pm: []*runtimeapi.PortMapping{
{
Protocol: runtimeapi.Protocol_UDP,
ContainerPort: 80,
HostPort: 80,
},
},
exposedPorts: map[dockernat.Port]struct{}{
"80/udp": {},
},
portmappings: map[dockernat.Port][]dockernat.PortBinding{
"80/udp": {
{
HostPort: "80",
},
},
},
},
"multiple port mappings": {
pm: []*runtimeapi.PortMapping{
{
Protocol: runtimeapi.Protocol_TCP,
ContainerPort: 80,
HostPort: 80,
},
{
Protocol: runtimeapi.Protocol_TCP,
ContainerPort: 80,
HostPort: 81,
},
},
exposedPorts: map[dockernat.Port]struct{}{
"80/tcp": {},
},
portmappings: map[dockernat.Port][]dockernat.PortBinding{
"80/tcp": {
{
HostPort: "80",
},
{
HostPort: "81",
},
},
},
},
} {
t.Logf("TestCase: %s", desc)
actualExposedPorts, actualPortMappings := makePortsAndBindings(test.pm)
assert.Equal(t, test.exposedPorts, actualExposedPorts)
assert.Equal(t, test.portmappings, actualPortMappings)
}
}
func TestGenerateMountBindings(t *testing.T) {
mounts := []*runtimeapi.Mount{
// everything default
{
HostPath: "/mnt/1",
ContainerPath: "/var/lib/mysql/1",
},
// readOnly
{
HostPath: "/mnt/2",
ContainerPath: "/var/lib/mysql/2",
Readonly: true,
},
// SELinux
{
HostPath: "/mnt/3",
ContainerPath: "/var/lib/mysql/3",
SelinuxRelabel: true,
},
// Propagation private
{
HostPath: "/mnt/4",
ContainerPath: "/var/lib/mysql/4",
Propagation: runtimeapi.MountPropagation_PROPAGATION_PRIVATE,
},
// Propagation rslave
{
HostPath: "/mnt/5",
ContainerPath: "/var/lib/mysql/5",
Propagation: runtimeapi.MountPropagation_PROPAGATION_HOST_TO_CONTAINER,
},
// Propagation rshared
{
HostPath: "/mnt/6",
ContainerPath: "/var/lib/mysql/6",
Propagation: runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL,
},
// Propagation unknown (falls back to private)
{
HostPath: "/mnt/7",
ContainerPath: "/var/lib/mysql/7",
Propagation: runtimeapi.MountPropagation(42),
},
// Everything
{
HostPath: "/mnt/8",
ContainerPath: "/var/lib/mysql/8",
Readonly: true,
SelinuxRelabel: true,
Propagation: runtimeapi.MountPropagation_PROPAGATION_BIDIRECTIONAL,
},
}
expectedResult := []string{
"/mnt/1:/var/lib/mysql/1",
"/mnt/2:/var/lib/mysql/2:ro",
"/mnt/3:/var/lib/mysql/3:Z",
"/mnt/4:/var/lib/mysql/4",
"/mnt/5:/var/lib/mysql/5:rslave",
"/mnt/6:/var/lib/mysql/6:rshared",
"/mnt/7:/var/lib/mysql/7",
"/mnt/8:/var/lib/mysql/8:ro,Z,rshared",
}
result := generateMountBindings(mounts)
assert.Equal(t, expectedResult, result)
}

View File

@ -1,59 +0,0 @@
// +build !linux,!windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"github.com/blang/semver"
dockertypes "github.com/docker/docker/api/types"
"github.com/golang/glog"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func DefaultMemorySwap() int64 {
return -1
}
func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
glog.Warningf("getSecurityOpts is unsupported in this build")
return nil, nil
}
func (ds *dockerService) updateCreateConfig(
createConfig *dockertypes.ContainerCreateConfig,
config *runtimeapi.ContainerConfig,
sandboxConfig *runtimeapi.PodSandboxConfig,
podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error {
glog.Warningf("updateCreateConfig is unsupported in this build")
return nil
}
func (ds *dockerService) determinePodIPBySandboxID(uid string) string {
glog.Warningf("determinePodIPBySandboxID is unsupported in this build")
return ""
}
func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
return "", fmt.Errorf("unsupported platform")
}
// applyExperimentalCreateConfig applies experimental configuration derived from sandbox annotations.
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
}

View File

@ -1,174 +0,0 @@
// +build windows
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"os"
"github.com/blang/semver"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerfilters "github.com/docker/docker/api/types/filters"
"github.com/golang/glog"
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func DefaultMemorySwap() int64 {
return 0
}
func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune) ([]string, error) {
if seccompProfile != "" {
glog.Warningf("seccomp annotations are not supported on windows")
}
return nil, nil
}
// applyExperimentalCreateConfig applies experimental configuration derived from sandbox annotations.
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
if kubeletapis.ShouldIsolatedByHyperV(annotations) {
createConfig.HostConfig.Isolation = kubeletapis.HypervIsolationValue
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
}
}
}
func (ds *dockerService) updateCreateConfig(
createConfig *dockertypes.ContainerCreateConfig,
config *runtimeapi.ContainerConfig,
sandboxConfig *runtimeapi.PodSandboxConfig,
podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error {
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode != "" {
createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode)
} else if !kubeletapis.ShouldIsolatedByHyperV(sandboxConfig.Annotations) {
// TODO: Refactor this call in the future to call methods in security_context.go directly.
modifyHostOptionsForContainer(nil, podSandboxID, createConfig.HostConfig)
}
// Apply Windows-specific options if applicable.
if wc := config.GetWindows(); wc != nil {
rOpts := wc.GetResources()
if rOpts != nil {
createConfig.HostConfig.Resources = dockercontainer.Resources{
Memory: rOpts.MemoryLimitInBytes,
CPUShares: rOpts.CpuShares,
CPUCount: rOpts.CpuCount,
CPUPercent: rOpts.CpuMaximum,
}
}
// Apply security context.
applyWindowsContainerSecurityContext(wc.GetSecurityContext(), createConfig.Config, createConfig.HostConfig)
}
applyExperimentalCreateConfig(createConfig, sandboxConfig.Annotations)
return nil
}
// applyWindowsContainerSecurityContext updates docker container options according to security context.
func applyWindowsContainerSecurityContext(wsc *runtimeapi.WindowsContainerSecurityContext, config *dockercontainer.Config, hc *dockercontainer.HostConfig) {
if wsc == nil {
return
}
if wsc.GetRunAsUsername() != "" {
config.User = wsc.GetRunAsUsername()
}
}
func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) string {
opts := dockertypes.ContainerListOptions{
All: true,
Filters: dockerfilters.NewArgs(),
}
f := newDockerFilter(&opts.Filters)
f.AddLabel(containerTypeLabelKey, containerTypeLabelContainer)
f.AddLabel(sandboxIDLabelKey, sandboxID)
containers, err := ds.client.ListContainers(opts)
if err != nil {
return ""
}
for _, c := range containers {
r, err := ds.client.InspectContainer(c.ID)
if err != nil {
continue
}
// Versions and feature support
// ============================
// Windows Server, version 1709: supports both the sandbox and non-sandbox cases
// Windows Server 2016: supports only the non-sandbox case
// Older than Windows Server 2016: not supported
// Sandbox support on Windows mandates a CNI plugin.
// The presence of the CONTAINER_NETWORK flag is treated as the non-sandbox case here.
// TODO: Add a kernel version check for more validation.
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
// On Windows, every container created in a sandbox needs to invoke the CNI plugin again to add the network,
// with the shared container name as the NetNS info.
// This is passed down to the platform to replicate the necessary information to the new container.
//
// This place is chosen as a hack for now, since ds.getIP would end up calling CNI's addToNetwork;
// that is why addToNetwork is required to be idempotent.
// Instead of relying on this call, an explicit call to addToNetwork should be
// made immediately after container creation, on Windows only. TBD Issue # to handle this.
if r.HostConfig.Isolation == kubeletapis.HypervIsolationValue {
// Hyper-V supports only one container per pod so far, and that container has a different
// IP address from the sandbox. Return the first non-sandbox container IP as the pod IP.
// TODO(feiskyer): remove this workaround after Hyper-V supports multiple containers per Pod.
if containerIP := ds.getIP(c.ID, r); containerIP != "" {
return containerIP
}
} else {
// Do not return any IP, so that we continue and get the IP of the sandbox.
// Windows 1709 and 1803 don't have namespace support, so getIP() is called
// to replicate the DNS registry key to the workload container (IP/gateway/MAC are
// set separately from DNS).
// TODO(feiskyer): remove this workaround after Namespace is supported in Windows RS5.
ds.getIP(sandboxID, r)
}
} else {
// ds.getIP will call the CNI plugin to fetch the IP
if containerIP := ds.getIP(c.ID, r); containerIP != "" {
return containerIP
}
}
}
return ""
}
func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
// Currently on Windows there is no identifier exposed for the network namespace.
// As in Docker, the referenced container ID is used internally by the platform to figure out the network namespace ID,
// so we return the Docker NetworkMode (which holds "container:<ref containerid>") as the network namespace here.
return string(c.HostConfig.NetworkMode), nil
}

View File

@ -1,59 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_test(
name = "go_default_test",
srcs = [
"helpers_test.go",
"kube_docker_client_test.go",
],
embed = [":go_default_library"],
deps = [
"//vendor/github.com/docker/docker/api/types:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
go_library(
name = "go_default_library",
srcs = [
"client.go",
"fake_client.go",
"helpers.go",
"instrumented_client.go",
"kube_docker_client.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker",
deps = [
"//pkg/kubelet/dockershim/metrics:go_default_library",
"//vendor/github.com/docker/distribution/reference:go_default_library",
"//vendor/github.com/docker/docker/api/types:go_default_library",
"//vendor/github.com/docker/docker/api/types/container:go_default_library",
"//vendor/github.com/docker/docker/api/types/image:go_default_library",
"//vendor/github.com/docker/docker/client:go_default_library",
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library",
"//vendor/github.com/docker/docker/pkg/stdcopy:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/opencontainers/go-digest:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,106 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"time"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerimagetypes "github.com/docker/docker/api/types/image"
dockerapi "github.com/docker/docker/client"
"github.com/golang/glog"
)
const (
// https://docs.docker.com/engine/reference/api/docker_remote_api/
// The Docker version must be at least 1.11.x.
MinimumDockerAPIVersion = "1.23.0"
// Status of a container returned by ListContainers.
StatusRunningPrefix = "Up"
StatusCreatedPrefix = "Created"
StatusExitedPrefix = "Exited"
// Fake docker endpoint
FakeDockerEndpoint = "fake://"
)
// Interface is an abstract interface for testability. It abstracts the interface of the Docker client.
type Interface interface {
ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error)
InspectContainer(id string) (*dockertypes.ContainerJSON, error)
InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error)
CreateContainer(dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error)
StartContainer(id string) error
StopContainer(id string, timeout time.Duration) error
UpdateContainerResources(id string, updateConfig dockercontainer.UpdateConfig) error
RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error
InspectImageByRef(imageRef string) (*dockertypes.ImageInspect, error)
InspectImageByID(imageID string) (*dockertypes.ImageInspect, error)
ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error)
PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error
RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error)
ImageHistory(id string) ([]dockerimagetypes.HistoryResponseItem, error)
Logs(string, dockertypes.ContainerLogsOptions, StreamOptions) error
Version() (*dockertypes.Version, error)
Info() (*dockertypes.Info, error)
CreateExec(string, dockertypes.ExecConfig) (*dockertypes.IDResponse, error)
StartExec(string, dockertypes.ExecStartCheck, StreamOptions) error
InspectExec(id string) (*dockertypes.ContainerExecInspect, error)
AttachToContainer(string, dockertypes.ContainerAttachOptions, StreamOptions) error
ResizeContainerTTY(id string, height, width uint) error
ResizeExecTTY(id string, height, width uint) error
GetContainerStats(id string) (*dockertypes.StatsJSON, error)
}
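// Illustrative sketch, not part of the original file: code written against
// this Interface can be exercised with the FakeDockerClient from
// fake_client.go instead of a real daemon, e.g.
//
//	func countContainers(c Interface) (int, error) {
//		containers, err := c.ListContainers(dockertypes.ContainerListOptions{})
//		if err != nil {
//			return 0, err
//		}
//		return len(containers), nil
//	}
//
//	// In a test: countContainers(NewFakeDockerClient()) runs the same code
//	// path against the in-memory fake.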
// getDockerClient returns a *dockerapi.Client, using either the endpoint passed in or
// the DOCKER_HOST, DOCKER_TLS_VERIFY, and DOCKER_CERT_PATH environment variables per their spec.
func getDockerClient(dockerEndpoint string) (*dockerapi.Client, error) {
if len(dockerEndpoint) > 0 {
glog.Infof("Connecting to docker on %s", dockerEndpoint)
return dockerapi.NewClient(dockerEndpoint, "", nil, nil)
}
return dockerapi.NewEnvClient()
}
// ConnectToDockerOrDie creates a Docker client connected to the Docker daemon.
// If the endpoint passed in is "fake://", a fake Docker client
// is returned. The program exits if an error occurs. requestTimeout
// is the timeout for Docker requests; if it is exceeded, the request
// is cancelled and an error is returned. If requestTimeout is 0, a default
// value is applied.
func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgressDeadline time.Duration,
withTraceDisabled bool, enableSleep bool) Interface {
if dockerEndpoint == FakeDockerEndpoint {
fakeClient := NewFakeDockerClient()
if withTraceDisabled {
fakeClient = fakeClient.WithTraceDisabled()
}
if enableSleep {
fakeClient.EnableSleep = true
}
return fakeClient
}
client, err := getDockerClient(dockerEndpoint)
if err != nil {
glog.Fatalf("Couldn't connect to docker: %v", err)
}
glog.Infof("Start docker client with request timeout=%v", requestTimeout)
return newKubeDockerClient(client, requestTimeout, imagePullProgressDeadline)
}
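// Illustrative usage, not part of the original file; the timeout values here
// are assumptions for the sketch, not kubelet defaults:
//
//	client := ConnectToDockerOrDie(FakeDockerEndpoint, 2*time.Minute, 10*time.Second, false, false)
//	if _, err := client.Version(); err != nil {
//		// the fake endpoint only fails here if an error was injected
//	}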

View File

@ -1,921 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"encoding/json"
"fmt"
"hash/fnv"
"math/rand"
"os"
"reflect"
"sort"
"strconv"
"strings"
"sync"
"time"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerimagetypes "github.com/docker/docker/api/types/image"
"k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/util/clock"
)
type calledDetail struct {
name string
arguments []interface{}
}
// NewCalledDetail creates a new call detail item.
func NewCalledDetail(name string, arguments []interface{}) calledDetail {
return calledDetail{name: name, arguments: arguments}
}
// FakeDockerClient is a simple fake docker client, so that kubelet can be run for testing without requiring a real docker setup.
type FakeDockerClient struct {
sync.Mutex
Clock clock.Clock
RunningContainerList []dockertypes.Container
ExitedContainerList []dockertypes.Container
ContainerMap map[string]*dockertypes.ContainerJSON
ImageInspects map[string]*dockertypes.ImageInspect
Images []dockertypes.ImageSummary
ImageIDsNeedingAuth map[string]dockertypes.AuthConfig
Errors map[string]error
called []calledDetail
pulled []string
EnableTrace bool
RandGenerator *rand.Rand
// Created, Started, Stopped and Removed all contain container docker ID
Created []string
Started []string
Stopped []string
Removed []string
// Images pulled by ref (name or ID).
ImagesPulled []string
VersionInfo dockertypes.Version
Information dockertypes.Info
ExecInspect *dockertypes.ContainerExecInspect
execCmd []string
EnableSleep bool
ImageHistoryMap map[string][]dockerimagetypes.HistoryResponseItem
}
const (
// Note that if someday we also have a minimum Docker version requirement, this should be updated too.
fakeDockerVersion = "1.11.2"
fakeImageSize = 1024
// Docker prepends '/' to the container name.
dockerNamePrefix = "/"
)
func NewFakeDockerClient() *FakeDockerClient {
return &FakeDockerClient{
// Docker's API version does not include the patch number.
VersionInfo: dockertypes.Version{Version: fakeDockerVersion, APIVersion: strings.TrimSuffix(MinimumDockerAPIVersion, ".0")},
Errors: make(map[string]error),
ContainerMap: make(map[string]*dockertypes.ContainerJSON),
Clock: clock.RealClock{},
// default this to true, so that we trace calls, image pulls and container lifecycle
EnableTrace: true,
ImageInspects: make(map[string]*dockertypes.ImageInspect),
ImageIDsNeedingAuth: make(map[string]dockertypes.AuthConfig),
RandGenerator: rand.New(rand.NewSource(time.Now().UnixNano())),
}
}
func (f *FakeDockerClient) WithClock(c clock.Clock) *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.Clock = c
return f
}
func (f *FakeDockerClient) WithVersion(version, apiVersion string) *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.VersionInfo = dockertypes.Version{Version: version, APIVersion: apiVersion}
return f
}
func (f *FakeDockerClient) WithTraceDisabled() *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.EnableTrace = false
return f
}
func (f *FakeDockerClient) WithRandSource(source rand.Source) *FakeDockerClient {
f.Lock()
defer f.Unlock()
f.RandGenerator = rand.New(source)
return f
}
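// Illustrative usage, not part of the original file: the With* helpers above
// return the receiver, so they chain builder-style:
//
//	c := NewFakeDockerClient().WithVersion("1.11.2", "1.23").WithTraceDisabled()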
func (f *FakeDockerClient) appendCalled(callDetail calledDetail) {
if f.EnableTrace {
f.called = append(f.called, callDetail)
}
}
func (f *FakeDockerClient) appendPulled(pull string) {
if f.EnableTrace {
f.pulled = append(f.pulled, pull)
}
}
func (f *FakeDockerClient) appendContainerTrace(traceCategory string, containerName string) {
if !f.EnableTrace {
return
}
switch traceCategory {
case "Created":
f.Created = append(f.Created, containerName)
case "Started":
f.Started = append(f.Started, containerName)
case "Stopped":
f.Stopped = append(f.Stopped, containerName)
case "Removed":
f.Removed = append(f.Removed, containerName)
}
}
func (f *FakeDockerClient) InjectError(fn string, err error) {
f.Lock()
defer f.Unlock()
f.Errors[fn] = err
}
func (f *FakeDockerClient) InjectErrors(errs map[string]error) {
f.Lock()
defer f.Unlock()
for fn, err := range errs {
f.Errors[fn] = err
}
}
func (f *FakeDockerClient) ClearErrors() {
f.Lock()
defer f.Unlock()
f.Errors = map[string]error{}
}
func (f *FakeDockerClient) ClearCalls() {
f.Lock()
defer f.Unlock()
f.called = []calledDetail{}
f.pulled = []string{}
f.Created = []string{}
f.Started = []string{}
f.Stopped = []string{}
f.Removed = []string{}
}
func (f *FakeDockerClient) getCalledNames() []string {
names := []string{}
for _, detail := range f.called {
names = append(names, detail.name)
}
return names
}
// Because the new data type returned by engine-api is too complex to initialize manually,
// we use a fake container type that is easier to initialize.
type FakeContainer struct {
ID string
Name string
Running bool
ExitCode int
Pid int
CreatedAt time.Time
StartedAt time.Time
FinishedAt time.Time
Config *dockercontainer.Config
HostConfig *dockercontainer.HostConfig
}
// convertFakeContainer converts the fake container to a real *dockertypes.ContainerJSON.
func convertFakeContainer(f *FakeContainer) *dockertypes.ContainerJSON {
if f.Config == nil {
f.Config = &dockercontainer.Config{}
}
if f.HostConfig == nil {
f.HostConfig = &dockercontainer.HostConfig{}
}
return &dockertypes.ContainerJSON{
ContainerJSONBase: &dockertypes.ContainerJSONBase{
ID: f.ID,
Name: f.Name,
Image: f.Config.Image,
State: &dockertypes.ContainerState{
Running: f.Running,
ExitCode: f.ExitCode,
Pid: f.Pid,
StartedAt: dockerTimestampToString(f.StartedAt),
FinishedAt: dockerTimestampToString(f.FinishedAt),
},
Created: dockerTimestampToString(f.CreatedAt),
HostConfig: f.HostConfig,
},
Config: f.Config,
NetworkSettings: &dockertypes.NetworkSettings{},
}
}
func (f *FakeDockerClient) SetFakeContainers(containers []*FakeContainer) {
f.Lock()
defer f.Unlock()
// Reset the lists and the map.
f.ContainerMap = map[string]*dockertypes.ContainerJSON{}
f.RunningContainerList = []dockertypes.Container{}
f.ExitedContainerList = []dockertypes.Container{}
for i := range containers {
c := containers[i]
f.ContainerMap[c.ID] = convertFakeContainer(c)
container := dockertypes.Container{
Names: []string{c.Name},
ID: c.ID,
}
if c.Config != nil {
container.Labels = c.Config.Labels
}
if c.Running {
f.RunningContainerList = append(f.RunningContainerList, container)
} else {
f.ExitedContainerList = append(f.ExitedContainerList, container)
}
}
}
func (f *FakeDockerClient) SetFakeRunningContainers(containers []*FakeContainer) {
for _, c := range containers {
c.Running = true
}
f.SetFakeContainers(containers)
}
func (f *FakeDockerClient) AssertCalls(calls []string) (err error) {
f.Lock()
defer f.Unlock()
if !reflect.DeepEqual(calls, f.getCalledNames()) {
err = fmt.Errorf("expected %#v, got %#v", calls, f.getCalledNames())
}
return
}
func (f *FakeDockerClient) AssertCallDetails(calls ...calledDetail) (err error) {
f.Lock()
defer f.Unlock()
if !reflect.DeepEqual(calls, f.called) {
err = fmt.Errorf("expected %#v, got %#v", calls, f.called)
}
return
}
// idsToNames converts container ids into names. The caller must hold the lock.
func (f *FakeDockerClient) idsToNames(ids []string) ([]string, error) {
names := []string{}
for _, id := range ids {
names = append(names, strings.TrimPrefix(f.ContainerMap[id].Name, dockerNamePrefix))
}
return names, nil
}
func (f *FakeDockerClient) AssertCreatedByNameWithOrder(created []string) error {
f.Lock()
defer f.Unlock()
actualCreated, err := f.idsToNames(f.Created)
if err != nil {
return err
}
if !reflect.DeepEqual(created, actualCreated) {
return fmt.Errorf("expected %#v, got %#v", created, actualCreated)
}
return nil
}
func (f *FakeDockerClient) AssertCreatedByName(created []string) error {
f.Lock()
defer f.Unlock()
actualCreated, err := f.idsToNames(f.Created)
if err != nil {
return err
}
return sortedStringSlicesEqual(created, actualCreated)
}
func (f *FakeDockerClient) AssertStoppedByName(stopped []string) error {
f.Lock()
defer f.Unlock()
actualStopped, err := f.idsToNames(f.Stopped)
if err != nil {
return err
}
return sortedStringSlicesEqual(stopped, actualStopped)
}
func (f *FakeDockerClient) AssertStopped(stopped []string) error {
f.Lock()
defer f.Unlock()
// Copy stopped to avoid modifying it.
actualStopped := append([]string{}, f.Stopped...)
return sortedStringSlicesEqual(stopped, actualStopped)
}
func (f *FakeDockerClient) AssertImagesPulled(pulled []string) error {
f.Lock()
defer f.Unlock()
// Copy pulled to avoid modifying it.
actualPulled := append([]string{}, f.ImagesPulled...)
return sortedStringSlicesEqual(pulled, actualPulled)
}
func (f *FakeDockerClient) AssertImagesPulledMsgs(expected []string) error {
f.Lock()
defer f.Unlock()
// Copy pulled to avoid modifying it.
actual := append([]string{}, f.pulled...)
return sortedStringSlicesEqual(expected, actual)
}
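// sortedStringSlicesEqual sorts both slices in place before comparing, which
// is why the Assert* helpers above defensively copy their inputs first.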
func sortedStringSlicesEqual(expected, actual []string) error {
sort.StringSlice(expected).Sort()
sort.StringSlice(actual).Sort()
if !reflect.DeepEqual(expected, actual) {
return fmt.Errorf("expected %#v, got %#v", expected, actual)
}
return nil
}
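// popError returns the error injected for op, if any, and removes it, so each
// injected error is returned at most once.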
func (f *FakeDockerClient) popError(op string) error {
if f.Errors == nil {
return nil
}
err, ok := f.Errors[op]
if ok {
delete(f.Errors, op)
return err
}
return nil
}
// ListContainers is a test-spy implementation of Interface.ListContainers.
// It adds an entry "list" to the internal method call record.
func (f *FakeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "list"})
err := f.popError("list")
containerList := append([]dockertypes.Container{}, f.RunningContainerList...)
if options.All {
// Although the list is not sorted, containers with the same name are in order;
// that is enough for us for now.
// TODO(random-liu): Is a fully sorted array needed?
containerList = append(containerList, f.ExitedContainerList...)
}
// Filter containers by ID; only one ID is supported.
idFilters := options.Filters.Get("id")
if len(idFilters) != 0 {
var filtered []dockertypes.Container
for _, container := range containerList {
for _, idFilter := range idFilters {
if container.ID == idFilter {
filtered = append(filtered, container)
break
}
}
}
containerList = filtered
}
// Filter containers by status; only one status is supported.
statusFilters := options.Filters.Get("status")
if len(statusFilters) == 1 {
var filtered []dockertypes.Container
for _, container := range containerList {
for _, statusFilter := range statusFilters {
if toDockerContainerStatus(container.Status) == statusFilter {
filtered = append(filtered, container)
break
}
}
}
containerList = filtered
}
// Filter containers by label.
labelFilters := options.Filters.Get("label")
if len(labelFilters) != 0 {
var filtered []dockertypes.Container
for _, container := range containerList {
match := true
for _, labelFilter := range labelFilters {
kv := strings.Split(labelFilter, "=")
if len(kv) != 2 {
return nil, fmt.Errorf("invalid label filter %q", labelFilter)
}
if container.Labels[kv[0]] != kv[1] {
match = false
break
}
}
if match {
filtered = append(filtered, container)
}
}
containerList = filtered
}
return containerList, err
}
func toDockerContainerStatus(state string) string {
switch {
case strings.HasPrefix(state, StatusCreatedPrefix):
return "created"
case strings.HasPrefix(state, StatusRunningPrefix):
return "running"
case strings.HasPrefix(state, StatusExitedPrefix):
return "exited"
default:
return "unknown"
}
}
// InspectContainer is a test-spy implementation of Interface.InspectContainer.
// It adds an entry "inspect_container" to the internal method call record.
func (f *FakeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "inspect_container"})
err := f.popError("inspect_container")
if container, ok := f.ContainerMap[id]; ok {
return container, err
}
if err != nil {
// Use the custom error if it exists.
return nil, err
}
return nil, fmt.Errorf("container %q not found", id)
}
// InspectContainerWithSize is a test-spy implementation of Interface.InspectContainerWithSize.
// It adds an entry "inspect_container_withsize" to the internal method call record.
func (f *FakeDockerClient) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "inspect_container_withsize"})
err := f.popError("inspect_container_withsize")
if container, ok := f.ContainerMap[id]; ok {
return container, err
}
if err != nil {
// Use the custom error if it exists.
return nil, err
}
return nil, fmt.Errorf("container %q not found", id)
}
// InspectImageByRef is a test-spy implementation of Interface.InspectImageByRef.
// It adds an entry "inspect_image" to the internal method call record.
func (f *FakeDockerClient) InspectImageByRef(name string) (*dockertypes.ImageInspect, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "inspect_image"})
if err := f.popError("inspect_image"); err != nil {
return nil, err
}
if result, ok := f.ImageInspects[name]; ok {
return result, nil
}
return nil, ImageNotFoundError{name}
}
// InspectImageByID is a test-spy implementation of Interface.InspectImageByID.
// It adds an entry "inspect_image" to the internal method call record.
func (f *FakeDockerClient) InspectImageByID(name string) (*dockertypes.ImageInspect, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "inspect_image"})
if err := f.popError("inspect_image"); err != nil {
return nil, err
}
if result, ok := f.ImageInspects[name]; ok {
return result, nil
}
return nil, ImageNotFoundError{name}
}
// normalSleep sleeps for a random amount of time drawn from a normal distribution with the
// given mean and standard deviation (in milliseconds); it never sleeps less than cutOffMillis.
func (f *FakeDockerClient) normalSleep(mean, stdDev, cutOffMillis int) {
if !f.EnableSleep {
return
}
cutoff := (time.Duration)(cutOffMillis) * time.Millisecond
delay := (time.Duration)(rand.NormFloat64()*float64(stdDev)+float64(mean)) * time.Millisecond
if delay < cutoff {
delay = cutoff
}
time.Sleep(delay)
}
// GetFakeContainerID generates a fake container id from container name with a hash.
func GetFakeContainerID(name string) string {
hash := fnv.New64a()
hash.Write([]byte(name))
return strconv.FormatUint(hash.Sum64(), 16)
}
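// Illustrative note, not part of the original file: the ID is a deterministic
// 64-bit FNV-1a hash of the name rendered as hex, so the same name always
// maps to the same fake ID, e.g.
//
//	GetFakeContainerID("/k8s_POD_foo") == GetFakeContainerID("/k8s_POD_foo") // always true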
// CreateContainer is a test-spy implementation of Interface.CreateContainer.
// It adds an entry "create" to the internal method call record.
func (f *FakeDockerClient) CreateContainer(c dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "create"})
if err := f.popError("create"); err != nil {
return nil, err
}
// This is not a very good fake. We'll just add this container's name to the list.
name := dockerNamePrefix + c.Name
id := GetFakeContainerID(name)
f.appendContainerTrace("Created", id)
timestamp := f.Clock.Now()
// The newest container should be in front, because we assume so in GetPodStatus()
f.RunningContainerList = append([]dockertypes.Container{
{ID: id, Names: []string{name}, Image: c.Config.Image, Created: timestamp.Unix(), State: StatusCreatedPrefix, Labels: c.Config.Labels},
}, f.RunningContainerList...)
f.ContainerMap[id] = convertFakeContainer(&FakeContainer{
ID: id, Name: name, Config: c.Config, HostConfig: c.HostConfig, CreatedAt: timestamp})
f.normalSleep(100, 25, 25)
return &dockercontainer.ContainerCreateCreatedBody{ID: id}, nil
}
// StartContainer is a test-spy implementation of Interface.StartContainer.
// It adds an entry "start" to the internal method call record.
func (f *FakeDockerClient) StartContainer(id string) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "start"})
if err := f.popError("start"); err != nil {
return err
}
f.appendContainerTrace("Started", id)
container, ok := f.ContainerMap[id]
// Guard against a nil container here; the !ok case is handled further below.
if ok && container.HostConfig.NetworkMode.IsContainer() {
hostContainerID := container.HostConfig.NetworkMode.ConnectedContainer()
found := false
for _, container := range f.RunningContainerList {
if container.ID == hostContainerID {
found = true
}
}
if !found {
return fmt.Errorf("failed to start container \"%s\": Error response from daemon: cannot join network of a non running container: %s", id, hostContainerID)
}
}
timestamp := f.Clock.Now()
if !ok {
container = convertFakeContainer(&FakeContainer{ID: id, Name: id, CreatedAt: timestamp})
}
container.State.Running = true
container.State.Pid = os.Getpid()
container.State.StartedAt = dockerTimestampToString(timestamp)
r := f.RandGenerator.Uint32()
container.NetworkSettings.IPAddress = fmt.Sprintf("10.%d.%d.%d", byte(r>>16), byte(r>>8), byte(r))
f.ContainerMap[id] = container
f.updateContainerStatus(id, StatusRunningPrefix)
f.normalSleep(200, 50, 50)
return nil
}
// StopContainer is a test-spy implementation of Interface.StopContainer.
// It adds an entry "stop" to the internal method call record.
func (f *FakeDockerClient) StopContainer(id string, timeout time.Duration) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "stop"})
if err := f.popError("stop"); err != nil {
return err
}
f.appendContainerTrace("Stopped", id)
// The container status should be updated before the container is moved to ExitedContainerList.
f.updateContainerStatus(id, StatusExitedPrefix)
var newList []dockertypes.Container
for _, container := range f.RunningContainerList {
if container.ID == id {
// The newest exited container should be in front, because we assume so in GetPodStatus()
f.ExitedContainerList = append([]dockertypes.Container{container}, f.ExitedContainerList...)
continue
}
newList = append(newList, container)
}
f.RunningContainerList = newList
container, ok := f.ContainerMap[id]
if !ok {
container = convertFakeContainer(&FakeContainer{
ID: id,
Name: id,
Running: false,
StartedAt: time.Now().Add(-time.Second),
FinishedAt: time.Now(),
})
} else {
container.State.FinishedAt = dockerTimestampToString(f.Clock.Now())
container.State.Running = false
}
f.ContainerMap[id] = container
f.normalSleep(200, 50, 50)
return nil
}
func (f *FakeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "remove"})
err := f.popError("remove")
if err != nil {
return err
}
for i := range f.ExitedContainerList {
if f.ExitedContainerList[i].ID == id {
delete(f.ContainerMap, id)
f.ExitedContainerList = append(f.ExitedContainerList[:i], f.ExitedContainerList[i+1:]...)
f.appendContainerTrace("Removed", id)
return nil
}
}
for i := range f.RunningContainerList {
// allow removal of containers in the running list that are not actually running
if f.RunningContainerList[i].ID == id && !f.ContainerMap[id].State.Running {
delete(f.ContainerMap, id)
f.RunningContainerList = append(f.RunningContainerList[:i], f.RunningContainerList[i+1:]...)
f.appendContainerTrace("Removed", id)
return nil
}
}
// To be a good fake, report an error if the container is not stopped.
return fmt.Errorf("container not stopped")
}
func (f *FakeDockerClient) UpdateContainerResources(id string, updateConfig dockercontainer.UpdateConfig) error {
return nil
}
// Logs is a test-spy implementation of Interface.Logs.
// It adds an entry "logs" to the internal method call record.
func (f *FakeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "logs"})
return f.popError("logs")
}
func (f *FakeDockerClient) isAuthorizedForImage(image string, auth dockertypes.AuthConfig) bool {
if reqd, exists := f.ImageIDsNeedingAuth[image]; !exists {
return true // no auth needed
} else {
return auth.Username == reqd.Username && auth.Password == reqd.Password
}
}
// PullImage is a test-spy implementation of Interface.PullImage.
// It adds an entry "pull" to the internal method call record.
func (f *FakeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "pull"})
err := f.popError("pull")
if err == nil {
if !f.isAuthorizedForImage(image, auth) {
return ImageNotFoundError{ID: image}
}
authJson, _ := json.Marshal(auth)
inspect := createImageInspectFromRef(image)
f.ImageInspects[image] = inspect
f.appendPulled(fmt.Sprintf("%s using %s", image, string(authJson)))
f.Images = append(f.Images, *createImageFromImageInspect(*inspect))
f.ImagesPulled = append(f.ImagesPulled, image)
}
return err
}
func (f *FakeDockerClient) Version() (*dockertypes.Version, error) {
f.Lock()
defer f.Unlock()
v := f.VersionInfo
return &v, f.popError("version")
}
func (f *FakeDockerClient) Info() (*dockertypes.Info, error) {
return &f.Information, nil
}
func (f *FakeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.IDResponse, error) {
f.Lock()
defer f.Unlock()
f.execCmd = opts.Cmd
f.appendCalled(calledDetail{name: "create_exec"})
return &dockertypes.IDResponse{ID: "12345678"}, nil
}
func (f *FakeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "start_exec"})
return nil
}
func (f *FakeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "attach"})
return nil
}
func (f *FakeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
return f.ExecInspect, f.popError("inspect_exec")
}
func (f *FakeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "list_images"})
err := f.popError("list_images")
return f.Images, err
}
func (f *FakeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "remove_image", arguments: []interface{}{image, opts}})
err := f.popError("remove_image")
if err == nil {
for i := range f.Images {
if f.Images[i].ID == image {
f.Images = append(f.Images[:i], f.Images[i+1:]...)
break
}
}
}
return []dockertypes.ImageDeleteResponseItem{{Deleted: image}}, err
}
func (f *FakeDockerClient) InjectImages(images []dockertypes.ImageSummary) {
f.Lock()
defer f.Unlock()
f.Images = append(f.Images, images...)
for _, i := range images {
f.ImageInspects[i.ID] = createImageInspectFromImage(i)
}
}
func (f *FakeDockerClient) MakeImagesPrivate(images []dockertypes.ImageSummary, auth dockertypes.AuthConfig) {
f.Lock()
defer f.Unlock()
for _, i := range images {
f.ImageIDsNeedingAuth[i.ID] = auth
}
}
func (f *FakeDockerClient) ResetImages() {
f.Lock()
defer f.Unlock()
f.Images = []dockertypes.ImageSummary{}
f.ImageInspects = make(map[string]*dockertypes.ImageInspect)
f.ImageIDsNeedingAuth = make(map[string]dockertypes.AuthConfig)
}
func (f *FakeDockerClient) InjectImageInspects(inspects []dockertypes.ImageInspect) {
f.Lock()
defer f.Unlock()
for _, i := range inspects {
i := i // take a fresh copy; storing &i of the loop variable would make every map entry alias the last element
f.Images = append(f.Images, *createImageFromImageInspect(i))
f.ImageInspects[i.ID] = &i
}
}
func (f *FakeDockerClient) updateContainerStatus(id, status string) {
for i := range f.RunningContainerList {
if f.RunningContainerList[i].ID == id {
f.RunningContainerList[i].Status = status
}
}
}
func (f *FakeDockerClient) ResizeExecTTY(id string, height, width uint) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "resize_exec"})
return nil
}
func (f *FakeDockerClient) ResizeContainerTTY(id string, height, width uint) error {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "resize_container"})
return nil
}
func createImageInspectFromRef(ref string) *dockertypes.ImageInspect {
return &dockertypes.ImageInspect{
ID: ref,
RepoTags: []string{ref},
// Image size is required to be non-zero for CRI integration.
VirtualSize: fakeImageSize,
Size: fakeImageSize,
Config: &dockercontainer.Config{},
}
}
func createImageInspectFromImage(image dockertypes.ImageSummary) *dockertypes.ImageInspect {
return &dockertypes.ImageInspect{
ID: image.ID,
RepoTags: image.RepoTags,
// Image size is required to be non-zero for CRI integration.
VirtualSize: fakeImageSize,
Size: fakeImageSize,
Config: &dockercontainer.Config{},
}
}
func createImageFromImageInspect(inspect dockertypes.ImageInspect) *dockertypes.ImageSummary {
return &dockertypes.ImageSummary{
ID: inspect.ID,
RepoTags: inspect.RepoTags,
// Image size is required to be non-zero for CRI integration.
VirtualSize: fakeImageSize,
Size: fakeImageSize,
}
}
// dockerTimestampToString converts the timestamp to a string in the RFC3339Nano format used by Docker.
func dockerTimestampToString(t time.Time) string {
return t.Format(time.RFC3339Nano)
}
func (f *FakeDockerClient) ImageHistory(id string) ([]dockerimagetypes.HistoryResponseItem, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "image_history"})
history := f.ImageHistoryMap[id]
return history, nil
}
func (f *FakeDockerClient) InjectImageHistory(data map[string][]dockerimagetypes.HistoryResponseItem) {
f.Lock()
defer f.Unlock()
f.ImageHistoryMap = data
}
// FakeDockerPuller is meant to be a simple wrapper around FakeDockerClient.
// Please do not add more functionality to it.
type FakeDockerPuller struct {
client Interface
}
func (f *FakeDockerPuller) Pull(image string, _ []v1.Secret) error {
return f.client.PullImage(image, dockertypes.AuthConfig{}, dockertypes.ImagePullOptions{})
}
func (f *FakeDockerPuller) GetImageRef(image string) (string, error) {
_, err := f.client.InspectImageByRef(image)
if err != nil && IsImageNotFoundError(err) {
return "", nil
}
return image, err
}
func (f *FakeDockerClient) GetContainerStats(id string) (*dockertypes.StatsJSON, error) {
f.Lock()
defer f.Unlock()
f.appendCalled(calledDetail{name: "getContainerStats"})
return nil, fmt.Errorf("not implemented")
}

View File

@ -1,172 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"strings"
"time"
dockerref "github.com/docker/distribution/reference"
dockertypes "github.com/docker/docker/api/types"
"github.com/golang/glog"
godigest "github.com/opencontainers/go-digest"
)
// ParseDockerTimestamp parses the timestamp returned by Interface from a string to a time.Time.
func ParseDockerTimestamp(s string) (time.Time, error) {
// Timestamp returned by Docker is in time.RFC3339Nano format.
return time.Parse(time.RFC3339Nano, s)
}
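// Illustrative sketch, not part of the original file: timestamps round-trip
// with dockerTimestampToString from fake_client.go, since both sides use
// time.RFC3339Nano:
//
//	s := dockerTimestampToString(time.Now())
//	t, err := ParseDockerTimestamp(s) // err == nil; t recovers the original wall-clock instant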
// matchImageTagOrSHA checks if the given image specifier is a valid image ref,
// and that it matches the given image. It should fail on things like image IDs
// (config digests) and other digest-only references, but succeed on image names
// (`foo`), tag references (`foo:bar`), and manifest digest references
// (`foo@sha256:xyz`).
func matchImageTagOrSHA(inspected dockertypes.ImageInspect, image string) bool {
// The image string follows the grammar specified here
// https://github.com/docker/distribution/blob/master/reference/reference.go#L4
named, err := dockerref.ParseNormalizedNamed(image)
if err != nil {
glog.V(4).Infof("couldn't parse image reference %q: %v", image, err)
return false
}
_, isTagged := named.(dockerref.Tagged)
digest, isDigested := named.(dockerref.Digested)
if !isTagged && !isDigested {
// No Tag or SHA specified, so just return what we have
return true
}
if isTagged {
// Check the RepoTags for a match.
for _, tag := range inspected.RepoTags {
// An image name (without the tag/digest) can be [hostname '/'] component ['/' component]*.
// Because either the RepoTag or the name may or may not contain the
// hostname, we only check for a suffix match.
if strings.HasSuffix(image, tag) || strings.HasSuffix(tag, image) {
return true
} else {
// TODO: Remove this hack once Project Atomic-based Docker
// distros (CentOS/Fedora/RHEL images) fix these problems on
// their end.
// Say the tag is "docker.io/busybox:latest"
// and the image is "docker.io/library/busybox:latest"
t, err := dockerref.ParseNormalizedNamed(tag)
if err != nil {
continue
}
// the parsed/normalized tag will look like
// reference.taggedReference {
// namedRepository: reference.repository {
// domain: "docker.io",
// path: "library/busybox"
// },
// tag: "latest"
// }
// If it does not have a tag, bail out.
t2, ok := t.(dockerref.Tagged)
if !ok {
continue
}
// The normalized tag would look like "docker.io/library/busybox:latest";
// note that "library" gets added to the path.
normalizedTag := t2.String()
if normalizedTag == "" {
continue
}
if strings.HasSuffix(image, normalizedTag) || strings.HasSuffix(normalizedTag, image) {
return true
}
}
}
}
if isDigested {
for _, repoDigest := range inspected.RepoDigests {
named, err := dockerref.ParseNormalizedNamed(repoDigest)
if err != nil {
glog.V(4).Infof("couldn't parse image RepoDigest reference %q: %v", repoDigest, err)
continue
}
if d, isDigested := named.(dockerref.Digested); isDigested {
if digest.Digest().Algorithm().String() == d.Digest().Algorithm().String() &&
digest.Digest().Hex() == d.Digest().Hex() {
return true
}
}
}
// process the ID as a digest
id, err := godigest.Parse(inspected.ID)
if err != nil {
glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err)
return false
}
if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() {
return true
}
}
glog.V(4).Infof("Inspected image (%q) does not match %s", inspected.ID, image)
return false
}
// matchImageIDOnly checks that the given image specifier is a digest-only
// reference, and that it matches the given image.
func matchImageIDOnly(inspected dockertypes.ImageInspect, image string) bool {
// If the image ref is literally equal to the inspected image's ID,
// just return true here (this might be the case for Docker 1.9,
// where we won't have a digest for the ID)
if inspected.ID == image {
return true
}
// Otherwise, we should try actual parsing to be more correct
ref, err := dockerref.Parse(image)
if err != nil {
glog.V(4).Infof("couldn't parse image reference %q: %v", image, err)
return false
}
digest, isDigested := ref.(dockerref.Digested)
if !isDigested {
glog.V(4).Infof("the image reference %q was not a digest reference", image)
return false
}
id, err := godigest.Parse(inspected.ID)
if err != nil {
glog.V(4).Infof("couldn't parse image ID reference %q: %v", id, err)
return false
}
if digest.Digest().Algorithm().String() == id.Algorithm().String() && digest.Digest().Hex() == id.Hex() {
return true
}
glog.V(4).Infof("The reference %s does not directly refer to the given image's ID (%q)", image, inspected.ID)
return false
}
// isImageNotFoundError returns whether the error is caused by the image not being found in Docker.
// TODO: Use a native error tester once ImageNotFoundError is supported in the docker-engine client (e.g. ImageRemove()).
func isImageNotFoundError(err error) bool {
if err != nil {
return strings.Contains(err.Error(), "No such image:")
}
return false
}

View File

@ -1,270 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"fmt"
"testing"
dockertypes "github.com/docker/docker/api/types"
"github.com/stretchr/testify/assert"
)
func TestMatchImageTagOrSHA(t *testing.T) {
for i, testCase := range []struct {
Inspected dockertypes.ImageInspect
Image string
Output bool
}{
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"ubuntu:latest"}},
Image: "ubuntu",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"ubuntu:14.04"}},
Image: "ubuntu:latest",
Output: false,
},
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"colemickens/hyperkube-amd64:217.9beff63"}},
Image: "colemickens/hyperkube-amd64:217.9beff63",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"colemickens/hyperkube-amd64:217.9beff63"}},
Image: "docker.io/colemickens/hyperkube-amd64:217.9beff63",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"docker.io/kubernetes/pause:latest"}},
Image: "kubernetes/pause:latest",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:2208f7a29005",
Output: false,
},
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:2208",
Output: false,
},
{
// mismatched ID is ignored
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:0000f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
Output: false,
},
{
// invalid digest is ignored
Inspected: dockertypes.ImageInspect{
ID: "sha256:unparseable",
},
Image: "myimage@sha256:unparseable",
Output: false,
},
{
// v1 schema images can be pulled in one format and returned in another
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{"centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf"},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoTags: []string{"docker.io/busybox:latest"},
},
Image: "docker.io/library/busybox:latest",
Output: true,
},
{
// RepoDigest match is required
Inspected: dockertypes.ImageInspect{
ID: "",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:000084acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf"},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: false,
},
{
// RepoDigest match is allowed
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf"},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: true,
},
{
// RepoDigest and ID are checked
Inspected: dockertypes.ImageInspect{
ID: "sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227"},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: true,
},
{
// unparseable RepoDigests are skipped
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{
"centos/ruby-23-centos7@sha256:unparseable",
"docker.io/centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: true,
},
{
// unparseable RepoDigest is ignored
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:unparseable"},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: false,
},
{
// unparseable image digest is ignored
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:unparseable"},
},
Image: "centos/ruby-23-centos7@sha256:unparseable",
Output: false,
},
{
// prefix match is rejected for ID and RepoDigest
Inspected: dockertypes.ImageInspect{
ID: "sha256:unparseable",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:unparseable"},
},
Image: "sha256:unparseable",
Output: false,
},
{
// possible SHA prefix match is rejected for ID and RepoDigest because it is not in the named format
Inspected: dockertypes.ImageInspect{
ID: "sha256:0000f247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{"docker.io/centos/ruby-23-centos7@sha256:0000f247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227"},
},
Image: "sha256:0000",
Output: false,
},
} {
match := matchImageTagOrSHA(testCase.Inspected, testCase.Image)
assert.Equal(t, testCase.Output, match, fmt.Sprintf("%s is not a match (%d)", testCase.Image, i))
}
}
func TestMatchImageIDOnly(t *testing.T) {
for i, testCase := range []struct {
Inspected dockertypes.ImageInspect
Image string
Output bool
}{
// shouldn't match names or tagged names
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"ubuntu:latest"}},
Image: "ubuntu",
Output: false,
},
{
Inspected: dockertypes.ImageInspect{RepoTags: []string{"colemickens/hyperkube-amd64:217.9beff63"}},
Image: "colemickens/hyperkube-amd64:217.9beff63",
Output: false,
},
// should match name@digest refs if they refer to the image ID (but only the full ID)
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
Output: true,
},
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:2208f7a29005",
Output: false,
},
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:2208",
Output: false,
},
// should match when the IDs are literally the same
{
Inspected: dockertypes.ImageInspect{
ID: "foobar",
},
Image: "foobar",
Output: true,
},
// shouldn't match mismatched IDs
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:2208f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
},
Image: "myimage@sha256:0000f7a29005d226d1ee33a63e33af1f47af6156c740d7d23c7948e8d282d53d",
Output: false,
},
// shouldn't match invalid IDs or refs
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:unparseable",
},
Image: "myimage@sha256:unparseable",
Output: false,
},
// shouldn't match against repo digests
{
Inspected: dockertypes.ImageInspect{
ID: "sha256:9bbdf247c91345f0789c10f50a57e36a667af1189687ad1de88a6243d05a2227",
RepoDigests: []string{"centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf"},
},
Image: "centos/ruby-23-centos7@sha256:940584acbbfb0347272112d2eb95574625c0c60b4e2fdadb139de5859cf754bf",
Output: false,
},
} {
match := matchImageIDOnly(testCase.Inspected, testCase.Image)
assert.Equal(t, testCase.Output, match, fmt.Sprintf("%s is not a match (%d)", testCase.Image, i))
}
}

View File

@ -1,272 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"time"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerimagetypes "github.com/docker/docker/api/types/image"
"k8s.io/kubernetes/pkg/kubelet/dockershim/metrics"
)
// instrumentedInterface wraps the Interface and records the operations
// and errors metrics.
type instrumentedInterface struct {
client Interface
}
// NewInstrumentedInterface creates an instrumented Interface from an existing Interface.
func NewInstrumentedInterface(dockerClient Interface) Interface {
return instrumentedInterface{
client: dockerClient,
}
}
// recordOperation records the duration of the operation.
func recordOperation(operation string, start time.Time) {
metrics.DockerOperations.WithLabelValues(operation).Inc()
metrics.DockerOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
}
// recordError records an error metric if an error occurred.
func recordError(operation string, err error) {
if err != nil {
if _, ok := err.(operationTimeout); ok {
metrics.DockerOperationsTimeout.WithLabelValues(operation).Inc()
}
// A docker operation timeout is also a docker error, so we don't add an else branch here.
metrics.DockerOperationsErrors.WithLabelValues(operation).Inc()
}
}
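// Editor's sketch (not part of the original source): every wrapper method
// below follows the same instrumentation pattern; with a hypothetical
// method SomeOp it would read:
//
//	func (in instrumentedInterface) SomeOp(arg string) error {
//		const operation = "some_op"
//		defer recordOperation(operation, time.Now())
//		err := in.client.SomeOp(arg)
//		recordError(operation, err)
//		return err
//	}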
func (in instrumentedInterface) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
const operation = "list_containers"
defer recordOperation(operation, time.Now())
out, err := in.client.ListContainers(options)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
const operation = "inspect_container"
defer recordOperation(operation, time.Now())
out, err := in.client.InspectContainer(id)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) {
const operation = "inspect_container_withsize"
defer recordOperation(operation, time.Now())
out, err := in.client.InspectContainerWithSize(id)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) {
const operation = "create_container"
defer recordOperation(operation, time.Now())
out, err := in.client.CreateContainer(opts)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) StartContainer(id string) error {
const operation = "start_container"
defer recordOperation(operation, time.Now())
err := in.client.StartContainer(id)
recordError(operation, err)
return err
}
func (in instrumentedInterface) StopContainer(id string, timeout time.Duration) error {
const operation = "stop_container"
defer recordOperation(operation, time.Now())
err := in.client.StopContainer(id, timeout)
recordError(operation, err)
return err
}
func (in instrumentedInterface) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
const operation = "remove_container"
defer recordOperation(operation, time.Now())
err := in.client.RemoveContainer(id, opts)
recordError(operation, err)
return err
}
func (in instrumentedInterface) UpdateContainerResources(id string, updateConfig dockercontainer.UpdateConfig) error {
const operation = "update_container"
defer recordOperation(operation, time.Now())
err := in.client.UpdateContainerResources(id, updateConfig)
recordError(operation, err)
return err
}
func (in instrumentedInterface) InspectImageByRef(image string) (*dockertypes.ImageInspect, error) {
const operation = "inspect_image"
defer recordOperation(operation, time.Now())
out, err := in.client.InspectImageByRef(image)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) InspectImageByID(image string) (*dockertypes.ImageInspect, error) {
const operation = "inspect_image"
defer recordOperation(operation, time.Now())
out, err := in.client.InspectImageByID(image)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) {
const operation = "list_images"
defer recordOperation(operation, time.Now())
out, err := in.client.ListImages(opts)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) PullImage(imageID string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
const operation = "pull_image"
defer recordOperation(operation, time.Now())
err := in.client.PullImage(imageID, auth, opts)
recordError(operation, err)
return err
}
func (in instrumentedInterface) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
const operation = "remove_image"
defer recordOperation(operation, time.Now())
imageDelete, err := in.client.RemoveImage(image, opts)
recordError(operation, err)
return imageDelete, err
}
func (in instrumentedInterface) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
const operation = "logs"
defer recordOperation(operation, time.Now())
err := in.client.Logs(id, opts, sopts)
recordError(operation, err)
return err
}
func (in instrumentedInterface) Version() (*dockertypes.Version, error) {
const operation = "version"
defer recordOperation(operation, time.Now())
out, err := in.client.Version()
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) Info() (*dockertypes.Info, error) {
const operation = "info"
defer recordOperation(operation, time.Now())
out, err := in.client.Info()
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.IDResponse, error) {
const operation = "create_exec"
defer recordOperation(operation, time.Now())
out, err := in.client.CreateExec(id, opts)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
const operation = "start_exec"
defer recordOperation(operation, time.Now())
err := in.client.StartExec(startExec, opts, sopts)
recordError(operation, err)
return err
}
func (in instrumentedInterface) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
const operation = "inspect_exec"
defer recordOperation(operation, time.Now())
out, err := in.client.InspectExec(id)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
const operation = "attach"
defer recordOperation(operation, time.Now())
err := in.client.AttachToContainer(id, opts, sopts)
recordError(operation, err)
return err
}
func (in instrumentedInterface) ImageHistory(id string) ([]dockerimagetypes.HistoryResponseItem, error) {
const operation = "image_history"
defer recordOperation(operation, time.Now())
out, err := in.client.ImageHistory(id)
recordError(operation, err)
return out, err
}
func (in instrumentedInterface) ResizeExecTTY(id string, height, width uint) error {
const operation = "resize_exec"
defer recordOperation(operation, time.Now())
err := in.client.ResizeExecTTY(id, height, width)
recordError(operation, err)
return err
}
func (in instrumentedInterface) ResizeContainerTTY(id string, height, width uint) error {
const operation = "resize_container"
defer recordOperation(operation, time.Now())
err := in.client.ResizeContainerTTY(id, height, width)
recordError(operation, err)
return err
}
func (in instrumentedInterface) GetContainerStats(id string) (*dockertypes.StatsJSON, error) {
const operation = "stats"
defer recordOperation(operation, time.Now())
out, err := in.client.GetContainerStats(id)
recordError(operation, err)
return out, err
}

View File

@ -1,678 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"regexp"
"sync"
"time"
"github.com/golang/glog"
dockertypes "github.com/docker/docker/api/types"
dockercontainer "github.com/docker/docker/api/types/container"
dockerimagetypes "github.com/docker/docker/api/types/image"
dockerapi "github.com/docker/docker/client"
dockermessage "github.com/docker/docker/pkg/jsonmessage"
dockerstdcopy "github.com/docker/docker/pkg/stdcopy"
)
// kubeDockerClient is a wrapped layer of docker client for kubelet internal use. This layer is added to:
// 1) Redirect stream for exec and attach operations.
// 2) Wrap the context in this layer to make the Interface cleaner.
type kubeDockerClient struct {
// timeout is the timeout of short running docker operations.
timeout time.Duration
// If no pulling progress is made before imagePullProgressDeadline, the image pulling will be cancelled.
// Docker reports image progress for every 512kB block, so normally there shouldn't be a long interval
// between progress updates.
imagePullProgressDeadline time.Duration
client *dockerapi.Client
}
// Make sure that kubeDockerClient implemented the Interface.
var _ Interface = &kubeDockerClient{}
// There are 2 kinds of docker operations categorized by running time:
// * Long running operation: The long running operation could run for an arbitrarily long time, and the running time
// usually depends on some uncontrollable factors. These operations include: PullImage, Logs, StartExec, AttachToContainer.
// * Non-long running operation: Given the maximum load of the system, the non-long running operation should finish
// in expected and usually short time. These include all other operations.
// kubeDockerClient only applies a timeout to non-long running operations.
const (
// defaultTimeout is the default timeout of short running docker operations.
// Value is slightly offset from 2 minutes to make timeouts due to this
// constant recognizable.
defaultTimeout = 2*time.Minute - 1*time.Second
// defaultShmSize is the default ShmSize to use (in bytes) if not specified.
defaultShmSize = int64(1024 * 1024 * 64)
// defaultImagePullingProgressReportInterval is the default interval of image pulling progress reporting.
defaultImagePullingProgressReportInterval = 10 * time.Second
)
// newKubeDockerClient creates a kubeDockerClient from an existing docker client. If requestTimeout is 0,
// defaultTimeout will be applied.
func newKubeDockerClient(dockerClient *dockerapi.Client, requestTimeout, imagePullProgressDeadline time.Duration) Interface {
if requestTimeout == 0 {
requestTimeout = defaultTimeout
}
k := &kubeDockerClient{
client: dockerClient,
timeout: requestTimeout,
imagePullProgressDeadline: imagePullProgressDeadline,
}
// Notice that this assumes that docker is running before kubelet is started.
v, err := k.Version()
if err != nil {
glog.Errorf("failed to retrieve docker version: %v", err)
glog.Warningf("Using empty version for docker client, this may sometimes cause compatibility issue.")
} else {
// Update client version with real api version.
dockerClient.NegotiateAPIVersionPing(dockertypes.Ping{APIVersion: v.APIVersion})
}
return k
}
func (d *kubeDockerClient) ListContainers(options dockertypes.ContainerListOptions) ([]dockertypes.Container, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
containers, err := d.client.ContainerList(ctx, options)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return containers, nil
}
func (d *kubeDockerClient) InspectContainer(id string) (*dockertypes.ContainerJSON, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
containerJSON, err := d.client.ContainerInspect(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &containerJSON, nil
}
// InspectContainerWithSize is currently only used for Windows container stats
func (d *kubeDockerClient) InspectContainerWithSize(id string) (*dockertypes.ContainerJSON, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
// Inspects the container including the fields SizeRw and SizeRootFs.
containerJSON, _, err := d.client.ContainerInspectWithRaw(ctx, id, true)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &containerJSON, nil
}
func (d *kubeDockerClient) CreateContainer(opts dockertypes.ContainerCreateConfig) (*dockercontainer.ContainerCreateCreatedBody, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
// We provide an explicit default shm size so as not to depend on the docker daemon.
// TODO: evaluate exposing this as a knob in the API
if opts.HostConfig != nil && opts.HostConfig.ShmSize <= 0 {
opts.HostConfig.ShmSize = defaultShmSize
}
createResp, err := d.client.ContainerCreate(ctx, opts.Config, opts.HostConfig, opts.NetworkingConfig, opts.Name)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &createResp, nil
}
func (d *kubeDockerClient) StartContainer(id string) error {
ctx, cancel := d.getTimeoutContext()
defer cancel()
err := d.client.ContainerStart(ctx, id, dockertypes.ContainerStartOptions{})
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
return err
}
// Stopping an already stopped container will not cause an error in dockerapi.
func (d *kubeDockerClient) StopContainer(id string, timeout time.Duration) error {
ctx, cancel := d.getCustomTimeoutContext(timeout)
defer cancel()
err := d.client.ContainerStop(ctx, id, &timeout)
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
return err
}
func (d *kubeDockerClient) RemoveContainer(id string, opts dockertypes.ContainerRemoveOptions) error {
ctx, cancel := d.getTimeoutContext()
defer cancel()
err := d.client.ContainerRemove(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
return err
}
func (d *kubeDockerClient) UpdateContainerResources(id string, updateConfig dockercontainer.UpdateConfig) error {
ctx, cancel := d.getTimeoutContext()
defer cancel()
_, err := d.client.ContainerUpdate(ctx, id, updateConfig)
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
return err
}
func (d *kubeDockerClient) inspectImageRaw(ref string) (*dockertypes.ImageInspect, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, _, err := d.client.ImageInspectWithRaw(ctx, ref)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
if dockerapi.IsErrImageNotFound(err) {
err = ImageNotFoundError{ID: ref}
}
return nil, err
}
return &resp, nil
}
func (d *kubeDockerClient) InspectImageByID(imageID string) (*dockertypes.ImageInspect, error) {
resp, err := d.inspectImageRaw(imageID)
if err != nil {
return nil, err
}
if !matchImageIDOnly(*resp, imageID) {
return nil, ImageNotFoundError{ID: imageID}
}
return resp, nil
}
func (d *kubeDockerClient) InspectImageByRef(imageRef string) (*dockertypes.ImageInspect, error) {
resp, err := d.inspectImageRaw(imageRef)
if err != nil {
return nil, err
}
if !matchImageTagOrSHA(*resp, imageRef) {
return nil, ImageNotFoundError{ID: imageRef}
}
return resp, nil
}
func (d *kubeDockerClient) ImageHistory(id string) ([]dockerimagetypes.HistoryResponseItem, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ImageHistory(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
return resp, err
}
func (d *kubeDockerClient) ListImages(opts dockertypes.ImageListOptions) ([]dockertypes.ImageSummary, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
images, err := d.client.ImageList(ctx, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return images, nil
}
func base64EncodeAuth(auth dockertypes.AuthConfig) (string, error) {
var buf bytes.Buffer
if err := json.NewEncoder(&buf).Encode(auth); err != nil {
return "", err
}
return base64.URLEncoding.EncodeToString(buf.Bytes()), nil
}
// progress is a wrapper of dockermessage.JSONMessage with a lock protecting it.
type progress struct {
sync.RWMutex
// message stores the latest docker json message.
message *dockermessage.JSONMessage
// timestamp of the latest update.
timestamp time.Time
}
func newProgress() *progress {
return &progress{timestamp: time.Now()}
}
func (p *progress) set(msg *dockermessage.JSONMessage) {
p.Lock()
defer p.Unlock()
p.message = msg
p.timestamp = time.Now()
}
func (p *progress) get() (string, time.Time) {
p.RLock()
defer p.RUnlock()
if p.message == nil {
return "No progress", p.timestamp
}
// The following code is based on JSONMessage.Display
var prefix string
if p.message.ID != "" {
prefix = fmt.Sprintf("%s: ", p.message.ID)
}
if p.message.Progress == nil {
return fmt.Sprintf("%s%s", prefix, p.message.Status), p.timestamp
}
return fmt.Sprintf("%s%s %s", prefix, p.message.Status, p.message.Progress.String()), p.timestamp
}
// progressReporter keeps the newest image pulling progress and periodically reports it.
type progressReporter struct {
*progress
image string
cancel context.CancelFunc
stopCh chan struct{}
imagePullProgressDeadline time.Duration
}
// newProgressReporter creates a new progressReporter for a specific image with the specified progress deadline
func newProgressReporter(image string, cancel context.CancelFunc, imagePullProgressDeadline time.Duration) *progressReporter {
return &progressReporter{
progress: newProgress(),
image: image,
cancel: cancel,
stopCh: make(chan struct{}),
imagePullProgressDeadline: imagePullProgressDeadline,
}
}
// start starts the progressReporter
func (p *progressReporter) start() {
go func() {
ticker := time.NewTicker(defaultImagePullingProgressReportInterval)
defer ticker.Stop()
for {
// TODO(random-liu): Report as events.
select {
case <-ticker.C:
progress, timestamp := p.progress.get()
// If there is no progress for p.imagePullProgressDeadline, cancel the operation.
if time.Since(timestamp) > p.imagePullProgressDeadline {
glog.Errorf("Cancel pulling image %q because of no progress for %v, latest progress: %q", p.image, p.imagePullProgressDeadline, progress)
p.cancel()
return
}
glog.V(2).Infof("Pulling image %q: %q", p.image, progress)
case <-p.stopCh:
progress, _ := p.progress.get()
glog.V(2).Infof("Stop pulling image %q: %q", p.image, progress)
return
}
}
}()
}
// stop stops the progressReporter
func (p *progressReporter) stop() {
close(p.stopCh)
}
func (d *kubeDockerClient) PullImage(image string, auth dockertypes.AuthConfig, opts dockertypes.ImagePullOptions) error {
// RegistryAuth is the base64 encoded credentials for the registry
base64Auth, err := base64EncodeAuth(auth)
if err != nil {
return err
}
opts.RegistryAuth = base64Auth
ctx, cancel := d.getCancelableContext()
defer cancel()
resp, err := d.client.ImagePull(ctx, image, opts)
if err != nil {
return err
}
defer resp.Close()
reporter := newProgressReporter(image, cancel, d.imagePullProgressDeadline)
reporter.start()
defer reporter.stop()
decoder := json.NewDecoder(resp)
for {
var msg dockermessage.JSONMessage
err := decoder.Decode(&msg)
if err == io.EOF {
break
}
if err != nil {
return err
}
if msg.Error != nil {
return msg.Error
}
reporter.set(&msg)
}
return nil
}
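// Editor's note (illustrative, based on the surrounding code): if docker
// emits no JSON progress message for longer than imagePullProgressDeadline,
// the progressReporter started above calls cancel(), the pull context is
// aborted, and the decoder loop returns the resulting read error.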
func (d *kubeDockerClient) RemoveImage(image string, opts dockertypes.ImageRemoveOptions) ([]dockertypes.ImageDeleteResponseItem, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ImageRemove(ctx, image, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if isImageNotFoundError(err) {
return nil, ImageNotFoundError{ID: image}
}
return resp, err
}
func (d *kubeDockerClient) Logs(id string, opts dockertypes.ContainerLogsOptions, sopts StreamOptions) error {
ctx, cancel := d.getCancelableContext()
defer cancel()
resp, err := d.client.ContainerLogs(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
if err != nil {
return err
}
defer resp.Close()
return d.redirectResponseToOutputStream(sopts.RawTerminal, sopts.OutputStream, sopts.ErrorStream, resp)
}
func (d *kubeDockerClient) Version() (*dockertypes.Version, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ServerVersion(ctx)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *kubeDockerClient) Info() (*dockertypes.Info, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.Info(ctx)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &resp, nil
}
// TODO(random-liu): Add unit test for exec and attach functions, just like what go-dockerclient did.
func (d *kubeDockerClient) CreateExec(id string, opts dockertypes.ExecConfig) (*dockertypes.IDResponse, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ContainerExecCreate(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *kubeDockerClient) StartExec(startExec string, opts dockertypes.ExecStartCheck, sopts StreamOptions) error {
ctx, cancel := d.getCancelableContext()
defer cancel()
if opts.Detach {
err := d.client.ContainerExecStart(ctx, startExec, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
return err
}
resp, err := d.client.ContainerExecAttach(ctx, startExec, dockertypes.ExecConfig{
Detach: opts.Detach,
Tty: opts.Tty,
})
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
if err != nil {
return err
}
defer resp.Close()
if sopts.ExecStarted != nil {
// Send a message to the channel indicating that the exec has started. This is needed so
// interactive execs can handle resizing correctly - the request to resize the TTY has to happen
// after the call to d.client.ContainerExecAttach, and because d.holdHijackedConnection below
// blocks, we use sopts.ExecStarted to signal the caller that it's ok to resize.
sopts.ExecStarted <- struct{}{}
}
return d.holdHijackedConnection(sopts.RawTerminal || opts.Tty, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp)
}
func (d *kubeDockerClient) InspectExec(id string) (*dockertypes.ContainerExecInspect, error) {
ctx, cancel := d.getTimeoutContext()
defer cancel()
resp, err := d.client.ContainerExecInspect(ctx, id)
if ctxErr := contextError(ctx); ctxErr != nil {
return nil, ctxErr
}
if err != nil {
return nil, err
}
return &resp, nil
}
func (d *kubeDockerClient) AttachToContainer(id string, opts dockertypes.ContainerAttachOptions, sopts StreamOptions) error {
ctx, cancel := d.getCancelableContext()
defer cancel()
resp, err := d.client.ContainerAttach(ctx, id, opts)
if ctxErr := contextError(ctx); ctxErr != nil {
return ctxErr
}
if err != nil {
return err
}
defer resp.Close()
return d.holdHijackedConnection(sopts.RawTerminal, sopts.InputStream, sopts.OutputStream, sopts.ErrorStream, resp)
}
func (d *kubeDockerClient) ResizeExecTTY(id string, height, width uint) error {
ctx, cancel := d.getCancelableContext()
defer cancel()
return d.client.ContainerExecResize(ctx, id, dockertypes.ResizeOptions{
Height: height,
Width: width,
})
}
func (d *kubeDockerClient) ResizeContainerTTY(id string, height, width uint) error {
ctx, cancel := d.getCancelableContext()
defer cancel()
return d.client.ContainerResize(ctx, id, dockertypes.ResizeOptions{
Height: height,
Width: width,
})
}
// GetContainerStats is currently only used for Windows container stats
func (d *kubeDockerClient) GetContainerStats(id string) (*dockertypes.StatsJSON, error) {
ctx, cancel := d.getCancelableContext()
defer cancel()
response, err := d.client.ContainerStats(ctx, id, false)
if err != nil {
return nil, err
}
// Close the body even if decoding fails, to avoid leaking the connection.
defer response.Body.Close()
dec := json.NewDecoder(response.Body)
var stats dockertypes.StatsJSON
err = dec.Decode(&stats)
if err != nil {
return nil, err
}
return &stats, nil
}
// redirectResponseToOutputStream redirects the response stream to stdout and stderr. When tty is true, all streams
// are redirected to stdout only.
func (d *kubeDockerClient) redirectResponseToOutputStream(tty bool, outputStream, errorStream io.Writer, resp io.Reader) error {
if outputStream == nil {
outputStream = ioutil.Discard
}
if errorStream == nil {
errorStream = ioutil.Discard
}
var err error
if tty {
_, err = io.Copy(outputStream, resp)
} else {
_, err = dockerstdcopy.StdCopy(outputStream, errorStream, resp)
}
return err
}
// holdHijackedConnection holds the HijackedResponse, redirects the inputStream to the connection, and redirects the
// response stream to stdout and stderr. NOTE: If needed, we could also add a context to this function.
func (d *kubeDockerClient) holdHijackedConnection(tty bool, inputStream io.Reader, outputStream, errorStream io.Writer, resp dockertypes.HijackedResponse) error {
receiveStdout := make(chan error)
if outputStream != nil || errorStream != nil {
go func() {
receiveStdout <- d.redirectResponseToOutputStream(tty, outputStream, errorStream, resp.Reader)
}()
}
stdinDone := make(chan struct{})
go func() {
if inputStream != nil {
io.Copy(resp.Conn, inputStream)
}
resp.CloseWrite()
close(stdinDone)
}()
select {
case err := <-receiveStdout:
return err
case <-stdinDone:
if outputStream != nil || errorStream != nil {
return <-receiveStdout
}
}
return nil
}
// getCancelableContext returns a new cancelable context. For long running requests without a timeout, we use a
// cancelable context to avoid potential resource leaks, although the current implementation shouldn't leak resources.
func (d *kubeDockerClient) getCancelableContext() (context.Context, context.CancelFunc) {
return context.WithCancel(context.Background())
}
// getTimeoutContext returns a new context with the default request timeout
func (d *kubeDockerClient) getTimeoutContext() (context.Context, context.CancelFunc) {
return context.WithTimeout(context.Background(), d.timeout)
}
// getCustomTimeoutContext returns a new context with a specific request timeout
func (d *kubeDockerClient) getCustomTimeoutContext(timeout time.Duration) (context.Context, context.CancelFunc) {
// Pick the larger of the two
if d.timeout > timeout {
timeout = d.timeout
}
return context.WithTimeout(context.Background(), timeout)
}
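// Editor's illustration: with the default d.timeout of 2*time.Minute-1*time.Second,
// getCustomTimeoutContext(30*time.Second) still yields the ~2 minute deadline,
// since the larger of the two values wins.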
// contextError checks the context and returns an error if the context has timed out.
func contextError(ctx context.Context) error {
if ctx.Err() == context.DeadlineExceeded {
return operationTimeout{err: ctx.Err()}
}
return ctx.Err()
}
// StreamOptions are the options used to configure the stream redirection
type StreamOptions struct {
RawTerminal bool
InputStream io.Reader
OutputStream io.Writer
ErrorStream io.Writer
ExecStarted chan struct{}
}
// operationTimeout is the error returned when a docker operation times out.
type operationTimeout struct {
err error
}
func (e operationTimeout) Error() string {
return fmt.Sprintf("operation timeout: %v", e.err)
}
// containerNotFoundErrorRegx matches docker's container-not-found error message.
var containerNotFoundErrorRegx = regexp.MustCompile(`No such container: [0-9a-z]+`)
// IsContainerNotFoundError checks whether the error is container not found error.
func IsContainerNotFoundError(err error) bool {
return containerNotFoundErrorRegx.MatchString(err.Error())
}
// ImageNotFoundError is the error returned by InspectImage when an image is not found.
// It is exposed so that dockershim tests can inject this error.
type ImageNotFoundError struct {
ID string
}
func (e ImageNotFoundError) Error() string {
return fmt.Sprintf("no such image: %q", e.ID)
}
// IsImageNotFoundError checks whether the error is an image-not-found error. This is exposed
// to share with dockershim.
func IsImageNotFoundError(err error) bool {
_, ok := err.(ImageNotFoundError)
return ok
}
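// Usage sketch (editor's addition, illustrative only): callers can treat a
// missing image differently from other failures, e.g.
//
//	if _, err := client.InspectImageByRef(ref); IsImageNotFoundError(err) {
//		// image not present locally; pull it instead of failing hard
//	}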

View File

@ -1,33 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package libdocker
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
)
func TestIsContainerNotFoundError(t *testing.T) {
// Expected error message from docker.
containerNotFoundError := fmt.Errorf("Error response from daemon: No such container: 96e914f31579e44fe49b239266385330a9b2125abeb9254badd9fca74580c95a")
otherError := fmt.Errorf("Error response from daemon: Other errors")
assert.True(t, IsContainerNotFoundError(containerNotFoundError))
assert.False(t, IsContainerNotFoundError(otherError))
}

View File

@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/metrics",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,96 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const (
// DockerOperationsKey is the key for docker operation metrics.
DockerOperationsKey = "docker_operations"
// DockerOperationsLatencyKey is the key for the operation latency metrics.
DockerOperationsLatencyKey = "docker_operations_latency_microseconds"
// DockerOperationsErrorsKey is the key for the operation error metrics.
DockerOperationsErrorsKey = "docker_operations_errors"
// DockerOperationsTimeoutKey is the key for the operation timeout metrics.
DockerOperationsTimeoutKey = "docker_operations_timeout"
// Keep the "kubelet" subsystem for backward compatibility.
kubeletSubsystem = "kubelet"
)
var (
// DockerOperationsLatency collects operation latency numbers by operation
// type.
DockerOperationsLatency = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Subsystem: kubeletSubsystem,
Name: DockerOperationsLatencyKey,
Help: "Latency in microseconds of Docker operations. Broken down by operation type.",
},
[]string{"operation_type"},
)
// DockerOperations collects operation counts by operation type.
DockerOperations = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: kubeletSubsystem,
Name: DockerOperationsKey,
Help: "Cumulative number of Docker operations by operation type.",
},
[]string{"operation_type"},
)
// DockerOperationsErrors collects operation errors by operation
// type.
DockerOperationsErrors = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: kubeletSubsystem,
Name: DockerOperationsErrorsKey,
Help: "Cumulative number of Docker operation errors by operation type.",
},
[]string{"operation_type"},
)
// DockerOperationsTimeout collects operation timeouts by operation type.
DockerOperationsTimeout = prometheus.NewCounterVec(
prometheus.CounterOpts{
Subsystem: kubeletSubsystem,
Name: DockerOperationsTimeoutKey,
Help: "Cumulative number of Docker operation timeout by operation type.",
},
[]string{"operation_type"},
)
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
registerMetrics.Do(func() {
prometheus.MustRegister(DockerOperationsLatency)
prometheus.MustRegister(DockerOperations)
prometheus.MustRegister(DockerOperationsErrors)
prometheus.MustRegister(DockerOperationsTimeout)
})
}
// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}
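// Usage sketch (editor's addition): callers typically pair the counter and
// latency metrics, mirroring dockershim's recordOperation:
//
//	start := time.Now()
//	DockerOperations.WithLabelValues("list_containers").Inc()
//	defer DockerOperationsLatency.WithLabelValues("list_containers").Observe(SinceInMicroseconds(start))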

View File

@ -1,149 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"math/rand"
"strconv"
"strings"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/leaky"
)
// Container "names" are implementation details that do not concern
// kubelet/CRI. This CRI shim uses names to fulfill the CRI requirement to
// make sandbox/container creation idempotent. CRI states that there can
// only exist one sandbox/container with the given metadata. To enforce this,
// this shim constructs a name using the fields in the metadata so that
// docker will reject the creation request if the name already exists.
//
// Note that changes to naming will likely break backward compatibility.
// Code must be added to ensure the shim knows how to recognize and extract
// information from older containers.
//
// TODO: Add code to handle backward compatibility, i.e., making sure we can
// recognize older containers and extract information from their names if
// necessary.
const (
// kubePrefix is used to identify the containers/sandboxes on the node managed by kubelet
kubePrefix = "k8s"
// sandboxContainerName is a string to include in the docker container so
// that users can easily identify the sandboxes.
sandboxContainerName = leaky.PodInfraContainerName
// Delimiter used to construct docker container names.
nameDelimiter = "_"
// DockerImageIDPrefix is the prefix of image id in container status.
DockerImageIDPrefix = "docker://"
// DockerPullableImageIDPrefix is the prefix of pullable image id in container status.
DockerPullableImageIDPrefix = "docker-pullable://"
)
func makeSandboxName(s *runtimeapi.PodSandboxConfig) string {
return strings.Join([]string{
kubePrefix, // 0
sandboxContainerName, // 1
s.Metadata.Name, // 2
s.Metadata.Namespace, // 3
s.Metadata.Uid, // 4
fmt.Sprintf("%d", s.Metadata.Attempt), // 5
}, nameDelimiter)
}
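// Editor's illustration (taken from the round-trip test below): a sandbox
// with name "foo", namespace "bar", uid "iamuid" and attempt 3 yields the
// docker container name "k8s_POD_foo_bar_iamuid_3".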
func makeContainerName(s *runtimeapi.PodSandboxConfig, c *runtimeapi.ContainerConfig) string {
return strings.Join([]string{
kubePrefix, // 0
c.Metadata.Name, // 1: container name
s.Metadata.Name, // 2: sandbox name
s.Metadata.Namespace, // 3: sandbox namespace
s.Metadata.Uid, // 4: sandbox uid
fmt.Sprintf("%d", c.Metadata.Attempt), // 5
}, nameDelimiter)
}
// randomizeName randomizes the container name. This should only be used when we hit the
// docker container name conflict bug.
func randomizeName(name string) string {
return strings.Join([]string{
name,
fmt.Sprintf("%08x", rand.Uint32()),
}, nameDelimiter)
}
func parseUint32(s string) (uint32, error) {
n, err := strconv.ParseUint(s, 10, 32)
if err != nil {
return 0, err
}
return uint32(n), nil
}
// TODO: Evaluate whether we should rely on labels completely.
func parseSandboxName(name string) (*runtimeapi.PodSandboxMetadata, error) {
// Docker adds a "/" prefix to names, so trim it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, nameDelimiter)
// Tolerate the random suffix.
// TODO(random-liu): Remove 7 field case when docker 1.11 is deprecated.
if len(parts) != 6 && len(parts) != 7 {
return nil, fmt.Errorf("failed to parse the sandbox name: %q", name)
}
if parts[0] != kubePrefix {
return nil, fmt.Errorf("container is not managed by kubernetes: %q", name)
}
attempt, err := parseUint32(parts[5])
if err != nil {
return nil, fmt.Errorf("failed to parse the sandbox name %q: %v", name, err)
}
return &runtimeapi.PodSandboxMetadata{
Name: parts[2],
Namespace: parts[3],
Uid: parts[4],
Attempt: attempt,
}, nil
}
// TODO: Evaluate whether we should rely on labels completely.
func parseContainerName(name string) (*runtimeapi.ContainerMetadata, error) {
// Docker adds a "/" prefix to names, so trim it.
name = strings.TrimPrefix(name, "/")
parts := strings.Split(name, nameDelimiter)
// Tolerate the random suffix.
// TODO(random-liu): Remove 7 field case when docker 1.11 is deprecated.
if len(parts) != 6 && len(parts) != 7 {
return nil, fmt.Errorf("failed to parse the container name: %q", name)
}
if parts[0] != kubePrefix {
return nil, fmt.Errorf("container is not managed by kubernetes: %q", name)
}
attempt, err := parseUint32(parts[5])
if err != nil {
return nil, fmt.Errorf("failed to parse the container name %q: %v", name, err)
}
return &runtimeapi.ContainerMetadata{
Name: parts[1],
Attempt: attempt,
}, nil
}

View File

@ -1,106 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"testing"
"github.com/stretchr/testify/assert"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func TestSandboxNameRoundTrip(t *testing.T) {
config := makeSandboxConfig("foo", "bar", "iamuid", 3)
actualName := makeSandboxName(config)
assert.Equal(t, "k8s_POD_foo_bar_iamuid_3", actualName)
actualMetadata, err := parseSandboxName(actualName)
assert.NoError(t, err)
assert.Equal(t, config.Metadata, actualMetadata)
}
func TestNonParsableSandboxNames(t *testing.T) {
// All names must start with the kubernetes prefix "k8s".
_, err := parseSandboxName("owner_POD_foo_bar_iamuid_4")
assert.Error(t, err)
// All names must contain exactly 6 parts.
_, err = parseSandboxName("k8s_POD_dummy_foo_bar_iamuid_4")
assert.Error(t, err)
_, err = parseSandboxName("k8s_foo_bar_iamuid_4")
assert.Error(t, err)
// Should be able to parse attempt number.
_, err = parseSandboxName("k8s_POD_foo_bar_iamuid_notanumber")
assert.Error(t, err)
}
func TestContainerNameRoundTrip(t *testing.T) {
sConfig := makeSandboxConfig("foo", "bar", "iamuid", 3)
name, attempt := "pause", uint32(5)
config := &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: name,
Attempt: attempt,
},
}
actualName := makeContainerName(sConfig, config)
assert.Equal(t, "k8s_pause_foo_bar_iamuid_5", actualName)
actualMetadata, err := parseContainerName(actualName)
assert.NoError(t, err)
assert.Equal(t, config.Metadata, actualMetadata)
}
func TestNonParsableContainerNames(t *testing.T) {
// All names must start with the kubernetes prefix "k8s".
_, err := parseContainerName("owner_frontend_foo_bar_iamuid_4")
assert.Error(t, err)
// All names must contain exactly 6 parts.
_, err = parseContainerName("k8s_frontend_dummy_foo_bar_iamuid_4")
assert.Error(t, err)
_, err = parseContainerName("k8s_foo_bar_iamuid_4")
assert.Error(t, err)
// Should be able to parse attempt number.
_, err = parseContainerName("k8s_frontend_foo_bar_iamuid_notanumber")
assert.Error(t, err)
}
func TestParseRandomizedNames(t *testing.T) {
// Test randomized sandbox name.
sConfig := makeSandboxConfig("foo", "bar", "iamuid", 3)
sActualName := randomizeName(makeSandboxName(sConfig))
sActualMetadata, err := parseSandboxName(sActualName)
assert.NoError(t, err)
assert.Equal(t, sConfig.Metadata, sActualMetadata)
// Test randomized container name.
name, attempt := "pause", uint32(5)
config := &runtimeapi.ContainerConfig{
Metadata: &runtimeapi.ContainerMetadata{
Name: name,
Attempt: attempt,
},
}
actualName := randomizeName(makeContainerName(sConfig, config))
actualMetadata, err := parseContainerName(actualName)
assert.NoError(t, err)
assert.Equal(t, config.Metadata, actualMetadata)
}

View File

@ -1,46 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"network.go",
"plugins.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network",
visibility = ["//visibility:public"],
deps = [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network/hostport:go_default_library",
"//pkg/kubelet/dockershim/network/metrics:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/validation:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/dockershim/network/cni:all-srcs",
"//pkg/kubelet/dockershim/network/hairpin:all-srcs",
"//pkg/kubelet/dockershim/network/hostport:all-srcs",
"//pkg/kubelet/dockershim/network/kubenet:all-srcs",
"//pkg/kubelet/dockershim/network/metrics:all-srcs",
"//pkg/kubelet/dockershim/network/testing:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,9 +0,0 @@
approvers:
- thockin
- dchen1107
- matchstick
- freehan
- dcbw
reviewers:
- sig-network-reviewers

View File

@ -1,110 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"cni.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"cni_others.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"cni_windows.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni",
deps = [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//vendor/github.com/containernetworking/cni/libcni:go_default_library",
"//vendor/github.com/containernetworking/cni/pkg/types:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
] + select({
"@io_bazel_rules_go//go/platform:windows": [
"//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:linux": [
"cni_test.go",
],
"//conditions:default": [],
}),
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/cni/testing:go_default_library",
"//pkg/kubelet/dockershim/network/hostport:go_default_library",
"//pkg/kubelet/dockershim/network/testing:go_default_library",
"//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/k8s.io/client-go/util/testing:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/dockershim/network/cni/testing:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,325 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"errors"
"fmt"
"sort"
"strings"
"sync"
"github.com/containernetworking/cni/libcni"
cnitypes "github.com/containernetworking/cni/pkg/types"
"github.com/golang/glog"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
utilexec "k8s.io/utils/exec"
)
const (
CNIPluginName = "cni"
DefaultConfDir = "/etc/cni/net.d"
DefaultBinDir = "/opt/cni/bin"
)
type cniNetworkPlugin struct {
network.NoopNetworkPlugin
loNetwork *cniNetwork
sync.RWMutex
defaultNetwork *cniNetwork
host network.Host
execer utilexec.Interface
nsenterPath string
confDir string
binDirs []string
}
type cniNetwork struct {
name string
NetworkConfig *libcni.NetworkConfigList
CNIConfig libcni.CNI
}
// cniPortMapping maps to the standard CNI portmapping Capability
// see: https://github.com/containernetworking/cni/blob/master/CONVENTIONS.md
type cniPortMapping struct {
HostPort int32 `json:"hostPort"`
ContainerPort int32 `json:"containerPort"`
Protocol string `json:"protocol"`
HostIP string `json:"hostIP"`
}
func SplitDirs(dirs string) []string {
// Use comma rather than colon to work better with Windows too
return strings.Split(dirs, ",")
}
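// For example, SplitDirs("/opt/cni/bin,/usr/libexec/cni") returns
// []string{"/opt/cni/bin", "/usr/libexec/cni"}; a Windows path such as
// "C:\cni\bin" passes through intact because no splitting happens on the colon.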
func ProbeNetworkPlugins(confDir string, binDirs []string) []network.NetworkPlugin {
old := binDirs
binDirs = make([]string, 0, len(binDirs))
for _, dir := range old {
if dir != "" {
binDirs = append(binDirs, dir)
}
}
if len(binDirs) == 0 {
binDirs = []string{DefaultBinDir}
}
if confDir == "" {
confDir = DefaultConfDir
}
plugin := &cniNetworkPlugin{
defaultNetwork: nil,
loNetwork: getLoNetwork(binDirs),
execer: utilexec.New(),
confDir: confDir,
binDirs: binDirs,
}
// Sync NetworkConfig on a best-effort basis during probing.
plugin.syncNetworkConfig()
return []network.NetworkPlugin{plugin}
}
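// A typical call site wires the kubelet's flag values through, e.g. (paths
// illustrative; the defaults above apply when the arguments are empty):
//
//	plugins := ProbeNetworkPlugins("/etc/cni/net.d", SplitDirs("/opt/cni/bin"))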
func getDefaultCNINetwork(confDir string, binDirs []string) (*cniNetwork, error) {
files, err := libcni.ConfFiles(confDir, []string{".conf", ".conflist", ".json"})
switch {
case err != nil:
return nil, err
case len(files) == 0:
return nil, fmt.Errorf("No networks found in %s", confDir)
}
sort.Strings(files)
for _, confFile := range files {
var confList *libcni.NetworkConfigList
if strings.HasSuffix(confFile, ".conflist") {
confList, err = libcni.ConfListFromFile(confFile)
if err != nil {
glog.Warningf("Error loading CNI config list file %s: %v", confFile, err)
continue
}
} else {
conf, err := libcni.ConfFromFile(confFile)
if err != nil {
glog.Warningf("Error loading CNI config file %s: %v", confFile, err)
continue
}
// Ensure the config has a "type" so we know what plugin to run.
// Also catches the case where somebody put a conflist into a conf file.
if conf.Network.Type == "" {
glog.Warningf("Error loading CNI config file %s: no 'type'; perhaps this is a .conflist?", confFile)
continue
}
confList, err = libcni.ConfListFromConf(conf)
if err != nil {
glog.Warningf("Error converting CNI config file %s to list: %v", confFile, err)
continue
}
}
if len(confList.Plugins) == 0 {
glog.Warningf("CNI config list %s has no networks, skipping", confFile)
continue
}
network := &cniNetwork{
name: confList.Name,
NetworkConfig: confList,
CNIConfig: &libcni.CNIConfig{Path: binDirs},
}
return network, nil
}
return nil, fmt.Errorf("No valid networks found in %s", confDir)
}
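// A minimal .conflist file that this loader accepts looks like the following
// (names and values illustrative):
//
//	{
//	  "cniVersion": "0.3.1",
//	  "name": "mynet",
//	  "plugins": [{"type": "bridge", "bridge": "cni0"}]
//	}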
func (plugin *cniNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
err := plugin.platformInit()
if err != nil {
return err
}
plugin.host = host
plugin.syncNetworkConfig()
return nil
}
func (plugin *cniNetworkPlugin) syncNetworkConfig() {
network, err := getDefaultCNINetwork(plugin.confDir, plugin.binDirs)
if err != nil {
glog.Warningf("Unable to update cni config: %s", err)
return
}
plugin.setDefaultNetwork(network)
}
func (plugin *cniNetworkPlugin) getDefaultNetwork() *cniNetwork {
plugin.RLock()
defer plugin.RUnlock()
return plugin.defaultNetwork
}
func (plugin *cniNetworkPlugin) setDefaultNetwork(n *cniNetwork) {
plugin.Lock()
defer plugin.Unlock()
plugin.defaultNetwork = n
}
func (plugin *cniNetworkPlugin) checkInitialized() error {
if plugin.getDefaultNetwork() == nil {
return errors.New("cni config uninitialized")
}
return nil
}
func (plugin *cniNetworkPlugin) Name() string {
return CNIPluginName
}
func (plugin *cniNetworkPlugin) Status() error {
// sync network config from confDir periodically to detect network config updates
plugin.syncNetworkConfig()
// Can't set up pods if we don't have any CNI network configs yet
return plugin.checkInitialized()
}
func (plugin *cniNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
if err := plugin.checkInitialized(); err != nil {
return err
}
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
return fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
}
// Windows doesn't have a loNetwork; it exists only on Linux
if plugin.loNetwork != nil {
if _, err = plugin.addToNetwork(plugin.loNetwork, name, namespace, id, netnsPath); err != nil {
glog.Errorf("Error while adding to cni lo network: %s", err)
return err
}
}
_, err = plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath)
if err != nil {
glog.Errorf("Error while adding to cni network: %s", err)
return err
}
return err
}
func (plugin *cniNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
if err := plugin.checkInitialized(); err != nil {
return err
}
// Lack of namespace should not be fatal on teardown
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
glog.Warningf("CNI failed to retrieve network namespace path: %v", err)
}
return plugin.deleteFromNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath)
}
func (plugin *cniNetworkPlugin) addToNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string) (cnitypes.Result, error) {
rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath)
if err != nil {
glog.Errorf("Error adding network when building cni runtime conf: %v", err)
return nil, err
}
netConf, cniNet := network.NetworkConfig, network.CNIConfig
glog.V(4).Infof("About to add CNI network %v (type=%v)", netConf.Name, netConf.Plugins[0].Network.Type)
res, err := cniNet.AddNetworkList(netConf, rt)
if err != nil {
glog.Errorf("Error adding network: %v", err)
return nil, err
}
return res, nil
}
func (plugin *cniNetworkPlugin) deleteFromNetwork(network *cniNetwork, podName string, podNamespace string, podSandboxID kubecontainer.ContainerID, podNetnsPath string) error {
rt, err := plugin.buildCNIRuntimeConf(podName, podNamespace, podSandboxID, podNetnsPath)
if err != nil {
glog.Errorf("Error deleting network when building cni runtime conf: %v", err)
return err
}
netConf, cniNet := network.NetworkConfig, network.CNIConfig
glog.V(4).Infof("About to del CNI network %v (type=%v)", netConf.Name, netConf.Plugins[0].Network.Type)
err = cniNet.DelNetworkList(netConf, rt)
// The pod may not be deleted successfully on the first attempt.
// Ignore the "no such file or directory" error in case the network has already been deleted in previous attempts.
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
glog.Errorf("Error deleting network: %v", err)
return err
}
return nil
}
func (plugin *cniNetworkPlugin) buildCNIRuntimeConf(podName string, podNs string, podSandboxID kubecontainer.ContainerID, podNetnsPath string) (*libcni.RuntimeConf, error) {
glog.V(4).Infof("Got netns path %v", podNetnsPath)
glog.V(4).Infof("Using podns path %v", podNs)
rt := &libcni.RuntimeConf{
ContainerID: podSandboxID.ID,
NetNS: podNetnsPath,
IfName: network.DefaultInterfaceName,
Args: [][2]string{
{"IgnoreUnknown", "1"},
{"K8S_POD_NAMESPACE", podNs},
{"K8S_POD_NAME", podName},
{"K8S_POD_INFRA_CONTAINER_ID", podSandboxID.ID},
},
}
// port mappings are CNI capability-based args, rather than parameters
// to a specific plugin
portMappings, err := plugin.host.GetPodPortMappings(podSandboxID.ID)
if err != nil {
return nil, fmt.Errorf("could not retrieve port mappings: %v", err)
}
portMappingsParam := make([]cniPortMapping, 0, len(portMappings))
for _, p := range portMappings {
if p.HostPort <= 0 {
continue
}
portMappingsParam = append(portMappingsParam, cniPortMapping{
HostPort: p.HostPort,
ContainerPort: p.ContainerPort,
Protocol: strings.ToLower(string(p.Protocol)),
HostIP: p.HostIP,
})
}
rt.CapabilityArgs = map[string]interface{}{
"portMappings": portMappingsParam,
}
return rt, nil
}
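// When the runtime conf is serialized into the network config handed to a
// plugin, the capability args above surface under "runtimeConfig", e.g.
// (values illustrative):
//
//	"runtimeConfig": {
//	  "portMappings": [
//	    {"hostPort": 8008, "containerPort": 80, "protocol": "udp", "hostIP": "0.0.0.0"}
//	  ]
//	}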

View File

@ -1,77 +0,0 @@
// +build !windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"fmt"
"github.com/containernetworking/cni/libcni"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
)
func getLoNetwork(binDirs []string) *cniNetwork {
loConfig, err := libcni.ConfListFromBytes([]byte(`{
"cniVersion": "0.2.0",
"name": "cni-loopback",
"plugins":[{
"type": "loopback"
}]
}`))
if err != nil {
// The hardcoded config above should always be valid and unit tests will
// catch this
panic(err)
}
loNetwork := &cniNetwork{
name: "lo",
NetworkConfig: loConfig,
CNIConfig: &libcni.CNIConfig{Path: binDirs},
}
return loNetwork
}
func (plugin *cniNetworkPlugin) platformInit() error {
var err error
plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
if err != nil {
return err
}
return nil
}
// TODO: Use the addToNetwork function to obtain the pod's IP; that assumes the plugin's ADD call is idempotent.
// Also fix the runtime's call to the Status function so it happens only when the IP is lost; there is no need for periodic calls.
func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
}
if netnsPath == "" {
return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id)
}
ip, err := network.GetPodIP(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName)
if err != nil {
return nil, err
}
return &network.PodNetworkStatus{IP: ip}, nil
}

View File

@ -1,301 +0,0 @@
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"math/rand"
"net"
"os"
"path"
"reflect"
"testing"
"text/template"
types020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/stretchr/testify/mock"
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
utiltesting "k8s.io/client-go/util/testing"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
networktest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
// Returns .in file path, .out file path, and .env file path
func installPluginUnderTest(t *testing.T, testBinDir, testConfDir, testDataDir, binName string, confName string) (string, string, string) {
for _, dir := range []string{testBinDir, testConfDir, testDataDir} {
err := os.MkdirAll(dir, 0777)
if err != nil {
t.Fatalf("Failed to create test plugin dir %s: %v", dir, err)
}
}
confFile := path.Join(testConfDir, confName+".conf")
f, err := os.Create(confFile)
if err != nil {
t.Fatalf("Failed to install plugin %s: %v", confFile, err)
}
networkConfig := fmt.Sprintf(`{ "name": "%s", "type": "%s", "capabilities": {"portMappings": true} }`, confName, binName)
_, err = f.WriteString(networkConfig)
if err != nil {
t.Fatalf("Failed to write network config file (%v)", err)
}
f.Close()
pluginExec := path.Join(testBinDir, binName)
f, err = os.Create(pluginExec)
const execScriptTempl = `#!/usr/bin/env bash
cat > {{.InputFile}}
env > {{.OutputEnv}}
echo "%@" >> {{.OutputEnv}}
export $(echo ${CNI_ARGS} | sed 's/;/ /g') &> /dev/null
mkdir -p {{.OutputDir}} &> /dev/null
echo -n "$CNI_COMMAND $CNI_NETNS $K8S_POD_NAMESPACE $K8S_POD_NAME $K8S_POD_INFRA_CONTAINER_ID" >& {{.OutputFile}}
echo -n "{ \"ip4\": { \"ip\": \"10.1.0.23/24\" } }"
`
inputFile := path.Join(testDataDir, binName+".in")
outputFile := path.Join(testDataDir, binName+".out")
envFile := path.Join(testDataDir, binName+".env")
execTemplateData := &map[string]interface{}{
"InputFile": inputFile,
"OutputFile": outputFile,
"OutputEnv": envFile,
"OutputDir": testDataDir,
}
tObj := template.Must(template.New("test").Parse(execScriptTempl))
buf := &bytes.Buffer{}
if err := tObj.Execute(buf, *execTemplateData); err != nil {
t.Fatalf("Error in executing script template - %v", err)
}
execScript := buf.String()
_, err = f.WriteString(execScript)
if err != nil {
t.Fatalf("Failed to write plugin exec - %v", err)
}
err = f.Chmod(0777)
if err != nil {
t.Fatalf("Failed to set exec perms on plugin")
}
f.Close()
return inputFile, outputFile, envFile
}
func tearDownPlugin(tmpDir string) {
err := os.RemoveAll(tmpDir)
if err != nil {
fmt.Printf("Error in cleaning up test: %v", err)
}
}
type fakeNetworkHost struct {
networktest.FakePortMappingGetter
kubeClient clientset.Interface
runtime kubecontainer.Runtime
}
func NewFakeHost(kubeClient clientset.Interface, pods []*containertest.FakePod, ports map[string][]*hostport.PortMapping) *fakeNetworkHost {
host := &fakeNetworkHost{
networktest.FakePortMappingGetter{PortMaps: ports},
kubeClient,
&containertest.FakeRuntime{
AllPodList: pods,
},
}
return host
}
func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) {
return nil, false
}
func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface {
return fnh.kubeClient
}
func (fnh *fakeNetworkHost) GetRuntime() kubecontainer.Runtime {
return fnh.runtime
}
func (fnh *fakeNetworkHost) GetNetNS(containerID string) (string, error) {
return fnh.GetRuntime().GetNetNS(kubecontainer.ContainerID{Type: "test", ID: containerID})
}
func (fnh *fakeNetworkHost) SupportsLegacyFeatures() bool {
return true
}
func TestCNIPlugin(t *testing.T) {
// install some random plugin
netName := fmt.Sprintf("test%d", rand.Intn(1000))
binName := fmt.Sprintf("test_vendor%d", rand.Intn(1000))
podIP := "10.0.0.2"
podIPOutput := fmt.Sprintf("4: eth0 inet %s/24 scope global dynamic eth0\\ valid_lft forever preferred_lft forever", podIP)
fakeCmds := []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) {
return []byte(podIPOutput), nil
},
},
}, cmd, args...)
},
}
fexec := &fakeexec.FakeExec{
CommandScript: fakeCmds,
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
mockLoCNI := &mock_cni.MockCNI{}
// TODO mock for the test plugin too
tmpDir := utiltesting.MkTmpdirOrDie("cni-test")
testConfDir := path.Join(tmpDir, "etc", "cni", "net.d")
testBinDir := path.Join(tmpDir, "opt", "cni", "bin")
testDataDir := path.Join(tmpDir, "output")
defer tearDownPlugin(tmpDir)
inputFile, outputFile, outputEnv := installPluginUnderTest(t, testBinDir, testConfDir, testDataDir, binName, netName)
containerID := kubecontainer.ContainerID{Type: "test", ID: "test_infra_container"}
pods := []*containertest.FakePod{{
Pod: &kubecontainer.Pod{
Containers: []*kubecontainer.Container{
{ID: containerID},
},
},
NetnsPath: "/proc/12345/ns/net",
}}
plugins := ProbeNetworkPlugins(testConfDir, []string{testBinDir})
if len(plugins) != 1 {
t.Fatalf("Expected only one network plugin, got %d", len(plugins))
}
if plugins[0].Name() != "cni" {
t.Fatalf("Expected CNI network plugin, got %q", plugins[0].Name())
}
cniPlugin, ok := plugins[0].(*cniNetworkPlugin)
if !ok {
t.Fatalf("Not a CNI network plugin!")
}
cniPlugin.execer = fexec
cniPlugin.loNetwork.CNIConfig = mockLoCNI
mockLoCNI.On("AddNetworkList", cniPlugin.loNetwork.NetworkConfig, mock.AnythingOfType("*libcni.RuntimeConf")).Return(&types020.Result{IP4: &types020.IPConfig{IP: net.IPNet{IP: []byte{127, 0, 0, 1}}}}, nil)
ports := map[string][]*hostport.PortMapping{
containerID.ID: {
{
Name: "name",
HostPort: 8008,
ContainerPort: 80,
Protocol: "UDP",
HostIP: "0.0.0.0",
},
},
}
fakeHost := NewFakeHost(nil, pods, ports)
plug, err := network.InitNetworkPlugin(plugins, "cni", fakeHost, kubeletconfig.HairpinNone, "10.0.0.0/8", network.UseDefaultMTU)
if err != nil {
t.Fatalf("Failed to select the desired plugin: %v", err)
}
// Set up the pod
err = plug.SetUpPod("podNamespace", "podName", containerID, map[string]string{})
if err != nil {
t.Errorf("Expected nil: %v", err)
}
eo, eerr := ioutil.ReadFile(outputEnv)
output, err := ioutil.ReadFile(outputFile)
if err != nil || eerr != nil {
t.Errorf("Failed to read output file %s: %v (env %s err %v)", outputFile, err, eo, eerr)
}
expectedOutput := "ADD /proc/12345/ns/net podNamespace podName test_infra_container"
if string(output) != expectedOutput {
t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output))
}
// Verify the correct network configuration was passed
inputConfig := struct {
RuntimeConfig struct {
PortMappings []map[string]interface{} `json:"portMappings"`
} `json:"runtimeConfig"`
}{}
inputBytes, inerr := ioutil.ReadFile(inputFile)
parseerr := json.Unmarshal(inputBytes, &inputConfig)
if inerr != nil || parseerr != nil {
t.Errorf("failed to parse reported cni input config %s: (%v %v)", inputFile, inerr, parseerr)
}
expectedMappings := []map[string]interface{}{
// hah, golang always unmarshals unstructured json numbers as float64
{"hostPort": 8008.0, "containerPort": 80.0, "protocol": "udp", "hostIP": "0.0.0.0"},
}
if !reflect.DeepEqual(inputConfig.RuntimeConfig.PortMappings, expectedMappings) {
t.Errorf("mismatch in expected port mappings. expected %v got %v", expectedMappings, inputConfig.RuntimeConfig.PortMappings)
}
// Get its IP address
status, err := plug.GetPodNetworkStatus("podNamespace", "podName", containerID)
if err != nil {
t.Errorf("Failed to read pod network status: %v", err)
}
if status.IP.String() != podIP {
t.Errorf("Expected pod IP %q but got %q", podIP, status.IP.String())
}
// Tear it down
err = plug.TearDownPod("podNamespace", "podName", containerID)
if err != nil {
t.Errorf("Expected nil: %v", err)
}
output, err = ioutil.ReadFile(outputFile)
expectedOutput = "DEL /proc/12345/ns/net podNamespace podName test_infra_container"
if string(output) != expectedOutput {
t.Errorf("Mismatch in expected output for setup hook. Expected '%s', got '%s'", expectedOutput, string(output))
}
mockLoCNI.AssertExpectations(t)
}
func TestLoNetNonNil(t *testing.T) {
if conf := getLoNetwork(nil); conf == nil {
t.Error("Expected non-nil lo network")
}
}

View File

@ -1,61 +0,0 @@
// +build windows
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package cni
import (
"fmt"
cniTypes020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/golang/glog"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
)
func getLoNetwork(binDirs []string) *cniNetwork {
return nil
}
func (plugin *cniNetworkPlugin) platformInit() error {
return nil
}
// GetPodNetworkStatus: assuming addToNetwork is idempotent, we can call this API as many times as required to get the IP address
func (plugin *cniNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
return nil, fmt.Errorf("CNI failed to retrieve network namespace path: %v", err)
}
result, err := plugin.addToNetwork(plugin.getDefaultNetwork(), name, namespace, id, netnsPath)
glog.V(5).Infof("GetPodNetworkStatus result %+v", result)
if err != nil {
glog.Errorf("error while adding to cni network: %s", err)
return nil, err
}
// Parse the result and get the IPAddress
var result020 *cniTypes020.Result
result020, err = cniTypes020.GetResult(result)
if err != nil {
glog.Errorf("error while cni parsing result: %s", err)
return nil, err
}
return &network.PodNetworkStatus{IP: result020.IP4.IP.IP}, nil
}

View File

@ -1,30 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["mock_cni.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing",
deps = [
"//vendor/github.com/containernetworking/cni/libcni:go_default_library",
"//vendor/github.com/containernetworking/cni/pkg/types:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,49 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// mock_cni is a mock of the `libcni.CNI` interface. It's a handwritten mock
// because there are only a few functions to deal with.
package mock_cni
import (
"github.com/containernetworking/cni/libcni"
"github.com/containernetworking/cni/pkg/types"
"github.com/stretchr/testify/mock"
)
type MockCNI struct {
mock.Mock
}
func (m *MockCNI) AddNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) (types.Result, error) {
args := m.Called(net, rt)
return args.Get(0).(types.Result), args.Error(1)
}
func (m *MockCNI) DelNetwork(net *libcni.NetworkConfig, rt *libcni.RuntimeConf) error {
args := m.Called(net, rt)
return args.Error(0)
}
func (m *MockCNI) DelNetworkList(net *libcni.NetworkConfigList, rt *libcni.RuntimeConf) error {
args := m.Called(net, rt)
return args.Error(0)
}
func (m *MockCNI) AddNetworkList(net *libcni.NetworkConfigList, rt *libcni.RuntimeConf) (types.Result, error) {
args := m.Called(net, rt)
return args.Get(0).(types.Result), args.Error(1)
}

View File

@ -1,40 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = ["hairpin.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hairpin",
deps = [
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["hairpin_test.go"],
embed = [":go_default_library"],
deps = [
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,87 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hairpin
import (
"fmt"
"io/ioutil"
"net"
"os"
"path"
"regexp"
"strconv"
"github.com/golang/glog"
"k8s.io/utils/exec"
)
const (
sysfsNetPath = "/sys/devices/virtual/net"
brportRelativePath = "brport"
hairpinModeRelativePath = "hairpin_mode"
hairpinEnable = "1"
)
var (
ethtoolOutputRegex = regexp.MustCompile("peer_ifindex: (\\d+)")
)
func findPairInterfaceOfContainerInterface(e exec.Interface, containerInterfaceName, containerDesc string, nsenterArgs []string) (string, error) {
nsenterPath, err := e.LookPath("nsenter")
if err != nil {
return "", err
}
ethtoolPath, err := e.LookPath("ethtool")
if err != nil {
return "", err
}
nsenterArgs = append(nsenterArgs, "-F", "--", ethtoolPath, "--statistics", containerInterfaceName)
output, err := e.Command(nsenterPath, nsenterArgs...).CombinedOutput()
if err != nil {
return "", fmt.Errorf("Unable to query interface %s of container %s: %v: %s", containerInterfaceName, containerDesc, err, string(output))
}
// look for peer_ifindex
match := ethtoolOutputRegex.FindSubmatch(output)
if match == nil {
return "", fmt.Errorf("No peer_ifindex in interface statistics for %s of container %s", containerInterfaceName, containerDesc)
}
peerIfIndex, err := strconv.Atoi(string(match[1]))
if err != nil { // seems impossible (\d+ not numeric)
return "", fmt.Errorf("peer_ifindex wasn't numeric: %s: %v", match[1], err)
}
iface, err := net.InterfaceByIndex(peerIfIndex)
if err != nil {
return "", err
}
return iface.Name, nil
}
func setUpInterface(ifName string) error {
glog.V(3).Infof("Enabling hairpin on interface %s", ifName)
ifPath := path.Join(sysfsNetPath, ifName)
if _, err := os.Stat(ifPath); err != nil {
return err
}
brportPath := path.Join(ifPath, brportRelativePath)
if _, err := os.Stat(brportPath); err != nil && os.IsNotExist(err) {
// Device is not on a bridge, so doesn't need hairpin mode
return nil
}
hairpinModeFile := path.Join(brportPath, hairpinModeRelativePath)
return ioutil.WriteFile(hairpinModeFile, []byte(hairpinEnable), 0644)
}

View File

@ -1,109 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hairpin
import (
"errors"
"fmt"
"net"
"os"
"strings"
"testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
func TestFindPairInterfaceOfContainerInterface(t *testing.T) {
// there should be at least "lo" on any system
interfaces, _ := net.Interfaces()
validOutput := fmt.Sprintf("garbage\n peer_ifindex: %d", interfaces[0].Index)
invalidOutput := fmt.Sprintf("garbage\n unknown: %d", interfaces[0].Index)
tests := []struct {
output string
err error
expectedName string
expectErr bool
}{
{
output: validOutput,
expectedName: interfaces[0].Name,
},
{
output: invalidOutput,
expectErr: true,
},
{
output: validOutput,
err: errors.New("error"),
expectErr: true,
},
}
for _, test := range tests {
fcmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) { return []byte(test.output), test.err },
},
}
fexec := fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{
func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fcmd, cmd, args...)
},
},
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
nsenterArgs := []string{"-t", "123", "-n"}
name, err := findPairInterfaceOfContainerInterface(&fexec, "eth0", "123", nsenterArgs)
if test.expectErr {
if err == nil {
t.Errorf("unexpected non-error")
}
} else {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
}
if name != test.expectedName {
t.Errorf("unexpected name: %s (expected: %s)", name, test.expectedName)
}
}
}
func TestSetUpInterfaceNonExistent(t *testing.T) {
err := setUpInterface("non-existent")
if err == nil {
t.Errorf("unexpected non-error")
}
deviceDir := fmt.Sprintf("%s/%s", sysfsNetPath, "non-existent")
if !strings.Contains(fmt.Sprintf("%v", err), deviceDir) {
t.Errorf("should have tried to open %s", deviceDir)
}
}
func TestSetUpInterfaceNotBridged(t *testing.T) {
err := setUpInterface("lo")
if err != nil {
if os.IsNotExist(err) {
t.Skipf("'lo' device does not exist??? (%v)", err)
}
t.Errorf("unexpected error: %v", err)
}
}

View File

@ -1,62 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"fake_iptables.go",
"hostport.go",
"hostport_manager.go",
"hostport_syncer.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport",
deps = [
"//pkg/proxy/iptables:go_default_library",
"//pkg/util/conntrack:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/net:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"fake_iptables_test.go",
"hostport_manager_test.go",
"hostport_syncer_test.go",
"hostport_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/util/iptables:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//pkg/kubelet/dockershim/network/hostport/testing:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,353 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"fmt"
"net"
"strings"
"k8s.io/apimachinery/pkg/util/sets"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
type fakeChain struct {
name utiliptables.Chain
rules []string
}
type fakeTable struct {
name utiliptables.Table
chains map[string]*fakeChain
}
type fakeIPTables struct {
tables map[string]*fakeTable
builtinChains map[string]sets.String
}
func NewFakeIPTables() *fakeIPTables {
return &fakeIPTables{
tables: make(map[string]*fakeTable, 0),
builtinChains: map[string]sets.String{
string(utiliptables.TableFilter): sets.NewString("INPUT", "FORWARD", "OUTPUT"),
string(utiliptables.TableNAT): sets.NewString("PREROUTING", "INPUT", "OUTPUT", "POSTROUTING"),
string(utiliptables.TableMangle): sets.NewString("PREROUTING", "INPUT", "FORWARD", "OUTPUT", "POSTROUTING"),
},
}
}
func (f *fakeIPTables) GetVersion() (string, error) {
return "1.4.21", nil
}
func (f *fakeIPTables) getTable(tableName utiliptables.Table) (*fakeTable, error) {
table, ok := f.tables[string(tableName)]
if !ok {
return nil, fmt.Errorf("Table %s does not exist", tableName)
}
return table, nil
}
func (f *fakeIPTables) getChain(tableName utiliptables.Table, chainName utiliptables.Chain) (*fakeTable, *fakeChain, error) {
table, err := f.getTable(tableName)
if err != nil {
return nil, nil, err
}
chain, ok := table.chains[string(chainName)]
if !ok {
return table, nil, fmt.Errorf("Chain %s/%s does not exist", tableName, chainName)
}
return table, chain, nil
}
func (f *fakeIPTables) ensureChain(tableName utiliptables.Table, chainName utiliptables.Chain) (bool, *fakeChain) {
table, chain, err := f.getChain(tableName, chainName)
if err != nil {
// either the table or the table+chain doesn't exist yet
if table == nil {
table = &fakeTable{
name: tableName,
chains: make(map[string]*fakeChain),
}
f.tables[string(tableName)] = table
}
chain := &fakeChain{
name: chainName,
rules: make([]string, 0),
}
table.chains[string(chainName)] = chain
return false, chain
}
return true, chain
}
func (f *fakeIPTables) EnsureChain(tableName utiliptables.Table, chainName utiliptables.Chain) (bool, error) {
existed, _ := f.ensureChain(tableName, chainName)
return existed, nil
}
func (f *fakeIPTables) FlushChain(tableName utiliptables.Table, chainName utiliptables.Chain) error {
_, chain, err := f.getChain(tableName, chainName)
if err != nil {
return err
}
chain.rules = make([]string, 0)
return nil
}
func (f *fakeIPTables) DeleteChain(tableName utiliptables.Table, chainName utiliptables.Chain) error {
table, _, err := f.getChain(tableName, chainName)
if err != nil {
return err
}
delete(table.chains, string(chainName))
return nil
}
// Returns index of rule in array; < 0 if rule is not found
func findRule(chain *fakeChain, rule string) int {
for i, candidate := range chain.rules {
if rule == candidate {
return i
}
}
return -1
}
func (f *fakeIPTables) ensureRule(position utiliptables.RulePosition, tableName utiliptables.Table, chainName utiliptables.Chain, rule string) (bool, error) {
_, chain, err := f.getChain(tableName, chainName)
if err != nil {
_, chain = f.ensureChain(tableName, chainName)
}
rule, err = normalizeRule(rule)
if err != nil {
return false, err
}
ruleIdx := findRule(chain, rule)
if ruleIdx >= 0 {
return true, nil
}
if position == utiliptables.Prepend {
chain.rules = append([]string{rule}, chain.rules...)
} else if position == utiliptables.Append {
chain.rules = append(chain.rules, rule)
} else {
return false, fmt.Errorf("Unknown position argument %q", position)
}
return false, nil
}
func normalizeRule(rule string) (string, error) {
normalized := ""
remaining := strings.TrimSpace(rule)
for {
var end int
if strings.HasPrefix(remaining, "--to-destination=") {
remaining = strings.Replace(remaining, "=", " ", 1)
}
if remaining[0] == '"' {
end = strings.Index(remaining[1:], "\"")
if end < 0 {
return "", fmt.Errorf("Invalid rule syntax: mismatched quotes")
}
end += 2
} else {
end = strings.Index(remaining, " ")
if end < 0 {
end = len(remaining)
}
}
arg := remaining[:end]
// Normalize un-prefixed IP addresses like iptables does
if net.ParseIP(arg) != nil {
arg = arg + "/32"
}
if len(normalized) > 0 {
normalized += " "
}
normalized += strings.TrimSpace(arg)
if len(remaining) == end {
break
}
remaining = remaining[end+1:]
}
return normalized, nil
}
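// For example (inputs illustrative):
//
//	normalizeRule(`-m comment --comment "some comment" -j ACCEPT`)
//	  => `-m comment --comment "some comment" -j ACCEPT`
//	normalizeRule(`-d 127.0.0.1 -j DNAT --to-destination=10.0.0.2:80`)
//	  => `-d 127.0.0.1/32 -j DNAT --to-destination 10.0.0.2:80`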
func (f *fakeIPTables) EnsureRule(position utiliptables.RulePosition, tableName utiliptables.Table, chainName utiliptables.Chain, args ...string) (bool, error) {
ruleArgs := make([]string, 0)
for _, arg := range args {
// quote args with internal spaces (like comments)
if strings.Index(arg, " ") >= 0 {
arg = fmt.Sprintf("\"%s\"", arg)
}
ruleArgs = append(ruleArgs, arg)
}
return f.ensureRule(position, tableName, chainName, strings.Join(ruleArgs, " "))
}
func (f *fakeIPTables) DeleteRule(tableName utiliptables.Table, chainName utiliptables.Chain, args ...string) error {
_, chain, err := f.getChain(tableName, chainName)
if err == nil {
rule := strings.Join(args, " ")
ruleIdx := findRule(chain, rule)
if ruleIdx < 0 {
return nil
}
chain.rules = append(chain.rules[:ruleIdx], chain.rules[ruleIdx+1:]...)
}
return nil
}
func (f *fakeIPTables) IsIpv6() bool {
return false
}
func saveChain(chain *fakeChain, data *bytes.Buffer) {
for _, rule := range chain.rules {
data.WriteString(fmt.Sprintf("-A %s %s\n", chain.name, rule))
}
}
func (f *fakeIPTables) SaveInto(tableName utiliptables.Table, buffer *bytes.Buffer) error {
table, err := f.getTable(tableName)
if err != nil {
return err
}
buffer.WriteString(fmt.Sprintf("*%s\n", table.name))
rules := bytes.NewBuffer(nil)
for _, chain := range table.chains {
buffer.WriteString(fmt.Sprintf(":%s - [0:0]\n", string(chain.name)))
saveChain(chain, rules)
}
buffer.Write(rules.Bytes())
buffer.WriteString("COMMIT\n")
return nil
}
func (f *fakeIPTables) restore(restoreTableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag) error {
allLines := string(data)
buf := bytes.NewBuffer(data)
var tableName utiliptables.Table
for {
line, err := buf.ReadString('\n')
if err != nil {
break
}
if line[0] == '#' {
continue
}
line = strings.TrimSuffix(line, "\n")
if strings.HasPrefix(line, "*") {
tableName = utiliptables.Table(line[1:])
}
if tableName != "" {
if restoreTableName != "" && restoreTableName != tableName {
continue
}
if strings.HasPrefix(line, ":") {
chainName := utiliptables.Chain(strings.Split(line[1:], " ")[0])
if flush == utiliptables.FlushTables {
table, chain, _ := f.getChain(tableName, chainName)
if chain != nil {
delete(table.chains, string(chainName))
}
}
_, _ = f.ensureChain(tableName, chainName)
// The --noflush option for iptables-restore only applies to builtin chains, not user-defined ones.
// Flush a user-defined chain unless the chain is about to be deleted
if !f.isBuiltinChain(tableName, chainName) && !strings.Contains(allLines, "-X "+string(chainName)) {
if err := f.FlushChain(tableName, chainName); err != nil {
return err
}
}
} else if strings.HasPrefix(line, "-A") {
parts := strings.Split(line, " ")
if len(parts) < 3 {
return fmt.Errorf("Invalid iptables rule '%s'", line)
}
chainName := utiliptables.Chain(parts[1])
rule := strings.TrimPrefix(line, fmt.Sprintf("-A %s ", chainName))
_, err := f.ensureRule(utiliptables.Append, tableName, chainName, rule)
if err != nil {
return err
}
} else if strings.HasPrefix(line, "-I") {
parts := strings.Split(line, " ")
if len(parts) < 3 {
return fmt.Errorf("Invalid iptables rule '%s'", line)
}
chainName := utiliptables.Chain(parts[1])
rule := strings.TrimPrefix(line, fmt.Sprintf("-I %s ", chainName))
_, err := f.ensureRule(utiliptables.Prepend, tableName, chainName, rule)
if err != nil {
return err
}
} else if strings.HasPrefix(line, "-X") {
parts := strings.Split(line, " ")
if len(parts) < 2 {
return fmt.Errorf("Invalid iptables rule '%s'", line)
}
if err := f.DeleteChain(tableName, utiliptables.Chain(parts[1])); err != nil {
return err
}
} else if line == "COMMIT" {
if restoreTableName == tableName {
return nil
}
tableName = ""
}
}
}
return nil
}
func (f *fakeIPTables) Restore(tableName utiliptables.Table, data []byte, flush utiliptables.FlushFlag, counters utiliptables.RestoreCountersFlag) error {
return f.restore(tableName, data, flush)
}
func (f *fakeIPTables) RestoreAll(data []byte, flush utiliptables.FlushFlag, counters utiliptables.RestoreCountersFlag) error {
return f.restore("", data, flush)
}
func (f *fakeIPTables) AddReloadFunc(reloadFunc func()) {
}
func (f *fakeIPTables) Destroy() {
}
func (f *fakeIPTables) isBuiltinChain(tableName utiliptables.Table, chainName utiliptables.Chain) bool {
if builtinChains, ok := f.builtinChains[string(tableName)]; ok && builtinChains.Has(string(chainName)) {
return true
}
return false
}
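// SaveInto produces an iptables-save style dump of a single table, e.g.
// (chain and rule content illustrative):
//
//	*nat
//	:KUBE-HOSTPORTS - [0:0]
//	-A KUBE-HOSTPORTS -m tcp -p tcp --dport 8443 -j KUBE-HP-5N7UH5JAXCVP5UJR
//	COMMIT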

View File

@ -1,56 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"testing"
"github.com/stretchr/testify/assert"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
func TestRestoreFlushRules(t *testing.T) {
iptables := NewFakeIPTables()
rules := [][]string{
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j KUBE-HP-5N7UH5JAXCVP5UJR"},
{"-A", "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE"},
}
natRules := bytes.NewBuffer(nil)
writeLine(natRules, "*nat")
for _, rule := range rules {
_, err := iptables.EnsureChain(utiliptables.TableNAT, utiliptables.Chain(rule[1]))
assert.NoError(t, err)
_, err = iptables.ensureRule(utiliptables.RulePosition(rule[0]), utiliptables.TableNAT, utiliptables.Chain(rule[1]), rule[2])
assert.NoError(t, err)
writeLine(natRules, utiliptables.MakeChainLine(utiliptables.Chain(rule[1])))
}
writeLine(natRules, "COMMIT")
assert.NoError(t, iptables.Restore(utiliptables.TableNAT, natRules.Bytes(), utiliptables.NoFlushTables, utiliptables.RestoreCounters))
natTable, ok := iptables.tables[string(utiliptables.TableNAT)]
assert.True(t, ok)
// check KUBE-HOSTPORTS chain, should have been cleaned up
hostportChain, ok := natTable.chains["KUBE-HOSTPORTS"]
assert.True(t, ok)
assert.Equal(t, 0, len(hostportChain.rules))
// check builtin chains; they should not have been cleaned up
postroutingChain, ok := natTable.chains["POSTROUTING"]
assert.True(t, ok, string(postroutingChain.name))
assert.Equal(t, 1, len(postroutingChain.rules))
}

View File

@ -1,143 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"fmt"
"net"
"strings"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
const (
// the hostport chain
kubeHostportsChain utiliptables.Chain = "KUBE-HOSTPORTS"
// prefix for hostport chains
kubeHostportChainPrefix string = "KUBE-HP-"
)
// PortMapping represents a network port in a container
type PortMapping struct {
Name string
HostPort int32
ContainerPort int32
Protocol v1.Protocol
HostIP string
}
// PodPortMapping represents a pod's network state and associated container port mappings
type PodPortMapping struct {
Namespace string
Name string
PortMappings []*PortMapping
HostNetwork bool
IP net.IP
}
type hostport struct {
port int32
protocol string
}
type hostportOpener func(*hostport) (closeable, error)
type closeable interface {
Close() error
}
func openLocalPort(hp *hostport) (closeable, error) {
// For ports on node IPs, open the actual port and hold it, even though we
// use iptables to redirect traffic.
// This ensures a) that it's safe to use that port and b) that (a) stays
// true. The risk is that some process on the node (e.g. sshd or kubelet)
// is using a port and we give that same port out to a Service. That would
// be bad because iptables would silently claim the traffic but the process
// would never know.
// NOTE: We should not need to have a real listen()ing socket - bind()
// should be enough, but I can't figure out a way to e2e test without
// it. Tools like 'ss' and 'netstat' do not show sockets that are
// bind()ed but not listen()ed, and at least the default debian netcat
// has no way to avoid about 10 seconds of retries.
var socket closeable
switch hp.protocol {
case "tcp":
listener, err := net.Listen("tcp", fmt.Sprintf(":%d", hp.port))
if err != nil {
return nil, err
}
socket = listener
case "udp":
addr, err := net.ResolveUDPAddr("udp", fmt.Sprintf(":%d", hp.port))
if err != nil {
return nil, err
}
conn, err := net.ListenUDP("udp", addr)
if err != nil {
return nil, err
}
socket = conn
default:
return nil, fmt.Errorf("unknown protocol %q", hp.protocol)
}
glog.V(3).Infof("Opened local port %s", hp.String())
return socket, nil
}
// portMappingToHostport creates a hostport structure from the given PortMapping
func portMappingToHostport(portMapping *PortMapping) hostport {
return hostport{
port: portMapping.HostPort,
protocol: strings.ToLower(string(portMapping.Protocol)),
}
}
// ensureKubeHostportChains ensures the KUBE-HOSTPORTS chain is set up correctly
func ensureKubeHostportChains(iptables utiliptables.Interface, natInterfaceName string) error {
glog.V(4).Info("Ensuring kubelet hostport chains")
// Ensure kubeHostportChain
if _, err := iptables.EnsureChain(utiliptables.TableNAT, kubeHostportsChain); err != nil {
return fmt.Errorf("Failed to ensure that %s chain %s exists: %v", utiliptables.TableNAT, kubeHostportsChain, err)
}
tableChainsNeedJumpServices := []struct {
table utiliptables.Table
chain utiliptables.Chain
}{
{utiliptables.TableNAT, utiliptables.ChainOutput},
{utiliptables.TableNAT, utiliptables.ChainPrerouting},
}
args := []string{"-m", "comment", "--comment", "kube hostport portals",
"-m", "addrtype", "--dst-type", "LOCAL",
"-j", string(kubeHostportsChain)}
for _, tc := range tableChainsNeedJumpServices {
// KUBE-HOSTPORTS chain needs to be appended to the system chains.
// This ensures the KUBE-SERVICES chain gets processed first.
// Since rules in the KUBE-HOSTPORTS chain match broader cases, this allows the more specific rules to be processed first.
if _, err := iptables.EnsureRule(utiliptables.Append, tc.table, tc.chain, args...); err != nil {
return fmt.Errorf("Failed to ensure that %s chain %s jumps to %s: %v", tc.table, tc.chain, kubeHostportsChain, err)
}
}
// Need to SNAT traffic from localhost
args = []string{"-m", "comment", "--comment", "SNAT for localhost access to hostports", "-o", natInterfaceName, "-s", "127.0.0.0/8", "-j", "MASQUERADE"}
if _, err := iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting, args...); err != nil {
return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
}
return nil
}
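// In iptables-save form the ensured rules look roughly like the following
// (with cbr0 as the NAT interface):
//
//	-A OUTPUT -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
//	-A PREROUTING -m comment --comment "kube hostport portals" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
//	-A POSTROUTING -m comment --comment "SNAT for localhost access to hostports" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE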

View File

@ -1,383 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"strconv"
"strings"
"sync"
"github.com/golang/glog"
"k8s.io/api/core/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
"k8s.io/kubernetes/pkg/util/conntrack"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilnet "k8s.io/kubernetes/pkg/util/net"
"k8s.io/utils/exec"
)
// HostPortManager is an interface for adding and removing hostport for a given pod sandbox.
type HostPortManager interface {
// Add implements port mappings.
// id should be a unique identifier for a pod, e.g. podSandboxID.
// podPortMapping is the associated port mapping information for the pod.
// natInterfaceName is the interface that localhost uses to talk to the given pod.
Add(id string, podPortMapping *PodPortMapping, natInterfaceName string) error
// Remove cleans up matching port mappings.
// It must be able to clean up port mappings without the pod IP
Remove(id string, podPortMapping *PodPortMapping) error
}
type hostportManager struct {
hostPortMap map[hostport]closeable
execer exec.Interface
conntrackFound bool
iptables utiliptables.Interface
portOpener hostportOpener
mu sync.Mutex
}
func NewHostportManager(iptables utiliptables.Interface) HostPortManager {
h := &hostportManager{
hostPortMap: make(map[hostport]closeable),
execer: exec.New(),
iptables: iptables,
portOpener: openLocalPort,
}
h.conntrackFound = conntrack.Exists(h.execer)
if !h.conntrackFound {
glog.Warningf("The binary conntrack is not installed, this can cause failures in network connection cleanup.")
}
return h
}
func (hm *hostportManager) Add(id string, podPortMapping *PodPortMapping, natInterfaceName string) (err error) {
if podPortMapping == nil || podPortMapping.HostNetwork {
return nil
}
podFullName := getPodFullName(podPortMapping)
// skip if there is no hostport needed
hostportMappings := gatherHostportMappings(podPortMapping)
if len(hostportMappings) == 0 {
return nil
}
if podPortMapping.IP.To4() == nil {
return fmt.Errorf("invalid or missing IP of pod %s", podFullName)
}
podIP := podPortMapping.IP.String()
if err = ensureKubeHostportChains(hm.iptables, natInterfaceName); err != nil {
return err
}
// Ensure atomicity for port opening and iptables operations
hm.mu.Lock()
defer hm.mu.Unlock()
// try to open hostports
ports, err := hm.openHostports(podPortMapping)
if err != nil {
return err
}
for hostport, socket := range ports {
hm.hostPortMap[hostport] = socket
}
natChains := bytes.NewBuffer(nil)
natRules := bytes.NewBuffer(nil)
writeLine(natChains, "*nat")
existingChains, existingRules, err := getExistingHostportIPTablesRules(hm.iptables)
if err != nil {
// clean up opened host ports if any error is encountered
return utilerrors.NewAggregate([]error{err, hm.closeHostports(hostportMappings)})
}
newChains := []utiliptables.Chain{}
conntrackPortsToRemove := []int{}
for _, pm := range hostportMappings {
protocol := strings.ToLower(string(pm.Protocol))
chain := getHostportChain(id, pm)
newChains = append(newChains, chain)
if pm.Protocol == v1.ProtocolUDP {
conntrackPortsToRemove = append(conntrackPortsToRemove, int(pm.HostPort))
}
// Add new hostport chain
writeLine(natChains, utiliptables.MakeChainLine(chain))
// Prepend the new chain to KUBE-HOSTPORTS
// This avoids any leaking iptables rule that takes up the same port
writeLine(natRules, "-I", string(kubeHostportsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort),
"-m", protocol, "-p", protocol, "--dport", fmt.Sprintf("%d", pm.HostPort),
"-j", string(chain),
)
// SNAT if the traffic comes from the pod itself
writeLine(natRules, "-A", string(chain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort),
"-s", podIP,
"-j", string(iptablesproxy.KubeMarkMasqChain))
// DNAT to the podIP:containerPort
writeLine(natRules, "-A", string(chain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, podFullName, pm.HostPort),
"-m", protocol, "-p", protocol,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", podIP, pm.ContainerPort))
}
// getHostportChain should provide a unique hostport chain name using a hash;
// if there is a chain conflict or multiple Adds have been triggered for a single pod,
// filtering should avoid further problems
filterChains(existingChains, newChains)
existingRules = filterRules(existingRules, newChains)
for _, chain := range existingChains {
writeLine(natChains, chain)
}
for _, rule := range existingRules {
writeLine(natRules, rule)
}
writeLine(natRules, "COMMIT")
if err = hm.syncIPTables(append(natChains.Bytes(), natRules.Bytes()...)); err != nil {
// clean up opened host ports if any error is encountered
return utilerrors.NewAggregate([]error{err, hm.closeHostports(hostportMappings)})
}
isIpv6 := utilnet.IsIPv6(podPortMapping.IP)
// Remove conntrack entries just after adding the new iptables rules. If the conntrack entry is removed along with
// the iptables rule, packets received by the node after the rule removal may create a new conntrack entry without
// any DNAT. That would blackhole the traffic even after the correct iptables rules have been added back.
if hm.execer != nil && hm.conntrackFound {
glog.Infof("Starting to delete udp conntrack entries: %v, isIPv6 - %v", conntrackPortsToRemove, isIpv6)
for _, port := range conntrackPortsToRemove {
err = conntrack.ClearEntriesForPort(hm.execer, port, isIpv6, v1.ProtocolUDP)
if err != nil {
glog.Errorf("Failed to clear udp conntrack for port %d, error: %v", port, err)
}
}
}
return nil
}
func (hm *hostportManager) Remove(id string, podPortMapping *PodPortMapping) (err error) {
if podPortMapping == nil || podPortMapping.HostNetwork {
return nil
}
hostportMappings := gatherHostportMappings(podPortMapping)
if len(hostportMappings) <= 0 {
return nil
}
// Ensure atomicity for port closing and iptables operations
hm.mu.Lock()
defer hm.mu.Unlock()
var existingChains map[utiliptables.Chain]string
var existingRules []string
existingChains, existingRules, err = getExistingHostportIPTablesRules(hm.iptables)
if err != nil {
return err
}
// Gather target hostport chains for removal
chainsToRemove := []utiliptables.Chain{}
for _, pm := range hostportMappings {
chainsToRemove = append(chainsToRemove, getHostportChain(id, pm))
}
// remove rules that reference the target chains
remainingRules := filterRules(existingRules, chainsToRemove)
// gather the target hostport chains that exist in the iptables-save result
existingChainsToRemove := []utiliptables.Chain{}
for _, chain := range chainsToRemove {
if _, ok := existingChains[chain]; ok {
existingChainsToRemove = append(existingChainsToRemove, chain)
}
}
natChains := bytes.NewBuffer(nil)
natRules := bytes.NewBuffer(nil)
writeLine(natChains, "*nat")
for _, chain := range existingChains {
writeLine(natChains, chain)
}
for _, rule := range remainingRules {
writeLine(natRules, rule)
}
for _, chain := range existingChainsToRemove {
writeLine(natRules, "-X", string(chain))
}
writeLine(natRules, "COMMIT")
if err = hm.syncIPTables(append(natChains.Bytes(), natRules.Bytes()...)); err != nil {
return err
}
// clean up opened pod host ports
return hm.closeHostports(hostportMappings)
}
// syncIPTables executes iptables-restore with the given lines
func (hm *hostportManager) syncIPTables(lines []byte) error {
glog.V(3).Infof("Restoring iptables rules: %s", lines)
err := hm.iptables.RestoreAll(lines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
return fmt.Errorf("Failed to execute iptables-restore: %v", err)
}
return nil
}
// openHostports opens all given hostports using the given hostportOpener.
// If any error is encountered, it cleans up and returns the error.
// If all ports are opened successfully, it returns the hostport-to-socket mapping.
func (hm *hostportManager) openHostports(podPortMapping *PodPortMapping) (map[hostport]closeable, error) {
var retErr error
ports := make(map[hostport]closeable)
for _, pm := range podPortMapping.PortMappings {
if pm.HostPort <= 0 {
continue
}
hp := portMappingToHostport(pm)
socket, err := hm.portOpener(&hp)
if err != nil {
retErr = fmt.Errorf("cannot open hostport %d for pod %s: %v", pm.HostPort, getPodFullName(podPortMapping), err)
break
}
ports[hp] = socket
}
// If any error was encountered, close all hostports that were just opened.
if retErr != nil {
for hp, socket := range ports {
if err := socket.Close(); err != nil {
glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podPortMapping), err)
}
}
return nil, retErr
}
return ports, nil
}
// closeHostports tries to close all the listed host ports
func (hm *hostportManager) closeHostports(hostportMappings []*PortMapping) error {
errList := []error{}
for _, pm := range hostportMappings {
hp := portMappingToHostport(pm)
if socket, ok := hm.hostPortMap[hp]; ok {
glog.V(2).Infof("Closing host port %s", hp.String())
if err := socket.Close(); err != nil {
errList = append(errList, fmt.Errorf("failed to close host port %s: %v", hp.String(), err))
continue
}
delete(hm.hostPortMap, hp)
}
}
return utilerrors.NewAggregate(errList)
}
// getHostportChain takes id, hostport and protocol for a pod and returns associated iptables chain.
// This is computed by hashing (sha256) then encoding to base32 and truncating with the prefix
// "KUBE-HP-". We do this because IPTables Chain Names must be <= 28 chars long, and the longer
// they are the harder they are to read.
// WARNING: Please do not change this function. Otherwise, HostportManager may not be able to
// identify existing iptables chains.
func getHostportChain(id string, pm *PortMapping) utiliptables.Chain {
hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol)))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
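// Editor's note: chainNameExample is an illustrative sketch, not part of the
// original file. It restates the derivation above with made-up inputs: the
// hypothetical sandbox ID "abc123" with TCP host port 8080 yields "KUBE-HP-"
// plus the first 16 base32 characters of the sha256 digest, which keeps the
// name within iptables' 28-character chain-name limit.
func chainNameExample() utiliptables.Chain {
	id := "abc123" // hypothetical pod sandbox ID
	pm := &PortMapping{HostPort: 8080, Protocol: "TCP"}
	hash := sha256.Sum256([]byte(id + strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol)))
	encoded := base32.StdEncoding.EncodeToString(hash[:])
	return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}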
// gatherHostportMappings returns all of a pod's PortMappings that specify a hostport
func gatherHostportMappings(podPortMapping *PodPortMapping) []*PortMapping {
mappings := []*PortMapping{}
for _, pm := range podPortMapping.PortMappings {
if pm.HostPort <= 0 {
continue
}
mappings = append(mappings, pm)
}
return mappings
}
// getExistingHostportIPTablesRules retrieves raw data from iptables-save, parses it,
// and returns all hostport-related chains and rules
func getExistingHostportIPTablesRules(iptables utiliptables.Interface) (map[utiliptables.Chain]string, []string, error) {
iptablesData := bytes.NewBuffer(nil)
err := iptables.SaveInto(utiliptables.TableNAT, iptablesData)
if err != nil { // if we failed to get any rules
return nil, nil, fmt.Errorf("failed to execute iptables-save: %v", err)
}
existingNATChains := utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
existingHostportChains := make(map[utiliptables.Chain]string)
existingHostportRules := []string{}
for chain := range existingNATChains {
if strings.HasPrefix(string(chain), string(kubeHostportsChain)) || strings.HasPrefix(string(chain), kubeHostportChainPrefix) {
existingHostportChains[chain] = existingNATChains[chain]
}
}
for _, line := range strings.Split(string(iptablesData.Bytes()), "\n") {
if strings.HasPrefix(line, fmt.Sprintf("-A %s", kubeHostportChainPrefix)) ||
strings.HasPrefix(line, fmt.Sprintf("-A %s", string(kubeHostportsChain))) {
existingHostportRules = append(existingHostportRules, line)
}
}
return existingHostportChains, existingHostportRules, nil
}
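// Editor's note: exampleSaveOutput is an illustrative sketch, not part of the
// original file; all addresses and chain names are hypothetical. Fed through
// getExistingHostportIPTablesRules, the two KUBE-* declarations would come
// back in the chain map and the last two "-A KUBE-..." lines as rules, while
// the OUTPUT chain and its jump rule are ignored.
const exampleSaveOutput = `*nat
:OUTPUT ACCEPT [0:0]
:KUBE-HOSTPORTS - [0:0]
:KUBE-HP-AAAAAAAAAAAAAAAA - [0:0]
-A OUTPUT -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS
-A KUBE-HOSTPORTS -m tcp -p tcp --dport 8080 -j KUBE-HP-AAAAAAAAAAAAAAAA
-A KUBE-HP-AAAAAAAAAAAAAAAA -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80
COMMIT
`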
// filterRules filters the input rules against the input chains. Rules that do not reference any of the filter chains are returned.
// The order of the input rules is significant and is preserved.
func filterRules(rules []string, filters []utiliptables.Chain) []string {
filtered := []string{}
for _, rule := range rules {
skip := false
for _, filter := range filters {
if strings.Contains(rule, string(filter)) {
skip = true
break
}
}
if !skip {
filtered = append(filtered, rule)
}
}
return filtered
}
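// Editor's note: filterRulesExample is an illustrative sketch, not part of
// the original file. With two hypothetical rules and a filter on the first
// chain, only the second rule survives, in its original position.
func filterRulesExample() []string {
	rules := []string{
		"-A KUBE-HOSTPORTS --dport 80 -j KUBE-HP-AAAAAAAAAAAAAAAA",
		"-A KUBE-HOSTPORTS --dport 443 -j KUBE-HP-BBBBBBBBBBBBBBBB",
	}
	// Only the --dport 443 rule is returned.
	return filterRules(rules, []utiliptables.Chain{"KUBE-HP-AAAAAAAAAAAAAAAA"})
}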
// filterChains deletes the entries for the given filter chains from the chain map
func filterChains(chains map[utiliptables.Chain]string, filterChains []utiliptables.Chain) {
for _, chain := range filterChains {
if _, ok := chains[chain]; ok {
delete(chains, chain)
}
}
}

View File

@ -1,335 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"net"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
"k8s.io/utils/exec"
)
func TestOpenCloseHostports(t *testing.T) {
openPortCases := []struct {
podPortMapping *PodPortMapping
expectError bool
}{
{
&PodPortMapping{
Namespace: "ns1",
Name: "n0",
},
false,
},
{
&PodPortMapping{
Namespace: "ns1",
Name: "n1",
PortMappings: []*PortMapping{
{HostPort: 80, Protocol: v1.Protocol("TCP")},
{HostPort: 8080, Protocol: v1.Protocol("TCP")},
{HostPort: 443, Protocol: v1.Protocol("TCP")},
},
},
false,
},
{
&PodPortMapping{
Namespace: "ns1",
Name: "n2",
PortMappings: []*PortMapping{
{HostPort: 80, Protocol: v1.Protocol("TCP")},
},
},
true,
},
{
&PodPortMapping{
Namespace: "ns1",
Name: "n3",
PortMappings: []*PortMapping{
{HostPort: 8081, Protocol: v1.Protocol("TCP")},
{HostPort: 8080, Protocol: v1.Protocol("TCP")},
},
},
true,
},
{
&PodPortMapping{
Namespace: "ns1",
Name: "n3",
PortMappings: []*PortMapping{
{HostPort: 8081, Protocol: v1.Protocol("TCP")},
},
},
false,
},
}
iptables := NewFakeIPTables()
portOpener := NewFakeSocketManager()
manager := &hostportManager{
hostPortMap: make(map[hostport]closeable),
iptables: iptables,
portOpener: portOpener.openFakeSocket,
execer: exec.New(),
}
for _, tc := range openPortCases {
mapping, err := manager.openHostports(tc.podPortMapping)
if tc.expectError {
assert.Error(t, err)
continue
}
assert.NoError(t, err)
assert.EqualValues(t, len(mapping), len(tc.podPortMapping.PortMappings))
}
// We have 4 ports: 80, 443, 8080, 8081 open now.
closePortCases := []struct {
portMappings []*PortMapping
expectError bool
}{
{
portMappings: nil,
},
{
portMappings: []*PortMapping{
{HostPort: 80, Protocol: v1.Protocol("TCP")},
{HostPort: 8080, Protocol: v1.Protocol("TCP")},
{HostPort: 443, Protocol: v1.Protocol("TCP")},
},
},
{
portMappings: []*PortMapping{
{HostPort: 80, Protocol: v1.Protocol("TCP")},
},
},
{
portMappings: []*PortMapping{
{HostPort: 8081, Protocol: v1.Protocol("TCP")},
{HostPort: 8080, Protocol: v1.Protocol("TCP")},
},
},
{
portMappings: []*PortMapping{
{HostPort: 8081, Protocol: v1.Protocol("TCP")},
},
},
{
portMappings: []*PortMapping{
{HostPort: 7070, Protocol: v1.Protocol("TCP")},
},
},
}
for _, tc := range closePortCases {
err := manager.closeHostports(tc.portMappings)
if tc.expectError {
assert.Error(t, err)
continue
}
assert.NoError(t, err)
}
// All entries in hostPortMap should have been closed and removed by now
assert.Zero(t, len(manager.hostPortMap))
}
func TestHostportManager(t *testing.T) {
iptables := NewFakeIPTables()
portOpener := NewFakeSocketManager()
manager := &hostportManager{
hostPortMap: make(map[hostport]closeable),
iptables: iptables,
portOpener: portOpener.openFakeSocket,
execer: exec.New(),
}
testCases := []struct {
mapping *PodPortMapping
expectError bool
}{
{
mapping: &PodPortMapping{
Name: "pod1",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 8080,
ContainerPort: 80,
Protocol: v1.ProtocolTCP,
},
{
HostPort: 8081,
ContainerPort: 81,
Protocol: v1.ProtocolUDP,
},
},
},
expectError: false,
},
{
mapping: &PodPortMapping{
Name: "pod2",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.3"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 8082,
ContainerPort: 80,
Protocol: v1.ProtocolTCP,
},
{
HostPort: 8081,
ContainerPort: 81,
Protocol: v1.ProtocolUDP,
},
},
},
expectError: true,
},
{
mapping: &PodPortMapping{
Name: "pod3",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.4"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 8443,
ContainerPort: 443,
Protocol: v1.ProtocolTCP,
},
},
},
expectError: false,
},
}
// Add Hostports
for _, tc := range testCases {
err := manager.Add("id", tc.mapping, "cbr0")
if tc.expectError {
assert.Error(t, err)
continue
}
assert.NoError(t, err)
}
// Check port opened
expectedPorts := []hostport{{8080, "tcp"}, {8081, "udp"}, {8443, "tcp"}}
openedPorts := make(map[hostport]bool)
for hp, port := range portOpener.mem {
if !port.closed {
openedPorts[hp] = true
}
}
assert.EqualValues(t, len(openedPorts), len(expectedPorts))
for _, hp := range expectedPorts {
_, ok := openedPorts[hp]
assert.EqualValues(t, true, ok)
}
// Check iptables-save result after adding hostports
raw := bytes.NewBuffer(nil)
err := iptables.SaveInto(utiliptables.TableNAT, raw)
assert.NoError(t, err)
lines := strings.Split(string(raw.Bytes()), "\n")
expectedLines := map[string]bool{
`*nat`: true,
`:KUBE-HOSTPORTS - [0:0]`: true,
`:OUTPUT - [0:0]`: true,
`:PREROUTING - [0:0]`: true,
`:POSTROUTING - [0:0]`: true,
`:KUBE-HP-IJHALPHTORMHHPPK - [0:0]`: true,
`:KUBE-HP-63UPIDJXVRSZGSUZ - [0:0]`: true,
`:KUBE-HP-WFBOALXEP42XEMJK - [0:0]`: true,
"-A KUBE-HOSTPORTS -m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j KUBE-HP-WFBOALXEP42XEMJK": true,
"-A KUBE-HOSTPORTS -m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp --dport 8081 -j KUBE-HP-63UPIDJXVRSZGSUZ": true,
"-A KUBE-HOSTPORTS -m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp --dport 8080 -j KUBE-HP-IJHALPHTORMHHPPK": true,
"-A OUTPUT -m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS": true,
"-A PREROUTING -m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS": true,
"-A POSTROUTING -m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE": true,
"-A KUBE-HP-IJHALPHTORMHHPPK -m comment --comment \"pod1_ns1 hostport 8080\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ": true,
"-A KUBE-HP-IJHALPHTORMHHPPK -m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80": true,
"-A KUBE-HP-63UPIDJXVRSZGSUZ -m comment --comment \"pod1_ns1 hostport 8081\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ": true,
"-A KUBE-HP-63UPIDJXVRSZGSUZ -m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81": true,
"-A KUBE-HP-WFBOALXEP42XEMJK -m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ": true,
"-A KUBE-HP-WFBOALXEP42XEMJK -m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443": true,
`COMMIT`: true,
}
for _, line := range lines {
if len(strings.TrimSpace(line)) > 0 {
_, ok := expectedLines[strings.TrimSpace(line)]
assert.EqualValues(t, true, ok)
}
}
// Remove all added hostports
for _, tc := range testCases {
if !tc.expectError {
err := manager.Remove("id", tc.mapping)
assert.NoError(t, err)
}
}
// Check iptables-save result after deleting hostports
raw.Reset()
err = iptables.SaveInto(utiliptables.TableNAT, raw)
assert.NoError(t, err)
lines = strings.Split(string(raw.Bytes()), "\n")
remainingChains := make(map[string]bool)
for _, line := range lines {
if strings.HasPrefix(line, ":") {
remainingChains[strings.TrimSpace(line)] = true
}
}
expectDeletedChains := []string{"KUBE-HP-4YVONL46AKYWSKS3", "KUBE-HP-7THKRFSEH4GIIXK7", "KUBE-HP-5N7UH5JAXCVP5UJR"}
for _, chain := range expectDeletedChains {
_, ok := remainingChains[chain]
assert.EqualValues(t, false, ok)
}
// check if all ports are closed
for _, port := range portOpener.mem {
assert.EqualValues(t, true, port.closed)
}
}
func TestGetHostportChain(t *testing.T) {
m := make(map[string]int)
chain := getHostportChain("testrdma-2", &PortMapping{HostPort: 57119, Protocol: "TCP", ContainerPort: 57119})
m[string(chain)] = 1
chain = getHostportChain("testrdma-2", &PortMapping{HostPort: 55429, Protocol: "TCP", ContainerPort: 55429})
m[string(chain)] = 1
chain = getHostportChain("testrdma-2", &PortMapping{HostPort: 56833, Protocol: "TCP", ContainerPort: 56833})
m[string(chain)] = 1
if len(m) != 3 {
t.Fatal(m)
}
}

View File

@ -1,304 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"bytes"
"crypto/sha256"
"encoding/base32"
"fmt"
"strconv"
"strings"
"time"
"github.com/golang/glog"
iptablesproxy "k8s.io/kubernetes/pkg/proxy/iptables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
// HostportSyncer takes a list of PodPortMappings and implements hostports all at once
type HostportSyncer interface {
// SyncHostports gathers all hostports on the node and sets up iptables rules to enable them.
// On each invocation existing ports are synced and stale rules are deleted.
SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error
// OpenPodHostportsAndSync opens hostports for a new PodPortMapping, gathers all hostports on
// the node, and sets up iptables rules to enable them. On each invocation existing ports are synced and stale rules are deleted.
// 'newPortMapping' must also be present in 'activePodPortMappings'.
OpenPodHostportsAndSync(newPortMapping *PodPortMapping, natInterfaceName string, activePodPortMappings []*PodPortMapping) error
}
type hostportSyncer struct {
hostPortMap map[hostport]closeable
iptables utiliptables.Interface
portOpener hostportOpener
}
func NewHostportSyncer(iptables utiliptables.Interface) HostportSyncer {
return &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: iptables,
portOpener: openLocalPort,
}
}
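// Editor's note: syncerUsageExample is an illustrative sketch, not part of
// the original file. The pod mapping, bridge name, and iptables handle are
// hypothetical; the raw byte literal stands in for net.ParseIP("10.1.1.2")
// so the sketch needs no extra imports.
func syncerUsageExample(ipt utiliptables.Interface) error {
	pm := &PodPortMapping{
		Name:      "example-pod",
		Namespace: "default",
		IP:        []byte{10, 1, 1, 2}, // 10.1.1.2
		PortMappings: []*PortMapping{
			{HostPort: 8080, ContainerPort: 80, Protocol: "TCP"},
		},
	}
	syncer := NewHostportSyncer(ipt)
	// The new mapping must also appear in the active list.
	return syncer.OpenPodHostportsAndSync(pm, "cbr0", []*PodPortMapping{pm})
}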
type targetPod struct {
podFullName string
podIP string
}
func (hp *hostport) String() string {
return fmt.Sprintf("%s:%d", hp.protocol, hp.port)
}
// openHostports opens all hostports for the pod and records the opened sockets in h.hostPortMap
func (h *hostportSyncer) openHostports(podHostportMapping *PodPortMapping) error {
var retErr error
ports := make(map[hostport]closeable)
for _, port := range podHostportMapping.PortMappings {
if port.HostPort <= 0 {
// Hostport is not specified in this port mapping, so skip it
continue
}
hp := hostport{
port: port.HostPort,
protocol: strings.ToLower(string(port.Protocol)),
}
socket, err := h.portOpener(&hp)
if err != nil {
retErr = fmt.Errorf("cannot open hostport %d for pod %s: %v", port.HostPort, getPodFullName(podHostportMapping), err)
break
}
ports[hp] = socket
}
// If we encountered any error, close all hostports that were just opened.
if retErr != nil {
for hp, socket := range ports {
if err := socket.Close(); err != nil {
glog.Errorf("Cannot clean up hostport %d for pod %s: %v", hp.port, getPodFullName(podHostportMapping), err)
}
}
return retErr
}
for hostPort, socket := range ports {
h.hostPortMap[hostPort] = socket
}
return nil
}
func getPodFullName(pod *PodPortMapping) string {
// Use underscore as the delimiter because it is not allowed in pod names
// (DNS subdomain format), while it is allowed in container names.
return pod.Name + "_" + pod.Namespace
}
// gatherAllHostports returns all hostports that should be present on the node,
// given the list of pods running on that node and ignoring host network
// pods (which don't need hostport <-> container port mapping).
func gatherAllHostports(activePodPortMappings []*PodPortMapping) (map[*PortMapping]targetPod, error) {
podHostportMap := make(map[*PortMapping]targetPod)
for _, pm := range activePodPortMappings {
if pm.IP.To4() == nil {
return nil, fmt.Errorf("Invalid or missing pod %s IP", getPodFullName(pm))
}
// should not handle hostports for hostnetwork pods
if pm.HostNetwork {
continue
}
for _, port := range pm.PortMappings {
if port.HostPort != 0 {
podHostportMap[port] = targetPod{podFullName: getPodFullName(pm), podIP: pm.IP.String()}
}
}
}
return podHostportMap, nil
}
// Join all words with spaces, terminate with newline and write to buf.
func writeLine(buf *bytes.Buffer, words ...string) {
buf.WriteString(strings.Join(words, " ") + "\n")
}
// hostportChainName takes the containerPort for a pod and returns the associated iptables chain.
// This is computed by hashing (sha256) then encoding to base32 and truncating with the
// prefix "KUBE-HP-". We do this because iptables chain names must be <= 28 chars long,
// and the longer they are the harder they are to read.
func hostportChainName(pm *PortMapping, podFullName string) utiliptables.Chain {
hash := sha256.Sum256([]byte(strconv.Itoa(int(pm.HostPort)) + string(pm.Protocol) + podFullName))
encoded := base32.StdEncoding.EncodeToString(hash[:])
return utiliptables.Chain(kubeHostportChainPrefix + encoded[:16])
}
// OpenPodHostportsAndSync opens hostports for a new PodPortMapping, gathers all hostports on
// the node, sets up iptables rules to enable them, and finally cleans up stale hostports.
// 'newPortMapping' must also be present in 'activePodPortMappings'.
func (h *hostportSyncer) OpenPodHostportsAndSync(newPortMapping *PodPortMapping, natInterfaceName string, activePodPortMappings []*PodPortMapping) error {
// try to open pod host port if specified
if err := h.openHostports(newPortMapping); err != nil {
return err
}
// Add the new pod to active pods if it's not present.
var found bool
for _, pm := range activePodPortMappings {
if pm.Namespace == newPortMapping.Namespace && pm.Name == newPortMapping.Name {
found = true
break
}
}
if !found {
activePodPortMappings = append(activePodPortMappings, newPortMapping)
}
return h.SyncHostports(natInterfaceName, activePodPortMappings)
}
// SyncHostports gathers all hostports on the node, sets up iptables rules to enable them, and finally cleans up stale hostports
func (h *hostportSyncer) SyncHostports(natInterfaceName string, activePodPortMappings []*PodPortMapping) error {
start := time.Now()
defer func() {
glog.V(4).Infof("syncHostportsRules took %v", time.Since(start))
}()
hostportPodMap, err := gatherAllHostports(activePodPortMappings)
if err != nil {
return err
}
// Ensure the KUBE-HOSTPORTS chain and its jump rules exist
ensureKubeHostportChains(h.iptables, natInterfaceName)
// Get iptables-save output so we can check for existing chains and rules.
// This will be a map of chain name to chain with rules as stored in iptables-save/iptables-restore
existingNATChains := make(map[utiliptables.Chain]string)
iptablesData := bytes.NewBuffer(nil)
err = h.iptables.SaveInto(utiliptables.TableNAT, iptablesData)
if err != nil { // if we failed to get any rules
glog.Errorf("Failed to execute iptables-save, syncing all rules: %v", err)
} else { // otherwise parse the output
existingNATChains = utiliptables.GetChainLines(utiliptables.TableNAT, iptablesData.Bytes())
}
natChains := bytes.NewBuffer(nil)
natRules := bytes.NewBuffer(nil)
writeLine(natChains, "*nat")
// Make sure we keep stats for the top-level chains, if they existed
// (which most should have because we created them above).
if chain, ok := existingNATChains[kubeHostportsChain]; ok {
writeLine(natChains, chain)
} else {
writeLine(natChains, utiliptables.MakeChainLine(kubeHostportsChain))
}
// Accumulate NAT chains to keep.
activeNATChains := map[utiliptables.Chain]bool{} // use a map as a set
for port, target := range hostportPodMap {
protocol := strings.ToLower(string(port.Protocol))
hostportChain := hostportChainName(port, target.podFullName)
if chain, ok := existingNATChains[hostportChain]; ok {
writeLine(natChains, chain)
} else {
writeLine(natChains, utiliptables.MakeChainLine(hostportChain))
}
activeNATChains[hostportChain] = true
// Redirect to hostport chain
args := []string{
"-A", string(kubeHostportsChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-m", protocol, "-p", protocol,
"--dport", fmt.Sprintf("%d", port.HostPort),
"-j", string(hostportChain),
}
writeLine(natRules, args...)
// Assuming kubelet is syncing iptables KUBE-MARK-MASQ chain
// If the request comes from the pod that is serving the hostport, then SNAT
args = []string{
"-A", string(hostportChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-s", target.podIP, "-j", string(iptablesproxy.KubeMarkMasqChain),
}
writeLine(natRules, args...)
// Create hostport chain to DNAT traffic to final destination
// iptables will maintain the stats for this chain
args = []string{
"-A", string(hostportChain),
"-m", "comment", "--comment", fmt.Sprintf(`"%s hostport %d"`, target.podFullName, port.HostPort),
"-m", protocol, "-p", protocol,
"-j", "DNAT", fmt.Sprintf("--to-destination=%s:%d", target.podIP, port.ContainerPort),
}
writeLine(natRules, args...)
}
// Delete chains no longer in use.
for chain := range existingNATChains {
if !activeNATChains[chain] {
chainString := string(chain)
if !strings.HasPrefix(chainString, kubeHostportChainPrefix) {
// Ignore chains that aren't ours.
continue
}
// We must (as per iptables) write a chain-line for it, which has
// the nice effect of flushing the chain. Then we can remove the
// chain.
writeLine(natChains, existingNATChains[chain])
writeLine(natRules, "-X", chainString)
}
}
writeLine(natRules, "COMMIT")
natLines := append(natChains.Bytes(), natRules.Bytes()...)
glog.V(3).Infof("Restoring iptables rules: %s", natLines)
err = h.iptables.RestoreAll(natLines, utiliptables.NoFlushTables, utiliptables.RestoreCounters)
if err != nil {
return fmt.Errorf("Failed to execute iptables-restore: %v", err)
}
h.cleanupHostportMap(hostportPodMap)
return nil
}
// cleanupHostportMap closes obsolete hostports
func (h *hostportSyncer) cleanupHostportMap(containerPortMap map[*PortMapping]targetPod) {
// compute hostports that are supposed to be open
currentHostports := make(map[hostport]bool)
for containerPort := range containerPortMap {
hp := hostport{
port: containerPort.HostPort,
protocol: strings.ToLower(string(containerPort.Protocol)),
}
currentHostports[hp] = true
}
// close and delete obsolete hostports
for hp, socket := range h.hostPortMap {
if _, ok := currentHostports[hp]; !ok {
socket.Close()
glog.V(3).Infof("Closed local port %s", hp.String())
delete(h.hostPortMap, hp)
}
}
}

View File

@ -1,312 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"net"
"reflect"
"strings"
"testing"
"github.com/stretchr/testify/assert"
"k8s.io/api/core/v1"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
type ruleMatch struct {
hostport int
chain string
match string
}
func TestOpenPodHostports(t *testing.T) {
fakeIPTables := NewFakeIPTables()
fakeOpener := NewFakeSocketManager()
h := &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: fakeIPTables,
portOpener: fakeOpener.openFakeSocket,
}
tests := []struct {
mapping *PodPortMapping
matches []*ruleMatch
}{
// New pod that we are going to add
{
&PodPortMapping{
Name: "test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("10.1.1.2"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 4567,
ContainerPort: 80,
Protocol: v1.ProtocolTCP,
},
{
HostPort: 5678,
ContainerPort: 81,
Protocol: v1.ProtocolUDP,
},
},
},
[]*ruleMatch{
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp --dport 4567",
},
{
4567,
"",
"-m comment --comment \"test-pod_default hostport 4567\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ",
},
{
4567,
"",
"-m comment --comment \"test-pod_default hostport 4567\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80",
},
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp --dport 5678",
},
{
5678,
"",
"-m comment --comment \"test-pod_default hostport 5678\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ",
},
{
5678,
"",
"-m comment --comment \"test-pod_default hostport 5678\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81",
},
},
},
// Already running pod
{
&PodPortMapping{
Name: "another-test-pod",
Namespace: v1.NamespaceDefault,
IP: net.ParseIP("10.1.1.5"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 123,
ContainerPort: 654,
Protocol: v1.ProtocolTCP,
},
},
},
[]*ruleMatch{
{
-1,
"KUBE-HOSTPORTS",
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp --dport 123",
},
{
123,
"",
"-m comment --comment \"another-test-pod_default hostport 123\" -s 10.1.1.5/32 -j KUBE-MARK-MASQ",
},
{
123,
"",
"-m comment --comment \"another-test-pod_default hostport 123\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.5:654",
},
},
},
}
activePodPortMapping := make([]*PodPortMapping, 0)
// Fill in any match rules missing chain names
for _, test := range tests {
for _, match := range test.matches {
if match.hostport >= 0 {
found := false
for _, pm := range test.mapping.PortMappings {
if int(pm.HostPort) == match.hostport {
match.chain = string(hostportChainName(pm, getPodFullName(test.mapping)))
found = true
break
}
}
if !found {
t.Fatalf("Failed to find ContainerPort for match %d/'%s'", match.hostport, match.match)
}
}
}
activePodPortMapping = append(activePodPortMapping, test.mapping)
}
// Already running pod's host port
hp := hostport{
tests[1].mapping.PortMappings[0].HostPort,
strings.ToLower(string(tests[1].mapping.PortMappings[0].Protocol)),
}
h.hostPortMap[hp] = &fakeSocket{
tests[1].mapping.PortMappings[0].HostPort,
strings.ToLower(string(tests[1].mapping.PortMappings[0].Protocol)),
false,
}
err := h.OpenPodHostportsAndSync(tests[0].mapping, "br0", activePodPortMapping)
if err != nil {
t.Fatalf("Failed to OpenPodHostportsAndSync: %v", err)
}
// Generic rules
genericRules := []*ruleMatch{
{-1, "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o br0 -s 127.0.0.0/8 -j MASQUERADE"},
{-1, "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{-1, "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
}
for _, rule := range genericRules {
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(rule.chain))
if err != nil {
t.Fatalf("Expected NAT chain %s did not exist", rule.chain)
}
if !matchRule(chain, rule.match) {
t.Fatalf("Expected %s chain rule match '%s' not found", rule.chain, rule.match)
}
}
// Pod rules
for _, test := range tests {
for _, match := range test.matches {
// Ensure chain exists
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(match.chain))
if err != nil {
t.Fatalf("Expected NAT chain %s did not exist", match.chain)
}
if !matchRule(chain, match.match) {
t.Fatalf("Expected NAT chain %s rule containing '%s' not found", match.chain, match.match)
}
}
}
// Socket
hostPortMap := map[hostport]closeable{
{123, "tcp"}: &fakeSocket{123, "tcp", false},
{4567, "tcp"}: &fakeSocket{4567, "tcp", false},
{5678, "udp"}: &fakeSocket{5678, "udp", false},
}
if !reflect.DeepEqual(hostPortMap, h.hostPortMap) {
t.Fatalf("Mismatch in expected hostPortMap. Expected '%v', got '%v'", hostPortMap, h.hostPortMap)
}
}
func matchRule(chain *fakeChain, match string) bool {
for _, rule := range chain.rules {
if strings.Contains(rule, match) {
return true
}
}
return false
}
func TestHostportChainName(t *testing.T) {
m := make(map[string]int)
chain := hostportChainName(&PortMapping{HostPort: 57119, Protocol: "TCP", ContainerPort: 57119}, "testrdma-2")
m[string(chain)] = 1
chain = hostportChainName(&PortMapping{HostPort: 55429, Protocol: "TCP", ContainerPort: 55429}, "testrdma-2")
m[string(chain)] = 1
chain = hostportChainName(&PortMapping{HostPort: 56833, Protocol: "TCP", ContainerPort: 56833}, "testrdma-2")
m[string(chain)] = 1
if len(m) != 3 {
t.Fatal(m)
}
}
func TestHostPortSyncerRemoveLegacyRules(t *testing.T) {
iptables := NewFakeIPTables()
legacyRules := [][]string{
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j KUBE-HP-5N7UH5JAXCVP5UJR"},
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp --dport 8081 -j KUBE-HP-7THKRFSEH4GIIXK7"},
{"-A", "KUBE-HOSTPORTS", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp --dport 8080 -j KUBE-HP-4YVONL46AKYWSKS3"},
{"-A", "OUTPUT", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{"-A", "PREROUTING", "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"},
{"-A", "POSTROUTING", "-m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE"},
{"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"},
{"-A", "KUBE-HP-4YVONL46AKYWSKS3", "-m comment --comment \"pod1_ns1 hostport 8080\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.2:80"},
{"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -s 10.1.1.2/32 -j KUBE-MARK-MASQ"},
{"-A", "KUBE-HP-7THKRFSEH4GIIXK7", "-m comment --comment \"pod1_ns1 hostport 8081\" -m udp -p udp -j DNAT --to-destination 10.1.1.2:81"},
{"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ"},
{"-A", "KUBE-HP-5N7UH5JAXCVP5UJR", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443"},
}
for _, rule := range legacyRules {
_, err := iptables.EnsureChain(utiliptables.TableNAT, utiliptables.Chain(rule[1]))
assert.NoError(t, err)
_, err = iptables.ensureRule(utiliptables.RulePosition(rule[0]), utiliptables.TableNAT, utiliptables.Chain(rule[1]), rule[2])
assert.NoError(t, err)
}
portOpener := NewFakeSocketManager()
h := &hostportSyncer{
hostPortMap: make(map[hostport]closeable),
iptables: iptables,
portOpener: portOpener.openFakeSocket,
}
// check preserve pod3's rules and remove pod1's rules
pod3PortMapping := &PodPortMapping{
Name: "pod3",
Namespace: "ns1",
IP: net.ParseIP("10.1.1.4"),
HostNetwork: false,
PortMappings: []*PortMapping{
{
HostPort: 8443,
ContainerPort: 443,
Protocol: v1.ProtocolTCP,
},
},
}
h.SyncHostports("cbr0", []*PodPortMapping{pod3PortMapping})
newChainName := string(hostportChainName(pod3PortMapping.PortMappings[0], getPodFullName(pod3PortMapping)))
expectRules := [][]string{
{"KUBE-HOSTPORTS", "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp --dport 8443 -j " + newChainName},
{newChainName, "-m comment --comment \"pod3_ns1 hostport 8443\" -s 10.1.1.4/32 -j KUBE-MARK-MASQ"},
{newChainName, "-m comment --comment \"pod3_ns1 hostport 8443\" -m tcp -p tcp -j DNAT --to-destination 10.1.1.4:443"},
}
natTable, ok := iptables.tables[string(utiliptables.TableNAT)]
assert.True(t, ok)
// check pod1's rules in KUBE-HOSTPORTS chain should be cleaned up
hostportChain, ok := natTable.chains["KUBE-HOSTPORTS"]
assert.True(t, ok, string(hostportChain.name))
assert.Equal(t, 1, len(hostportChain.rules), "%v", hostportChain.rules)
// check pod3's rules left
assert.Equal(t, expectRules[0][1], hostportChain.rules[0])
chain, ok := natTable.chains[newChainName]
assert.True(t, ok)
assert.Equal(t, 2, len(chain.rules))
assert.Equal(t, expectRules[1][1], chain.rules[0])
assert.Equal(t, expectRules[2][1], chain.rules[1])
// check legacy KUBE-HP-* chains should be deleted
for _, name := range []string{"KUBE-HP-4YVONL46AKYWSKS3", "KUBE-HP-7THKRFSEH4GIIXK7", "KUBE-HP-5N7UH5JAXCVP5UJR"} {
_, ok := natTable.chains[name]
assert.False(t, ok)
}
}

View File

@ -1,82 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package hostport
import (
"fmt"
"testing"
"github.com/stretchr/testify/assert"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
)
type fakeSocket struct {
port int32
protocol string
closed bool
}
func (f *fakeSocket) Close() error {
if f.closed {
return fmt.Errorf("Socket %q.%s already closed!", f.port, f.protocol)
}
f.closed = true
return nil
}
func NewFakeSocketManager() *fakeSocketManager {
return &fakeSocketManager{mem: make(map[hostport]*fakeSocket)}
}
type fakeSocketManager struct {
mem map[hostport]*fakeSocket
}
func (f *fakeSocketManager) openFakeSocket(hp *hostport) (closeable, error) {
if socket, ok := f.mem[*hp]; ok && !socket.closed {
return nil, fmt.Errorf("hostport is occupied")
}
fs := &fakeSocket{hp.port, hp.protocol, false}
f.mem[*hp] = fs
return fs, nil
}
func TestEnsureKubeHostportChains(t *testing.T) {
interfaceName := "cbr0"
builtinChains := []string{"PREROUTING", "OUTPUT"}
jumpRule := "-m comment --comment \"kube hostport portals\" -m addrtype --dst-type LOCAL -j KUBE-HOSTPORTS"
masqRule := "-m comment --comment \"SNAT for localhost access to hostports\" -o cbr0 -s 127.0.0.0/8 -j MASQUERADE"
fakeIPTables := NewFakeIPTables()
assert.NoError(t, ensureKubeHostportChains(fakeIPTables, interfaceName))
_, _, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain("KUBE-HOSTPORTS"))
assert.NoError(t, err)
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.ChainPostrouting)
assert.NoError(t, err)
assert.EqualValues(t, len(chain.rules), 1)
assert.Contains(t, chain.rules[0], masqRule)
for _, chainName := range builtinChains {
_, chain, err := fakeIPTables.getChain(utiliptables.TableNAT, utiliptables.Chain(chainName))
assert.NoError(t, err)
assert.EqualValues(t, len(chain.rules), 1)
assert.Contains(t, chain.rules[0], jumpRule)
}
}

View File

@ -1,26 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["fake.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/testing",
deps = ["//pkg/kubelet/dockershim/network/hostport:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,43 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
)
type fakeSyncer struct{}
func NewFakeHostportSyncer() hostport.HostportSyncer {
return &fakeSyncer{}
}
func (h *fakeSyncer) OpenPodHostportsAndSync(newPortMapping *hostport.PodPortMapping, natInterfaceName string, activePortMapping []*hostport.PodPortMapping) error {
return h.SyncHostports(natInterfaceName, activePortMapping)
}
func (h *fakeSyncer) SyncHostports(natInterfaceName string, activePortMapping []*hostport.PodPortMapping) error {
for _, r := range activePortMapping {
if r.IP.To4() == nil {
return fmt.Errorf("Invalid or missing pod %s/%s IP", r.Namespace, r.Name)
}
}
return nil
}

View File

@ -1,166 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"kubenet.go",
] + select({
"@io_bazel_rules_go//go/platform:android": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:darwin": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:linux": [
"kubenet_linux.go",
],
"@io_bazel_rules_go//go/platform:nacl": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:plan9": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:solaris": [
"kubenet_unsupported.go",
],
"@io_bazel_rules_go//go/platform:windows": [
"kubenet_unsupported.go",
],
"//conditions:default": [],
}),
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet",
deps = select({
"@io_bazel_rules_go//go/platform:android": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:darwin": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:dragonfly": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:freebsd": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/hostport:go_default_library",
"//pkg/util/bandwidth:go_default_library",
"//pkg/util/dbus:go_default_library",
"//pkg/util/ebtables:go_default_library",
"//pkg/util/iptables:go_default_library",
"//pkg/util/sysctl:go_default_library",
"//vendor/github.com/containernetworking/cni/libcni:go_default_library",
"//vendor/github.com/containernetworking/cni/pkg/types:go_default_library",
"//vendor/github.com/containernetworking/cni/pkg/types/020:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/github.com/vishvananda/netlink:go_default_library",
"//vendor/golang.org/x/sys/unix:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
"@io_bazel_rules_go//go/platform:nacl": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:netbsd": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:openbsd": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:plan9": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:solaris": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"@io_bazel_rules_go//go/platform:windows": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
],
"//conditions:default": [],
}),
)
go_test(
name = "go_default_test",
srcs = select({
"@io_bazel_rules_go//go/platform:linux": [
"kubenet_linux_test.go",
],
"//conditions:default": [],
}),
embed = [":go_default_library"],
deps = select({
"@io_bazel_rules_go//go/platform:linux": [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/cni/testing:go_default_library",
"//pkg/kubelet/dockershim/network/hostport/testing:go_default_library",
"//pkg/kubelet/dockershim/network/testing:go_default_library",
"//pkg/util/bandwidth:go_default_library",
"//pkg/util/iptables/testing:go_default_library",
"//pkg/util/sysctl/testing:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/github.com/stretchr/testify/mock:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
"//vendor/k8s.io/utils/exec/testing:go_default_library",
],
"//conditions:default": [],
}),
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,21 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
const (
KubenetPluginName = "kubenet"
)

View File

@ -1,688 +0,0 @@
// +build linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
import (
"fmt"
"io/ioutil"
"net"
"strings"
"sync"
"time"
"github.com/containernetworking/cni/libcni"
cnitypes "github.com/containernetworking/cni/pkg/types"
cnitypes020 "github.com/containernetworking/cni/pkg/types/020"
"github.com/golang/glog"
"github.com/vishvananda/netlink"
"golang.org/x/sys/unix"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
"k8s.io/kubernetes/pkg/util/bandwidth"
utildbus "k8s.io/kubernetes/pkg/util/dbus"
utilebtables "k8s.io/kubernetes/pkg/util/ebtables"
utiliptables "k8s.io/kubernetes/pkg/util/iptables"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec"
)
const (
BridgeName = "cbr0"
DefaultCNIDir = "/opt/cni/bin"
sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
// fallbackMTU is used if an MTU is not specified, and we cannot determine the MTU
fallbackMTU = 1460
// ebtables Chain to store dedup rules
dedupChain = utilebtables.Chain("KUBE-DEDUP")
// defaultIPAMDir is the default location for the checkpoint files stored by host-local ipam
// https://github.com/containernetworking/cni/tree/master/plugins/ipam/host-local#backends
defaultIPAMDir = "/var/lib/cni/networks"
)
// CNI plugins required by kubenet in /opt/cni/bin or user-specified directory
var requiredCNIPlugins = [...]string{"bridge", "host-local", "loopback"}
type kubenetNetworkPlugin struct {
network.NoopNetworkPlugin
host network.Host
netConfig *libcni.NetworkConfig
loConfig *libcni.NetworkConfig
cniConfig libcni.CNI
bandwidthShaper bandwidth.BandwidthShaper
mu sync.Mutex // protects the podIPs map, netConfig, and shaper initialization
podIPs map[kubecontainer.ContainerID]string
mtu int
execer utilexec.Interface
nsenterPath string
hairpinMode kubeletconfig.HairpinMode
// kubenet can use either hostportSyncer or hostportManager to implement hostports.
// Currently, if the network host supports legacy features, hostportSyncer is used;
// otherwise, hostportManager is used.
hostportSyncer hostport.HostportSyncer
hostportManager hostport.HostPortManager
iptables utiliptables.Interface
sysctl utilsysctl.Interface
ebtables utilebtables.Interface
// binDirs is passed via the kubelet cni-bin-dir parameter.
// kubenet will search for CNI binaries in DefaultCNIDir first, then continue to binDirs.
binDirs []string
nonMasqueradeCIDR string
podCidr string
gateway net.IP
}
func NewPlugin(networkPluginDirs []string) network.NetworkPlugin {
protocol := utiliptables.ProtocolIpv4
execer := utilexec.New()
dbus := utildbus.New()
sysctl := utilsysctl.New()
iptInterface := utiliptables.New(execer, dbus, protocol)
return &kubenetNetworkPlugin{
podIPs: make(map[kubecontainer.ContainerID]string),
execer: utilexec.New(),
iptables: iptInterface,
sysctl: sysctl,
binDirs: append([]string{DefaultCNIDir}, networkPluginDirs...),
hostportSyncer: hostport.NewHostportSyncer(iptInterface),
hostportManager: hostport.NewHostportManager(iptInterface),
nonMasqueradeCIDR: "10.0.0.0/8",
}
}
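// Editor's note: initKubenetExample is an illustrative sketch, not part of
// the original file, showing how a caller wires the plugin up. The host
// implementation, hairpin mode, and non-masquerade CIDR are placeholders
// that the kubelet supplies in practice.
func initKubenetExample(host network.Host) error {
	plugin := NewPlugin(nil) // nil extra dirs: search DefaultCNIDir only
	return plugin.Init(host, kubeletconfig.HairpinVeth, "10.0.0.0/8", network.UseDefaultMTU)
}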
func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
plugin.host = host
plugin.hairpinMode = hairpinMode
plugin.nonMasqueradeCIDR = nonMasqueradeCIDR
plugin.cniConfig = &libcni.CNIConfig{Path: plugin.binDirs}
if mtu == network.UseDefaultMTU {
if link, err := findMinMTU(); err == nil {
plugin.mtu = link.MTU
glog.V(5).Infof("Using interface %s MTU %d as bridge MTU", link.Name, link.MTU)
} else {
plugin.mtu = fallbackMTU
glog.Warningf("Failed to find default bridge MTU, using %d: %v", fallbackMTU, err)
}
} else {
plugin.mtu = mtu
}
// Since this plugin uses a Linux bridge, setting bridge-nf-call-iptables=1
// is necessary to ensure kube-proxy functions correctly.
//
// This will return an error on older kernel versions (< 3.18) where the module
// is built-in; we simply ignore the error here. A better approach would be to
// check the kernel version in the future.
plugin.execer.Command("modprobe", "br-netfilter").CombinedOutput()
err := plugin.sysctl.SetSysctl(sysctlBridgeCallIPTables, 1)
if err != nil {
glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err)
}
plugin.loConfig, err = libcni.ConfFromBytes([]byte(`{
"cniVersion": "0.1.0",
"name": "kubenet-loopback",
"type": "loopback"
}`))
if err != nil {
return fmt.Errorf("Failed to generate loopback config: %v", err)
}
plugin.nsenterPath, err = plugin.execer.LookPath("nsenter")
if err != nil {
return fmt.Errorf("Failed to find nsenter binary: %v", err)
}
// Need to SNAT outbound traffic from cluster
if err = plugin.ensureMasqRule(); err != nil {
return err
}
return nil
}
// TODO: move this logic into the cni bridge plugin and remove it from kubenet
func (plugin *kubenetNetworkPlugin) ensureMasqRule() error {
if plugin.nonMasqueradeCIDR != "0.0.0.0/0" {
if _, err := plugin.iptables.EnsureRule(utiliptables.Append, utiliptables.TableNAT, utiliptables.ChainPostrouting,
"-m", "comment", "--comment", "kubenet: SNAT for outbound traffic from cluster",
"-m", "addrtype", "!", "--dst-type", "LOCAL",
"!", "-d", plugin.nonMasqueradeCIDR,
"-j", "MASQUERADE"); err != nil {
return fmt.Errorf("Failed to ensure that %s chain %s jumps to MASQUERADE: %v", utiliptables.TableNAT, utiliptables.ChainPostrouting, err)
}
}
return nil
}
func findMinMTU() (*net.Interface, error) {
intfs, err := net.Interfaces()
if err != nil {
return nil, err
}
mtu := 999999
defIntfIndex := -1
for i, intf := range intfs {
if ((intf.Flags & net.FlagUp) != 0) && (intf.Flags&(net.FlagLoopback|net.FlagPointToPoint) == 0) {
if intf.MTU < mtu {
mtu = intf.MTU
defIntfIndex = i
}
}
}
if mtu >= 999999 || mtu < 576 || defIntfIndex < 0 {
return nil, fmt.Errorf("no suitable interface: %v", BridgeName)
}
return &intfs[defIntfIndex], nil
}
const NET_CONFIG_TEMPLATE = `{
"cniVersion": "0.1.0",
"name": "kubenet",
"type": "bridge",
"bridge": "%s",
"mtu": %d,
"addIf": "%s",
"isGateway": true,
"ipMasq": false,
"hairpinMode": %t,
"ipam": {
"type": "host-local",
"subnet": "%s",
"gateway": "%s",
"routes": [
{ "dst": "0.0.0.0/0" }
]
}
}`
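// Editor's note: exampleKubenetConfig is an illustrative sketch, not part of
// the original file, showing NET_CONFIG_TEMPLATE as the Event handler below
// would render it. It assumes a hypothetical pod CIDR of 10.244.1.0/24, the
// fallback MTU of 1460, hairpin mode off, and "eth0" as the default
// container interface name; the gateway is the subnet's first address.
const exampleKubenetConfig = `{
  "cniVersion": "0.1.0",
  "name": "kubenet",
  "type": "bridge",
  "bridge": "cbr0",
  "mtu": 1460,
  "addIf": "eth0",
  "isGateway": true,
  "ipMasq": false,
  "hairpinMode": false,
  "ipam": {
    "type": "host-local",
    "subnet": "10.244.1.0/24",
    "gateway": "10.244.1.1",
    "routes": [
      { "dst": "0.0.0.0/0" }
    ]
  }
}`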
func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interface{}) {
if name != network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE {
return
}
plugin.mu.Lock()
defer plugin.mu.Unlock()
podCIDR, ok := details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR].(string)
if !ok {
glog.Warningf("%s event didn't contain pod CIDR", network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE)
return
}
if plugin.netConfig != nil {
glog.Warningf("Ignoring subsequent pod CIDR update to %s", podCIDR)
return
}
glog.V(5).Infof("PodCIDR is set to %q", podCIDR)
_, cidr, err := net.ParseCIDR(podCIDR)
if err == nil {
setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth
// Set bridge address to first address in IPNet
cidr.IP[len(cidr.IP)-1] += 1
json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, podCIDR, cidr.IP.String())
glog.V(2).Infof("CNI network config set to %v", json)
plugin.netConfig, err = libcni.ConfFromBytes([]byte(json))
if err == nil {
glog.V(5).Infof("CNI network config:\n%s", json)
// Ensure cbr0 has no conflicting addresses; CNI's 'bridge'
// plugin will bail out if the bridge has an unexpected one
plugin.clearBridgeAddressesExcept(cidr)
}
plugin.podCidr = podCIDR
plugin.gateway = cidr.IP
}
if err != nil {
glog.Warningf("Failed to generate CNI network config: %v", err)
}
}
func (plugin *kubenetNetworkPlugin) clearBridgeAddressesExcept(keep *net.IPNet) {
bridge, err := netlink.LinkByName(BridgeName)
if err != nil {
return
}
addrs, err := netlink.AddrList(bridge, unix.AF_INET)
if err != nil {
return
}
for _, addr := range addrs {
if !utilnet.IPNetEqual(addr.IPNet, keep) {
glog.V(2).Infof("Removing old address %s from %s", addr.IPNet.String(), BridgeName)
netlink.AddrDel(bridge, &addr)
}
}
}
func (plugin *kubenetNetworkPlugin) Name() string {
return KubenetPluginName
}
func (plugin *kubenetNetworkPlugin) Capabilities() utilsets.Int {
return utilsets.NewInt()
}
// setup sets up networking through CNI using the given ns/name and sandbox ID.
func (plugin *kubenetNetworkPlugin) setup(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
// Disable DAD so we skip the kernel delay on bringing up new interfaces.
if err := plugin.disableContainerDAD(id); err != nil {
glog.V(3).Infof("Failed to disable DAD in container: %v", err)
}
// Bring up container loopback interface
if _, err := plugin.addContainerToNetwork(plugin.loConfig, "lo", namespace, name, id); err != nil {
return err
}
// Hook container up with our bridge
resT, err := plugin.addContainerToNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id)
if err != nil {
return err
}
// Coerce the CNI result version
res, err := cnitypes020.GetResult(resT)
if err != nil {
return fmt.Errorf("unable to understand network config: %v", err)
}
if res.IP4 == nil {
return fmt.Errorf("CNI plugin reported no IPv4 address for container %v.", id)
}
ip4 := res.IP4.IP.IP.To4()
if ip4 == nil {
return fmt.Errorf("CNI plugin reported an invalid IPv4 address for container %v: %+v.", id, res.IP4)
}
// Put the container bridge into promiscuous mode to force it to accept hairpin packets.
// TODO: Remove this once the kernel bug (#20096) is fixed.
if plugin.hairpinMode == kubeletconfig.PromiscuousBridge {
link, err := netlink.LinkByName(BridgeName)
if err != nil {
return fmt.Errorf("failed to lookup %q: %v", BridgeName, err)
}
if link.Attrs().Promisc != 1 {
// promiscuous mode is not on, so turn it on.
err := netlink.SetPromiscOn(link)
if err != nil {
return fmt.Errorf("Error setting promiscuous mode on %s: %v", BridgeName, err)
}
}
// configure the ebtables rules to eliminate duplicate packets on a best-effort basis
plugin.syncEbtablesDedupRules(link.Attrs().HardwareAddr)
}
plugin.podIPs[id] = ip4.String()
// The first SetUpPod call creates the bridge; get a shaper for the sake of initialization
// TODO: replace with CNI traffic shaper plugin
shaper := plugin.shaper()
ingress, egress, err := bandwidth.ExtractPodBandwidthResources(annotations)
if err != nil {
return fmt.Errorf("Error reading pod bandwidth annotations: %v", err)
}
if egress != nil || ingress != nil {
if err := shaper.ReconcileCIDR(fmt.Sprintf("%s/32", ip4.String()), egress, ingress); err != nil {
return fmt.Errorf("Failed to add pod to shaper: %v", err)
}
}
// TODO: replace with CNI port-forwarding plugin
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
return err
}
if portMappings != nil && len(portMappings) > 0 {
if err := plugin.hostportManager.Add(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,
PortMappings: portMappings,
IP: ip4,
HostNetwork: false,
}, BridgeName); err != nil {
return err
}
}
return nil
}
func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
plugin.mu.Lock()
defer plugin.mu.Unlock()
start := time.Now()
defer func() {
glog.V(4).Infof("SetUpPod took %v for %s/%s", time.Since(start), namespace, name)
}()
if err := plugin.Status(); err != nil {
return fmt.Errorf("Kubenet cannot SetUpPod: %v", err)
}
if err := plugin.setup(namespace, name, id, annotations); err != nil {
// Make sure everything gets cleaned up on errors
podIP, _ := plugin.podIPs[id]
if err := plugin.teardown(namespace, name, id, podIP); err != nil {
// Not a hard error or warning
glog.V(4).Infof("Failed to clean up %s/%s after SetUpPod failure: %v", namespace, name, err)
}
return err
}
// Need to SNAT outbound traffic from cluster
if err := plugin.ensureMasqRule(); err != nil {
glog.Errorf("Failed to ensure MASQ rule: %v", err)
}
return nil
}
// Tears down as much of a pod's network as it can even if errors occur. Returns
// an aggregate error composed of all errors encountered during the teardown.
func (plugin *kubenetNetworkPlugin) teardown(namespace string, name string, id kubecontainer.ContainerID, podIP string) error {
errList := []error{}
if podIP != "" {
glog.V(5).Infof("Removing pod IP %s from shaper", podIP)
// shaper wants /32
if err := plugin.shaper().Reset(fmt.Sprintf("%s/32", podIP)); err != nil {
// Possible bandwidth shaping wasn't enabled for this pod anyways
glog.V(4).Infof("Failed to remove pod IP %s from shaper: %v", podIP, err)
}
delete(plugin.podIPs, id)
}
if err := plugin.delContainerFromNetwork(plugin.netConfig, network.DefaultInterfaceName, namespace, name, id); err != nil {
// This is to prevent returning an error when TearDownPod is called twice on the same pod. It helps reduce event pollution.
if podIP != "" {
glog.Warningf("Failed to delete container from kubenet: %v", err)
} else {
errList = append(errList, err)
}
}
portMappings, err := plugin.host.GetPodPortMappings(id.ID)
if err != nil {
errList = append(errList, err)
} else if portMappings != nil && len(portMappings) > 0 {
if err = plugin.hostportManager.Remove(id.ID, &hostport.PodPortMapping{
Namespace: namespace,
Name: name,
PortMappings: portMappings,
HostNetwork: false,
}); err != nil {
errList = append(errList, err)
}
}
return utilerrors.NewAggregate(errList)
}
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
plugin.mu.Lock()
defer plugin.mu.Unlock()
start := time.Now()
defer func() {
glog.V(4).Infof("TearDownPod took %v for %s/%s", time.Since(start), namespace, name)
}()
if plugin.netConfig == nil {
return fmt.Errorf("Kubenet needs a PodCIDR to tear down pods")
}
// a missing cached IP is OK during teardown
podIP, _ := plugin.podIPs[id]
if err := plugin.teardown(namespace, name, id, podIP); err != nil {
return err
}
// Need to SNAT outbound traffic from cluster
if err := plugin.ensureMasqRule(); err != nil {
glog.Errorf("Failed to ensure MASQ rule: %v", err)
}
return nil
}
// TODO: Use the addToNetwork function to obtain the IP of the Pod. That will assume an idempotent ADD call to the plugin.
// Also fix the runtime's call to the Status function so it is made only when the IP is lost; there is no need for periodic calls
func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
plugin.mu.Lock()
defer plugin.mu.Unlock()
// Assuming the pod's IP does not change, try to retrieve it from the kubenet map first.
if podIP, ok := plugin.podIPs[id]; ok {
return &network.PodNetworkStatus{IP: net.ParseIP(podIP)}, nil
}
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
return nil, fmt.Errorf("Kubenet failed to retrieve network namespace path: %v", err)
}
if netnsPath == "" {
return nil, fmt.Errorf("Cannot find the network namespace, skipping pod network status for container %q", id)
}
ip, err := network.GetPodIP(plugin.execer, plugin.nsenterPath, netnsPath, network.DefaultInterfaceName)
if err != nil {
return nil, err
}
plugin.podIPs[id] = ip.String()
return &network.PodNetworkStatus{IP: ip}, nil
}
func (plugin *kubenetNetworkPlugin) Status() error {
// Can't set up pods if we don't have a PodCIDR yet
if plugin.netConfig == nil {
return fmt.Errorf("Kubenet does not have netConfig. This is most likely due to lack of PodCIDR")
}
if !plugin.checkRequiredCNIPlugins() {
return fmt.Errorf("could not locate kubenet required CNI plugins %v at %q", requiredCNIPlugins, plugin.binDirs)
}
return nil
}
// checkRequiredCNIPlugins returns true if all CNI plugins required by kubenet can be found at /opt/cni/bin or in the user-specified NetworkPluginDir.
func (plugin *kubenetNetworkPlugin) checkRequiredCNIPlugins() bool {
for _, dir := range plugin.binDirs {
if plugin.checkRequiredCNIPluginsInOneDir(dir) {
return true
}
}
return false
}
// checkRequiredCNIPluginsInOneDir returns true if all required CNI plugins are present in dir.
func (plugin *kubenetNetworkPlugin) checkRequiredCNIPluginsInOneDir(dir string) bool {
files, err := ioutil.ReadDir(dir)
if err != nil {
return false
}
for _, cniPlugin := range requiredCNIPlugins {
found := false
for _, file := range files {
if strings.TrimSpace(file.Name()) == cniPlugin {
found = true
break
}
}
if !found {
return false
}
}
return true
}
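// Illustrative note (an assumption, not part of the original file): the
// requiredCNIPlugins list is defined elsewhere in this file; at this point
// in kubenet's history it named the "bridge", "host-local" and "loopback"
// binaries, so a directory passes the check above only when all three are
// present, e.g. /opt/cni/bin/bridge, /opt/cni/bin/host-local and
// /opt/cni/bin/loopback.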
func (plugin *kubenetNetworkPlugin) buildCNIRuntimeConf(ifName string, id kubecontainer.ContainerID, needNetNs bool) (*libcni.RuntimeConf, error) {
netnsPath, err := plugin.host.GetNetNS(id.ID)
if needNetNs && err != nil {
glog.Errorf("Kubenet failed to retrieve network namespace path: %v", err)
}
return &libcni.RuntimeConf{
ContainerID: id.ID,
NetNS: netnsPath,
IfName: ifName,
}, nil
}
func (plugin *kubenetNetworkPlugin) addContainerToNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) (cnitypes.Result, error) {
rt, err := plugin.buildCNIRuntimeConf(ifName, id, true)
if err != nil {
return nil, fmt.Errorf("Error building CNI config: %v", err)
}
glog.V(3).Infof("Adding %s/%s to '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
// The network plugin can take up to 3 seconds to execute,
// so yield the lock while it runs.
plugin.mu.Unlock()
res, err := plugin.cniConfig.AddNetwork(config, rt)
plugin.mu.Lock()
if err != nil {
return nil, fmt.Errorf("Error adding container to network: %v", err)
}
return res, nil
}
func (plugin *kubenetNetworkPlugin) delContainerFromNetwork(config *libcni.NetworkConfig, ifName, namespace, name string, id kubecontainer.ContainerID) error {
rt, err := plugin.buildCNIRuntimeConf(ifName, id, false)
if err != nil {
return fmt.Errorf("Error building CNI config: %v", err)
}
glog.V(3).Infof("Removing %s/%s from '%s' with CNI '%s' plugin and runtime: %+v", namespace, name, config.Network.Name, config.Network.Type, rt)
err = plugin.cniConfig.DelNetwork(config, rt)
// The pod may not get deleted successfully at the first time.
// Ignore "no such file or directory" error in case the network has already been deleted in previous attempts.
if err != nil && !strings.Contains(err.Error(), "no such file or directory") {
return fmt.Errorf("Error removing container from network: %v", err)
}
return nil
}
// shaper retrieves the bandwidth shaper and, if it hasn't been fetched before,
// initializes it and ensures the bridge is appropriately configured
// This function should only be called while holding the `plugin.mu` lock
func (plugin *kubenetNetworkPlugin) shaper() bandwidth.BandwidthShaper {
if plugin.bandwidthShaper == nil {
plugin.bandwidthShaper = bandwidth.NewTCShaper(BridgeName)
plugin.bandwidthShaper.ReconcileInterface()
}
return plugin.bandwidthShaper
}
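// Usage note (a sketch; the address is hypothetical): teardown() above hands
// the shaper a /32 CIDR, so clearing a pod with IP 10.0.0.2 amounts to:
//
//   plugin.shaper().Reset("10.0.0.2/32")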
// TODO: make this into a goroutine and reconcile the dedup rules periodically
func (plugin *kubenetNetworkPlugin) syncEbtablesDedupRules(macAddr net.HardwareAddr) {
if plugin.ebtables == nil {
plugin.ebtables = utilebtables.New(plugin.execer)
glog.V(3).Infof("Flushing dedup chain")
if err := plugin.ebtables.FlushChain(utilebtables.TableFilter, dedupChain); err != nil {
glog.Errorf("Failed to flush dedup chain: %v", err)
}
}
_, err := plugin.ebtables.GetVersion()
if err != nil {
glog.Warningf("Failed to get ebtables version. Skip syncing ebtables dedup rules: %v", err)
return
}
glog.V(3).Infof("Filtering packets with ebtables on mac address: %v, gateway: %v, pod CIDR: %v", macAddr.String(), plugin.gateway.String(), plugin.podCidr)
_, err = plugin.ebtables.EnsureChain(utilebtables.TableFilter, dedupChain)
if err != nil {
glog.Errorf("Failed to ensure %v chain %v", utilebtables.TableFilter, dedupChain)
return
}
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, utilebtables.ChainOutput, "-j", string(dedupChain))
if err != nil {
glog.Errorf("Failed to ensure %v chain %v jump to %v chain: %v", utilebtables.TableFilter, utilebtables.ChainOutput, dedupChain, err)
return
}
commonArgs := []string{"-p", "IPv4", "-s", macAddr.String(), "-o", "veth+"}
_, err = plugin.ebtables.EnsureRule(utilebtables.Prepend, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.gateway.String(), "-j", "ACCEPT")...)
if err != nil {
glog.Errorf("Failed to ensure packets from cbr0 gateway to be accepted")
return
}
_, err = plugin.ebtables.EnsureRule(utilebtables.Append, utilebtables.TableFilter, dedupChain, append(commonArgs, "--ip-src", plugin.podCidr, "-j", "DROP")...)
if err != nil {
glog.Errorf("Failed to ensure packets from podCidr but has mac address of cbr0 to get dropped.")
return
}
}
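// For reference, a sketch of the rules ensured above in ebtables command
// form (placeholders in angle brackets; dedupChain is defined elsewhere in
// this file):
//
//   ebtables -t filter -N <dedupChain>
//   ebtables -t filter -A OUTPUT -j <dedupChain>
//   ebtables -t filter -I <dedupChain> -p IPv4 -s <cbr0 MAC> -o veth+ --ip-src <gateway> -j ACCEPT
//   ebtables -t filter -A <dedupChain> -p IPv4 -s <cbr0 MAC> -o veth+ --ip-src <podCIDR> -j DROP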
// disableContainerDAD disables duplicate address detection in the container.
// DAD has a negative effect on pod creation latency, since we have to wait
// a second or more for the addresses to leave the "tentative" state. Since
// we're sure there won't be an address conflict (since we manage them manually),
// this is safe. See issue 54651.
//
// This sets net.ipv6.conf.default.dad_transmits to 0. It must be run *before*
// the CNI plugins are run.
func (plugin *kubenetNetworkPlugin) disableContainerDAD(id kubecontainer.ContainerID) error {
key := "net/ipv6/conf/default/dad_transmits"
sysctlBin, err := plugin.execer.LookPath("sysctl")
if err != nil {
return fmt.Errorf("Could not find sysctl binary: %s", err)
}
netnsPath, err := plugin.host.GetNetNS(id.ID)
if err != nil {
return fmt.Errorf("Failed to get netns: %v", err)
}
if netnsPath == "" {
return fmt.Errorf("Pod has no network namespace")
}
// If the sysctl doesn't exist, IPv6 is disabled; return an error so the
// caller can log it and move on.
if _, err := plugin.sysctl.GetSysctl(key); err != nil {
return fmt.Errorf("IPv6 not enabled: %v", err)
}
output, err := plugin.execer.Command(plugin.nsenterPath,
fmt.Sprintf("--net=%s", netnsPath), "-F", "--",
sysctlBin, "-w", fmt.Sprintf("%s=%s", key, "0"),
).CombinedOutput()
if err != nil {
return fmt.Errorf("Failed to write sysctl: output: %s error: %s",
output, err)
}
return nil
}
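// For reference, the exec above amounts to running, inside the pod's
// network namespace and before any CNI ADD assigns addresses:
//
//   nsenter --net=<netnsPath> -F -- sysctl -w net/ipv6/conf/default/dad_transmits=0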


@ -1,262 +0,0 @@
/*
Copyright 2015 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
import (
"fmt"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"testing"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/cni/testing"
hostporttest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport/testing"
nettest "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing"
"k8s.io/kubernetes/pkg/util/bandwidth"
ipttest "k8s.io/kubernetes/pkg/util/iptables/testing"
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
"k8s.io/utils/exec"
fakeexec "k8s.io/utils/exec/testing"
)
// Compile-time assertion that kubenetNetworkPlugin fulfills the NetworkPlugin interface.
var _ network.NetworkPlugin = &kubenetNetworkPlugin{}
func newFakeKubenetPlugin(initMap map[kubecontainer.ContainerID]string, execer exec.Interface, host network.Host) *kubenetNetworkPlugin {
return &kubenetNetworkPlugin{
podIPs: initMap,
execer: execer,
mtu: 1460,
host: host,
}
}
func TestGetPodNetworkStatus(t *testing.T) {
podIPMap := make(map[kubecontainer.ContainerID]string)
podIPMap[kubecontainer.ContainerID{ID: "1"}] = "10.245.0.2"
podIPMap[kubecontainer.ContainerID{ID: "2"}] = "10.245.0.3"
testCases := []struct {
id string
expectError bool
expectIP string
}{
// present in the pod IP map
{
"1",
false,
"10.245.0.2",
},
{
"2",
false,
"10.245.0.3",
},
// not in the pod IP map
{
"3",
true,
"",
},
//TODO: add test cases for retrieving ip inside container network namespace
}
fakeCmds := make([]fakeexec.FakeCommandAction, 0)
for _, tc := range testCases {
tc := tc // capture the range variable; the closure below runs later
// the fake commands return the IP from the given index, or an error
fCmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) {
ip, ok := podIPMap[kubecontainer.ContainerID{ID: tc.id}]
if !ok {
return nil, fmt.Errorf("Pod IP %q not found", tc.id)
}
return []byte(ip), nil
},
},
}
fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fCmd, cmd, args...)
})
}
fexec := fakeexec.FakeExec{
CommandScript: fakeCmds,
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
fhost := nettest.NewFakeHost(nil)
fakeKubenet := newFakeKubenetPlugin(podIPMap, &fexec, fhost)
for i, tc := range testCases {
out, err := fakeKubenet.GetPodNetworkStatus("", "", kubecontainer.ContainerID{ID: tc.id})
if tc.expectError {
if err == nil {
t.Errorf("Test case %d expects error but got none", i)
}
continue
} else {
if err != nil {
t.Errorf("Test case %d expects error but got error: %v", i, err)
}
}
if tc.expectIP != out.IP.String() {
t.Errorf("Test case %d expects ip %s but got %s", i, tc.expectIP, out.IP.String())
}
}
}
// TestTeardownCallsShaper tests that a `TearDown` call does call
// `shaper.Reset`
func TestTeardownCallsShaper(t *testing.T) {
fexec := &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{},
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
fhost := nettest.NewFakeHost(nil)
fshaper := &bandwidth.FakeShaper{}
mockcni := &mock_cni.MockCNI{}
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
kubenet.cniConfig = mockcni
kubenet.iptables = ipttest.NewFake()
kubenet.bandwidthShaper = fshaper
kubenet.hostportSyncer = hostporttest.NewFakeHostportSyncer()
mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
details := make(map[string]interface{})
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = "10.0.0.1/24"
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
kubenet.podIPs[existingContainerID] = "10.0.0.1"
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
t.Fatalf("Unexpected error in TearDownPod: %v", err)
}
assert.Equal(t, []string{"10.0.0.1/32"}, fshaper.ResetCIDRs, "shaper.Reset should have been called")
mockcni.AssertExpectations(t)
}
// TestInit_MTU tests that an `Init` call with an MTU sets the MTU
func TestInit_MTU(t *testing.T) {
var fakeCmds []fakeexec.FakeCommandAction
{
// modprobe br-netfilter
fCmd := fakeexec.FakeCmd{
CombinedOutputScript: []fakeexec.FakeCombinedOutputAction{
func() ([]byte, error) {
return make([]byte, 0), nil
},
},
}
fakeCmds = append(fakeCmds, func(cmd string, args ...string) exec.Cmd {
return fakeexec.InitFakeCmd(&fCmd, cmd, args...)
})
}
fexec := &fakeexec.FakeExec{
CommandScript: fakeCmds,
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
fhost := nettest.NewFakeHost(nil)
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
kubenet.iptables = ipttest.NewFake()
sysctl := sysctltest.NewFake()
sysctl.Settings["net/bridge/bridge-nf-call-iptables"] = 0
kubenet.sysctl = sysctl
if err := kubenet.Init(nettest.NewFakeHost(nil), kubeletconfig.HairpinNone, "10.0.0.0/8", 1234); err != nil {
t.Fatalf("Unexpected error in Init: %v", err)
}
assert.Equal(t, 1234, kubenet.mtu, "kubenet.mtu should have been set")
assert.Equal(t, 1, sysctl.Settings["net/bridge/bridge-nf-call-iptables"], "net/bridge/bridge-nf-call-iptables sysctl should have been set")
}
// TestTearDownWithoutRuntime invokes the plugin without a runtime.
// This is how kubenet is invoked from the CRI.
func TestTearDownWithoutRuntime(t *testing.T) {
testCases := []struct {
podCIDR string
ip string
expectedGateway string
}{
{
podCIDR: "10.0.0.1/24",
ip: "10.0.0.1",
expectedGateway: "10.0.0.1",
},
{
podCIDR: "2001:beef::1/48",
ip: "2001:beef::1",
expectedGateway: "2001:beef::1",
},
}
for _, tc := range testCases {
fhost := nettest.NewFakeHost(nil)
fhost.Legacy = false
fhost.Runtime = nil
mockcni := &mock_cni.MockCNI{}
fexec := &fakeexec.FakeExec{
CommandScript: []fakeexec.FakeCommandAction{},
LookPathFunc: func(file string) (string, error) {
return fmt.Sprintf("/fake-bin/%s", file), nil
},
}
kubenet := newFakeKubenetPlugin(map[kubecontainer.ContainerID]string{}, fexec, fhost)
kubenet.cniConfig = mockcni
kubenet.iptables = ipttest.NewFake()
details := make(map[string]interface{})
details[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = tc.podCIDR
kubenet.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, details)
if kubenet.gateway.String() != tc.expectedGateway {
t.Errorf("generated gateway: %q, expecting: %q", kubenet.gateway.String(), tc.expectedGateway)
}
if kubenet.podCidr != tc.podCIDR {
t.Errorf("generated podCidr: %q, expecting: %q", kubenet.podCidr, tc.podCIDR)
}
existingContainerID := kubecontainer.BuildContainerID("docker", "123")
kubenet.podIPs[existingContainerID] = tc.ip
mockcni.On("DelNetwork", mock.AnythingOfType("*libcni.NetworkConfig"), mock.AnythingOfType("*libcni.RuntimeConf")).Return(nil)
if err := kubenet.TearDownPod("namespace", "name", existingContainerID); err != nil {
t.Fatalf("Unexpected error in TearDownPod: %v", err)
}
// Assert that the CNI DelNetwork made it through and we didn't crash
// without a runtime.
mockcni.AssertExpectations(t)
}
}
// TODO: add unit tests for each implementation of the network plugin interface


@ -1,55 +0,0 @@
// +build !linux
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package kubenet
import (
"fmt"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
)
type kubenetNetworkPlugin struct {
network.NoopNetworkPlugin
}
func NewPlugin(networkPluginDirs []string) network.NetworkPlugin {
return &kubenetNetworkPlugin{}
}
func (plugin *kubenetNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
return fmt.Errorf("Kubenet is not supported in this build")
}
func (plugin *kubenetNetworkPlugin) Name() string {
return "kubenet"
}
func (plugin *kubenetNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
return fmt.Errorf("Kubenet is not supported in this build")
}
func (plugin *kubenetNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
return fmt.Errorf("Kubenet is not supported in this build")
}
func (plugin *kubenetNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
return nil, fmt.Errorf("Kubenet is not supported in this build")
}


@ -1,23 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["metrics.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics",
visibility = ["//visibility:public"],
deps = ["//vendor/github.com/prometheus/client_golang/prometheus:go_default_library"],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@ -1,61 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package metrics
import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
)
const (
// NetworkPluginOperationsKey is the key for operation count metrics.
NetworkPluginOperationsKey = "network_plugin_operations"
// NetworkPluginOperationsLatencyKey is the key for the operation latency metrics.
NetworkPluginOperationsLatencyKey = "network_plugin_operations_latency_microseconds"
// Keep the "kubelet" subsystem for backward compatibility.
kubeletSubsystem = "kubelet"
)
var (
// NetworkPluginOperationsLatency collects operation latency numbers by operation
// type.
NetworkPluginOperationsLatency = prometheus.NewSummaryVec(
prometheus.SummaryOpts{
Subsystem: kubeletSubsystem,
Name: NetworkPluginOperationsLatencyKey,
Help: "Latency in microseconds of network plugin operations. Broken down by operation type.",
},
[]string{"operation_type"},
)
)
var registerMetrics sync.Once
// Register all metrics.
func Register() {
registerMetrics.Do(func() {
prometheus.MustRegister(NetworkPluginOperationsLatency)
})
}
// SinceInMicroseconds gets the time since the specified start in microseconds.
func SinceInMicroseconds(start time.Time) float64 {
return float64(time.Since(start).Nanoseconds() / time.Microsecond.Nanoseconds())
}


@ -1,24 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
// TODO: Consider making this value configurable.
const DefaultInterfaceName = "eth0"
// UseDefaultMTU is a marker value that indicates the plugin should determine its own MTU
// It is the zero value, so a non-initialized value will mean "UseDefault"
const UseDefaultMTU = 0


@ -1,397 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package network
import (
"fmt"
"net"
"strings"
"sync"
"time"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/metrics"
utilsysctl "k8s.io/kubernetes/pkg/util/sysctl"
utilexec "k8s.io/utils/exec"
)
const DefaultPluginName = "kubernetes.io/no-op"
// NET_PLUGIN_EVENT_POD_CIDR_CHANGE is fired when the node's Pod CIDR becomes
// known while using the controller manager's --allocate-node-cidrs=true option.
const NET_PLUGIN_EVENT_POD_CIDR_CHANGE = "pod-cidr-change"
const NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR = "pod-cidr"
// NetworkPlugin is an interface to network plugins for the kubelet
type NetworkPlugin interface {
// Init initializes the plugin. This will be called exactly once
// before any other methods are called.
Init(host Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error
// Called on various events like:
// NET_PLUGIN_EVENT_POD_CIDR_CHANGE
Event(name string, details map[string]interface{})
// Name returns the plugin's name. This will be used when searching
// for a plugin by name.
Name() string
// Returns a set of NET_PLUGIN_CAPABILITY_*
Capabilities() utilsets.Int
// SetUpPod is the method called after the infra container of
// the pod has been created but before the other containers of the
// pod are launched.
SetUpPod(namespace string, name string, podSandboxID kubecontainer.ContainerID, annotations map[string]string) error
// TearDownPod is the method called before a pod's infra container will be deleted
TearDownPod(namespace string, name string, podSandboxID kubecontainer.ContainerID) error
// GetPodNetworkStatus is the method called to obtain the ipv4 or ipv6 addresses of the container
GetPodNetworkStatus(namespace string, name string, podSandboxID kubecontainer.ContainerID) (*PodNetworkStatus, error)
// Status returns error if the network plugin is in error state
Status() error
}
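// Typical call order, inferred from the contract above (orientation only,
// not normative): Init once at startup; Event may deliver
// NET_PLUGIN_EVENT_POD_CIDR_CHANGE; then, per pod, SetUpPod after the infra
// container is created, GetPodNetworkStatus to report the pod IP, and
// TearDownPod before the infra container is deleted; Status may be polled
// at any time.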
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PodNetworkStatus stores the network status of a pod (currently just the primary IP address)
// This struct represents version "v1beta1"
type PodNetworkStatus struct {
metav1.TypeMeta `json:",inline"`
// IP is the primary ipv4/ipv6 address of the pod. Among other things it is the address that -
// - kube expects to be reachable across the cluster
// - service endpoints are constructed with
// - will be reported in the PodStatus.PodIP field (will override the IP reported by docker)
IP net.IP `json:"ip" description:"Primary IP address of the pod"`
}
// Host is an interface that plugins can use to access the kubelet.
// TODO(#35457): get rid of this backchannel to the kubelet. The scope of
// the backchannel is restricted to host-ports/testing, and restricted
// to kubenet. No other network plugin wrapper needs it. Other plugins
// only require a way to access namespace information and port mapping
// information, which they can do directly through the embedded interfaces.
type Host interface {
// NamespaceGetter is a getter for sandbox namespace information.
NamespaceGetter
// PortMappingGetter is a getter for sandbox port mapping information.
PortMappingGetter
}
// NamespaceGetter is an interface to retrieve namespace information for a given
// podSandboxID. Typically implemented by runtime shims that are closely coupled to
// CNI plugin wrappers like kubenet.
type NamespaceGetter interface {
// GetNetNS returns network namespace information for the given containerID.
// Runtimes should *never* return an empty namespace and nil error for
// a container; if error is nil then the namespace string must be valid.
GetNetNS(containerID string) (string, error)
}
// PortMappingGetter is an interface to retrieve port mapping information for a given
// podSandboxID. Typically implemented by runtime shims that are closely coupled to
// CNI plugin wrappers like kubenet.
type PortMappingGetter interface {
// GetPodPortMappings returns sandbox port mappings information.
GetPodPortMappings(containerID string) ([]*hostport.PortMapping, error)
}
// InitNetworkPlugin inits the plugin that matches networkPluginName. Plugins must have unique names.
func InitNetworkPlugin(plugins []NetworkPlugin, networkPluginName string, host Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) (NetworkPlugin, error) {
if networkPluginName == "" {
// default to the no_op plugin
plug := &NoopNetworkPlugin{}
plug.Sysctl = utilsysctl.New()
if err := plug.Init(host, hairpinMode, nonMasqueradeCIDR, mtu); err != nil {
return nil, err
}
return plug, nil
}
pluginMap := map[string]NetworkPlugin{}
allErrs := []error{}
for _, plugin := range plugins {
name := plugin.Name()
if errs := validation.IsQualifiedName(name); len(errs) != 0 {
allErrs = append(allErrs, fmt.Errorf("network plugin has invalid name: %q: %s", name, strings.Join(errs, ";")))
continue
}
if _, found := pluginMap[name]; found {
allErrs = append(allErrs, fmt.Errorf("network plugin %q was registered more than once", name))
continue
}
pluginMap[name] = plugin
}
chosenPlugin := pluginMap[networkPluginName]
if chosenPlugin != nil {
err := chosenPlugin.Init(host, hairpinMode, nonMasqueradeCIDR, mtu)
if err != nil {
allErrs = append(allErrs, fmt.Errorf("Network plugin %q failed init: %v", networkPluginName, err))
} else {
glog.V(1).Infof("Loaded network plugin %q", networkPluginName)
}
} else {
allErrs = append(allErrs, fmt.Errorf("Network plugin %q not found.", networkPluginName))
}
return chosenPlugin, utilerrors.NewAggregate(allErrs)
}
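// Illustrative use (values are hypothetical, not from the original file):
//
//   plug, err := InitNetworkPlugin(plugins, "kubenet", host,
//       kubeletconfig.HairpinVeth, "10.0.0.0/8", UseDefaultMTU)
//   if err != nil {
//       // err aggregates registration and init problems; plug may be nil
//       // if no plugin matched the requested name.
//   }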
type NoopNetworkPlugin struct {
Sysctl utilsysctl.Interface
}
const sysctlBridgeCallIPTables = "net/bridge/bridge-nf-call-iptables"
const sysctlBridgeCallIP6Tables = "net/bridge/bridge-nf-call-ip6tables"
func (plugin *NoopNetworkPlugin) Init(host Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
// Set bridge-nf-call-iptables=1 to maintain compatibility with older
// kubernetes versions to ensure the iptables-based kube proxy functions
// correctly. Other plugins are responsible for setting this correctly
// depending on whether or not they connect containers to Linux bridges
// or use some other mechanism (i.e., an SDN vswitch).
// Ensure the netfilter module is loaded on kernel >= 3.18; previously
// it was built-in.
utilexec.New().Command("modprobe", "br-netfilter").CombinedOutput()
if err := plugin.Sysctl.SetSysctl(sysctlBridgeCallIPTables, 1); err != nil {
glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIPTables, err)
}
if val, err := plugin.Sysctl.GetSysctl(sysctlBridgeCallIP6Tables); err == nil {
if val != 1 {
if err = plugin.Sysctl.SetSysctl(sysctlBridgeCallIP6Tables, 1); err != nil {
glog.Warningf("can't set sysctl %s: %v", sysctlBridgeCallIP6Tables, err)
}
}
}
return nil
}
func (plugin *NoopNetworkPlugin) Event(name string, details map[string]interface{}) {
}
func (plugin *NoopNetworkPlugin) Name() string {
return DefaultPluginName
}
func (plugin *NoopNetworkPlugin) Capabilities() utilsets.Int {
return utilsets.NewInt()
}
func (plugin *NoopNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
return nil
}
func (plugin *NoopNetworkPlugin) TearDownPod(namespace string, name string, id kubecontainer.ContainerID) error {
return nil
}
func (plugin *NoopNetworkPlugin) GetPodNetworkStatus(namespace string, name string, id kubecontainer.ContainerID) (*PodNetworkStatus, error) {
return nil, nil
}
func (plugin *NoopNetworkPlugin) Status() error {
return nil
}
func getOnePodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceName, addrType string) (net.IP, error) {
// Try to retrieve ip inside container network namespace
output, err := execer.Command(nsenterPath, fmt.Sprintf("--net=%s", netnsPath), "-F", "--",
"ip", "-o", addrType, "addr", "show", "dev", interfaceName, "scope", "global").CombinedOutput()
if err != nil {
return nil, fmt.Errorf("Unexpected command output %s with error: %v", output, err)
}
lines := strings.Split(string(output), "\n")
if len(lines) < 1 {
return nil, fmt.Errorf("Unexpected command output %s", output)
}
fields := strings.Fields(lines[0])
if len(fields) < 4 {
return nil, fmt.Errorf("Unexpected address output %s ", lines[0])
}
ip, _, err := net.ParseCIDR(fields[3])
if err != nil {
return nil, fmt.Errorf("CNI failed to parse ip from output %s due to %v", output, err)
}
return ip, nil
}
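// Example of a line parsed above (hypothetical output; real output varies):
//
//   2: eth0    inet 10.244.1.5/24 brd 10.244.1.255 scope global eth0
//
// fields[3] is "10.244.1.5/24", from which net.ParseCIDR yields 10.244.1.5.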
// GetPodIP gets the IP of the pod by inspecting the network info inside the pod's network namespace.
func GetPodIP(execer utilexec.Interface, nsenterPath, netnsPath, interfaceName string) (net.IP, error) {
ip, err := getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, "-4")
if err != nil {
// Fall back to IPv6 address if no IPv4 address is present
ip, err = getOnePodIP(execer, nsenterPath, netnsPath, interfaceName, "-6")
}
if err != nil {
return nil, err
}
return ip, nil
}
type NoopPortMappingGetter struct{}
func (*NoopPortMappingGetter) GetPodPortMappings(containerID string) ([]*hostport.PortMapping, error) {
return nil, nil
}
// The PluginManager wraps a kubelet network plugin and provides synchronization
// for a given pod's network operations. Each pod's setup/teardown/status operations
// are synchronized against each other, but network operations of other pods can
// proceed in parallel.
type PluginManager struct {
// Network plugin being wrapped
plugin NetworkPlugin
// Pod list and lock
podsLock sync.Mutex
pods map[string]*podLock
}
func NewPluginManager(plugin NetworkPlugin) *PluginManager {
metrics.Register()
return &PluginManager{
plugin: plugin,
pods: make(map[string]*podLock),
}
}
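// Illustrative use (pod values are hypothetical): the manager serializes
// operations for a given pod while letting different pods proceed in
// parallel.
//
//   pm := NewPluginManager(plug)
//   if err := pm.SetUpPod("default", "nginx", containerID, nil); err != nil { ... }
//   status, err := pm.GetPodNetworkStatus("default", "nginx", containerID)
//   err = pm.TearDownPod("default", "nginx", containerID)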
func (pm *PluginManager) PluginName() string {
return pm.plugin.Name()
}
func (pm *PluginManager) Event(name string, details map[string]interface{}) {
pm.plugin.Event(name, details)
}
func (pm *PluginManager) Status() error {
return pm.plugin.Status()
}
type podLock struct {
// Count of in-flight operations for this pod; when this reaches zero
// the lock can be removed from the pod map
refcount uint
// Lock to synchronize operations for this specific pod
mu sync.Mutex
}
// podLock returns the lock used to serialize network operations for a
// specific pod. If that pod is not yet in the pod map, it will be added.
// The reference count for the pod will be increased.
func (pm *PluginManager) podLock(fullPodName string) *sync.Mutex {
pm.podsLock.Lock()
defer pm.podsLock.Unlock()
lock, ok := pm.pods[fullPodName]
if !ok {
lock = &podLock{}
pm.pods[fullPodName] = lock
}
lock.refcount++
return &lock.mu
}
// Unlock network operations for a specific pod. The reference count for the
// pod will be decreased. If the reference count reaches zero, the pod will be
// removed from the pod map.
func (pm *PluginManager) podUnlock(fullPodName string) {
pm.podsLock.Lock()
defer pm.podsLock.Unlock()
lock, ok := pm.pods[fullPodName]
if !ok {
glog.Warningf("Unbalanced pod lock unref for %s", fullPodName)
return
} else if lock.refcount == 0 {
// This should never ever happen, but handle it anyway
delete(pm.pods, fullPodName)
glog.Warningf("Pod lock for %s still in map with zero refcount", fullPodName)
return
}
lock.refcount--
lock.mu.Unlock()
if lock.refcount == 0 {
delete(pm.pods, fullPodName)
}
}
// recordOperation records operation and duration
func recordOperation(operation string, start time.Time) {
metrics.NetworkPluginOperationsLatency.WithLabelValues(operation).Observe(metrics.SinceInMicroseconds(start))
}
func (pm *PluginManager) GetPodNetworkStatus(podNamespace, podName string, id kubecontainer.ContainerID) (*PodNetworkStatus, error) {
defer recordOperation("get_pod_network_status", time.Now())
fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace)
pm.podLock(fullPodName).Lock()
defer pm.podUnlock(fullPodName)
netStatus, err := pm.plugin.GetPodNetworkStatus(podNamespace, podName, id)
if err != nil {
return nil, fmt.Errorf("NetworkPlugin %s failed on the status hook for pod %q: %v", pm.plugin.Name(), fullPodName, err)
}
return netStatus, nil
}
func (pm *PluginManager) SetUpPod(podNamespace, podName string, id kubecontainer.ContainerID, annotations map[string]string) error {
defer recordOperation("set_up_pod", time.Now())
fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace)
pm.podLock(fullPodName).Lock()
defer pm.podUnlock(fullPodName)
glog.V(3).Infof("Calling network plugin %s to set up pod %q", pm.plugin.Name(), fullPodName)
if err := pm.plugin.SetUpPod(podNamespace, podName, id, annotations); err != nil {
return fmt.Errorf("NetworkPlugin %s failed to set up pod %q network: %v", pm.plugin.Name(), fullPodName, err)
}
return nil
}
func (pm *PluginManager) TearDownPod(podNamespace, podName string, id kubecontainer.ContainerID) error {
defer recordOperation("tear_down_pod", time.Now())
fullPodName := kubecontainer.BuildPodFullName(podName, podNamespace)
pm.podLock(fullPodName).Lock()
defer pm.podUnlock(fullPodName)
glog.V(3).Infof("Calling network plugin %s to tear down pod %q", pm.plugin.Name(), fullPodName)
if err := pm.plugin.TearDownPod(podNamespace, podName, id); err != nil {
return fmt.Errorf("NetworkPlugin %s failed to teardown pod %q network: %v", pm.plugin.Name(), fullPodName, err)
}
return nil
}


@ -1,55 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"fake_host.go",
"mock_network_plugin.go",
],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/network/testing",
deps = [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/container/testing:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/kubelet/dockershim/network/hostport:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/k8s.io/api/core/v1:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/k8s.io/client-go/kubernetes:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["plugins_test.go"],
embed = [":go_default_library"],
deps = [
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
"//pkg/kubelet/container:go_default_library",
"//pkg/kubelet/dockershim/network:go_default_library",
"//pkg/util/sysctl/testing:go_default_library",
"//vendor/github.com/golang/mock/gomock:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
"//vendor/k8s.io/apimachinery/pkg/util/sets:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,73 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
// helper for testing plugins
// a fake host is created here that can be used by plugins for testing
import (
"k8s.io/api/core/v1"
clientset "k8s.io/client-go/kubernetes"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network/hostport"
)
type fakeNetworkHost struct {
fakeNamespaceGetter
FakePortMappingGetter
kubeClient clientset.Interface
Legacy bool
Runtime *containertest.FakeRuntime
}
func NewFakeHost(kubeClient clientset.Interface) *fakeNetworkHost {
host := &fakeNetworkHost{kubeClient: kubeClient, Legacy: true, Runtime: &containertest.FakeRuntime{}}
return host
}
func (fnh *fakeNetworkHost) GetPodByName(name, namespace string) (*v1.Pod, bool) {
return nil, false
}
func (fnh *fakeNetworkHost) GetKubeClient() clientset.Interface {
return nil
}
func (nh *fakeNetworkHost) GetRuntime() kubecontainer.Runtime {
return nh.Runtime
}
func (nh *fakeNetworkHost) SupportsLegacyFeatures() bool {
return nh.Legacy
}
type fakeNamespaceGetter struct {
ns string
}
func (nh *fakeNamespaceGetter) GetNetNS(containerID string) (string, error) {
return nh.ns, nil
}
type FakePortMappingGetter struct {
PortMaps map[string][]*hostport.PortMapping
}
func (pm *FakePortMappingGetter) GetPodPortMappings(containerID string) ([]*hostport.PortMapping, error) {
return pm.PortMaps[containerID], nil
}


@ -1,133 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Generated code, generated via: `mockgen k8s.io/kubernetes/pkg/kubelet/network NetworkPlugin > $GOPATH/src/k8s.io/kubernetes/pkg/kubelet/network/testing/mock_network_plugin.go`
// Edited by hand for boilerplate and gofmt.
// TODO, this should be autogenerated/autoupdated by scripts.
package testing
import (
gomock "github.com/golang/mock/gomock"
sets "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
container "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
)
// Mock of NetworkPlugin interface
type MockNetworkPlugin struct {
ctrl *gomock.Controller
recorder *_MockNetworkPluginRecorder
}
// Recorder for MockNetworkPlugin (not exported)
type _MockNetworkPluginRecorder struct {
mock *MockNetworkPlugin
}
func NewMockNetworkPlugin(ctrl *gomock.Controller) *MockNetworkPlugin {
mock := &MockNetworkPlugin{ctrl: ctrl}
mock.recorder = &_MockNetworkPluginRecorder{mock}
return mock
}
func (_m *MockNetworkPlugin) EXPECT() *_MockNetworkPluginRecorder {
return _m.recorder
}
func (_m *MockNetworkPlugin) Capabilities() sets.Int {
ret := _m.ctrl.Call(_m, "Capabilities")
ret0, _ := ret[0].(sets.Int)
return ret0
}
func (_m *MockNetworkPlugin) Finish() {
_m.ctrl.Finish()
}
func (_mr *_MockNetworkPluginRecorder) Capabilities() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Capabilities")
}
func (_m *MockNetworkPlugin) Event(_param0 string, _param1 map[string]interface{}) {
_m.ctrl.Call(_m, "Event", _param0, _param1)
}
func (_mr *_MockNetworkPluginRecorder) Event(arg0, arg1 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Event", arg0, arg1)
}
func (_m *MockNetworkPlugin) GetPodNetworkStatus(_param0 string, _param1 string, _param2 container.ContainerID) (*network.PodNetworkStatus, error) {
ret := _m.ctrl.Call(_m, "GetPodNetworkStatus", _param0, _param1, _param2)
ret0, _ := ret[0].(*network.PodNetworkStatus)
ret1, _ := ret[1].(error)
return ret0, ret1
}
func (_mr *_MockNetworkPluginRecorder) GetPodNetworkStatus(arg0, arg1, arg2 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "GetPodNetworkStatus", arg0, arg1, arg2)
}
func (_m *MockNetworkPlugin) Init(_param0 network.Host, _param1 kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
ret := _m.ctrl.Call(_m, "Init", _param0, _param1)
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockNetworkPluginRecorder) Init(arg0, arg1 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Init", arg0, arg1)
}
func (_m *MockNetworkPlugin) Name() string {
ret := _m.ctrl.Call(_m, "Name")
ret0, _ := ret[0].(string)
return ret0
}
func (_mr *_MockNetworkPluginRecorder) Name() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Name")
}
func (_m *MockNetworkPlugin) SetUpPod(_param0 string, _param1 string, _param2 container.ContainerID, annotations map[string]string) error {
ret := _m.ctrl.Call(_m, "SetUpPod", _param0, _param1, _param2)
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockNetworkPluginRecorder) SetUpPod(arg0, arg1, arg2 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "SetUpPod", arg0, arg1, arg2)
}
func (_m *MockNetworkPlugin) Status() error {
ret := _m.ctrl.Call(_m, "Status")
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockNetworkPluginRecorder) Status() *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "Status")
}
func (_m *MockNetworkPlugin) TearDownPod(_param0 string, _param1 string, _param2 container.ContainerID) error {
ret := _m.ctrl.Call(_m, "TearDownPod", _param0, _param1, _param2)
ret0, _ := ret[0].(error)
return ret0
}
func (_mr *_MockNetworkPluginRecorder) TearDownPod(arg0, arg1, arg2 interface{}) *gomock.Call {
return _mr.mock.ctrl.RecordCall(_mr.mock, "TearDownPod", arg0, arg1, arg2)
}


@ -1,249 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package testing
import (
"fmt"
"net"
"sync"
"testing"
utilsets "k8s.io/apimachinery/pkg/util/sets"
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
"k8s.io/kubernetes/pkg/kubelet/dockershim/network"
sysctltest "k8s.io/kubernetes/pkg/util/sysctl/testing"
"github.com/golang/mock/gomock"
"github.com/stretchr/testify/assert"
)
func TestSelectDefaultPlugin(t *testing.T) {
allPlugins := []network.NetworkPlugin{}
plug, err := network.InitNetworkPlugin(allPlugins, "", NewFakeHost(nil), kubeletconfig.HairpinNone, "10.0.0.0/8", network.UseDefaultMTU)
if err != nil {
t.Fatalf("Unexpected error in selecting default plugin: %v", err)
}
if plug == nil {
t.Fatalf("Failed to select the default plugin.")
}
if plug.Name() != network.DefaultPluginName {
t.Errorf("Failed to select the default plugin. Expected %s. Got %s", network.DefaultPluginName, plug.Name())
}
}
func TestInit(t *testing.T) {
tests := []struct {
setting string
expectedLen int
}{
{
setting: "net/bridge/bridge-nf-call-iptables",
expectedLen: 1,
},
{
setting: "net/bridge/bridge-nf-call-ip6tables",
expectedLen: 2,
},
}
for _, tt := range tests {
sysctl := sysctltest.NewFake()
sysctl.Settings[tt.setting] = 0
plug := &network.NoopNetworkPlugin{}
plug.Sysctl = sysctl
plug.Init(NewFakeHost(nil), kubeletconfig.HairpinNone, "10.0.0.0/8", network.UseDefaultMTU)
// Verify the sysctl specified is set
assert.Equal(t, 1, sysctl.Settings[tt.setting], tt.setting+" sysctl should have been set")
// Verify iptables is always set
assert.Equal(t, 1, sysctl.Settings["net/bridge/bridge-nf-call-iptables"], "net/bridge/bridge-nf-call-iptables sysctl should have been set")
// Verify ip6tables is only set if it existed
assert.Len(t, sysctl.Settings, tt.expectedLen, "length wrong for "+tt.setting)
}
}
func TestPluginManager(t *testing.T) {
ctrl := gomock.NewController(t)
fnp := NewMockNetworkPlugin(ctrl)
defer fnp.Finish()
pm := network.NewPluginManager(fnp)
fnp.EXPECT().Name().Return("someNetworkPlugin").AnyTimes()
allCreatedWg := sync.WaitGroup{}
allCreatedWg.Add(1)
allDoneWg := sync.WaitGroup{}
// 10 pods, 4 setup/status/teardown runs each. Ensure that network locking
// works and the pod map isn't concurrently accessed
for i := 0; i < 10; i++ {
podName := fmt.Sprintf("pod%d", i)
containerID := kubecontainer.ContainerID{ID: podName}
fnp.EXPECT().SetUpPod("", podName, containerID).Return(nil).Times(4)
fnp.EXPECT().GetPodNetworkStatus("", podName, containerID).Return(&network.PodNetworkStatus{IP: net.ParseIP("1.2.3.4")}, nil).Times(4)
fnp.EXPECT().TearDownPod("", podName, containerID).Return(nil).Times(4)
for x := 0; x < 4; x++ {
allDoneWg.Add(1)
go func(name string, id kubecontainer.ContainerID, num int) {
defer allDoneWg.Done()
// Block all goroutines from running until all have
// been created and are ready. This ensures we
// have more pod network operations running
// concurrently.
allCreatedWg.Wait()
if err := pm.SetUpPod("", name, id, nil); err != nil {
t.Errorf("Failed to set up pod %q: %v", name, err)
return
}
if _, err := pm.GetPodNetworkStatus("", name, id); err != nil {
t.Errorf("Failed to inspect pod %q: %v", name, err)
return
}
if err := pm.TearDownPod("", name, id); err != nil {
t.Errorf("Failed to tear down pod %q: %v", name, err)
return
}
}(podName, containerID, x)
}
}
// All goroutines have been created; release them to run concurrently
allCreatedWg.Done()
// Wait for them all to finish
allDoneWg.Wait()
}
type hookableFakeNetworkPluginSetupHook func(namespace, name string, id kubecontainer.ContainerID)
type hookableFakeNetworkPlugin struct {
setupHook hookableFakeNetworkPluginSetupHook
}
func newHookableFakeNetworkPlugin(setupHook hookableFakeNetworkPluginSetupHook) *hookableFakeNetworkPlugin {
return &hookableFakeNetworkPlugin{
setupHook: setupHook,
}
}
func (p *hookableFakeNetworkPlugin) Init(host network.Host, hairpinMode kubeletconfig.HairpinMode, nonMasqueradeCIDR string, mtu int) error {
return nil
}
func (p *hookableFakeNetworkPlugin) Event(name string, details map[string]interface{}) {
}
func (p *hookableFakeNetworkPlugin) Name() string {
return "fakeplugin"
}
func (p *hookableFakeNetworkPlugin) Capabilities() utilsets.Int {
return utilsets.NewInt()
}
func (p *hookableFakeNetworkPlugin) SetUpPod(namespace string, name string, id kubecontainer.ContainerID, annotations map[string]string) error {
if p.setupHook != nil {
p.setupHook(namespace, name, id)
}
return nil
}
func (p *hookableFakeNetworkPlugin) TearDownPod(string, string, kubecontainer.ContainerID) error {
return nil
}
func (p *hookableFakeNetworkPlugin) GetPodNetworkStatus(string, string, kubecontainer.ContainerID) (*network.PodNetworkStatus, error) {
return &network.PodNetworkStatus{IP: net.ParseIP("10.1.2.3")}, nil
}
func (p *hookableFakeNetworkPlugin) Status() error {
return nil
}
// Ensure that one pod's network operations don't block another's. If the
// test is successful (eg, first pod doesn't block on second) the test
// will complete. If unsuccessful, it will hang and get killed.
func TestMultiPodParallelNetworkOps(t *testing.T) {
podWg := sync.WaitGroup{}
podWg.Add(1)
// Can't do this with MockNetworkPlugin because the gomock controller
// has its own locks which don't allow the parallel network operation
// to proceed.
didWait := false
fakePlugin := newHookableFakeNetworkPlugin(func(podNamespace, podName string, id kubecontainer.ContainerID) {
if podName == "waiter" {
podWg.Wait()
didWait = true
}
})
pm := network.NewPluginManager(fakePlugin)
opsWg := sync.WaitGroup{}
// Start the pod that will wait for the other to complete
opsWg.Add(1)
go func() {
defer opsWg.Done()
podName := "waiter"
containerID := kubecontainer.ContainerID{ID: podName}
// Setup will block on the runner pod completing. If network
// operations locking isn't correct (eg pod network operations
// block other pods) setUpPod() will never return.
if err := pm.SetUpPod("", podName, containerID, nil); err != nil {
t.Errorf("Failed to set up waiter pod: %v", err)
return
}
if err := pm.TearDownPod("", podName, containerID); err != nil {
t.Errorf("Failed to tear down waiter pod: %v", err)
return
}
}()
opsWg.Add(1)
go func() {
defer opsWg.Done()
// Let other pod proceed
defer podWg.Done()
podName := "runner"
containerID := kubecontainer.ContainerID{ID: podName}
if err := pm.SetUpPod("", podName, containerID, nil); err != nil {
t.Errorf("Failed to set up runner pod: %v", err)
return
}
if err := pm.TearDownPod("", podName, containerID); err != nil {
t.Errorf("Failed to tear down runner pod: %v", err)
return
}
}()
opsWg.Wait()
if !didWait {
t.Errorf("waiter pod didn't wait for runner pod!")
}
}


@ -1,32 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
)
go_library(
name = "go_default_library",
srcs = ["docker_server.go"],
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/remote",
deps = [
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
"//pkg/kubelet/dockershim:go_default_library",
"//pkg/kubelet/util:go_default_library",
"//vendor/github.com/golang/glog:go_default_library",
"//vendor/google.golang.org/grpc:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)


@ -1,77 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remote
import (
"fmt"
"github.com/golang/glog"
"google.golang.org/grpc"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
"k8s.io/kubernetes/pkg/kubelet/dockershim"
"k8s.io/kubernetes/pkg/kubelet/util"
)
// maxMsgSize uses 8MB as the default message size limit.
// The grpc library default is 4MB.
const maxMsgSize = 1024 * 1024 * 8
// DockerServer is the grpc server of dockershim.
type DockerServer struct {
// endpoint is the endpoint to serve on.
endpoint string
// service is the docker service which implements runtime and image services.
service dockershim.CRIService
// server is the grpc server.
server *grpc.Server
}
// NewDockerServer creates the dockershim grpc server.
func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer {
return &DockerServer{
endpoint: endpoint,
service: s,
}
}
// Start starts the dockershim grpc server.
func (s *DockerServer) Start() error {
// Start the internal service.
if err := s.service.Start(); err != nil {
glog.Errorf("Unable to start docker service")
return err
}
glog.V(2).Infof("Start dockershim grpc server")
l, err := util.CreateListener(s.endpoint)
if err != nil {
return fmt.Errorf("failed to listen on %q: %v", s.endpoint, err)
}
// Create the grpc server and register runtime and image services.
s.server = grpc.NewServer(
grpc.MaxRecvMsgSize(maxMsgSize),
grpc.MaxSendMsgSize(maxMsgSize),
)
runtimeapi.RegisterRuntimeServiceServer(s.server, s.service)
runtimeapi.RegisterImageServiceServer(s.server, s.service)
go func() {
if err := s.server.Serve(l); err != nil {
glog.Fatalf("Failed to serve connections: %v", err)
}
}()
return nil
}
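// Illustrative use (the endpoint value is an example, not mandated here):
//
//   s := NewDockerServer("unix:///var/run/dockershim.sock", criService)
//   if err := s.Start(); err != nil {
//       glog.Fatalf("Failed to start dockershim grpc server: %v", err)
//   }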


@ -1,219 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"strconv"
"strings"
"github.com/blang/semver"
dockercontainer "github.com/docker/docker/api/types/container"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
knetwork "k8s.io/kubernetes/pkg/kubelet/dockershim/network"
)
// applySandboxSecurityContext updates docker sandbox options according to security context.
func applySandboxSecurityContext(lc *runtimeapi.LinuxPodSandboxConfig, config *dockercontainer.Config, hc *dockercontainer.HostConfig, network *knetwork.PluginManager, separator rune) error {
if lc == nil {
return nil
}
var sc *runtimeapi.LinuxContainerSecurityContext
if lc.SecurityContext != nil {
sc = &runtimeapi.LinuxContainerSecurityContext{
SupplementalGroups: lc.SecurityContext.SupplementalGroups,
RunAsUser: lc.SecurityContext.RunAsUser,
RunAsGroup: lc.SecurityContext.RunAsGroup,
ReadonlyRootfs: lc.SecurityContext.ReadonlyRootfs,
SelinuxOptions: lc.SecurityContext.SelinuxOptions,
NamespaceOptions: lc.SecurityContext.NamespaceOptions,
}
}
err := modifyContainerConfig(sc, config)
if err != nil {
return err
}
if err := modifyHostConfig(sc, hc, separator); err != nil {
return err
}
modifySandboxNamespaceOptions(sc.GetNamespaceOptions(), hc, network)
return nil
}
// applyContainerSecurityContext updates docker container options according to security context.
func applyContainerSecurityContext(lc *runtimeapi.LinuxContainerConfig, podSandboxID string, config *dockercontainer.Config, hc *dockercontainer.HostConfig, separator rune) error {
if lc == nil {
return nil
}
err := modifyContainerConfig(lc.SecurityContext, config)
if err != nil {
return err
}
if err := modifyHostConfig(lc.SecurityContext, hc, separator); err != nil {
return err
}
modifyContainerNamespaceOptions(lc.SecurityContext.GetNamespaceOptions(), podSandboxID, hc)
return nil
}
// modifyContainerConfig applies container security context config to dockercontainer.Config.
func modifyContainerConfig(sc *runtimeapi.LinuxContainerSecurityContext, config *dockercontainer.Config) error {
if sc == nil {
return nil
}
if sc.RunAsUser != nil {
config.User = strconv.FormatInt(sc.GetRunAsUser().Value, 10)
}
if sc.RunAsUsername != "" {
config.User = sc.RunAsUsername
}
user := config.User
if sc.RunAsGroup != nil {
if user == "" {
return fmt.Errorf("runAsGroup is specified without a runAsUser.")
}
user = fmt.Sprintf("%s:%d", config.User, sc.GetRunAsGroup().Value)
}
config.User = user
return nil
}
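// Worked example (hypothetical values): with RunAsUser=1000 and
// RunAsGroup=2000, config.User becomes "1000:2000"; with only
// RunAsUsername="app" it stays "app"; RunAsGroup without any user yields
// an error.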
// modifyHostConfig applies security context config to dockercontainer.HostConfig.
func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig *dockercontainer.HostConfig, separator rune) error {
if sc == nil {
return nil
}
// Apply supplemental groups.
for _, group := range sc.SupplementalGroups {
hostConfig.GroupAdd = append(hostConfig.GroupAdd, strconv.FormatInt(group, 10))
}
// Apply security context for the container.
hostConfig.Privileged = sc.Privileged
hostConfig.ReadonlyRootfs = sc.ReadonlyRootfs
if sc.Capabilities != nil {
hostConfig.CapAdd = sc.GetCapabilities().AddCapabilities
hostConfig.CapDrop = sc.GetCapabilities().DropCapabilities
}
if sc.SelinuxOptions != nil {
hostConfig.SecurityOpt = addSELinuxOptions(
hostConfig.SecurityOpt,
sc.SelinuxOptions,
separator,
)
}
// Apply apparmor options.
apparmorSecurityOpts, err := getApparmorSecurityOpts(sc, separator)
if err != nil {
return fmt.Errorf("failed to generate apparmor security options: %v", err)
}
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, apparmorSecurityOpts...)
if sc.NoNewPrivs {
hostConfig.SecurityOpt = append(hostConfig.SecurityOpt, "no-new-privileges")
}
return nil
}
// modifySandboxNamespaceOptions applies namespace options to the sandbox
func modifySandboxNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig, network *knetwork.PluginManager) {
// The sandbox's PID namespace is the one that's shared, so CONTAINER and POD are equivalent for it
modifyCommonNamespaceOptions(nsOpts, hostConfig)
modifyHostOptionsForSandbox(nsOpts, network, hostConfig)
}
// modifyContainerNamespaceOptions applies namespace options to the container's HostConfig.
func modifyContainerNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, podSandboxID string, hostConfig *dockercontainer.HostConfig) {
if nsOpts.GetPid() == runtimeapi.NamespaceMode_POD {
hostConfig.PidMode = dockercontainer.PidMode(fmt.Sprintf("container:%v", podSandboxID))
}
modifyCommonNamespaceOptions(nsOpts, hostConfig)
modifyHostOptionsForContainer(nsOpts, podSandboxID, hostConfig)
}
// modifyCommonNamespaceOptions applies namespace options common to the sandbox and container.
func modifyCommonNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig) {
if nsOpts.GetPid() == runtimeapi.NamespaceMode_NODE {
hostConfig.PidMode = namespaceModeHost
}
}
// modifyHostOptionsForSandbox applies IpcMode and NetworkMode to the sandbox's dockercontainer.HostConfig.
func modifyHostOptionsForSandbox(nsOpts *runtimeapi.NamespaceOption, network *knetwork.PluginManager, hc *dockercontainer.HostConfig) {
if nsOpts.GetIpc() == runtimeapi.NamespaceMode_NODE {
hc.IpcMode = namespaceModeHost
}
if nsOpts.GetNetwork() == runtimeapi.NamespaceMode_NODE {
hc.NetworkMode = namespaceModeHost
return
}
if network == nil {
hc.NetworkMode = "default"
return
}
switch network.PluginName() {
case "cni":
fallthrough
case "kubenet":
hc.NetworkMode = "none"
default:
hc.NetworkMode = "default"
}
}
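// Illustrative summary of the selection above, plus a minimal call (the nil
// plugin manager is an assumed input): host-network pods get "host", CNI and
// kubenet sandboxes get "none" (the plugin attaches the network itself), and
// everything else, including the no-plugin case, gets Docker's "default".
//
//	hc := &dockercontainer.HostConfig{}
//	modifyHostOptionsForSandbox(&runtimeapi.NamespaceOption{}, nil, hc)
//	// hc.NetworkMode == "default"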
// modifyHostOptionsForContainer applies NetworkMode, IpcMode, and UTSMode to the container's dockercontainer.HostConfig.
func modifyHostOptionsForContainer(nsOpts *runtimeapi.NamespaceOption, podSandboxID string, hc *dockercontainer.HostConfig) {
sandboxNSMode := fmt.Sprintf("container:%v", podSandboxID)
hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode)
hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode)
hc.UTSMode = ""
if nsOpts.GetNetwork() == runtimeapi.NamespaceMode_NODE {
hc.UTSMode = namespaceModeHost
}
}
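// Illustrative sketch (the sandbox ID is an arbitrary example): a regular
// container joins its sandbox's network and IPC namespaces by container name.
//
//	hc := &dockercontainer.HostConfig{}
//	modifyHostOptionsForContainer(&runtimeapi.NamespaceOption{}, "abc123", hc)
//	// hc.NetworkMode == "container:abc123" and hc.IpcMode == "container:abc123"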
// modifyContainerPIDNamespaceOverrides implements two temporary overrides for the default PID namespace sharing for Docker:
// 1. Docker engine prior to API Version 1.24 doesn't support attaching to another container's
// PID namespace, and it didn't stabilize until 1.26. This check can be removed when Kubernetes'
// minimum Docker version is at least 1.13.1 (API version 1.26).
// 2. The administrator can override the API behavior by using the deprecated --docker-disable-shared-pid=false
// flag. Until this flag is removed, this causes pods to use NamespaceMode_POD instead of
// NamespaceMode_CONTAINER regardless of pod configuration.
// TODO(verb): remove entirely once these two conditions are satisfied
func modifyContainerPIDNamespaceOverrides(disableSharedPID bool, version *semver.Version, hc *dockercontainer.HostConfig, podSandboxID string) {
if version.LT(semver.Version{Major: 1, Minor: 26}) {
if strings.HasPrefix(string(hc.PidMode), "container:") {
hc.PidMode = ""
}
} else if !disableSharedPID && hc.PidMode == "" {
hc.PidMode = dockercontainer.PidMode(fmt.Sprintf("container:%v", podSandboxID))
}
}
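// Illustrative sketch (assumed sandbox ID): on an engine older than API
// version 1.26, a requested shared PID namespace is stripped, mirroring the
// first override described above.
//
//	old := semver.Version{Major: 1, Minor: 25}
//	hc := &dockercontainer.HostConfig{PidMode: "container:sandbox"}
//	modifyContainerPIDNamespaceOverrides(true, &old, hc, "sandbox")
//	// hc.PidMode == ""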

View File

@ -1,502 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
"strconv"
"testing"
"github.com/blang/semver"
dockercontainer "github.com/docker/docker/api/types/container"
"github.com/stretchr/testify/assert"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
func TestModifyContainerConfig(t *testing.T) {
var uid int64 = 123
var username = "testuser"
var gid int64 = 423
cases := []struct {
name string
sc *runtimeapi.LinuxContainerSecurityContext
expected *dockercontainer.Config
isErr bool
}{
{
name: "container.SecurityContext.RunAsUser set",
sc: &runtimeapi.LinuxContainerSecurityContext{
RunAsUser: &runtimeapi.Int64Value{Value: uid},
},
expected: &dockercontainer.Config{
User: strconv.FormatInt(uid, 10),
},
isErr: false,
},
{
name: "container.SecurityContext.RunAsUsername set",
sc: &runtimeapi.LinuxContainerSecurityContext{
RunAsUsername: username,
},
expected: &dockercontainer.Config{
User: username,
},
isErr: false,
},
{
name: "no RunAsUser value set",
sc: &runtimeapi.LinuxContainerSecurityContext{},
expected: &dockercontainer.Config{},
isErr: false,
},
{
name: "RunAsUser value set, RunAsGroup set",
sc: &runtimeapi.LinuxContainerSecurityContext{
RunAsUser: &runtimeapi.Int64Value{Value: uid},
RunAsGroup: &runtimeapi.Int64Value{Value: gid},
},
expected: &dockercontainer.Config{
User: "123:423",
},
isErr: false,
},
{
name: "RunAsUsername value set, RunAsGroup set",
sc: &runtimeapi.LinuxContainerSecurityContext{
RunAsUsername: username,
RunAsGroup: &runtimeapi.Int64Value{Value: gid},
},
expected: &dockercontainer.Config{
User: "testuser:423",
},
isErr: false,
},
{
name: "RunAsUser/RunAsUsername not set, RunAsGroup set",
sc: &runtimeapi.LinuxContainerSecurityContext{
RunAsGroup: &runtimeapi.Int64Value{Value: gid},
},
isErr: true,
},
}
for _, tc := range cases {
dockerCfg := &dockercontainer.Config{}
err := modifyContainerConfig(tc.sc, dockerCfg)
if tc.isErr {
assert.NotNil(t, err)
} else {
assert.Nil(t, err)
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
}
}
}
func TestModifyHostConfig(t *testing.T) {
setNetworkHC := &dockercontainer.HostConfig{}
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{}
setPrivSC.Privileged = true
setPrivHC := &dockercontainer.HostConfig{
Privileged: true,
}
setCapsHC := &dockercontainer.HostConfig{
CapAdd: []string{"addCapA", "addCapB"},
CapDrop: []string{"dropCapA", "dropCapB"},
}
setSELinuxHC := &dockercontainer.HostConfig{
SecurityOpt: []string{
fmt.Sprintf("%s:%s", selinuxLabelUser('='), "user"),
fmt.Sprintf("%s:%s", selinuxLabelRole('='), "role"),
fmt.Sprintf("%s:%s", selinuxLabelType('='), "type"),
fmt.Sprintf("%s:%s", selinuxLabelLevel('='), "level"),
},
}
cases := []struct {
name string
sc *runtimeapi.LinuxContainerSecurityContext
expected *dockercontainer.HostConfig
}{
{
name: "fully set container.SecurityContext",
sc: fullValidSecurityContext(),
expected: fullValidHostConfig(),
},
{
name: "empty container.SecurityContext",
sc: &runtimeapi.LinuxContainerSecurityContext{},
expected: setNetworkHC,
},
{
name: "container.SecurityContext.Privileged",
sc: setPrivSC,
expected: setPrivHC,
},
{
name: "container.SecurityContext.Capabilities",
sc: &runtimeapi.LinuxContainerSecurityContext{
Capabilities: inputCapabilities(),
},
expected: setCapsHC,
},
{
name: "container.SecurityContext.SELinuxOptions",
sc: &runtimeapi.LinuxContainerSecurityContext{
SelinuxOptions: inputSELinuxOptions(),
},
expected: setSELinuxHC,
},
}
for _, tc := range cases {
dockerCfg := &dockercontainer.HostConfig{}
modifyHostConfig(tc.sc, dockerCfg, '=')
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
}
}
func TestModifyHostConfigWithGroups(t *testing.T) {
supplementalGroupsSC := &runtimeapi.LinuxContainerSecurityContext{}
supplementalGroupsSC.SupplementalGroups = []int64{2222}
supplementalGroupHC := &dockercontainer.HostConfig{}
supplementalGroupHC.GroupAdd = []string{"2222"}
testCases := []struct {
name string
securityContext *runtimeapi.LinuxContainerSecurityContext
expected *dockercontainer.HostConfig
}{
{
name: "nil",
securityContext: nil,
expected: &dockercontainer.HostConfig{},
},
{
name: "SupplementalGroup",
securityContext: supplementalGroupsSC,
expected: supplementalGroupHC,
},
}
for _, tc := range testCases {
dockerCfg := &dockercontainer.HostConfig{}
modifyHostConfig(tc.securityContext, dockerCfg, '=')
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
}
}
func TestModifyHostConfigAndNamespaceOptionsForContainer(t *testing.T) {
priv := true
sandboxID := "sandbox"
sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
setPrivSC := &runtimeapi.LinuxContainerSecurityContext{}
setPrivSC.Privileged = priv
setPrivHC := &dockercontainer.HostConfig{
Privileged: true,
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
PidMode: dockercontainer.PidMode(sandboxNSMode),
}
setCapsHC := &dockercontainer.HostConfig{
CapAdd: []string{"addCapA", "addCapB"},
CapDrop: []string{"dropCapA", "dropCapB"},
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
PidMode: dockercontainer.PidMode(sandboxNSMode),
}
setSELinuxHC := &dockercontainer.HostConfig{
SecurityOpt: []string{
fmt.Sprintf("%s:%s", selinuxLabelUser('='), "user"),
fmt.Sprintf("%s:%s", selinuxLabelRole('='), "role"),
fmt.Sprintf("%s:%s", selinuxLabelType('='), "type"),
fmt.Sprintf("%s:%s", selinuxLabelLevel('='), "level"),
},
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
PidMode: dockercontainer.PidMode(sandboxNSMode),
}
cases := []struct {
name string
sc *runtimeapi.LinuxContainerSecurityContext
expected *dockercontainer.HostConfig
}{
{
name: "container.SecurityContext.Privileged",
sc: setPrivSC,
expected: setPrivHC,
},
{
name: "container.SecurityContext.Capabilities",
sc: &runtimeapi.LinuxContainerSecurityContext{
Capabilities: inputCapabilities(),
},
expected: setCapsHC,
},
{
name: "container.SecurityContext.SELinuxOptions",
sc: &runtimeapi.LinuxContainerSecurityContext{
SelinuxOptions: inputSELinuxOptions(),
},
expected: setSELinuxHC,
},
}
for _, tc := range cases {
dockerCfg := &dockercontainer.HostConfig{}
modifyHostConfig(tc.sc, dockerCfg, '=')
modifyContainerNamespaceOptions(tc.sc.GetNamespaceOptions(), sandboxID, dockerCfg)
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
}
}
func TestModifySandboxNamespaceOptions(t *testing.T) {
cases := []struct {
name string
nsOpt *runtimeapi.NamespaceOption
expected *dockercontainer.HostConfig
}{
{
name: "Host Network NamespaceOption",
nsOpt: &runtimeapi.NamespaceOption{
Network: runtimeapi.NamespaceMode_NODE,
},
expected: &dockercontainer.HostConfig{
NetworkMode: namespaceModeHost,
},
},
{
name: "Host IPC NamespaceOption",
nsOpt: &runtimeapi.NamespaceOption{
Ipc: runtimeapi.NamespaceMode_NODE,
},
expected: &dockercontainer.HostConfig{
IpcMode: namespaceModeHost,
NetworkMode: "default",
},
},
{
name: "Host PID NamespaceOption",
nsOpt: &runtimeapi.NamespaceOption{
Pid: runtimeapi.NamespaceMode_NODE,
},
expected: &dockercontainer.HostConfig{
PidMode: namespaceModeHost,
NetworkMode: "default",
},
},
}
for _, tc := range cases {
dockerCfg := &dockercontainer.HostConfig{}
modifySandboxNamespaceOptions(tc.nsOpt, dockerCfg, nil)
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
}
}
func TestModifyContainerNamespaceOptions(t *testing.T) {
sandboxID := "sandbox"
sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
cases := []struct {
name string
nsOpt *runtimeapi.NamespaceOption
expected *dockercontainer.HostConfig
}{
{
name: "Host Network NamespaceOption",
nsOpt: &runtimeapi.NamespaceOption{
Network: runtimeapi.NamespaceMode_NODE,
},
expected: &dockercontainer.HostConfig{
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
UTSMode: namespaceModeHost,
PidMode: dockercontainer.PidMode(sandboxNSMode),
},
},
{
name: "Host IPC NamespaceOption",
nsOpt: &runtimeapi.NamespaceOption{
Ipc: runtimeapi.NamespaceMode_NODE,
},
expected: &dockercontainer.HostConfig{
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
PidMode: dockercontainer.PidMode(sandboxNSMode),
},
},
{
name: "Host PID NamespaceOption",
nsOpt: &runtimeapi.NamespaceOption{
Pid: runtimeapi.NamespaceMode_NODE,
},
expected: &dockercontainer.HostConfig{
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
IpcMode: dockercontainer.IpcMode(sandboxNSMode),
PidMode: namespaceModeHost,
},
},
}
for _, tc := range cases {
dockerCfg := &dockercontainer.HostConfig{}
modifyContainerNamespaceOptions(tc.nsOpt, sandboxID, dockerCfg)
assert.Equal(t, tc.expected, dockerCfg, "[Test case %q]", tc.name)
}
}
func TestModifyContainerNamespacePIDOverride(t *testing.T) {
cases := []struct {
name string
disable bool
version *semver.Version
input, expected dockercontainer.PidMode
}{
{
name: "mode:CONTAINER docker:NEW flag:UNSET",
disable: true,
version: &semver.Version{Major: 1, Minor: 26},
input: "",
expected: "",
},
{
name: "mode:CONTAINER docker:NEW flag:SET",
disable: false,
version: &semver.Version{Major: 1, Minor: 26},
input: "",
expected: "container:sandbox",
},
{
name: "mode:CONTAINER docker:OLD flag:UNSET",
disable: true,
version: &semver.Version{Major: 1, Minor: 25},
input: "",
expected: "",
},
{
name: "mode:CONTAINER docker:OLD flag:SET",
disable: false,
version: &semver.Version{Major: 1, Minor: 25},
input: "",
expected: "",
},
{
name: "mode:HOST docker:NEW flag:UNSET",
disable: true,
version: &semver.Version{Major: 1, Minor: 26},
input: "host",
expected: "host",
},
{
name: "mode:HOST docker:NEW flag:SET",
disable: false,
version: &semver.Version{Major: 1, Minor: 26},
input: "host",
expected: "host",
},
{
name: "mode:HOST docker:OLD flag:UNSET",
disable: true,
version: &semver.Version{Major: 1, Minor: 25},
input: "host",
expected: "host",
},
{
name: "mode:HOST docker:OLD flag:SET",
disable: false,
version: &semver.Version{Major: 1, Minor: 25},
input: "host",
expected: "host",
},
{
name: "mode:POD docker:NEW flag:UNSET",
disable: true,
version: &semver.Version{Major: 1, Minor: 26},
input: "container:sandbox",
expected: "container:sandbox",
},
{
name: "mode:POD docker:NEW flag:SET",
disable: false,
version: &semver.Version{Major: 1, Minor: 26},
input: "container:sandbox",
expected: "container:sandbox",
},
{
name: "mode:POD docker:OLD flag:UNSET",
disable: true,
version: &semver.Version{Major: 1, Minor: 25},
input: "container:sandbox",
expected: "",
},
{
name: "mode:POD docker:OLD flag:SET",
disable: false,
version: &semver.Version{Major: 1, Minor: 25},
input: "container:sandbox",
expected: "",
},
}
for _, tc := range cases {
dockerCfg := &dockercontainer.HostConfig{PidMode: tc.input}
modifyContainerPIDNamespaceOverrides(tc.disable, tc.version, dockerCfg, "sandbox")
assert.Equal(t, tc.expected, dockerCfg.PidMode, "[Test case %q]", tc.name)
}
}
func fullValidSecurityContext() *runtimeapi.LinuxContainerSecurityContext {
return &runtimeapi.LinuxContainerSecurityContext{
Privileged: true,
Capabilities: inputCapabilities(),
SelinuxOptions: inputSELinuxOptions(),
}
}
func inputCapabilities() *runtimeapi.Capability {
return &runtimeapi.Capability{
AddCapabilities: []string{"addCapA", "addCapB"},
DropCapabilities: []string{"dropCapA", "dropCapB"},
}
}
func inputSELinuxOptions() *runtimeapi.SELinuxOption {
user := "user"
role := "role"
stype := "type"
level := "level"
return &runtimeapi.SELinuxOption{
User: user,
Role: role,
Type: stype,
Level: level,
}
}
func fullValidHostConfig() *dockercontainer.HostConfig {
return &dockercontainer.HostConfig{
Privileged: true,
CapAdd: []string{"addCapA", "addCapB"},
CapDrop: []string{"dropCapA", "dropCapB"},
SecurityOpt: []string{
fmt.Sprintf("%s:%s", selinuxLabelUser('='), "user"),
fmt.Sprintf("%s:%s", selinuxLabelRole('='), "role"),
fmt.Sprintf("%s:%s", selinuxLabelType('='), "type"),
fmt.Sprintf("%s:%s", selinuxLabelLevel('='), "level"),
},
}
}

View File

@ -1,86 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"fmt"
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
)
// selinuxLabelUser returns the fragment of a Docker security opt that
// describes the SELinux user. Note that strictly speaking this is not
// actually the name of the security opt, but a fragment of the whole key-
// value pair necessary to set the opt.
func selinuxLabelUser(separator rune) string {
return fmt.Sprintf("label%cuser", separator)
}
// selinuxLabelRole returns the fragment of a Docker security opt that
// describes the SELinux role. Note that strictly speaking this is not
// actually the name of the security opt, but a fragment of the whole key-
// value pair necessary to set the opt.
func selinuxLabelRole(separator rune) string {
return fmt.Sprintf("label%crole", separator)
}
// selinuxLabelType returns the fragment of a Docker security opt that
// describes the SELinux type. Note that strictly speaking this is not
// actually the name of the security opt, but a fragment of the whole key-
// value pair necessary to set the opt.
func selinuxLabelType(separator rune) string {
return fmt.Sprintf("label%ctype", separator)
}
// selinuxLabelLevel returns the fragment of a Docker security opt that
// describes the SELinux level. Note that strictly speaking this is not
// actually the name of the security opt, but a fragment of the whole key-
// value pair necessary to set the opt.
func selinuxLabelLevel(separator rune) string {
return fmt.Sprintf("label%clevel", separator)
}
// addSELinuxOptions adds SELinux options to config using the given
// separator.
func addSELinuxOptions(config []string, selinuxOpts *runtimeapi.SELinuxOption, separator rune) []string {
// Note, strictly speaking, we are actually mutating the values of these
// keys, rather than formatting name and value into a string. Docker re-
// uses the same option name multiple times (it's just 'label') with
// different values which are themselves key-value pairs. For example,
// the SELinux type is represented by the security opt:
//
// label<separator>type:<selinux_type>
//
// In Docker API versions before 1.23, the separator was the `:` rune; in
// API version 1.23 it changed to the `=` rune.
config = modifySecurityOption(config, selinuxLabelUser(separator), selinuxOpts.User)
config = modifySecurityOption(config, selinuxLabelRole(separator), selinuxOpts.Role)
config = modifySecurityOption(config, selinuxLabelType(separator), selinuxOpts.Type)
config = modifySecurityOption(config, selinuxLabelLevel(separator), selinuxOpts.Level)
return config
}
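// Illustrative sketch (example labels): the same SELinuxOption rendered with
// the pre-1.23 ':' separator and the newer '=' separator; empty fields such
// as Role and Type are skipped.
//
//	opts := &runtimeapi.SELinuxOption{User: "system_u", Level: "s0"}
//	addSELinuxOptions(nil, opts, ':') // ["label:user:system_u", "label:level:s0"]
//	addSELinuxOptions(nil, opts, '=') // ["label=user:system_u", "label=level:s0"]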
// modifySecurityOption appends name and value to the config array as a
// "name:value" security option, skipping empty values.
func modifySecurityOption(config []string, name, value string) []string {
if len(value) > 0 {
config = append(config, fmt.Sprintf("%s:%s", name, value))
}
return config
}

View File

@ -1,54 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package dockershim
import (
"reflect"
"testing"
)
func TestModifySecurityOptions(t *testing.T) {
testCases := []struct {
name string
config []string
optName string
optVal string
expected []string
}{
{
name: "Empty val",
config: []string{"a:b", "c:d"},
optName: "optA",
optVal: "",
expected: []string{"a:b", "c:d"},
},
{
name: "Valid",
config: []string{"a:b", "c:d"},
optName: "e",
optVal: "f",
expected: []string{"a:b", "c:d", "e:f"},
},
}
for _, tc := range testCases {
actual := modifySecurityOption(tc.config, tc.optName, tc.optVal)
if !reflect.DeepEqual(tc.expected, actual) {
t.Errorf("Failed to apply options correctly for tc: %s. Expected: %v but got %v", tc.name, tc.expected, actual)
}
}
}