mirror of
https://github.com/ceph/ceph-csi.git
synced 2025-06-13 18:43:34 +00:00
vendor updates
This commit is contained in:
88
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD
generated
vendored
88
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/BUILD
generated
vendored
@ -1,10 +1,4 @@
|
||||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
load(
|
||||
"@io_bazel_rules_go//go:def.bzl",
|
||||
"go_library",
|
||||
"go_test",
|
||||
)
|
||||
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
@ -14,24 +8,68 @@ go_library(
|
||||
"docker_checkpoint.go",
|
||||
"docker_container.go",
|
||||
"docker_image.go",
|
||||
"docker_image_unsupported.go",
|
||||
"docker_legacy_service.go",
|
||||
"docker_logs.go",
|
||||
"docker_sandbox.go",
|
||||
"docker_service.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"docker_streaming.go",
|
||||
"exec.go",
|
||||
"helpers.go",
|
||||
"helpers_unsupported.go",
|
||||
"naming.go",
|
||||
"security_context.go",
|
||||
"selinux_util.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"docker_image_linux.go",
|
||||
"docker_stats_linux.go",
|
||||
"helpers_linux.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"docker_image_unsupported.go",
|
||||
"docker_stats_unsupported.go",
|
||||
"helpers_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"docker_image_windows.go",
|
||||
"docker_stats_windows.go",
|
||||
"helpers_windows.go",
|
||||
@ -39,16 +77,17 @@ go_library(
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim",
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//pkg/credentialprovider:go_default_library",
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/apis/kubeletconfig:go_default_library",
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/dockershim/cm:go_default_library",
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
"//pkg/kubelet/dockershim/metrics:go_default_library",
|
||||
"//pkg/kubelet/kuberuntime:go_default_library",
|
||||
"//pkg/kubelet/leaky:go_default_library",
|
||||
"//pkg/kubelet/network:go_default_library",
|
||||
"//pkg/kubelet/network/cni:go_default_library",
|
||||
@ -73,12 +112,20 @@ go_library(
|
||||
"//vendor/github.com/docker/docker/pkg/jsonmessage:go_default_library",
|
||||
"//vendor/github.com/docker/go-connections/nat:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/k8s.io/api/core/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/types:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/errors:go_default_library",
|
||||
"//vendor/k8s.io/client-go/tools/remotecommand:go_default_library",
|
||||
],
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//pkg/kubelet/apis:go_default_library",
|
||||
"//pkg/kubelet/winstats:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
||||
go_test(
|
||||
@ -95,17 +142,14 @@ go_test(
|
||||
"security_context_test.go",
|
||||
"selinux_util_test.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"helpers_linux_test.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
data = [
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/container:go_default_library",
|
||||
"//pkg/kubelet/container/testing:go_default_library",
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
@ -123,6 +167,7 @@ go_test(
|
||||
"//vendor/github.com/golang/mock/gomock:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/require:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/clock:go_default_library",
|
||||
],
|
||||
)
|
||||
@ -145,4 +190,5 @@ filegroup(
|
||||
"//pkg/kubelet/dockershim/testing:all-srcs",
|
||||
],
|
||||
tags = ["automanaged"],
|
||||
visibility = ["//visibility:public"],
|
||||
)
|
||||
|
69
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD
generated
vendored
69
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/BUILD
generated
vendored
@ -9,22 +9,59 @@ go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"container_manager.go",
|
||||
"container_manager_unsupported.go",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"container_manager_linux.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows_amd64": [
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"container_manager_unsupported.go",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"container_manager_windows.go",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/cm",
|
||||
deps = [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
] + select({
|
||||
"@io_bazel_rules_go//go/platform:linux_amd64": [
|
||||
deps = select({
|
||||
"@io_bazel_rules_go//go/platform:android": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:darwin": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:dragonfly": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:freebsd": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:linux": [
|
||||
"//pkg/kubelet/cm:go_default_library",
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
"//pkg/kubelet/qos:go_default_library",
|
||||
"//pkg/util/version:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
@ -32,6 +69,24 @@ go_library(
|
||||
"//vendor/github.com/opencontainers/runc/libcontainer/configs:go_default_library",
|
||||
"//vendor/k8s.io/apimachinery/pkg/util/wait:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:nacl": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:netbsd": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:openbsd": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:plan9": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:solaris": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"@io_bazel_rules_go//go/platform:windows": [
|
||||
"//pkg/kubelet/dockershim/libdocker:go_default_library",
|
||||
],
|
||||
"//conditions:default": [],
|
||||
}),
|
||||
)
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/cm/container_manager_linux.go
generated
vendored
@ -95,7 +95,7 @@ func (m *containerManager) doWork() {
|
||||
// 1. Ensure processes run in the cgroups if m.cgroupsManager is not nil.
|
||||
// 2. Ensure processes have the OOM score applied.
|
||||
if err := kubecm.EnsureDockerInContainer(version, dockerOOMScoreAdj, m.cgroupsManager); err != nil {
|
||||
glog.Errorf("Unable to ensure the docker processes run in the desired containers")
|
||||
glog.Errorf("Unable to ensure the docker processes run in the desired containers: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert.go
generated
vendored
@ -23,7 +23,7 @@ import (
|
||||
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
)
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/convert_test.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
func TestConvertDockerStatusToRuntimeAPIState(t *testing.T) {
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/doc.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/doc.go
generated
vendored
@ -14,5 +14,5 @@ See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Docker integration using pkg/kubelet/apis/cri/v1alpha1/runtime/api.pb.go
|
||||
// Docker integration using pkg/kubelet/apis/cri/runtime/v1alpha2/api.pb.go
|
||||
package dockershim
|
||||
|
81
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go
generated
vendored
81
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container.go
generated
vendored
@ -27,13 +27,15 @@ import (
|
||||
dockerfilters "github.com/docker/docker/api/types/filters"
|
||||
dockerstrslice "github.com/docker/docker/api/types/strslice"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
)
|
||||
|
||||
// ListContainers lists all containers matching the filter.
|
||||
func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*runtimeapi.Container, error) {
|
||||
func (ds *dockerService) ListContainers(_ context.Context, r *runtimeapi.ListContainersRequest) (*runtimeapi.ListContainersResponse, error) {
|
||||
filter := r.GetFilter()
|
||||
opts := dockertypes.ContainerListOptions{All: true}
|
||||
|
||||
opts.Filters = dockerfilters.NewArgs()
|
||||
@ -75,19 +77,24 @@ func (ds *dockerService) ListContainers(filter *runtimeapi.ContainerFilter) ([]*
|
||||
|
||||
result = append(result, converted)
|
||||
}
|
||||
return result, nil
|
||||
|
||||
return &runtimeapi.ListContainersResponse{Containers: result}, nil
|
||||
}
|
||||
|
||||
// CreateContainer creates a new container in the given PodSandbox
|
||||
// Docker cannot store the log to an arbitrary location (yet), so we create an
|
||||
// symlink at LogPath, linking to the actual path of the log.
|
||||
// TODO: check if the default values returned by the runtime API are ok.
|
||||
func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi.ContainerConfig, sandboxConfig *runtimeapi.PodSandboxConfig) (string, error) {
|
||||
func (ds *dockerService) CreateContainer(_ context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
|
||||
podSandboxID := r.PodSandboxId
|
||||
config := r.GetConfig()
|
||||
sandboxConfig := r.GetSandboxConfig()
|
||||
|
||||
if config == nil {
|
||||
return "", fmt.Errorf("container config is nil")
|
||||
return nil, fmt.Errorf("container config is nil")
|
||||
}
|
||||
if sandboxConfig == nil {
|
||||
return "", fmt.Errorf("sandbox config is nil for container %q", config.Metadata.Name)
|
||||
return nil, fmt.Errorf("sandbox config is nil for container %q", config.Metadata.Name)
|
||||
}
|
||||
|
||||
labels := makeLabels(config.GetLabels(), config.GetAnnotations())
|
||||
@ -100,9 +107,8 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
||||
|
||||
apiVersion, err := ds.getDockerAPIVersion()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("unable to get the docker API version: %v", err)
|
||||
return nil, fmt.Errorf("unable to get the docker API version: %v", err)
|
||||
}
|
||||
securityOptSep := getSecurityOptSeparator(apiVersion)
|
||||
|
||||
image := ""
|
||||
if iSpec := config.GetImage(); iSpec != nil {
|
||||
@ -134,7 +140,7 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
||||
}
|
||||
|
||||
hc := createConfig.HostConfig
|
||||
ds.updateCreateConfig(&createConfig, config, sandboxConfig, podSandboxID, securityOptSep, apiVersion)
|
||||
ds.updateCreateConfig(&createConfig, config, sandboxConfig, podSandboxID, securityOptSeparator, apiVersion)
|
||||
// Set devices for container.
|
||||
devices := make([]dockercontainer.DeviceMapping, len(config.Devices))
|
||||
for i, device := range config.Devices {
|
||||
@ -146,9 +152,9 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
||||
}
|
||||
hc.Resources.Devices = devices
|
||||
|
||||
securityOpts, err := ds.getSecurityOpts(config.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSep)
|
||||
securityOpts, err := ds.getSecurityOpts(config.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate security options for container %q: %v", config.Metadata.Name, err)
|
||||
return nil, fmt.Errorf("failed to generate security options for container %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
|
||||
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
|
||||
@ -159,9 +165,9 @@ func (ds *dockerService) CreateContainer(podSandboxID string, config *runtimeapi
|
||||
}
|
||||
|
||||
if createResp != nil {
|
||||
return createResp.ID, err
|
||||
return &runtimeapi.CreateContainerResponse{ContainerId: createResp.ID}, nil
|
||||
}
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// getContainerLogPath returns the container log path specified by kubelet and the real
|
||||
@ -230,45 +236,49 @@ func (ds *dockerService) removeContainerLogSymlink(containerID string) error {
|
||||
}
|
||||
|
||||
// StartContainer starts the container.
|
||||
func (ds *dockerService) StartContainer(containerID string) error {
|
||||
err := ds.client.StartContainer(containerID)
|
||||
func (ds *dockerService) StartContainer(_ context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
|
||||
err := ds.client.StartContainer(r.ContainerId)
|
||||
|
||||
// Create container log symlink for all containers (including failed ones).
|
||||
if linkError := ds.createContainerLogSymlink(containerID); linkError != nil {
|
||||
if linkError := ds.createContainerLogSymlink(r.ContainerId); linkError != nil {
|
||||
// Do not stop the container if we failed to create symlink because:
|
||||
// 1. This is not a critical failure.
|
||||
// 2. We don't have enough information to properly stop container here.
|
||||
// Kubelet will surface this error to user via an event.
|
||||
return linkError
|
||||
return nil, linkError
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
err = transformStartContainerError(err)
|
||||
return fmt.Errorf("failed to start container %q: %v", containerID, err)
|
||||
return nil, fmt.Errorf("failed to start container %q: %v", r.ContainerId, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
return &runtimeapi.StartContainerResponse{}, nil
|
||||
}
|
||||
|
||||
// StopContainer stops a running container with a grace period (i.e., timeout).
|
||||
func (ds *dockerService) StopContainer(containerID string, timeout int64) error {
|
||||
return ds.client.StopContainer(containerID, time.Duration(timeout)*time.Second)
|
||||
func (ds *dockerService) StopContainer(_ context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
|
||||
err := ds.client.StopContainer(r.ContainerId, time.Duration(r.Timeout)*time.Second)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StopContainerResponse{}, nil
|
||||
}
|
||||
|
||||
// RemoveContainer removes the container.
|
||||
func (ds *dockerService) RemoveContainer(containerID string) error {
|
||||
func (ds *dockerService) RemoveContainer(_ context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
|
||||
// Ideally, log lifecycle should be independent of container lifecycle.
|
||||
// However, docker will remove container log after container is removed,
|
||||
// we can't prevent that now, so we also clean up the symlink here.
|
||||
err := ds.removeContainerLogSymlink(containerID)
|
||||
err := ds.removeContainerLogSymlink(r.ContainerId)
|
||||
if err != nil {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
err = ds.client.RemoveContainer(containerID, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
|
||||
err = ds.client.RemoveContainer(r.ContainerId, dockertypes.ContainerRemoveOptions{RemoveVolumes: true, Force: true})
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove container %q: %v", containerID, err)
|
||||
return nil, fmt.Errorf("failed to remove container %q: %v", r.ContainerId, err)
|
||||
}
|
||||
return nil
|
||||
return &runtimeapi.RemoveContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func getContainerTimestamps(r *dockertypes.ContainerJSON) (time.Time, time.Time, time.Time, error) {
|
||||
@ -291,7 +301,8 @@ func getContainerTimestamps(r *dockertypes.ContainerJSON) (time.Time, time.Time,
|
||||
}
|
||||
|
||||
// ContainerStatus inspects the docker container and returns the status.
|
||||
func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.ContainerStatus, error) {
|
||||
func (ds *dockerService) ContainerStatus(_ context.Context, req *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
|
||||
containerID := req.ContainerId
|
||||
r, err := ds.client.InspectContainer(containerID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -374,7 +385,7 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.Contai
|
||||
if len(ir.RepoTags) > 0 {
|
||||
imageName = ir.RepoTags[0]
|
||||
}
|
||||
return &runtimeapi.ContainerStatus{
|
||||
status := &runtimeapi.ContainerStatus{
|
||||
Id: r.ID,
|
||||
Metadata: metadata,
|
||||
Image: &runtimeapi.ImageSpec{Image: imageName},
|
||||
@ -390,10 +401,12 @@ func (ds *dockerService) ContainerStatus(containerID string) (*runtimeapi.Contai
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
LogPath: r.Config.Labels[containerLogPathLabelKey],
|
||||
}, nil
|
||||
}
|
||||
return &runtimeapi.ContainerStatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
func (ds *dockerService) UpdateContainerResources(containerID string, resources *runtimeapi.LinuxContainerResources) error {
|
||||
func (ds *dockerService) UpdateContainerResources(_ context.Context, r *runtimeapi.UpdateContainerResourcesRequest) (*runtimeapi.UpdateContainerResourcesResponse, error) {
|
||||
resources := r.Linux
|
||||
updateConfig := dockercontainer.UpdateConfig{
|
||||
Resources: dockercontainer.Resources{
|
||||
CPUPeriod: resources.CpuPeriod,
|
||||
@ -405,9 +418,9 @@ func (ds *dockerService) UpdateContainerResources(containerID string, resources
|
||||
},
|
||||
}
|
||||
|
||||
err := ds.client.UpdateContainerResources(containerID, updateConfig)
|
||||
err := ds.client.UpdateContainerResources(r.ContainerId, updateConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update container %q: %v", containerID, err)
|
||||
return nil, fmt.Errorf("failed to update container %q: %v", r.ContainerId, err)
|
||||
}
|
||||
return nil
|
||||
return &runtimeapi.UpdateContainerResourcesResponse{}, nil
|
||||
}
|
||||
|
88
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container_test.go
generated
vendored
88
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_container_test.go
generated
vendored
@ -26,8 +26,9 @@ import (
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
)
|
||||
|
||||
@ -44,6 +45,10 @@ func makeContainerConfig(sConfig *runtimeapi.PodSandboxConfig, name, image strin
|
||||
}
|
||||
}
|
||||
|
||||
func getTestCTX() context.Context {
|
||||
return context.Background()
|
||||
}
|
||||
|
||||
// TestListContainers creates several containers and then list them to check
|
||||
// whether the correct metadatas, states, and labels are returned.
|
||||
func TestListContainers(t *testing.T) {
|
||||
@ -70,10 +75,12 @@ func TestListContainers(t *testing.T) {
|
||||
for i := range configs {
|
||||
// We don't care about the sandbox id; pass a bogus one.
|
||||
sandboxID := fmt.Sprintf("sandboxid%d", i)
|
||||
id, err := ds.CreateContainer(sandboxID, configs[i], sConfigs[i])
|
||||
assert.NoError(t, err)
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxID, Config: configs[i], SandboxConfig: sConfigs[i]}
|
||||
createResp, err := ds.CreateContainer(getTestCTX(), req)
|
||||
require.NoError(t, err)
|
||||
id := createResp.ContainerId
|
||||
_, err = ds.StartContainer(getTestCTX(), &runtimeapi.StartContainerRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
|
||||
imageRef := "" // FakeDockerClient doesn't populate ImageRef yet.
|
||||
// Prepend to the expected list because ListContainers returns
|
||||
@ -90,10 +97,10 @@ func TestListContainers(t *testing.T) {
|
||||
Annotations: configs[i].Annotations,
|
||||
}}, expected...)
|
||||
}
|
||||
containers, err := ds.ListContainers(nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, containers, len(expected))
|
||||
assert.Equal(t, expected, containers)
|
||||
listResp, err := ds.ListContainers(getTestCTX(), &runtimeapi.ListContainersRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, listResp.Containers, len(expected))
|
||||
assert.Equal(t, expected, listResp.Containers)
|
||||
}
|
||||
|
||||
// TestContainerStatus tests the basic lifecycle operations and verify that
|
||||
@ -137,31 +144,36 @@ func TestContainerStatus(t *testing.T) {
|
||||
fClock.SetTime(time.Now().Add(-1 * time.Hour))
|
||||
expected.CreatedAt = fClock.Now().UnixNano()
|
||||
const sandboxId = "sandboxid"
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxId, Config: config, SandboxConfig: sConfig}
|
||||
createResp, err := ds.CreateContainer(getTestCTX(), req)
|
||||
require.NoError(t, err)
|
||||
id := createResp.ContainerId
|
||||
|
||||
// Check internal labels
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
assert.NoError(t, err)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, c.Config.Labels[containerTypeLabelKey], containerTypeLabelContainer)
|
||||
assert.Equal(t, c.Config.Labels[sandboxIDLabelKey], sandboxId)
|
||||
|
||||
// Set the id manually since we don't know the id until it's created.
|
||||
expected.Id = id
|
||||
assert.NoError(t, err)
|
||||
status, err := ds.ContainerStatus(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, status)
|
||||
resp, err := ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, resp.Status)
|
||||
|
||||
// Advance the clock and start the container.
|
||||
fClock.SetTime(time.Now())
|
||||
expected.StartedAt = fClock.Now().UnixNano()
|
||||
expected.State = runtimeapi.ContainerState_CONTAINER_RUNNING
|
||||
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.ContainerStatus(id)
|
||||
assert.Equal(t, expected, status)
|
||||
_, err = ds.StartContainer(getTestCTX(), &runtimeapi.StartContainerRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
|
||||
resp, err = ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, resp.Status)
|
||||
|
||||
// Advance the clock and stop the container.
|
||||
fClock.SetTime(time.Now().Add(1 * time.Hour))
|
||||
@ -169,16 +181,17 @@ func TestContainerStatus(t *testing.T) {
|
||||
expected.State = runtimeapi.ContainerState_CONTAINER_EXITED
|
||||
expected.Reason = "Completed"
|
||||
|
||||
err = ds.StopContainer(id, 0)
|
||||
_, err = ds.StopContainer(getTestCTX(), &runtimeapi.StopContainerRequest{ContainerId: id, Timeout: int64(0)})
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.ContainerStatus(id)
|
||||
assert.Equal(t, expected, status)
|
||||
resp, err = ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, resp.Status)
|
||||
|
||||
// Remove the container.
|
||||
err = ds.RemoveContainer(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.ContainerStatus(id)
|
||||
assert.Error(t, err, fmt.Sprintf("status of container: %+v", status))
|
||||
_, err = ds.RemoveContainer(getTestCTX(), &runtimeapi.RemoveContainerRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
resp, err = ds.ContainerStatus(getTestCTX(), &runtimeapi.ContainerStatusRequest{ContainerId: id})
|
||||
assert.Error(t, err, fmt.Sprintf("status of container: %+v", resp))
|
||||
}
|
||||
|
||||
// TestContainerLogPath tests the container log creation logic.
|
||||
@ -193,7 +206,10 @@ func TestContainerLogPath(t *testing.T) {
|
||||
config.LogPath = containerLogPath
|
||||
|
||||
const sandboxId = "sandboxid"
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
|
||||
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxId, Config: config, SandboxConfig: sConfig}
|
||||
createResp, err := ds.CreateContainer(getTestCTX(), req)
|
||||
require.NoError(t, err)
|
||||
id := createResp.ContainerId
|
||||
|
||||
// Check internal container log label
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
@ -211,16 +227,16 @@ func TestContainerLogPath(t *testing.T) {
|
||||
assert.Equal(t, kubeletContainerLogPath, newname)
|
||||
return nil
|
||||
}
|
||||
err = ds.StartContainer(id)
|
||||
assert.NoError(t, err)
|
||||
_, err = ds.StartContainer(getTestCTX(), &runtimeapi.StartContainerRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
|
||||
err = ds.StopContainer(id, 0)
|
||||
assert.NoError(t, err)
|
||||
_, err = ds.StopContainer(getTestCTX(), &runtimeapi.StopContainerRequest{ContainerId: id, Timeout: int64(0)})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Verify container log symlink deletion
|
||||
// symlink is also tentatively deleted at startup
|
||||
err = ds.RemoveContainer(id)
|
||||
assert.NoError(t, err)
|
||||
_, err = ds.RemoveContainer(getTestCTX(), &runtimeapi.RemoveContainerRequest{ContainerId: id})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, []string{kubeletContainerLogPath, kubeletContainerLogPath}, fakeOS.Removes)
|
||||
}
|
||||
|
||||
@ -280,11 +296,13 @@ func TestContainerCreationConflict(t *testing.T) {
|
||||
if test.removeError != nil {
|
||||
fDocker.InjectError("remove", test.removeError)
|
||||
}
|
||||
id, err := ds.CreateContainer(sandboxId, config, sConfig)
|
||||
|
||||
req := &runtimeapi.CreateContainerRequest{PodSandboxId: sandboxId, Config: config, SandboxConfig: sConfig}
|
||||
createResp, err := ds.CreateContainer(getTestCTX(), req)
|
||||
require.Equal(t, test.expectError, err)
|
||||
assert.NoError(t, fDocker.AssertCalls(test.expectCalls))
|
||||
if err == nil {
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
c, err := fDocker.InspectContainer(createResp.ContainerId)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, strings.Split(c.Name, nameDelimiter), test.expectFields)
|
||||
}
|
||||
|
53
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go
generated
vendored
53
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image.go
generated
vendored
@ -23,15 +23,17 @@ import (
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
dockerfilters "github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
)
|
||||
|
||||
// This file implements methods in ImageManagerService.
|
||||
|
||||
// ListImages lists existing images.
|
||||
func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimeapi.Image, error) {
|
||||
func (ds *dockerService) ListImages(_ context.Context, r *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
|
||||
filter := r.GetFilter()
|
||||
opts := dockertypes.ImageListOptions{}
|
||||
if filter != nil {
|
||||
if filter.GetImage().GetImage() != "" {
|
||||
@ -54,24 +56,39 @@ func (ds *dockerService) ListImages(filter *runtimeapi.ImageFilter) ([]*runtimea
|
||||
}
|
||||
result = append(result, apiImage)
|
||||
}
|
||||
return result, nil
|
||||
return &runtimeapi.ListImagesResponse{Images: result}, nil
|
||||
}
|
||||
|
||||
// ImageStatus returns the status of the image, returns nil if the image doesn't present.
|
||||
func (ds *dockerService) ImageStatus(image *runtimeapi.ImageSpec) (*runtimeapi.Image, error) {
|
||||
func (ds *dockerService) ImageStatus(_ context.Context, r *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
|
||||
image := r.GetImage()
|
||||
|
||||
imageInspect, err := ds.client.InspectImageByRef(image.Image)
|
||||
if err != nil {
|
||||
if libdocker.IsImageNotFoundError(err) {
|
||||
return nil, nil
|
||||
return &runtimeapi.ImageStatusResponse{}, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
return imageInspectToRuntimeAPIImage(imageInspect)
|
||||
|
||||
imageStatus, err := imageInspectToRuntimeAPIImage(imageInspect)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
res := runtimeapi.ImageStatusResponse{Image: imageStatus}
|
||||
if r.GetVerbose() {
|
||||
res.Info = imageInspect.Config.Labels
|
||||
}
|
||||
return &res, nil
|
||||
}
|
||||
|
||||
// PullImage pulls an image with authentication config.
|
||||
func (ds *dockerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi.AuthConfig) (string, error) {
|
||||
func (ds *dockerService) PullImage(_ context.Context, r *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
|
||||
image := r.GetImage()
|
||||
auth := r.GetAuth()
|
||||
authConfig := dockertypes.AuthConfig{}
|
||||
|
||||
if auth != nil {
|
||||
authConfig.Username = auth.Username
|
||||
authConfig.Password = auth.Password
|
||||
@ -84,14 +101,20 @@ func (ds *dockerService) PullImage(image *runtimeapi.ImageSpec, auth *runtimeapi
|
||||
dockertypes.ImagePullOptions{},
|
||||
)
|
||||
if err != nil {
|
||||
return "", filterHTTPError(err, image.Image)
|
||||
return nil, filterHTTPError(err, image.Image)
|
||||
}
|
||||
|
||||
return getImageRef(ds.client, image.Image)
|
||||
imageRef, err := getImageRef(ds.client, image.Image)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &runtimeapi.PullImageResponse{ImageRef: imageRef}, nil
|
||||
}
|
||||
|
||||
// RemoveImage removes the image.
|
||||
func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
|
||||
func (ds *dockerService) RemoveImage(_ context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
|
||||
image := r.GetImage()
|
||||
// If the image has multiple tags, we need to remove all the tags
|
||||
// TODO: We assume image.Image is image ID here, which is true in the current implementation
|
||||
// of kubelet, but we should still clarify this in CRI.
|
||||
@ -99,22 +122,22 @@ func (ds *dockerService) RemoveImage(image *runtimeapi.ImageSpec) error {
|
||||
if err == nil && imageInspect != nil && len(imageInspect.RepoTags) > 1 {
|
||||
for _, tag := range imageInspect.RepoTags {
|
||||
if _, err := ds.client.RemoveImage(tag, dockertypes.ImageRemoveOptions{PruneChildren: true}); err != nil && !libdocker.IsImageNotFoundError(err) {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
return &runtimeapi.RemoveImageResponse{}, nil
|
||||
}
|
||||
// dockerclient.InspectImageByID doesn't work with digest and repoTags,
|
||||
// it is safe to continue removing it since there is another check below.
|
||||
if err != nil && !libdocker.IsImageNotFoundError(err) {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
_, err = ds.client.RemoveImage(image.Image, dockertypes.ImageRemoveOptions{PruneChildren: true})
|
||||
if err != nil && !libdocker.IsImageNotFoundError(err) {
|
||||
return err
|
||||
return nil, err
|
||||
}
|
||||
return nil
|
||||
return &runtimeapi.RemoveImageResponse{}, nil
|
||||
}
|
||||
|
||||
// getImageRef returns the image digest if exists, or else returns the image ID.
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_linux.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_linux.go
generated
vendored
@ -21,10 +21,12 @@ package dockershim
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// ImageFsInfo returns information of the filesystem that is used to store images.
|
||||
func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) {
|
||||
func (ds *dockerService) ImageFsInfo(_ context.Context, r *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
11
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_test.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_test.go
generated
vendored
@ -23,8 +23,9 @@ import (
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/pkg/jsonmessage"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
)
|
||||
|
||||
@ -32,7 +33,7 @@ func TestRemoveImage(t *testing.T) {
|
||||
ds, fakeDocker, _ := newTestDockerService()
|
||||
id := "1111"
|
||||
fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{{ID: id, RepoTags: []string{"foo"}}})
|
||||
ds.RemoveImage(&runtimeapi.ImageSpec{Image: id})
|
||||
ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: id}})
|
||||
fakeDocker.AssertCallDetails(libdocker.NewCalledDetail("inspect_image", nil),
|
||||
libdocker.NewCalledDetail("remove_image", []interface{}{id, dockertypes.ImageRemoveOptions{PruneChildren: true}}))
|
||||
}
|
||||
@ -41,7 +42,7 @@ func TestRemoveImageWithMultipleTags(t *testing.T) {
|
||||
ds, fakeDocker, _ := newTestDockerService()
|
||||
id := "1111"
|
||||
fakeDocker.InjectImageInspects([]dockertypes.ImageInspect{{ID: id, RepoTags: []string{"foo", "bar"}}})
|
||||
ds.RemoveImage(&runtimeapi.ImageSpec{Image: id})
|
||||
ds.RemoveImage(getTestCTX(), &runtimeapi.RemoveImageRequest{Image: &runtimeapi.ImageSpec{Image: id}})
|
||||
fakeDocker.AssertCallDetails(libdocker.NewCalledDetail("inspect_image", nil),
|
||||
libdocker.NewCalledDetail("remove_image", []interface{}{"foo", dockertypes.ImageRemoveOptions{PruneChildren: true}}),
|
||||
libdocker.NewCalledDetail("remove_image", []interface{}{"bar", dockertypes.ImageRemoveOptions{PruneChildren: true}}))
|
||||
@ -67,8 +68,8 @@ func TestPullWithJSONError(t *testing.T) {
|
||||
}
|
||||
for key, test := range tests {
|
||||
fakeDocker.InjectError("pull", test.err)
|
||||
_, err := ds.PullImage(test.image, &runtimeapi.AuthConfig{})
|
||||
assert.Error(t, err, fmt.Sprintf("TestCase [%s]", key))
|
||||
_, err := ds.PullImage(getTestCTX(), &runtimeapi.PullImageRequest{Image: test.image, Auth: &runtimeapi.AuthConfig{}})
|
||||
require.Error(t, err, fmt.Sprintf("TestCase [%s]", key))
|
||||
assert.Contains(t, err.Error(), test.expectedError)
|
||||
}
|
||||
}
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_unsupported.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_unsupported.go
generated
vendored
@ -21,10 +21,12 @@ package dockershim
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// ImageFsInfo returns information of the filesystem that is used to store images.
|
||||
func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) {
|
||||
func (ds *dockerService) ImageFsInfo(_ context.Context, r *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
29
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go
generated
vendored
29
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_image_windows.go
generated
vendored
@ -21,19 +21,38 @@ package dockershim
|
||||
import (
|
||||
"time"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/winstats"
|
||||
)
|
||||
|
||||
// ImageFsInfo returns information of the filesystem that is used to store images.
|
||||
func (ds *dockerService) ImageFsInfo() ([]*runtimeapi.FilesystemUsage, error) {
|
||||
// For Windows Stats to work correctly, a file system must be provided. For now, provide a fake filesystem.
|
||||
func (ds *dockerService) ImageFsInfo(_ context.Context, _ *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
|
||||
info, err := ds.client.Info()
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get docker info: %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
statsClient := &winstats.StatsClient{}
|
||||
fsinfo, err := statsClient.GetDirFsInfo(info.DockerRootDir)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get dir fsInfo for %q: %v", info.DockerRootDir, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filesystems := []*runtimeapi.FilesystemUsage{
|
||||
{
|
||||
Timestamp: time.Now().UnixNano(),
|
||||
UsedBytes: &runtimeapi.UInt64Value{Value: 0},
|
||||
UsedBytes: &runtimeapi.UInt64Value{Value: fsinfo.Usage},
|
||||
InodesUsed: &runtimeapi.UInt64Value{Value: 0},
|
||||
FsId: &runtimeapi.FilesystemIdentifier{
|
||||
Mountpoint: info.DockerRootDir,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return filesystems, nil
|
||||
return &runtimeapi.ImageFsInfoResponse{ImageFilesystems: filesystems}, nil
|
||||
}
|
||||
|
123
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_legacy_service.go
generated
vendored
Normal file
123
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_legacy_service.go
generated
vendored
Normal file
@ -0,0 +1,123 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"github.com/armon/circbuf"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/kuberuntime"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
)
|
||||
|
||||
// DockerLegacyService interface embeds some legacy methods for backward compatibility.
|
||||
// This file/interface will be removed in the near future. Do not modify or add
|
||||
// more functions.
|
||||
type DockerLegacyService interface {
|
||||
// GetContainerLogs gets logs for a specific container.
|
||||
GetContainerLogs(*v1.Pod, kubecontainer.ContainerID, *v1.PodLogOptions, io.Writer, io.Writer) error
|
||||
|
||||
// IsCRISupportedLogDriver checks whether the logging driver used by docker is
|
||||
// supported by native CRI integration.
|
||||
// TODO(resouer): remove this when deprecating unsupported log driver
|
||||
IsCRISupportedLogDriver() (bool, error)
|
||||
|
||||
kuberuntime.LegacyLogProvider
|
||||
}
|
||||
|
||||
// GetContainerLogs get container logs directly from docker daemon.
|
||||
func (d *dockerService) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
|
||||
container, err := d.client.InspectContainer(containerID.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var since int64
|
||||
if logOptions.SinceSeconds != nil {
|
||||
t := metav1.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second)
|
||||
since = t.Unix()
|
||||
}
|
||||
if logOptions.SinceTime != nil {
|
||||
since = logOptions.SinceTime.Unix()
|
||||
}
|
||||
opts := dockertypes.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Since: strconv.FormatInt(since, 10),
|
||||
Timestamps: logOptions.Timestamps,
|
||||
Follow: logOptions.Follow,
|
||||
}
|
||||
if logOptions.TailLines != nil {
|
||||
opts.Tail = strconv.FormatInt(*logOptions.TailLines, 10)
|
||||
}
|
||||
|
||||
sopts := libdocker.StreamOptions{
|
||||
OutputStream: stdout,
|
||||
ErrorStream: stderr,
|
||||
RawTerminal: container.Config.Tty,
|
||||
}
|
||||
return d.client.Logs(containerID.ID, opts, sopts)
|
||||
}
|
||||
|
||||
// GetContainerLogTail attempts to read up to MaxContainerTerminationMessageLogLength
|
||||
// from the end of the log when docker is configured with a log driver other than json-log.
|
||||
// It reads up to MaxContainerTerminationMessageLogLines lines.
|
||||
func (d *dockerService) GetContainerLogTail(uid kubetypes.UID, name, namespace string, containerId kubecontainer.ContainerID) (string, error) {
|
||||
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
|
||||
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
|
||||
// Although this is not a full spec pod, dockerLegacyService.GetContainerLogs() currently completely ignores its pod param
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: uid,
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
err := d.GetContainerLogs(pod, containerId, &v1.PodLogOptions{TailLines: &value}, buf, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// criSupportedLogDrivers are log drivers supported by native CRI integration.
|
||||
var criSupportedLogDrivers = []string{"json-file"}
|
||||
|
||||
// IsCRISupportedLogDriver checks whether the logging driver used by docker is
|
||||
// supported by native CRI integration.
|
||||
func (d *dockerService) IsCRISupportedLogDriver() (bool, error) {
|
||||
info, err := d.client.Info()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get docker info: %v", err)
|
||||
}
|
||||
for _, driver := range criSupportedLogDrivers {
|
||||
if info.LoggingDriver == driver {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
30
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_logs.go
generated
vendored
Normal file
30
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_logs.go
generated
vendored
Normal file
@ -0,0 +1,30 @@
|
||||
/*
|
||||
Copyright 2018 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// ReopenContainerLog reopens the container log file.
|
||||
func (ds *dockerService) ReopenContainerLog(_ context.Context, _ *runtimeapi.ReopenContainerLogRequest) (*runtimeapi.ReopenContainerLogResponse, error) {
|
||||
return nil, fmt.Errorf("docker does not support reopening container log files")
|
||||
}
|
170
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go
generated
vendored
170
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox.go
generated
vendored
@ -26,9 +26,10 @@ import (
|
||||
dockercontainer "github.com/docker/docker/api/types/container"
|
||||
dockerfilters "github.com/docker/docker/api/types/filters"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
"k8s.io/kubernetes/pkg/kubelet/qos"
|
||||
@ -36,7 +37,7 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
defaultSandboxImage = "gcr.io/google_containers/pause-amd64:3.0"
|
||||
defaultSandboxImage = "k8s.gcr.io/pause-amd64:3.1"
|
||||
|
||||
// Various default sandbox resources requests/limits.
|
||||
defaultSandboxCPUshares int64 = 2
|
||||
@ -75,7 +76,9 @@ func (ds *dockerService) clearNetworkReady(podSandboxID string) {
|
||||
// For docker, PodSandbox is implemented by a container holding the network
|
||||
// namespace for the pod.
|
||||
// Note: docker doesn't use LogDirectory (yet).
|
||||
func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id string, err error) {
|
||||
func (ds *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
|
||||
config := r.GetConfig()
|
||||
|
||||
// Step 1: Pull the image for the sandbox.
|
||||
image := defaultSandboxImage
|
||||
podSandboxImage := ds.podSandboxImage
|
||||
@ -87,13 +90,13 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
// see: http://kubernetes.io/docs/user-guide/images/#configuring-nodes-to-authenticate-to-a-private-repository
|
||||
// Only pull sandbox image when it's not present - v1.PullIfNotPresent.
|
||||
if err := ensureSandboxImageExists(ds.client, image); err != nil {
|
||||
return "", err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Step 2: Create the sandbox container.
|
||||
createConfig, err := ds.makeSandboxDockerConfig(config, image)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.Name, err)
|
||||
return nil, fmt.Errorf("failed to make sandbox docker config for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
createResp, err := ds.client.CreateContainer(*createConfig)
|
||||
if err != nil {
|
||||
@ -101,8 +104,9 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
}
|
||||
|
||||
if err != nil || createResp == nil {
|
||||
return "", fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.Name, err)
|
||||
return nil, fmt.Errorf("failed to create a sandbox for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
resp := &runtimeapi.RunPodSandboxResponse{PodSandboxId: createResp.ID}
|
||||
|
||||
ds.setNetworkReady(createResp.ID, false)
|
||||
defer func(e *error) {
|
||||
@ -115,7 +119,7 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
|
||||
// Step 3: Create Sandbox Checkpoint.
|
||||
if err = ds.checkpointHandler.CreateCheckpoint(createResp.ID, constructPodSandboxCheckpoint(config)); err != nil {
|
||||
return createResp.ID, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Step 4: Start the sandbox container.
|
||||
@ -123,7 +127,7 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
// startContainer failed.
|
||||
err = ds.client.StartContainer(createResp.ID)
|
||||
if err != nil {
|
||||
return createResp.ID, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.Name, err)
|
||||
return nil, fmt.Errorf("failed to start sandbox container for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
|
||||
// Rewrite resolv.conf file generated by docker.
|
||||
@ -135,17 +139,17 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
if dnsConfig := config.GetDnsConfig(); dnsConfig != nil {
|
||||
containerInfo, err := ds.client.InspectContainer(createResp.ID)
|
||||
if err != nil {
|
||||
return createResp.ID, fmt.Errorf("failed to inspect sandbox container for pod %q: %v", config.Metadata.Name, err)
|
||||
return nil, fmt.Errorf("failed to inspect sandbox container for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
|
||||
if err := rewriteResolvFile(containerInfo.ResolvConfPath, dnsConfig.Servers, dnsConfig.Searches, dnsConfig.Options); err != nil {
|
||||
return createResp.ID, fmt.Errorf("rewrite resolv.conf failed for pod %q: %v", config.Metadata.Name, err)
|
||||
return nil, fmt.Errorf("rewrite resolv.conf failed for pod %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
}
|
||||
|
||||
// Do not invoke network plugins if in hostNetwork mode.
|
||||
if nsOptions := config.GetLinux().GetSecurityContext().GetNamespaceOptions(); nsOptions != nil && nsOptions.HostNetwork {
|
||||
return createResp.ID, nil
|
||||
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// Step 5: Setup networking for the sandbox.
|
||||
@ -163,7 +167,7 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
glog.Warningf("Failed to stop sandbox container %q for pod %q: %v", createResp.ID, config.Metadata.Name, err)
|
||||
}
|
||||
}
|
||||
return createResp.ID, err
|
||||
return resp, err
|
||||
}
|
||||
|
||||
// StopPodSandbox stops the sandbox. If there are any running containers in the
|
||||
@ -171,22 +175,21 @@ func (ds *dockerService) RunPodSandbox(config *runtimeapi.PodSandboxConfig) (id
|
||||
// TODO: This function blocks sandbox teardown on networking teardown. Is it
|
||||
// better to cut our losses assuming an out of band GC routine will cleanup
|
||||
// after us?
|
||||
func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
|
||||
func (ds *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
|
||||
var namespace, name string
|
||||
var hostNetwork bool
|
||||
var checkpointErr, statusErr error
|
||||
|
||||
// Try to retrieve sandbox information from docker daemon or sandbox checkpoint
|
||||
status, statusErr := ds.PodSandboxStatus(podSandboxID)
|
||||
podSandboxID := r.PodSandboxId
|
||||
resp := &runtimeapi.StopPodSandboxResponse{}
|
||||
|
||||
// Try to retrieve minimal sandbox information from docker daemon or sandbox checkpoint.
|
||||
inspectResult, metadata, statusErr := ds.getPodSandboxDetails(podSandboxID)
|
||||
if statusErr == nil {
|
||||
nsOpts := status.GetLinux().GetNamespaces().GetOptions()
|
||||
hostNetwork = nsOpts != nil && nsOpts.HostNetwork
|
||||
m := status.GetMetadata()
|
||||
namespace = m.Namespace
|
||||
name = m.Name
|
||||
namespace = metadata.Namespace
|
||||
name = metadata.Name
|
||||
hostNetwork = (networkNamespaceMode(inspectResult) == runtimeapi.NamespaceMode_NODE)
|
||||
} else {
|
||||
var checkpoint *PodSandboxCheckpoint
|
||||
checkpoint, checkpointErr = ds.checkpointHandler.GetCheckpoint(podSandboxID)
|
||||
checkpoint, checkpointErr := ds.checkpointHandler.GetCheckpoint(podSandboxID)
|
||||
|
||||
// Proceed if both sandbox container and checkpoint could not be found. This means that following
|
||||
// actions will only have sandbox ID and not have pod namespace and name information.
|
||||
@ -196,7 +199,7 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
|
||||
glog.Warningf("Both sandbox container and checkpoint for id %q could not be found. "+
|
||||
"Proceed without further sandbox information.", podSandboxID)
|
||||
} else {
|
||||
return utilerrors.NewAggregate([]error{
|
||||
return nil, utilerrors.NewAggregate([]error{
|
||||
fmt.Errorf("failed to get checkpoint for sandbox %q: %v", podSandboxID, checkpointErr),
|
||||
fmt.Errorf("failed to get sandbox status: %v", statusErr)})
|
||||
}
|
||||
@ -237,14 +240,21 @@ func (ds *dockerService) StopPodSandbox(podSandboxID string) error {
|
||||
ds.checkpointHandler.RemoveCheckpoint(podSandboxID)
|
||||
}
|
||||
}
|
||||
return utilerrors.NewAggregate(errList)
|
||||
|
||||
if len(errList) == 0 {
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// TODO: Stop all running containers in the sandbox.
|
||||
return nil, utilerrors.NewAggregate(errList)
|
||||
}
|
||||
|
||||
// RemovePodSandbox removes the sandbox. If there are running containers in the
|
||||
// sandbox, they should be forcibly removed.
|
||||
func (ds *dockerService) RemovePodSandbox(podSandboxID string) error {
|
||||
func (ds *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
|
||||
podSandboxID := r.PodSandboxId
|
||||
var errs []error
|
||||
|
||||
opts := dockertypes.ContainerListOptions{All: true}
|
||||
|
||||
opts.Filters = dockerfilters.NewArgs()
|
||||
@ -258,7 +268,7 @@ func (ds *dockerService) RemovePodSandbox(podSandboxID string) error {
|
||||
|
||||
// Remove all containers in the sandbox.
|
||||
for i := range containers {
|
||||
if err := ds.RemoveContainer(containers[i].ID); err != nil && !libdocker.IsContainerNotFoundError(err) {
|
||||
if _, err := ds.RemoveContainer(ctx, &runtimeapi.RemoveContainerRequest{ContainerId: containers[i].ID}); err != nil && !libdocker.IsContainerNotFoundError(err) {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
}
|
||||
@ -277,7 +287,10 @@ func (ds *dockerService) RemovePodSandbox(podSandboxID string) error {
|
||||
if err := ds.checkpointHandler.RemoveCheckpoint(podSandboxID); err != nil {
|
||||
errs = append(errs, err)
|
||||
}
|
||||
return utilerrors.NewAggregate(errs)
|
||||
if len(errs) == 0 {
|
||||
return &runtimeapi.RemovePodSandboxResponse{}, nil
|
||||
}
|
||||
return nil, utilerrors.NewAggregate(errs)
|
||||
}
|
||||
|
||||
// getIPFromPlugin interrogates the network plugin for an IP.
|
||||
@ -305,7 +318,7 @@ func (ds *dockerService) getIP(podSandboxID string, sandbox *dockertypes.Contain
|
||||
if sandbox.NetworkSettings == nil {
|
||||
return ""
|
||||
}
|
||||
if sharesHostNetwork(sandbox) {
|
||||
if networkNamespaceMode(sandbox) == runtimeapi.NamespaceMode_NODE {
|
||||
// For sandboxes using host network, the shim is not responsible for
|
||||
// reporting the IP.
|
||||
return ""
|
||||
@ -341,10 +354,26 @@ func (ds *dockerService) getIP(podSandboxID string, sandbox *dockertypes.Contain
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns the inspect container response, the sandbox metadata, and network namespace mode
|
||||
func (ds *dockerService) getPodSandboxDetails(podSandboxID string) (*dockertypes.ContainerJSON, *runtimeapi.PodSandboxMetadata, error) {
|
||||
resp, err := ds.client.InspectContainer(podSandboxID)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
metadata, err := parseSandboxName(resp.Name)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
return resp, metadata, nil
|
||||
}
|
||||
|
||||
// PodSandboxStatus returns the status of the PodSandbox.
|
||||
func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodSandboxStatus, error) {
|
||||
// Inspect the container.
|
||||
r, err := ds.client.InspectContainer(podSandboxID)
|
||||
func (ds *dockerService) PodSandboxStatus(ctx context.Context, req *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
|
||||
podSandboxID := req.PodSandboxId
|
||||
|
||||
r, metadata, err := ds.getPodSandboxDetails(podSandboxID)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -368,14 +397,9 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodS
|
||||
if IP = ds.determinePodIPBySandboxID(podSandboxID); IP == "" {
|
||||
IP = ds.getIP(podSandboxID, r)
|
||||
}
|
||||
hostNetwork := sharesHostNetwork(r)
|
||||
|
||||
metadata, err := parseSandboxName(r.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
labels, annotations := extractLabels(r.Config.Labels)
|
||||
return &runtimeapi.PodSandboxStatus{
|
||||
status := &runtimeapi.PodSandboxStatus{
|
||||
Id: r.ID,
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
@ -388,17 +412,20 @@ func (ds *dockerService) PodSandboxStatus(podSandboxID string) (*runtimeapi.PodS
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{
|
||||
Namespaces: &runtimeapi.Namespace{
|
||||
Options: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: hostNetwork,
|
||||
HostPid: sharesHostPid(r),
|
||||
HostIpc: sharesHostIpc(r),
|
||||
Network: networkNamespaceMode(r),
|
||||
Pid: pidNamespaceMode(r),
|
||||
Ipc: ipcNamespaceMode(r),
|
||||
},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
return &runtimeapi.PodSandboxStatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
// ListPodSandbox returns a list of Sandbox.
|
||||
func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]*runtimeapi.PodSandbox, error) {
|
||||
func (ds *dockerService) ListPodSandbox(_ context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
|
||||
filter := r.GetFilter()
|
||||
|
||||
// By default, list all containers whether they are running or not.
|
||||
opts := dockertypes.ContainerListOptions{All: true}
|
||||
filterOutReadySandboxes := false
|
||||
@ -482,7 +509,7 @@ func (ds *dockerService) ListPodSandbox(filter *runtimeapi.PodSandboxFilter) ([]
|
||||
result = append(result, checkpointToRuntimeAPISandbox(id, checkpoint))
|
||||
}
|
||||
|
||||
return result, nil
|
||||
return &runtimeapi.ListPodSandboxResponse{Items: result}, nil
|
||||
}
|
||||
|
||||
// applySandboxLinuxOptions applies LinuxPodSandboxConfig to dockercontainer.HostConfig and dockercontainer.ContainerCreateConfig.
|
||||
@ -528,12 +555,6 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
|
||||
// TODO(random-liu): Deprecate this label once container metrics is directly got from CRI.
|
||||
labels[types.KubernetesContainerNameLabel] = sandboxContainerName
|
||||
|
||||
apiVersion, err := ds.getDockerAPIVersion()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("unable to get the docker API version: %v", err)
|
||||
}
|
||||
securityOptSep := getSecurityOptSeparator(apiVersion)
|
||||
|
||||
hc := &dockercontainer.HostConfig{}
|
||||
createConfig := &dockertypes.ContainerCreateConfig{
|
||||
Name: makeSandboxName(c),
|
||||
@ -547,7 +568,7 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
|
||||
}
|
||||
|
||||
// Apply linux-specific options.
|
||||
if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSep); err != nil {
|
||||
if err := ds.applySandboxLinuxOptions(hc, c.GetLinux(), createConfig, image, securityOptSeparator); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -565,39 +586,42 @@ func (ds *dockerService) makeSandboxDockerConfig(c *runtimeapi.PodSandboxConfig,
|
||||
}
|
||||
|
||||
// Set security options.
|
||||
securityOpts, err := ds.getSecurityOpts(c.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSep)
|
||||
securityOpts, err := ds.getSecurityOpts(c.GetLinux().GetSecurityContext().GetSeccompProfilePath(), securityOptSeparator)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to generate sandbox security options for sandbox %q: %v", c.Metadata.Name, err)
|
||||
}
|
||||
hc.SecurityOpt = append(hc.SecurityOpt, securityOpts...)
|
||||
|
||||
applyExperimentalCreateConfig(createConfig, c.Annotations)
|
||||
return createConfig, nil
|
||||
}
|
||||
|
||||
// sharesHostNetwork returns true if the given container is sharing the host's
|
||||
// network namespace.
|
||||
func sharesHostNetwork(container *dockertypes.ContainerJSON) bool {
|
||||
if container != nil && container.HostConfig != nil {
|
||||
return string(container.HostConfig.NetworkMode) == namespaceModeHost
|
||||
// networkNamespaceMode returns the network runtimeapi.NamespaceMode for this container.
|
||||
// Supports: POD, NODE
|
||||
func networkNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
|
||||
if container != nil && container.HostConfig != nil && string(container.HostConfig.NetworkMode) == namespaceModeHost {
|
||||
return runtimeapi.NamespaceMode_NODE
|
||||
}
|
||||
return false
|
||||
return runtimeapi.NamespaceMode_POD
|
||||
}
|
||||
|
||||
// sharesHostPid returns true if the given container is sharing the host's pid
|
||||
// namespace.
|
||||
func sharesHostPid(container *dockertypes.ContainerJSON) bool {
|
||||
if container != nil && container.HostConfig != nil {
|
||||
return string(container.HostConfig.PidMode) == namespaceModeHost
|
||||
// pidNamespaceMode returns the PID runtimeapi.NamespaceMode for this container.
|
||||
// Supports: CONTAINER, NODE
|
||||
// TODO(verb): add support for POD PID namespace sharing
|
||||
func pidNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
|
||||
if container != nil && container.HostConfig != nil && string(container.HostConfig.PidMode) == namespaceModeHost {
|
||||
return runtimeapi.NamespaceMode_NODE
|
||||
}
|
||||
return false
|
||||
return runtimeapi.NamespaceMode_CONTAINER
|
||||
}
|
||||
|
||||
// sharesHostIpc returns true if the given container is sharing the host's ipc
|
||||
// namespace.
|
||||
func sharesHostIpc(container *dockertypes.ContainerJSON) bool {
|
||||
if container != nil && container.HostConfig != nil {
|
||||
return string(container.HostConfig.IpcMode) == namespaceModeHost
|
||||
// ipcNamespaceMode returns the IPC runtimeapi.NamespaceMode for this container.
|
||||
// Supports: POD, NODE
|
||||
func ipcNamespaceMode(container *dockertypes.ContainerJSON) runtimeapi.NamespaceMode {
|
||||
if container != nil && container.HostConfig != nil && string(container.HostConfig.IpcMode) == namespaceModeHost {
|
||||
return runtimeapi.NamespaceMode_NODE
|
||||
}
|
||||
return false
|
||||
return runtimeapi.NamespaceMode_POD
|
||||
}
|
||||
|
||||
func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) *PodSandboxCheckpoint {
|
||||
@ -610,8 +634,8 @@ func constructPodSandboxCheckpoint(config *runtimeapi.PodSandboxConfig) *PodSand
|
||||
Protocol: &proto,
|
||||
})
|
||||
}
|
||||
if nsOptions := config.GetLinux().GetSecurityContext().GetNamespaceOptions(); nsOptions != nil {
|
||||
checkpoint.Data.HostNetwork = nsOptions.HostNetwork
|
||||
if config.GetLinux().GetSecurityContext().GetNamespaceOptions().GetNetwork() == runtimeapi.NamespaceMode_NODE {
|
||||
checkpoint.Data.HostNetwork = true
|
||||
}
|
||||
return checkpoint
|
||||
}
|
||||
|
134
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox_test.go
generated
vendored
134
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_sandbox_test.go
generated
vendored
@ -19,13 +19,15 @@ package dockershim
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"net"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
@ -69,23 +71,23 @@ func TestListSandboxes(t *testing.T) {
|
||||
state := runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
var createdAt int64 = fakeClock.Now().UnixNano()
|
||||
for i := range configs {
|
||||
id, err := ds.RunPodSandbox(configs[i])
|
||||
assert.NoError(t, err)
|
||||
runResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: configs[i]})
|
||||
require.NoError(t, err)
|
||||
// Prepend to the expected list because ListPodSandbox returns
|
||||
// the most recent sandbox first.
|
||||
expected = append([]*runtimeapi.PodSandbox{{
|
||||
Metadata: configs[i].Metadata,
|
||||
Id: id,
|
||||
Id: runResp.PodSandboxId,
|
||||
State: state,
|
||||
CreatedAt: createdAt,
|
||||
Labels: configs[i].Labels,
|
||||
Annotations: configs[i].Annotations,
|
||||
}}, expected...)
|
||||
}
|
||||
sandboxes, err := ds.ListPodSandbox(nil)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, sandboxes, len(expected))
|
||||
assert.Equal(t, expected, sandboxes)
|
||||
listResp, err := ds.ListPodSandbox(getTestCTX(), &runtimeapi.ListPodSandboxRequest{})
|
||||
require.NoError(t, err)
|
||||
assert.Len(t, listResp.Items, len(expected))
|
||||
assert.Equal(t, expected, listResp.Items)
|
||||
}
|
||||
|
||||
// TestSandboxStatus tests the basic lifecycle operations and verify that
|
||||
@ -95,20 +97,23 @@ func TestSandboxStatus(t *testing.T) {
|
||||
labels := map[string]string{"label": "foobar1"}
|
||||
annotations := map[string]string{"annotation": "abc"}
|
||||
config := makeSandboxConfigWithLabelsAndAnnotations("foo", "bar", "1", 0, labels, annotations)
|
||||
|
||||
// TODO: The following variables depend on the internal
|
||||
// implementation of FakeDockerClient, and should be fixed.
|
||||
fakeIP := "2.3.4.5"
|
||||
r := rand.New(rand.NewSource(0)).Uint32()
|
||||
podIP := fmt.Sprintf("10.%d.%d.%d", byte(r>>16), byte(r>>8), byte(r))
|
||||
|
||||
state := runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
ct := int64(0)
|
||||
hostNetwork := false
|
||||
expected := &runtimeapi.PodSandboxStatus{
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
Metadata: config.Metadata,
|
||||
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: fakeIP},
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Options: &runtimeapi.NamespaceOption{HostNetwork: hostNetwork}}},
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
Metadata: config.Metadata,
|
||||
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: podIP},
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{
|
||||
Namespaces: &runtimeapi.Namespace{
|
||||
Options: &runtimeapi.NamespaceOption{
|
||||
Pid: runtimeapi.NamespaceMode_CONTAINER,
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: labels,
|
||||
Annotations: annotations,
|
||||
}
|
||||
@ -116,7 +121,9 @@ func TestSandboxStatus(t *testing.T) {
|
||||
// Create the sandbox.
|
||||
fClock.SetTime(time.Now())
|
||||
expected.CreatedAt = fClock.Now().UnixNano()
|
||||
id, err := ds.RunPodSandbox(config)
|
||||
runResp, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: config})
|
||||
require.NoError(t, err)
|
||||
id := runResp.PodSandboxId
|
||||
|
||||
// Check internal labels
|
||||
c, err := fDocker.InspectContainer(id)
|
||||
@ -125,24 +132,25 @@ func TestSandboxStatus(t *testing.T) {
|
||||
assert.Equal(t, c.Config.Labels[types.KubernetesContainerNameLabel], sandboxContainerName)
|
||||
|
||||
expected.Id = id // ID is only known after the creation.
|
||||
status, err := ds.PodSandboxStatus(id)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, status)
|
||||
statusResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: id})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, statusResp.Status)
|
||||
|
||||
// Stop the sandbox.
|
||||
expected.State = runtimeapi.PodSandboxState_SANDBOX_NOTREADY
|
||||
err = ds.StopPodSandbox(id)
|
||||
assert.NoError(t, err)
|
||||
_, err = ds.StopPodSandbox(getTestCTX(), &runtimeapi.StopPodSandboxRequest{PodSandboxId: id})
|
||||
require.NoError(t, err)
|
||||
// IP not valid after sandbox stop
|
||||
expected.Network.Ip = ""
|
||||
status, err = ds.PodSandboxStatus(id)
|
||||
assert.Equal(t, expected, status)
|
||||
statusResp, err = ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: id})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, statusResp.Status)
|
||||
|
||||
// Remove the container.
|
||||
err = ds.RemovePodSandbox(id)
|
||||
assert.NoError(t, err)
|
||||
status, err = ds.PodSandboxStatus(id)
|
||||
assert.Error(t, err, fmt.Sprintf("status of sandbox: %+v", status))
|
||||
_, err = ds.RemovePodSandbox(getTestCTX(), &runtimeapi.RemovePodSandboxRequest{PodSandboxId: id})
|
||||
require.NoError(t, err)
|
||||
statusResp, err = ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: id})
|
||||
assert.Error(t, err, fmt.Sprintf("status of sandbox: %+v", statusResp))
|
||||
}
|
||||
|
||||
// TestSandboxStatusAfterRestart tests that retrieving sandbox status returns
|
||||
@ -151,20 +159,23 @@ func TestSandboxStatus(t *testing.T) {
|
||||
func TestSandboxStatusAfterRestart(t *testing.T) {
|
||||
ds, _, fClock := newTestDockerService()
|
||||
config := makeSandboxConfig("foo", "bar", "1", 0)
|
||||
|
||||
// TODO: The following variables depend on the internal
|
||||
// implementation of FakeDockerClient, and should be fixed.
|
||||
fakeIP := "2.3.4.5"
|
||||
r := rand.New(rand.NewSource(0)).Uint32()
|
||||
podIP := fmt.Sprintf("10.%d.%d.%d", byte(r>>16), byte(r>>8), byte(r))
|
||||
|
||||
state := runtimeapi.PodSandboxState_SANDBOX_READY
|
||||
ct := int64(0)
|
||||
hostNetwork := false
|
||||
expected := &runtimeapi.PodSandboxStatus{
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
Metadata: config.Metadata,
|
||||
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: fakeIP},
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{Namespaces: &runtimeapi.Namespace{Options: &runtimeapi.NamespaceOption{HostNetwork: hostNetwork}}},
|
||||
State: state,
|
||||
CreatedAt: ct,
|
||||
Metadata: config.Metadata,
|
||||
Network: &runtimeapi.PodSandboxNetworkStatus{Ip: podIP},
|
||||
Linux: &runtimeapi.LinuxPodSandboxStatus{
|
||||
Namespaces: &runtimeapi.Namespace{
|
||||
Options: &runtimeapi.NamespaceOption{
|
||||
Pid: runtimeapi.NamespaceMode_CONTAINER,
|
||||
},
|
||||
},
|
||||
},
|
||||
Labels: map[string]string{},
|
||||
Annotations: map[string]string{},
|
||||
}
|
||||
@ -183,9 +194,10 @@ func TestSandboxStatusAfterRestart(t *testing.T) {
|
||||
|
||||
// Check status without RunPodSandbox() having set up networking
|
||||
expected.Id = createResp.ID // ID is only known after the creation.
|
||||
status, err := ds.PodSandboxStatus(createResp.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, expected, status)
|
||||
|
||||
statusResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: createResp.ID})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, expected, statusResp.Status)
|
||||
}
|
||||
|
||||
// TestNetworkPluginInvocation checks that the right SetUpPod and TearDownPod
|
||||
@ -207,15 +219,12 @@ func TestNetworkPluginInvocation(t *testing.T) {
|
||||
|
||||
mockPlugin.EXPECT().Name().Return("mockNetworkPlugin").AnyTimes()
|
||||
setup := mockPlugin.EXPECT().SetUpPod(ns, name, cID)
|
||||
// StopPodSandbox performs a lookup on status to figure out if the sandbox
|
||||
// is running with hostnetworking, as all its given is the ID.
|
||||
mockPlugin.EXPECT().GetPodNetworkStatus(ns, name, cID)
|
||||
mockPlugin.EXPECT().TearDownPod(ns, name, cID).After(setup)
|
||||
|
||||
_, err := ds.RunPodSandbox(c)
|
||||
assert.NoError(t, err)
|
||||
err = ds.StopPodSandbox(cID.ID)
|
||||
assert.NoError(t, err)
|
||||
_, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: c})
|
||||
require.NoError(t, err)
|
||||
_, err = ds.StopPodSandbox(getTestCTX(), &runtimeapi.StopPodSandboxRequest{PodSandboxId: cID.ID})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestHostNetworkPluginInvocation checks that *no* SetUp/TearDown calls happen
|
||||
@ -233,20 +242,21 @@ func TestHostNetworkPluginInvocation(t *testing.T) {
|
||||
map[string]string{"label": name},
|
||||
map[string]string{"annotation": ns},
|
||||
)
|
||||
hostNetwork := true
|
||||
c.Linux = &runtimeapi.LinuxPodSandboxConfig{
|
||||
SecurityContext: &runtimeapi.LinuxSandboxSecurityContext{
|
||||
NamespaceOptions: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: hostNetwork,
|
||||
Network: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
},
|
||||
}
|
||||
cID := kubecontainer.ContainerID{Type: runtimeName, ID: libdocker.GetFakeContainerID(fmt.Sprintf("/%v", makeSandboxName(c)))}
|
||||
|
||||
// No calls to network plugin are expected
|
||||
_, err := ds.RunPodSandbox(c)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, ds.StopPodSandbox(cID.ID))
|
||||
_, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: c})
|
||||
require.NoError(t, err)
|
||||
|
||||
_, err = ds.StopPodSandbox(getTestCTX(), &runtimeapi.StopPodSandboxRequest{PodSandboxId: cID.ID})
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
// TestSetUpPodFailure checks that the sandbox should be not ready when it
|
||||
@ -271,19 +281,19 @@ func TestSetUpPodFailure(t *testing.T) {
|
||||
mockPlugin.EXPECT().GetPodNetworkStatus(ns, name, cID).Return(&network.PodNetworkStatus{IP: net.IP("127.0.0.01")}, nil).AnyTimes()
|
||||
|
||||
t.Logf("RunPodSandbox should return error")
|
||||
_, err := ds.RunPodSandbox(c)
|
||||
_, err := ds.RunPodSandbox(getTestCTX(), &runtimeapi.RunPodSandboxRequest{Config: c})
|
||||
assert.Error(t, err)
|
||||
|
||||
t.Logf("PodSandboxStatus should be not ready")
|
||||
status, err := ds.PodSandboxStatus(cID.ID)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, status.State)
|
||||
statusResp, err := ds.PodSandboxStatus(getTestCTX(), &runtimeapi.PodSandboxStatusRequest{PodSandboxId: cID.ID})
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, runtimeapi.PodSandboxState_SANDBOX_NOTREADY, statusResp.Status.State)
|
||||
|
||||
t.Logf("ListPodSandbox should also show not ready")
|
||||
sandboxes, err := ds.ListPodSandbox(nil)
|
||||
assert.NoError(t, err)
|
||||
listResp, err := ds.ListPodSandbox(getTestCTX(), &runtimeapi.ListPodSandboxRequest{})
|
||||
require.NoError(t, err)
|
||||
var sandbox *runtimeapi.PodSandbox
|
||||
for _, s := range sandboxes {
|
||||
for _, s := range listResp.Items {
|
||||
if s.Id == cID.ID {
|
||||
sandbox = s
|
||||
break
|
||||
|
169
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go
generated
vendored
169
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service.go
generated
vendored
@ -18,22 +18,17 @@ package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/http"
|
||||
"strconv"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/armon/circbuf"
|
||||
"github.com/blang/semver"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
kubetypes "k8s.io/apimachinery/pkg/types"
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/apis/kubeletconfig"
|
||||
kubecm "k8s.io/kubernetes/pkg/kubelet/cm"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
@ -61,8 +56,6 @@ const (
|
||||
|
||||
dockerNetNSFmt = "/proc/%v/ns/net"
|
||||
|
||||
defaultSeccompProfile = "unconfined"
|
||||
|
||||
// Internal docker labels used to identify whether a container is a sandbox
|
||||
// or a regular container.
|
||||
// TODO: This is not backward compatible with older containers. We will
|
||||
@ -86,6 +79,25 @@ const (
|
||||
// to kubelet behavior and system settings in addition to any API flags that may be introduced.
|
||||
)
|
||||
|
||||
// CRIService includes all methods necessary for a CRI server.
|
||||
type CRIService interface {
|
||||
runtimeapi.RuntimeServiceServer
|
||||
runtimeapi.ImageServiceServer
|
||||
Start() error
|
||||
}
|
||||
|
||||
// DockerService is an interface that embeds the new RuntimeService and
|
||||
// ImageService interfaces.
|
||||
type DockerService interface {
|
||||
CRIService
|
||||
|
||||
// For serving streaming calls.
|
||||
http.Handler
|
||||
|
||||
// For supporting legacy features.
|
||||
DockerLegacyService
|
||||
}
|
||||
|
||||
// NetworkPluginSettings is the subset of kubelet runtime args we pass
|
||||
// to the container runtime shim so it can probe for network plugins.
|
||||
// In the future we will feed these directly to a standalone container
|
||||
@ -138,7 +150,7 @@ func (p *portMappingGetter) GetPodPortMappings(containerID string) ([]*hostport.
|
||||
}
|
||||
|
||||
// dockerNetworkHost implements network.Host by wrapping the legacy host passed in by the kubelet
|
||||
// and dockerServices which implementes the rest of the network host interfaces.
|
||||
// and dockerServices which implements the rest of the network host interfaces.
|
||||
// The legacy host methods are slated for deletion.
|
||||
type dockerNetworkHost struct {
|
||||
network.LegacyHost
|
||||
@ -262,25 +274,6 @@ func NewDockerService(config *ClientConfig, podSandboxImage string, streamingCon
|
||||
return ds, nil
|
||||
}
|
||||
|
||||
// DockerService is an interface that embeds the new RuntimeService and
|
||||
// ImageService interfaces.
|
||||
type DockerService interface {
|
||||
internalapi.RuntimeService
|
||||
internalapi.ImageManagerService
|
||||
Start() error
|
||||
// For serving streaming calls.
|
||||
http.Handler
|
||||
|
||||
// IsCRISupportedLogDriver checks whether the logging driver used by docker is
|
||||
// suppoted by native CRI integration.
|
||||
// TODO(resouer): remove this when deprecating unsupported log driver
|
||||
IsCRISupportedLogDriver() (bool, error)
|
||||
|
||||
// NewDockerLegacyService created docker legacy service when log driver is not supported.
|
||||
// TODO(resouer): remove this when deprecating unsupported log driver
|
||||
NewDockerLegacyService() DockerLegacyService
|
||||
}
|
||||
|
||||
type dockerService struct {
|
||||
client libdocker.Interface
|
||||
os kubecontainer.OSInterface
|
||||
@ -309,8 +302,10 @@ type dockerService struct {
|
||||
disableSharedPID bool
|
||||
}
|
||||
|
||||
// TODO: handle context.
|
||||
|
||||
// Version returns the runtime name, runtime version and runtime API version
|
||||
func (ds *dockerService) Version(_ string) (*runtimeapi.VersionResponse, error) {
|
||||
func (ds *dockerService) Version(_ context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
|
||||
v, err := ds.getDockerVersion()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -336,17 +331,20 @@ func (ds *dockerService) getDockerVersion() (*dockertypes.Version, error) {
|
||||
}
|
||||
|
||||
// UpdateRuntimeConfig updates the runtime config. Currently only handles podCIDR updates.
|
||||
func (ds *dockerService) UpdateRuntimeConfig(runtimeConfig *runtimeapi.RuntimeConfig) (err error) {
|
||||
func (ds *dockerService) UpdateRuntimeConfig(_ context.Context, r *runtimeapi.UpdateRuntimeConfigRequest) (*runtimeapi.UpdateRuntimeConfigResponse, error) {
|
||||
runtimeConfig := r.GetRuntimeConfig()
|
||||
if runtimeConfig == nil {
|
||||
return
|
||||
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
|
||||
}
|
||||
|
||||
glog.Infof("docker cri received runtime config %+v", runtimeConfig)
|
||||
if ds.network != nil && runtimeConfig.NetworkConfig.PodCidr != "" {
|
||||
event := make(map[string]interface{})
|
||||
event[network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE_DETAIL_CIDR] = runtimeConfig.NetworkConfig.PodCidr
|
||||
ds.network.Event(network.NET_PLUGIN_EVENT_POD_CIDR_CHANGE, event)
|
||||
}
|
||||
return
|
||||
|
||||
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
|
||||
}
|
||||
|
||||
// GetNetNS returns the network namespace of the given containerID. The ID
|
||||
@ -392,7 +390,7 @@ func (ds *dockerService) Start() error {
|
||||
|
||||
// Status returns the status of the runtime.
|
||||
// TODO(random-liu): Set network condition accordingly here.
|
||||
func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
|
||||
func (ds *dockerService) Status(_ context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
|
||||
runtimeReady := &runtimeapi.RuntimeCondition{
|
||||
Type: runtimeapi.RuntimeReady,
|
||||
Status: true,
|
||||
@ -412,7 +410,8 @@ func (ds *dockerService) Status() (*runtimeapi.RuntimeStatus, error) {
|
||||
networkReady.Reason = "NetworkPluginNotReady"
|
||||
networkReady.Message = fmt.Sprintf("docker: network plugin is not ready: %v", err)
|
||||
}
|
||||
return &runtimeapi.RuntimeStatus{Conditions: conditions}, nil
|
||||
status := &runtimeapi.RuntimeStatus{Conditions: conditions}
|
||||
return &runtimeapi.StatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
func (ds *dockerService) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||
@ -507,103 +506,3 @@ func toAPIProtocol(protocol Protocol) v1.Protocol {
|
||||
glog.Warningf("Unknown protocol %q: defaulting to TCP", protocol)
|
||||
return v1.ProtocolTCP
|
||||
}
|
||||
|
||||
// DockerLegacyService interface embeds some legacy methods for backward compatibility.
|
||||
type DockerLegacyService interface {
|
||||
// GetContainerLogs gets logs for a specific container.
|
||||
GetContainerLogs(*v1.Pod, kubecontainer.ContainerID, *v1.PodLogOptions, io.Writer, io.Writer) error
|
||||
}
|
||||
|
||||
// dockerLegacyService implements the DockerLegacyService. We add this for non json-log driver
|
||||
// support. (See #41996)
|
||||
type dockerLegacyService struct {
|
||||
client libdocker.Interface
|
||||
}
|
||||
|
||||
// NewDockerLegacyService created docker legacy service when log driver is not supported.
|
||||
// TODO(resouer): remove this when deprecating unsupported log driver
|
||||
func (d *dockerService) NewDockerLegacyService() DockerLegacyService {
|
||||
return &dockerLegacyService{client: d.client}
|
||||
}
|
||||
|
||||
// GetContainerLogs get container logs directly from docker daemon.
|
||||
func (d *dockerLegacyService) GetContainerLogs(pod *v1.Pod, containerID kubecontainer.ContainerID, logOptions *v1.PodLogOptions, stdout, stderr io.Writer) error {
|
||||
container, err := d.client.InspectContainer(containerID.ID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var since int64
|
||||
if logOptions.SinceSeconds != nil {
|
||||
t := metav1.Now().Add(-time.Duration(*logOptions.SinceSeconds) * time.Second)
|
||||
since = t.Unix()
|
||||
}
|
||||
if logOptions.SinceTime != nil {
|
||||
since = logOptions.SinceTime.Unix()
|
||||
}
|
||||
opts := dockertypes.ContainerLogsOptions{
|
||||
ShowStdout: true,
|
||||
ShowStderr: true,
|
||||
Since: strconv.FormatInt(since, 10),
|
||||
Timestamps: logOptions.Timestamps,
|
||||
Follow: logOptions.Follow,
|
||||
}
|
||||
if logOptions.TailLines != nil {
|
||||
opts.Tail = strconv.FormatInt(*logOptions.TailLines, 10)
|
||||
}
|
||||
|
||||
sopts := libdocker.StreamOptions{
|
||||
OutputStream: stdout,
|
||||
ErrorStream: stderr,
|
||||
RawTerminal: container.Config.Tty,
|
||||
}
|
||||
return d.client.Logs(containerID.ID, opts, sopts)
|
||||
}
|
||||
|
||||
// LegacyLogProvider implements the kuberuntime.LegacyLogProvider interface
|
||||
type LegacyLogProvider struct {
|
||||
dls DockerLegacyService
|
||||
}
|
||||
|
||||
func NewLegacyLogProvider(dls DockerLegacyService) LegacyLogProvider {
|
||||
return LegacyLogProvider{dls: dls}
|
||||
}
|
||||
|
||||
// GetContainerLogTail attempts to read up to MaxContainerTerminationMessageLogLength
|
||||
// from the end of the log when docker is configured with a log driver other than json-log.
|
||||
// It reads up to MaxContainerTerminationMessageLogLines lines.
|
||||
func (l LegacyLogProvider) GetContainerLogTail(uid kubetypes.UID, name, namespace string, containerId kubecontainer.ContainerID) (string, error) {
|
||||
value := int64(kubecontainer.MaxContainerTerminationMessageLogLines)
|
||||
buf, _ := circbuf.NewBuffer(kubecontainer.MaxContainerTerminationMessageLogLength)
|
||||
// Although this is not a full spec pod, dockerLegacyService.GetContainerLogs() currently completely ignores its pod param
|
||||
pod := &v1.Pod{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
UID: uid,
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
}
|
||||
err := l.dls.GetContainerLogs(pod, containerId, &v1.PodLogOptions{TailLines: &value}, buf, buf)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return buf.String(), nil
|
||||
}
|
||||
|
||||
// criSupportedLogDrivers are log drivers supported by native CRI integration.
|
||||
var criSupportedLogDrivers = []string{"json-file"}
|
||||
|
||||
// IsCRISupportedLogDriver checks whether the logging driver used by docker is
|
||||
// suppoted by native CRI integration.
|
||||
func (d *dockerService) IsCRISupportedLogDriver() (bool, error) {
|
||||
info, err := d.client.Info()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get docker info: %v", err)
|
||||
}
|
||||
for _, driver := range criSupportedLogDrivers {
|
||||
if info.LoggingDriver == driver {
|
||||
return true, nil
|
||||
}
|
||||
}
|
||||
return false, nil
|
||||
}
|
||||
|
19
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service_test.go
generated
vendored
19
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_service_test.go
generated
vendored
@ -18,6 +18,7 @@ package dockershim
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"math/rand"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
@ -28,7 +29,7 @@ import (
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"k8s.io/apimachinery/pkg/util/clock"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
"k8s.io/kubernetes/pkg/kubelet/network"
|
||||
@ -44,7 +45,7 @@ func newTestNetworkPlugin(t *testing.T) *nettest.MockNetworkPlugin {
|
||||
|
||||
func newTestDockerService() (*dockerService, *libdocker.FakeDockerClient, *clock.FakeClock) {
|
||||
fakeClock := clock.NewFakeClock(time.Time{})
|
||||
c := libdocker.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23")
|
||||
c := libdocker.NewFakeDockerClient().WithClock(fakeClock).WithVersion("1.11.2", "1.23").WithRandSource(rand.NewSource(0))
|
||||
pm := network.NewPluginManager(&network.NoopNetworkPlugin{})
|
||||
return &dockerService{
|
||||
client: c,
|
||||
@ -83,33 +84,33 @@ func TestStatus(t *testing.T) {
|
||||
}
|
||||
|
||||
// Should report ready status if version returns no error.
|
||||
status, err := ds.Status()
|
||||
assert.NoError(t, err)
|
||||
statusResp, err := ds.Status(getTestCTX(), &runtimeapi.StatusRequest{})
|
||||
require.NoError(t, err)
|
||||
assertStatus(map[string]bool{
|
||||
runtimeapi.RuntimeReady: true,
|
||||
runtimeapi.NetworkReady: true,
|
||||
}, status)
|
||||
}, statusResp.Status)
|
||||
|
||||
// Should not report ready status if version returns error.
|
||||
fDocker.InjectError("version", errors.New("test error"))
|
||||
status, err = ds.Status()
|
||||
statusResp, err = ds.Status(getTestCTX(), &runtimeapi.StatusRequest{})
|
||||
assert.NoError(t, err)
|
||||
assertStatus(map[string]bool{
|
||||
runtimeapi.RuntimeReady: false,
|
||||
runtimeapi.NetworkReady: true,
|
||||
}, status)
|
||||
}, statusResp.Status)
|
||||
|
||||
// Should not report ready status is network plugin returns error.
|
||||
mockPlugin := newTestNetworkPlugin(t)
|
||||
ds.network = network.NewPluginManager(mockPlugin)
|
||||
defer mockPlugin.Finish()
|
||||
mockPlugin.EXPECT().Status().Return(errors.New("network error"))
|
||||
status, err = ds.Status()
|
||||
statusResp, err = ds.Status(getTestCTX(), &runtimeapi.StatusRequest{})
|
||||
assert.NoError(t, err)
|
||||
assertStatus(map[string]bool{
|
||||
runtimeapi.RuntimeReady: true,
|
||||
runtimeapi.NetworkReady: false,
|
||||
}, status)
|
||||
}, statusResp.Status)
|
||||
}
|
||||
|
||||
func TestVersion(t *testing.T) {
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_linux.go
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_linux.go
generated
vendored
@ -20,15 +20,17 @@ package dockershim
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// ContainerStats returns stats for a container stats request based on container id.
|
||||
func (ds *dockerService) ContainerStats(string) (*runtimeapi.ContainerStats, error) {
|
||||
func (ds *dockerService) ContainerStats(_ context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
// ListContainerStats returns stats for a list container stats request based on a filter.
|
||||
func (ds *dockerService) ListContainerStats(*runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
|
||||
func (ds *dockerService) ListContainerStats(_ context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
8
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_unsupported.go
generated
vendored
8
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_unsupported.go
generated
vendored
@ -21,15 +21,17 @@ package dockershim
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// ContainerStats returns stats for a container stats request based on container id.
|
||||
func (ds *dockerService) ContainerStats(string) (*runtimeapi.ContainerStats, error) {
|
||||
func (ds *dockerService) ContainerStats(_ context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
||||
// ListContainerStats returns stats for a list container stats request based on a filter.
|
||||
func (ds *dockerService) ListContainerStats(*runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
|
||||
func (ds *dockerService) ListContainerStats(_ context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
|
||||
return nil, fmt.Errorf("not implemented")
|
||||
}
|
||||
|
22
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go
generated
vendored
22
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_stats_windows.go
generated
vendored
@ -21,20 +21,23 @@ package dockershim
|
||||
import (
|
||||
"time"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// ContainerStats returns stats for a container stats request based on container id.
|
||||
func (ds *dockerService) ContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
|
||||
containerStats, err := ds.getContainerStats(containerID)
|
||||
func (ds *dockerService) ContainerStats(_ context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
|
||||
stats, err := ds.getContainerStats(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return containerStats, nil
|
||||
return &runtimeapi.ContainerStatsResponse{Stats: stats}, nil
|
||||
}
|
||||
|
||||
// ListContainerStats returns stats for a list container stats request based on a filter.
|
||||
func (ds *dockerService) ListContainerStats(containerStatsFilter *runtimeapi.ContainerStatsFilter) ([]*runtimeapi.ContainerStats, error) {
|
||||
func (ds *dockerService) ListContainerStats(ctx context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
|
||||
containerStatsFilter := r.GetFilter()
|
||||
filter := &runtimeapi.ContainerFilter{}
|
||||
|
||||
if containerStatsFilter != nil {
|
||||
@ -43,13 +46,13 @@ func (ds *dockerService) ListContainerStats(containerStatsFilter *runtimeapi.Con
|
||||
filter.LabelSelector = containerStatsFilter.LabelSelector
|
||||
}
|
||||
|
||||
containers, err := ds.ListContainers(filter)
|
||||
listResp, err := ds.ListContainers(ctx, &runtimeapi.ListContainersRequest{Filter: filter})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var stats []*runtimeapi.ContainerStats
|
||||
for _, container := range containers {
|
||||
for _, container := range listResp.Containers {
|
||||
containerStats, err := ds.getContainerStats(container.Id)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -58,7 +61,7 @@ func (ds *dockerService) ListContainerStats(containerStatsFilter *runtimeapi.Con
|
||||
stats = append(stats, containerStats)
|
||||
}
|
||||
|
||||
return stats, nil
|
||||
return &runtimeapi.ListContainerStatsResponse{Stats: stats}, nil
|
||||
}
|
||||
|
||||
func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.ContainerStats, error) {
|
||||
@ -72,10 +75,11 @@ func (ds *dockerService) getContainerStats(containerID string) (*runtimeapi.Cont
|
||||
return nil, err
|
||||
}
|
||||
|
||||
status, err := ds.ContainerStatus(containerID)
|
||||
statusResp, err := ds.ContainerStatus(context.Background(), &runtimeapi.ContainerStatusRequest{ContainerId: containerID})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
status := statusResp.GetStatus()
|
||||
|
||||
dockerStats := statsJSON.Stats
|
||||
timestamp := time.Now().UnixNano()
|
||||
|
32
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go
generated
vendored
32
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go
generated
vendored
@ -26,14 +26,15 @@ import (
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"golang.org/x/net/context"
|
||||
|
||||
"k8s.io/client-go/tools/remotecommand"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
|
||||
"k8s.io/kubernetes/pkg/kubelet/server/streaming"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util/ioutils"
|
||||
utilexec "k8s.io/utils/exec"
|
||||
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
)
|
||||
@ -76,20 +77,35 @@ func (r *streamingRuntime) PortForward(podSandboxID string, port int32, stream i
|
||||
|
||||
// ExecSync executes a command in the container, and returns the stdout output.
|
||||
// If command exits with a non-zero exit code, an error is returned.
|
||||
func (ds *dockerService) ExecSync(containerID string, cmd []string, timeout time.Duration) (stdout []byte, stderr []byte, err error) {
|
||||
func (ds *dockerService) ExecSync(_ context.Context, req *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
|
||||
timeout := time.Duration(req.Timeout) * time.Second
|
||||
var stdoutBuffer, stderrBuffer bytes.Buffer
|
||||
err = ds.streamingRuntime.exec(containerID, cmd,
|
||||
err := ds.streamingRuntime.exec(req.ContainerId, req.Cmd,
|
||||
nil, // in
|
||||
ioutils.WriteCloserWrapper(&stdoutBuffer),
|
||||
ioutils.WriteCloserWrapper(&stderrBuffer),
|
||||
false, // tty
|
||||
nil, // resize
|
||||
timeout)
|
||||
return stdoutBuffer.Bytes(), stderrBuffer.Bytes(), err
|
||||
|
||||
var exitCode int32
|
||||
if err != nil {
|
||||
exitError, ok := err.(utilexec.ExitError)
|
||||
if !ok {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
exitCode = int32(exitError.ExitStatus())
|
||||
}
|
||||
return &runtimeapi.ExecSyncResponse{
|
||||
Stdout: stdoutBuffer.Bytes(),
|
||||
Stderr: stderrBuffer.Bytes(),
|
||||
ExitCode: exitCode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Exec prepares a streaming endpoint to execute a command in the container, and returns the address.
|
||||
func (ds *dockerService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
|
||||
func (ds *dockerService) Exec(_ context.Context, req *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
|
||||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("exec")
|
||||
}
|
||||
@ -101,7 +117,7 @@ func (ds *dockerService) Exec(req *runtimeapi.ExecRequest) (*runtimeapi.ExecResp
|
||||
}
|
||||
|
||||
// Attach prepares a streaming endpoint to attach to a running container, and returns the address.
|
||||
func (ds *dockerService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
|
||||
func (ds *dockerService) Attach(_ context.Context, req *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
|
||||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("attach")
|
||||
}
|
||||
@ -113,7 +129,7 @@ func (ds *dockerService) Attach(req *runtimeapi.AttachRequest) (*runtimeapi.Atta
|
||||
}
|
||||
|
||||
// PortForward prepares a streaming endpoint to forward ports from a PodSandbox, and returns the address.
|
||||
func (ds *dockerService) PortForward(req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
|
||||
func (ds *dockerService) PortForward(_ context.Context, req *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
|
||||
if ds.streamingServer == nil {
|
||||
return nil, streaming.ErrorStreamingDisabled("port forward")
|
||||
}
|
||||
|
30
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go
generated
vendored
30
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers.go
generated
vendored
@ -22,7 +22,6 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/blang/semver"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
dockercontainer "github.com/docker/docker/api/types/container"
|
||||
dockerfilters "github.com/docker/docker/api/types/filters"
|
||||
@ -31,7 +30,7 @@ import (
|
||||
|
||||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
"k8s.io/kubernetes/pkg/credentialprovider"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
"k8s.io/kubernetes/pkg/kubelet/types"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
@ -39,12 +38,8 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
annotationPrefix = "annotation."
|
||||
|
||||
// Docker changed the API for specifying options in v1.11
|
||||
securityOptSeparatorChangeVersion = "1.23.0" // Corresponds to docker 1.11.x
|
||||
securityOptSeparatorOld = ':'
|
||||
securityOptSeparatorNew = '='
|
||||
annotationPrefix = "annotation."
|
||||
securityOptSeparator = '='
|
||||
)
|
||||
|
||||
var (
|
||||
@ -54,10 +49,6 @@ var (
|
||||
// if a container starts but the executable file is not found, runc gives a message that matches
|
||||
startRE = regexp.MustCompile(`\\\\\\\"(.*)\\\\\\\": executable file not found`)
|
||||
|
||||
// Docker changes the security option separator from ':' to '=' in the 1.23
|
||||
// API version.
|
||||
optsSeparatorChangeVersion = semver.MustParse(securityOptSeparatorChangeVersion)
|
||||
|
||||
defaultSeccompOpt = []dockerOpt{{"seccomp", "unconfined", ""}}
|
||||
)
|
||||
|
||||
@ -321,21 +312,6 @@ func transformStartContainerError(err error) error {
|
||||
return err
|
||||
}
|
||||
|
||||
// getSecurityOptSeparator returns the security option separator based on the
|
||||
// docker API version.
|
||||
// TODO: Remove this function along with the relevant code when we no longer
|
||||
// need to support docker 1.10.
|
||||
func getSecurityOptSeparator(v *semver.Version) rune {
|
||||
switch v.Compare(optsSeparatorChangeVersion) {
|
||||
case -1:
|
||||
// Current version is less than the API change version; use the old
|
||||
// separator.
|
||||
return securityOptSeparatorOld
|
||||
default:
|
||||
return securityOptSeparatorNew
|
||||
}
|
||||
}
|
||||
|
||||
// ensureSandboxImageExists pulls the sandbox image when it's not present.
|
||||
func ensureSandboxImageExists(client libdocker.Interface, image string) error {
|
||||
_, err := client.InspectImageByRef(image)
|
||||
|
11
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go
generated
vendored
11
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_linux.go
generated
vendored
@ -30,7 +30,7 @@ import (
|
||||
"github.com/blang/semver"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
dockercontainer "github.com/docker/docker/api/types/container"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
func DefaultMemorySwap() int64 {
|
||||
@ -104,8 +104,9 @@ func (ds *dockerService) updateCreateConfig(
|
||||
rOpts := lc.GetResources()
|
||||
if rOpts != nil {
|
||||
createConfig.HostConfig.Resources = dockercontainer.Resources{
|
||||
// Memory and MemorySwap are set to the same value, this prevents containers from using any swap.
|
||||
Memory: rOpts.MemoryLimitInBytes,
|
||||
MemorySwap: DefaultMemorySwap(),
|
||||
MemorySwap: rOpts.MemoryLimitInBytes,
|
||||
CPUShares: rOpts.CpuShares,
|
||||
CPUQuota: rOpts.CpuQuota,
|
||||
CPUPeriod: rOpts.CpuPeriod,
|
||||
@ -118,7 +119,7 @@ func (ds *dockerService) updateCreateConfig(
|
||||
if err := applyContainerSecurityContext(lc, podSandboxID, createConfig.Config, createConfig.HostConfig, securityOptSep); err != nil {
|
||||
return fmt.Errorf("failed to apply container security context for container %q: %v", config.Metadata.Name, err)
|
||||
}
|
||||
modifyPIDNamespaceOverrides(ds.disableSharedPID, apiVersion, createConfig.HostConfig)
|
||||
modifyContainerPIDNamespaceOverrides(ds.disableSharedPID, apiVersion, createConfig.HostConfig, podSandboxID)
|
||||
}
|
||||
|
||||
// Apply cgroupsParent derived from the sandbox config.
|
||||
@ -145,3 +146,7 @@ func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
|
||||
}
|
||||
return fmt.Sprintf(dockerNetNSFmt, c.State.Pid), nil
|
||||
}
|
||||
|
||||
// applyExperimentalCreateConfig applys experimental configures from sandbox annotations.
|
||||
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
|
||||
}
|
||||
|
29
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go
generated
vendored
29
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_test.go
generated
vendored
@ -23,13 +23,12 @@ import (
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
"github.com/blang/semver"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
dockernat "github.com/docker/go-connections/nat"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker"
|
||||
"k8s.io/kubernetes/pkg/security/apparmor"
|
||||
)
|
||||
@ -129,30 +128,6 @@ func TestParsingCreationConflictError(t *testing.T) {
|
||||
require.Equal(t, matches[1], "24666ab8c814d16f986449e504ea0159468ddf8da01897144a770f66dce0e14e")
|
||||
}
|
||||
|
||||
func TestGetSecurityOptSeparator(t *testing.T) {
|
||||
for c, test := range map[string]struct {
|
||||
desc string
|
||||
version *semver.Version
|
||||
expected rune
|
||||
}{
|
||||
"older docker version": {
|
||||
version: &semver.Version{Major: 1, Minor: 22, Patch: 0},
|
||||
expected: ':',
|
||||
},
|
||||
"changed docker version": {
|
||||
version: &semver.Version{Major: 1, Minor: 23, Patch: 0},
|
||||
expected: '=',
|
||||
},
|
||||
"newer docker version": {
|
||||
version: &semver.Version{Major: 1, Minor: 24, Patch: 0},
|
||||
expected: '=',
|
||||
},
|
||||
} {
|
||||
actual := getSecurityOptSeparator(test.version)
|
||||
assert.Equal(t, test.expected, actual, c)
|
||||
}
|
||||
}
|
||||
|
||||
// writeDockerConfig will write a config file into a temporary dir, and return that dir.
|
||||
// Caller is responsible for deleting the dir and its contents.
|
||||
func writeDockerConfig(cfg string) (string, error) {
|
||||
@ -268,7 +243,7 @@ func TestMakePortsAndBindings(t *testing.T) {
|
||||
},
|
||||
},
|
||||
},
|
||||
"multipe port mappings": {
|
||||
"multiple port mappings": {
|
||||
pm: []*runtimeapi.PortMapping{
|
||||
{
|
||||
Protocol: runtimeapi.Protocol_TCP,
|
||||
|
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go
generated
vendored
6
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_unsupported.go
generated
vendored
@ -24,7 +24,7 @@ import (
|
||||
"github.com/blang/semver"
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
"github.com/golang/glog"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
func DefaultMemorySwap() int64 {
|
||||
@ -53,3 +53,7 @@ func (ds *dockerService) determinePodIPBySandboxID(uid string) string {
|
||||
func getNetworkNamespace(c *dockertypes.ContainerJSON) (string, error) {
|
||||
return "", fmt.Errorf("unsupported platform")
|
||||
}
|
||||
|
||||
// applyExperimentalCreateConfig applys experimental configures from sandbox annotations.
|
||||
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
|
||||
}
|
||||
|
49
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go
generated
vendored
49
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/helpers_windows.go
generated
vendored
@ -26,7 +26,9 @@ import (
|
||||
dockercontainer "github.com/docker/docker/api/types/container"
|
||||
dockerfilters "github.com/docker/docker/api/types/filters"
|
||||
"github.com/golang/glog"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
|
||||
kubeletapis "k8s.io/kubernetes/pkg/kubelet/apis"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
func DefaultMemorySwap() int64 {
|
||||
@ -40,6 +42,17 @@ func (ds *dockerService) getSecurityOpts(seccompProfile string, separator rune)
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// applyExperimentalCreateConfig applys experimental configures from sandbox annotations.
|
||||
func applyExperimentalCreateConfig(createConfig *dockertypes.ContainerCreateConfig, annotations map[string]string) {
|
||||
if kubeletapis.ShouldIsolatedByHyperV(annotations) {
|
||||
createConfig.HostConfig.Isolation = kubeletapis.HypervIsolationValue
|
||||
|
||||
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
|
||||
createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode("none")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (ds *dockerService) updateCreateConfig(
|
||||
createConfig *dockertypes.ContainerCreateConfig,
|
||||
config *runtimeapi.ContainerConfig,
|
||||
@ -47,11 +60,26 @@ func (ds *dockerService) updateCreateConfig(
|
||||
podSandboxID string, securityOptSep rune, apiVersion *semver.Version) error {
|
||||
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode != "" {
|
||||
createConfig.HostConfig.NetworkMode = dockercontainer.NetworkMode(networkMode)
|
||||
} else {
|
||||
} else if !kubeletapis.ShouldIsolatedByHyperV(sandboxConfig.Annotations) {
|
||||
// Todo: Refactor this call in future for calling methods directly in security_context.go
|
||||
modifyHostNetworkOptionForContainer(false, podSandboxID, createConfig.HostConfig)
|
||||
modifyHostOptionsForContainer(nil, podSandboxID, createConfig.HostConfig)
|
||||
}
|
||||
|
||||
// Apply Windows-specific options if applicable.
|
||||
if wc := config.GetWindows(); wc != nil {
|
||||
rOpts := wc.GetResources()
|
||||
if rOpts != nil {
|
||||
createConfig.HostConfig.Resources = dockercontainer.Resources{
|
||||
Memory: rOpts.MemoryLimitInBytes,
|
||||
CPUShares: rOpts.CpuShares,
|
||||
CPUCount: rOpts.CpuCount,
|
||||
CPUPercent: rOpts.CpuMaximum,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
applyExperimentalCreateConfig(createConfig, sandboxConfig.Annotations)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -82,13 +110,22 @@ func (ds *dockerService) determinePodIPBySandboxID(sandboxID string) string {
|
||||
// Windows version < Windows Server 2016 is Not Supported
|
||||
|
||||
// Sandbox support in Windows mandates CNI Plugin.
|
||||
// Presense of CONTAINER_NETWORK flag is considered as non-Sandbox cases here
|
||||
// Presence of CONTAINER_NETWORK flag is considered as non-Sandbox cases here
|
||||
|
||||
// Todo: Add a kernel version check for more validation
|
||||
|
||||
if networkMode := os.Getenv("CONTAINER_NETWORK"); networkMode == "" {
|
||||
// Do not return any IP, so that we would continue and get the IP of the Sandbox
|
||||
ds.getIP(sandboxID, r)
|
||||
if r.HostConfig.Isolation == kubeletapis.HypervIsolationValue {
|
||||
// Hyper-V only supports one container per Pod yet and the container will have a different
|
||||
// IP address from sandbox. Return the first non-sandbox container IP as POD IP.
|
||||
// TODO(feiskyer): remove this workaround after Hyper-V supports multiple containers per Pod.
|
||||
if containerIP := ds.getIP(c.ID, r); containerIP != "" {
|
||||
return containerIP
|
||||
}
|
||||
} else {
|
||||
// Do not return any IP, so that we would continue and get the IP of the Sandbox
|
||||
ds.getIP(sandboxID, r)
|
||||
}
|
||||
} else {
|
||||
// On Windows, every container that is created in a Sandbox, needs to invoke CNI plugin again for adding the Network,
|
||||
// with the shared container name as NetNS info,
|
||||
|
3
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD
generated
vendored
3
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/BUILD
generated
vendored
@ -12,8 +12,7 @@ go_test(
|
||||
"helpers_test.go",
|
||||
"kube_docker_client_test.go",
|
||||
],
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker",
|
||||
library = ":go_default_library",
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//vendor/github.com/docker/docker/api/types:go_default_library",
|
||||
"//vendor/github.com/stretchr/testify/assert:go_default_library",
|
||||
|
35
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go
generated
vendored
35
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/client.go
generated
vendored
@ -17,7 +17,6 @@ limitations under the License.
|
||||
package libdocker
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
dockertypes "github.com/docker/docker/api/types"
|
||||
@ -29,18 +28,14 @@ import (
|
||||
|
||||
const (
|
||||
// https://docs.docker.com/engine/reference/api/docker_remote_api/
|
||||
// docker version should be at least 1.10.x
|
||||
MinimumDockerAPIVersion = "1.22.0"
|
||||
// docker version should be at least 1.11.x
|
||||
MinimumDockerAPIVersion = "1.23.0"
|
||||
|
||||
// Status of a container returned by ListContainers.
|
||||
StatusRunningPrefix = "Up"
|
||||
StatusCreatedPrefix = "Created"
|
||||
StatusExitedPrefix = "Exited"
|
||||
|
||||
// This is only used by GetKubeletDockerContainers(), and should be removed
|
||||
// along with the function.
|
||||
containerNamePrefix = "k8s"
|
||||
|
||||
// Fake docker endpoint
|
||||
FakeDockerEndpoint = "fake://"
|
||||
)
|
||||
@ -109,29 +104,3 @@ func ConnectToDockerOrDie(dockerEndpoint string, requestTimeout, imagePullProgre
|
||||
glog.Infof("Start docker client with request timeout=%v", requestTimeout)
|
||||
return newKubeDockerClient(client, requestTimeout, imagePullProgressDeadline)
|
||||
}
|
||||
|
||||
// GetKubeletDockerContainers lists all container or just the running ones.
|
||||
// Returns a list of docker containers that we manage
|
||||
// TODO: This function should be deleted after migrating
|
||||
// test/e2e_node/garbage_collector_test.go off of it.
|
||||
func GetKubeletDockerContainers(client Interface, allContainers bool) ([]*dockertypes.Container, error) {
|
||||
result := []*dockertypes.Container{}
|
||||
containers, err := client.ListContainers(dockertypes.ContainerListOptions{All: allContainers})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for i := range containers {
|
||||
container := &containers[i]
|
||||
if len(container.Names) == 0 {
|
||||
continue
|
||||
}
|
||||
// Skip containers that we didn't create to allow users to manually
|
||||
// spin up their own containers if they want.
|
||||
if !strings.HasPrefix(container.Names[0], "/"+containerNamePrefix+"_") {
|
||||
glog.V(5).Infof("Docker Container: %s is not managed by kubelet.", container.Names[0])
|
||||
continue
|
||||
}
|
||||
result = append(result, container)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
12
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go
generated
vendored
12
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/libdocker/fake_client.go
generated
vendored
@ -61,6 +61,7 @@ type FakeDockerClient struct {
|
||||
called []calledDetail
|
||||
pulled []string
|
||||
EnableTrace bool
|
||||
RandGenerator *rand.Rand
|
||||
|
||||
// Created, Started, Stopped and Removed all contain container docker ID
|
||||
Created []string
|
||||
@ -99,6 +100,7 @@ func NewFakeDockerClient() *FakeDockerClient {
|
||||
EnableTrace: true,
|
||||
ImageInspects: make(map[string]*dockertypes.ImageInspect),
|
||||
ImageIDsNeedingAuth: make(map[string]dockertypes.AuthConfig),
|
||||
RandGenerator: rand.New(rand.NewSource(time.Now().UnixNano())),
|
||||
}
|
||||
}
|
||||
|
||||
@ -123,6 +125,13 @@ func (f *FakeDockerClient) WithTraceDisabled() *FakeDockerClient {
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) WithRandSource(source rand.Source) *FakeDockerClient {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
f.RandGenerator = rand.New(source)
|
||||
return f
|
||||
}
|
||||
|
||||
func (f *FakeDockerClient) appendCalled(callDetail calledDetail) {
|
||||
if f.EnableTrace {
|
||||
f.called = append(f.called, callDetail)
|
||||
@ -597,7 +606,8 @@ func (f *FakeDockerClient) StartContainer(id string) error {
|
||||
container.State.Running = true
|
||||
container.State.Pid = os.Getpid()
|
||||
container.State.StartedAt = dockerTimestampToString(timestamp)
|
||||
container.NetworkSettings.IPAddress = "2.3.4.5"
|
||||
r := f.RandGenerator.Uint32()
|
||||
container.NetworkSettings.IPAddress = fmt.Sprintf("10.%d.%d.%d", byte(r>>16), byte(r>>8), byte(r))
|
||||
f.ContainerMap[id] = container
|
||||
f.updateContainerStatus(id, StatusRunningPrefix)
|
||||
f.normalSleep(200, 50, 50)
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/leaky"
|
||||
)
|
||||
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming_test.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/naming_test.go
generated
vendored
@ -21,7 +21,7 @@ import (
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
func TestSandboxNameRoundTrip(t *testing.T) {
|
||||
|
10
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD
generated
vendored
10
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/BUILD
generated
vendored
@ -7,21 +7,15 @@ load(
|
||||
|
||||
go_library(
|
||||
name = "go_default_library",
|
||||
srcs = [
|
||||
"docker_server.go",
|
||||
"docker_service.go",
|
||||
],
|
||||
srcs = ["docker_server.go"],
|
||||
importpath = "k8s.io/kubernetes/pkg/kubelet/dockershim/remote",
|
||||
deps = [
|
||||
"//pkg/kubelet/apis/cri:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/v1alpha1/runtime:go_default_library",
|
||||
"//pkg/kubelet/apis/cri/runtime/v1alpha2:go_default_library",
|
||||
"//pkg/kubelet/dockershim:go_default_library",
|
||||
"//pkg/kubelet/util:go_default_library",
|
||||
"//pkg/util/interrupt:go_default_library",
|
||||
"//vendor/github.com/golang/glog:go_default_library",
|
||||
"//vendor/golang.org/x/net/context:go_default_library",
|
||||
"//vendor/google.golang.org/grpc:go_default_library",
|
||||
"//vendor/k8s.io/utils/exec:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
|
14
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go
generated
vendored
14
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_server.go
generated
vendored
@ -22,7 +22,7 @@ import (
|
||||
"github.com/golang/glog"
|
||||
"google.golang.org/grpc"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim"
|
||||
"k8s.io/kubernetes/pkg/kubelet/util"
|
||||
"k8s.io/kubernetes/pkg/util/interrupt"
|
||||
@ -33,21 +33,27 @@ type DockerServer struct {
|
||||
// endpoint is the endpoint to serve on.
|
||||
endpoint string
|
||||
// service is the docker service which implements runtime and image services.
|
||||
service DockerService
|
||||
service dockershim.CRIService
|
||||
// server is the grpc server.
|
||||
server *grpc.Server
|
||||
}
|
||||
|
||||
// NewDockerServer creates the dockershim grpc server.
|
||||
func NewDockerServer(endpoint string, s dockershim.DockerService) *DockerServer {
|
||||
func NewDockerServer(endpoint string, s dockershim.CRIService) *DockerServer {
|
||||
return &DockerServer{
|
||||
endpoint: endpoint,
|
||||
service: NewDockerService(s),
|
||||
service: s,
|
||||
}
|
||||
}
|
||||
|
||||
// Start starts the dockershim grpc server.
|
||||
func (s *DockerServer) Start() error {
|
||||
// Start the internal service.
|
||||
if err := s.service.Start(); err != nil {
|
||||
glog.Errorf("Unable to start docker service")
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(2).Infof("Start dockershim grpc server")
|
||||
l, err := util.CreateListener(s.endpoint)
|
||||
if err != nil {
|
||||
|
249
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go
generated
vendored
249
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/remote/docker_service.go
generated
vendored
@ -1,249 +0,0 @@
|
||||
/*
|
||||
Copyright 2016 The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package remote
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
|
||||
internalapi "k8s.io/kubernetes/pkg/kubelet/apis/cri"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
"k8s.io/kubernetes/pkg/kubelet/dockershim"
|
||||
utilexec "k8s.io/utils/exec"
|
||||
)
|
||||
|
||||
// DockerService is the interface implement CRI remote service server.
|
||||
type DockerService interface {
|
||||
runtimeapi.RuntimeServiceServer
|
||||
runtimeapi.ImageServiceServer
|
||||
}
|
||||
|
||||
// dockerService uses dockershim service to implement DockerService.
|
||||
// Notice that the contexts in the functions are not used now.
|
||||
// TODO(random-liu): Change the dockershim service to support context, and implement
|
||||
// internal services and remote services with the dockershim service.
|
||||
type dockerService struct {
|
||||
runtimeService internalapi.RuntimeService
|
||||
imageService internalapi.ImageManagerService
|
||||
}
|
||||
|
||||
func NewDockerService(s dockershim.DockerService) DockerService {
|
||||
return &dockerService{runtimeService: s, imageService: s}
|
||||
}
|
||||
|
||||
func (d *dockerService) Version(ctx context.Context, r *runtimeapi.VersionRequest) (*runtimeapi.VersionResponse, error) {
|
||||
return d.runtimeService.Version(r.Version)
|
||||
}
|
||||
|
||||
func (d *dockerService) Status(ctx context.Context, r *runtimeapi.StatusRequest) (*runtimeapi.StatusResponse, error) {
|
||||
status, err := d.runtimeService.Status()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RunPodSandbox(ctx context.Context, r *runtimeapi.RunPodSandboxRequest) (*runtimeapi.RunPodSandboxResponse, error) {
|
||||
podSandboxId, err := d.runtimeService.RunPodSandbox(r.GetConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RunPodSandboxResponse{PodSandboxId: podSandboxId}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StopPodSandbox(ctx context.Context, r *runtimeapi.StopPodSandboxRequest) (*runtimeapi.StopPodSandboxResponse, error) {
|
||||
err := d.runtimeService.StopPodSandbox(r.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StopPodSandboxResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemovePodSandbox(ctx context.Context, r *runtimeapi.RemovePodSandboxRequest) (*runtimeapi.RemovePodSandboxResponse, error) {
|
||||
err := d.runtimeService.RemovePodSandbox(r.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RemovePodSandboxResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) PodSandboxStatus(ctx context.Context, r *runtimeapi.PodSandboxStatusRequest) (*runtimeapi.PodSandboxStatusResponse, error) {
|
||||
podSandboxStatus, err := d.runtimeService.PodSandboxStatus(r.PodSandboxId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.PodSandboxStatusResponse{Status: podSandboxStatus}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListPodSandbox(ctx context.Context, r *runtimeapi.ListPodSandboxRequest) (*runtimeapi.ListPodSandboxResponse, error) {
|
||||
items, err := d.runtimeService.ListPodSandbox(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListPodSandboxResponse{Items: items}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) CreateContainer(ctx context.Context, r *runtimeapi.CreateContainerRequest) (*runtimeapi.CreateContainerResponse, error) {
|
||||
containerId, err := d.runtimeService.CreateContainer(r.PodSandboxId, r.GetConfig(), r.GetSandboxConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.CreateContainerResponse{ContainerId: containerId}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StartContainer(ctx context.Context, r *runtimeapi.StartContainerRequest) (*runtimeapi.StartContainerResponse, error) {
|
||||
err := d.runtimeService.StartContainer(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StartContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) StopContainer(ctx context.Context, r *runtimeapi.StopContainerRequest) (*runtimeapi.StopContainerResponse, error) {
|
||||
err := d.runtimeService.StopContainer(r.ContainerId, r.Timeout)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.StopContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemoveContainer(ctx context.Context, r *runtimeapi.RemoveContainerRequest) (*runtimeapi.RemoveContainerResponse, error) {
|
||||
err := d.runtimeService.RemoveContainer(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RemoveContainerResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListContainers(ctx context.Context, r *runtimeapi.ListContainersRequest) (*runtimeapi.ListContainersResponse, error) {
|
||||
containers, err := d.runtimeService.ListContainers(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListContainersResponse{Containers: containers}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ContainerStatus(ctx context.Context, r *runtimeapi.ContainerStatusRequest) (*runtimeapi.ContainerStatusResponse, error) {
|
||||
status, err := d.runtimeService.ContainerStatus(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ContainerStatusResponse{Status: status}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) UpdateContainerResources(ctx context.Context, r *runtimeapi.UpdateContainerResourcesRequest) (*runtimeapi.UpdateContainerResourcesResponse, error) {
|
||||
err := d.runtimeService.UpdateContainerResources(r.ContainerId, r.Linux)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.UpdateContainerResourcesResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ExecSync(ctx context.Context, r *runtimeapi.ExecSyncRequest) (*runtimeapi.ExecSyncResponse, error) {
|
||||
stdout, stderr, err := d.runtimeService.ExecSync(r.ContainerId, r.Cmd, time.Duration(r.Timeout)*time.Second)
|
||||
var exitCode int32
|
||||
if err != nil {
|
||||
exitError, ok := err.(utilexec.ExitError)
|
||||
if !ok {
|
||||
return nil, err
|
||||
}
|
||||
exitCode = int32(exitError.ExitStatus())
|
||||
}
|
||||
return &runtimeapi.ExecSyncResponse{
|
||||
Stdout: stdout,
|
||||
Stderr: stderr,
|
||||
ExitCode: exitCode,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Exec prepares a streaming exec session for the requested container and
// returns the URL clients should connect to; the stream itself is handled
// by the wrapped runtime service.
func (d *dockerService) Exec(ctx context.Context, r *runtimeapi.ExecRequest) (*runtimeapi.ExecResponse, error) {
	return d.runtimeService.Exec(r)
}
|
||||
|
||||
// Attach prepares a streaming attach session for the requested container,
// delegating entirely to the wrapped runtime service.
func (d *dockerService) Attach(ctx context.Context, r *runtimeapi.AttachRequest) (*runtimeapi.AttachResponse, error) {
	return d.runtimeService.Attach(r)
}
|
||||
|
||||
// PortForward prepares a streaming port-forward session for the requested
// sandbox, delegating entirely to the wrapped runtime service.
func (d *dockerService) PortForward(ctx context.Context, r *runtimeapi.PortForwardRequest) (*runtimeapi.PortForwardResponse, error) {
	return d.runtimeService.PortForward(r)
}
|
||||
|
||||
func (d *dockerService) UpdateRuntimeConfig(ctx context.Context, r *runtimeapi.UpdateRuntimeConfigRequest) (*runtimeapi.UpdateRuntimeConfigResponse, error) {
|
||||
err := d.runtimeService.UpdateRuntimeConfig(r.GetRuntimeConfig())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.UpdateRuntimeConfigResponse{}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListImages(ctx context.Context, r *runtimeapi.ListImagesRequest) (*runtimeapi.ListImagesResponse, error) {
|
||||
images, err := d.imageService.ListImages(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListImagesResponse{Images: images}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ImageStatus(ctx context.Context, r *runtimeapi.ImageStatusRequest) (*runtimeapi.ImageStatusResponse, error) {
|
||||
image, err := d.imageService.ImageStatus(r.GetImage())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ImageStatusResponse{Image: image}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) PullImage(ctx context.Context, r *runtimeapi.PullImageRequest) (*runtimeapi.PullImageResponse, error) {
|
||||
image, err := d.imageService.PullImage(r.GetImage(), r.GetAuth())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.PullImageResponse{ImageRef: image}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) RemoveImage(ctx context.Context, r *runtimeapi.RemoveImageRequest) (*runtimeapi.RemoveImageResponse, error) {
|
||||
err := d.imageService.RemoveImage(r.GetImage())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.RemoveImageResponse{}, nil
|
||||
}
|
||||
|
||||
// ImageFsInfo returns information of the filesystem that is used to store images.
|
||||
func (d *dockerService) ImageFsInfo(ctx context.Context, r *runtimeapi.ImageFsInfoRequest) (*runtimeapi.ImageFsInfoResponse, error) {
|
||||
filesystems, err := d.imageService.ImageFsInfo()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ImageFsInfoResponse{ImageFilesystems: filesystems}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ContainerStats(ctx context.Context, r *runtimeapi.ContainerStatsRequest) (*runtimeapi.ContainerStatsResponse, error) {
|
||||
stats, err := d.runtimeService.ContainerStats(r.ContainerId)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ContainerStatsResponse{Stats: stats}, nil
|
||||
}
|
||||
|
||||
func (d *dockerService) ListContainerStats(ctx context.Context, r *runtimeapi.ListContainerStatsRequest) (*runtimeapi.ListContainerStatsResponse, error) {
|
||||
stats, err := d.runtimeService.ListContainerStats(r.GetFilter())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeapi.ListContainerStatsResponse{Stats: stats}, nil
|
||||
}
|
61
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go
generated
vendored
61
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context.go
generated
vendored
@ -24,7 +24,7 @@ import (
|
||||
"github.com/blang/semver"
|
||||
dockercontainer "github.com/docker/docker/api/types/container"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
knetwork "k8s.io/kubernetes/pkg/kubelet/network"
|
||||
)
|
||||
|
||||
@ -122,40 +122,33 @@ func modifyHostConfig(sc *runtimeapi.LinuxContainerSecurityContext, hostConfig *
|
||||
|
||||
// modifySandboxNamespaceOptions apply namespace options for sandbox
|
||||
func modifySandboxNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig, network *knetwork.PluginManager) {
|
||||
hostNetwork := false
|
||||
if nsOpts != nil {
|
||||
hostNetwork = nsOpts.HostNetwork
|
||||
}
|
||||
// The sandbox's PID namespace is the one that's shared, so CONTAINER and POD are equivalent for it
|
||||
modifyCommonNamespaceOptions(nsOpts, hostConfig)
|
||||
modifyHostNetworkOptionForSandbox(hostNetwork, network, hostConfig)
|
||||
modifyHostOptionsForSandbox(nsOpts, network, hostConfig)
|
||||
}
|
||||
|
||||
// modifyContainerNamespaceOptions apply namespace options for container
|
||||
func modifyContainerNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, podSandboxID string, hostConfig *dockercontainer.HostConfig) {
|
||||
hostNetwork := false
|
||||
if nsOpts != nil {
|
||||
hostNetwork = nsOpts.HostNetwork
|
||||
if nsOpts.GetPid() == runtimeapi.NamespaceMode_POD {
|
||||
hostConfig.PidMode = dockercontainer.PidMode(fmt.Sprintf("container:%v", podSandboxID))
|
||||
}
|
||||
hostConfig.PidMode = dockercontainer.PidMode(fmt.Sprintf("container:%v", podSandboxID))
|
||||
modifyCommonNamespaceOptions(nsOpts, hostConfig)
|
||||
modifyHostNetworkOptionForContainer(hostNetwork, podSandboxID, hostConfig)
|
||||
modifyHostOptionsForContainer(nsOpts, podSandboxID, hostConfig)
|
||||
}
|
||||
|
||||
// modifyCommonNamespaceOptions apply common namespace options for sandbox and container
|
||||
func modifyCommonNamespaceOptions(nsOpts *runtimeapi.NamespaceOption, hostConfig *dockercontainer.HostConfig) {
|
||||
if nsOpts != nil {
|
||||
if nsOpts.HostPid {
|
||||
hostConfig.PidMode = namespaceModeHost
|
||||
}
|
||||
if nsOpts.HostIpc {
|
||||
hostConfig.IpcMode = namespaceModeHost
|
||||
}
|
||||
if nsOpts.GetPid() == runtimeapi.NamespaceMode_NODE {
|
||||
hostConfig.PidMode = namespaceModeHost
|
||||
}
|
||||
}
|
||||
|
||||
// modifyHostNetworkOptionForSandbox applies NetworkMode/UTSMode to sandbox's dockercontainer.HostConfig.
|
||||
func modifyHostNetworkOptionForSandbox(hostNetwork bool, network *knetwork.PluginManager, hc *dockercontainer.HostConfig) {
|
||||
if hostNetwork {
|
||||
// modifyHostOptionsForSandbox applies NetworkMode/UTSMode to sandbox's dockercontainer.HostConfig.
|
||||
func modifyHostOptionsForSandbox(nsOpts *runtimeapi.NamespaceOption, network *knetwork.PluginManager, hc *dockercontainer.HostConfig) {
|
||||
if nsOpts.GetIpc() == runtimeapi.NamespaceMode_NODE {
|
||||
hc.IpcMode = namespaceModeHost
|
||||
}
|
||||
if nsOpts.GetNetwork() == runtimeapi.NamespaceMode_NODE {
|
||||
hc.NetworkMode = namespaceModeHost
|
||||
return
|
||||
}
|
||||
@ -175,14 +168,14 @@ func modifyHostNetworkOptionForSandbox(hostNetwork bool, network *knetwork.Plugi
|
||||
}
|
||||
}
|
||||
|
||||
// modifyHostNetworkOptionForContainer applies NetworkMode/UTSMode to container's dockercontainer.HostConfig.
|
||||
func modifyHostNetworkOptionForContainer(hostNetwork bool, podSandboxID string, hc *dockercontainer.HostConfig) {
|
||||
// modifyHostOptionsForContainer applies NetworkMode/UTSMode to container's dockercontainer.HostConfig.
|
||||
func modifyHostOptionsForContainer(nsOpts *runtimeapi.NamespaceOption, podSandboxID string, hc *dockercontainer.HostConfig) {
|
||||
sandboxNSMode := fmt.Sprintf("container:%v", podSandboxID)
|
||||
hc.NetworkMode = dockercontainer.NetworkMode(sandboxNSMode)
|
||||
hc.IpcMode = dockercontainer.IpcMode(sandboxNSMode)
|
||||
hc.UTSMode = ""
|
||||
|
||||
if hostNetwork {
|
||||
if nsOpts.GetNetwork() == runtimeapi.NamespaceMode_NODE {
|
||||
hc.UTSMode = namespaceModeHost
|
||||
}
|
||||
}
|
||||
@ -191,14 +184,16 @@ func modifyHostNetworkOptionForContainer(hostNetwork bool, podSandboxID string,
|
||||
// 1. Docker engine prior to API Version 1.24 doesn't support attaching to another container's
|
||||
// PID namespace, and it didn't stabilize until 1.26. This check can be removed when Kubernetes'
|
||||
// minimum Docker version is at least 1.13.1 (API version 1.26).
|
||||
// 2. The administrator has overridden the default behavior by means of a kubelet flag. This is an
|
||||
// "escape hatch" to return to previous behavior of isolated namespaces and should be removed once
|
||||
// no longer needed.
|
||||
func modifyPIDNamespaceOverrides(disableSharedPID bool, version *semver.Version, hc *dockercontainer.HostConfig) {
|
||||
if !strings.HasPrefix(string(hc.PidMode), "container:") {
|
||||
return
|
||||
}
|
||||
if disableSharedPID || version.LT(semver.Version{Major: 1, Minor: 26}) {
|
||||
hc.PidMode = ""
|
||||
// 2. The administrator can override the API behavior by using the deprecated --docker-disable-shared-pid=false
|
||||
// flag. Until this flag is removed, this causes pods to use NamespaceMode_POD instead of
|
||||
// NamespaceMode_CONTAINER regardless of pod configuration.
|
||||
// TODO(verb): remove entirely once these two conditions are satisfied
|
||||
func modifyContainerPIDNamespaceOverrides(disableSharedPID bool, version *semver.Version, hc *dockercontainer.HostConfig, podSandboxID string) {
|
||||
if version.LT(semver.Version{Major: 1, Minor: 26}) {
|
||||
if strings.HasPrefix(string(hc.PidMode), "container:") {
|
||||
hc.PidMode = ""
|
||||
}
|
||||
} else if !disableSharedPID && hc.PidMode == "" {
|
||||
hc.PidMode = dockercontainer.PidMode(fmt.Sprintf("container:%v", podSandboxID))
|
||||
}
|
||||
}
|
||||
|
126
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go
generated
vendored
126
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/security_context_test.go
generated
vendored
@ -25,7 +25,7 @@ import (
|
||||
dockercontainer "github.com/docker/docker/api/types/container"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
func TestModifyContainerConfig(t *testing.T) {
|
||||
@ -228,25 +228,24 @@ func TestModifyHostConfigAndNamespaceOptionsForContainer(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestModifySandboxNamespaceOptions(t *testing.T) {
|
||||
set := true
|
||||
cases := []struct {
|
||||
name string
|
||||
nsOpt *runtimeapi.NamespaceOption
|
||||
expected *dockercontainer.HostConfig
|
||||
}{
|
||||
{
|
||||
name: "NamespaceOption.HostNetwork",
|
||||
name: "Host Network NamespaceOption",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: set,
|
||||
Network: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: namespaceModeHost,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NamespaceOption.HostIpc",
|
||||
name: "Host IPC NamespaceOption",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostIpc: set,
|
||||
Ipc: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
IpcMode: namespaceModeHost,
|
||||
@ -254,9 +253,9 @@ func TestModifySandboxNamespaceOptions(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NamespaceOption.HostPid",
|
||||
name: "Host PID NamespaceOption",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostPid: set,
|
||||
Pid: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
PidMode: namespaceModeHost,
|
||||
@ -272,7 +271,6 @@ func TestModifySandboxNamespaceOptions(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestModifyContainerNamespaceOptions(t *testing.T) {
|
||||
set := true
|
||||
sandboxID := "sandbox"
|
||||
sandboxNSMode := fmt.Sprintf("container:%v", sandboxID)
|
||||
cases := []struct {
|
||||
@ -281,9 +279,9 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
||||
expected *dockercontainer.HostConfig
|
||||
}{
|
||||
{
|
||||
name: "NamespaceOption.HostNetwork",
|
||||
name: "Host Network NamespaceOption",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostNetwork: set,
|
||||
Network: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
@ -293,9 +291,9 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NamespaceOption.HostIpc",
|
||||
name: "Host IPC NamespaceOption",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostIpc: set,
|
||||
Ipc: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
@ -304,9 +302,9 @@ func TestModifyContainerNamespaceOptions(t *testing.T) {
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "NamespaceOption.HostPid",
|
||||
name: "Host PID NamespaceOption",
|
||||
nsOpt: &runtimeapi.NamespaceOption{
|
||||
HostPid: set,
|
||||
Pid: runtimeapi.NamespaceMode_NODE,
|
||||
},
|
||||
expected: &dockercontainer.HostConfig{
|
||||
NetworkMode: dockercontainer.NetworkMode(sandboxNSMode),
|
||||
@ -330,51 +328,93 @@ func TestModifyContainerNamespacePIDOverride(t *testing.T) {
|
||||
input, expected dockercontainer.PidMode
|
||||
}{
|
||||
{
|
||||
name: "SharedPID.Enable",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "container:sandbox",
|
||||
expected: "container:sandbox",
|
||||
},
|
||||
{
|
||||
name: "SharedPID.Disable",
|
||||
name: "mode:CONTAINER docker:NEW flag:UNSET",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "container:sandbox",
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "SharedPID.OldDocker",
|
||||
name: "mode:CONTAINER docker:NEW flag:SET",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "container:sandbox",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "SharedPID.HostPid",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 27},
|
||||
input: "host",
|
||||
expected: "host",
|
||||
},
|
||||
{
|
||||
name: "SharedPID.DistantFuture",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 2, Minor: 10},
|
||||
input: "container:sandbox",
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "",
|
||||
expected: "container:sandbox",
|
||||
},
|
||||
{
|
||||
name: "SharedPID.EmptyPidMode",
|
||||
name: "mode:CONTAINER docker:OLD flag:UNSET",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "mode:CONTAINER docker:OLD flag:SET",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "mode:HOST docker:NEW flag:UNSET",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "host",
|
||||
expected: "host",
|
||||
},
|
||||
{
|
||||
name: "mode:HOST docker:NEW flag:SET",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "host",
|
||||
expected: "host",
|
||||
},
|
||||
{
|
||||
name: "mode:HOST docker:OLD flag:UNSET",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "host",
|
||||
expected: "host",
|
||||
},
|
||||
{
|
||||
name: "mode:HOST docker:OLD flag:SET",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "host",
|
||||
expected: "host",
|
||||
},
|
||||
{
|
||||
name: "mode:POD docker:NEW flag:UNSET",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "container:sandbox",
|
||||
expected: "container:sandbox",
|
||||
},
|
||||
{
|
||||
name: "mode:POD docker:NEW flag:SET",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 26},
|
||||
input: "container:sandbox",
|
||||
expected: "container:sandbox",
|
||||
},
|
||||
{
|
||||
name: "mode:POD docker:OLD flag:UNSET",
|
||||
disable: true,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "container:sandbox",
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "mode:POD docker:OLD flag:SET",
|
||||
disable: false,
|
||||
version: &semver.Version{Major: 1, Minor: 25},
|
||||
input: "container:sandbox",
|
||||
expected: "",
|
||||
},
|
||||
}
|
||||
for _, tc := range cases {
|
||||
dockerCfg := &dockercontainer.HostConfig{PidMode: tc.input}
|
||||
modifyPIDNamespaceOverrides(tc.disable, tc.version, dockerCfg)
|
||||
modifyContainerPIDNamespaceOverrides(tc.disable, tc.version, dockerCfg, "sandbox")
|
||||
assert.Equal(t, tc.expected, dockerCfg.PidMode, "[Test case %q]", tc.name)
|
||||
}
|
||||
}
|
||||
|
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/selinux_util.go
generated
vendored
2
vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/selinux_util.go
generated
vendored
@ -19,7 +19,7 @@ package dockershim
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/v1alpha1/runtime"
|
||||
runtimeapi "k8s.io/kubernetes/pkg/kubelet/apis/cri/runtime/v1alpha2"
|
||||
)
|
||||
|
||||
// selinuxLabelUser returns the fragment of a Docker security opt that
|
||||
|
Reference in New Issue
Block a user