vendor update for CSI 0.3.0

This commit is contained in:
gman
2018-07-18 16:47:22 +02:00
parent 6f484f92fc
commit 8ea659f0d5
6810 changed files with 438061 additions and 193861 deletions


@@ -18663,10 +18663,6 @@
"type": "string",
"description": "PodCIDR represents the pod IP range assigned to the node."
},
"externalID": {
"type": "string",
"description": "External ID of the node assigned by some machine database (e.g. a cloud provider). Deprecated."
},
"providerID": {
"type": "string",
"description": "ID of the node assigned by the cloud provider in the format: \u003cProviderName\u003e://\u003cProviderSpecificNodeID\u003e"
@@ -18685,6 +18681,10 @@
"configSource": {
"$ref": "v1.NodeConfigSource",
"description": "If specified, the source to get node configuration from The DynamicKubeletConfig feature gate must be enabled for the Kubelet to use this field"
},
"externalID": {
"type": "string",
"description": "Deprecated. Not all kubelets will set this field. Remove field after 1.13. see: https://issues.k8s.io/61966"
}
}
},
@@ -18718,16 +18718,40 @@
"id": "v1.NodeConfigSource",
"description": "NodeConfigSource specifies a source of node configuration. Exactly one subfield (excluding metadata) must be non-nil.",
"properties": {
"kind": {
"configMap": {
"$ref": "v1.ConfigMapNodeConfigSource",
"description": "ConfigMap is a reference to a Node's ConfigMap"
}
}
},
"v1.ConfigMapNodeConfigSource": {
"id": "v1.ConfigMapNodeConfigSource",
"description": "ConfigMapNodeConfigSource contains the information to reference a ConfigMap as a config source for the Node.",
"required": [
"namespace",
"name",
"kubeletConfigKey"
],
"properties": {
"namespace": {
"type": "string",
"description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds"
"description": "Namespace is the metadata.namespace of the referenced ConfigMap. This field is required in all cases."
},
"apiVersion": {
"name": {
"type": "string",
"description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources"
"description": "Name is the metadata.name of the referenced ConfigMap. This field is required in all cases."
},
"configMapRef": {
"$ref": "v1.ObjectReference"
"uid": {
"type": "string",
"description": "UID is the metadata.UID of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status."
},
"resourceVersion": {
"type": "string",
"description": "ResourceVersion is the metadata.ResourceVersion of the referenced ConfigMap. This field is forbidden in Node.Spec, and required in Node.Status."
},
"kubeletConfigKey": {
"type": "string",
"description": "KubeletConfigKey declares which key of the referenced ConfigMap corresponds to the KubeletConfiguration structure This field is required in all cases."
}
}
},
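As an illustration (not part of this diff), a Node manifest that assigns kubelet configuration from a ConfigMap through the new ConfigMapNodeConfigSource might look like the sketch below; the node name, ConfigMap coordinates, and key are placeholders, and the DynamicKubeletConfig feature gate must be enabled on the kubelet:

{
  "apiVersion": "v1",
  "kind": "Node",
  "metadata": {"name": "node-1"},
  "spec": {
    "configSource": {
      "configMap": {
        "namespace": "kube-system",
        "name": "node-1-kubelet-config",
        "kubeletConfigKey": "kubelet"
      }
    }
  }
}

uid and resourceVersion are intentionally omitted: per the field descriptions above they are forbidden in Node.Spec and only appear in Node.Status.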
@@ -18789,6 +18813,10 @@
"$ref": "v1.AttachedVolume"
},
"description": "List of volumes that are attached to the node."
},
"config": {
"$ref": "v1.NodeConfigStatus",
"description": "Status of the config assigned to the node via the dynamic Kubelet config feature."
}
}
},
@@ -18969,6 +18997,28 @@
}
}
},
"v1.NodeConfigStatus": {
"id": "v1.NodeConfigStatus",
"description": "NodeConfigStatus describes the status of the config assigned by Node.Spec.ConfigSource.",
"properties": {
"assigned": {
"$ref": "v1.NodeConfigSource",
"description": "Assigned reports the checkpointed config the node will try to use. When Node.Spec.ConfigSource is updated, the node checkpoints the associated config payload to local disk, along with a record indicating intended config. The node refers to this record to choose its config checkpoint, and reports this record in Assigned. Assigned only updates in the status after the record has been checkpointed to disk. When the Kubelet is restarted, it tries to make the Assigned config the Active config by loading and validating the checkpointed payload identified by Assigned."
},
"active": {
"$ref": "v1.NodeConfigSource",
"description": "Active reports the checkpointed config the node is actively using. Active will represent either the current version of the Assigned config, or the current LastKnownGood config, depending on whether attempting to use the Assigned config results in an error."
},
"lastKnownGood": {
"$ref": "v1.NodeConfigSource",
"description": "LastKnownGood reports the checkpointed config the node will fall back to when it encounters an error attempting to use the Assigned config. The Assigned config becomes the LastKnownGood config when the node determines that the Assigned config is stable and correct. This is currently implemented as a 10-minute soak period starting when the local record of Assigned config is updated. If the Assigned config is Active at the end of this period, it becomes the LastKnownGood. Note that if Spec.ConfigSource is reset to nil (use local defaults), the LastKnownGood is also immediately reset to nil, because the local default config is always assumed good. You should not make assumptions about the node's method of determining config stability and correctness, as this may change or become configurable in the future."
},
"error": {
"type": "string",
"description": "Error describes any problems reconciling the Spec.ConfigSource to the Active config. Errors may occur, for example, attempting to checkpoint Spec.ConfigSource to the local Assigned record, attempting to checkpoint the payload associated with Spec.ConfigSource, attempting to load or validate the Assigned config, etc. Errors may occur at different points while syncing config. Earlier errors (e.g. download or checkpointing errors) will not result in a rollback to LastKnownGood, and may resolve across Kubelet retries. Later errors (e.g. loading or validating a checkpointed config) will result in a rollback to LastKnownGood. In the latter case, it is usually possible to resolve the error by fixing the config assigned in Spec.ConfigSource. You can find additional information for debugging by searching the error message in the Kubelet log. Error is a human-readable description of the error state; machines can check whether or not Error is empty, but should not rely on the stability of the Error text across Kubelet versions."
}
}
},
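For orientation, a hedged sketch of what Node.status.config might report once the assigned payload has been checkpointed; the ConfigMap coordinates are placeholders, and note that uid and resourceVersion are required here, unlike in Node.Spec:

{
  "status": {
    "config": {
      "assigned": {
        "configMap": {
          "namespace": "kube-system",
          "name": "node-1-kubelet-config",
          "kubeletConfigKey": "kubelet",
          "uid": "3f0c2d9a-8a6b-11e8-0000-000000000000",
          "resourceVersion": "123456"
        }
      }
    }
  }
}

active has the same shape and mirrors assigned once the config loads successfully; lastKnownGood is populated after the soak period described above.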
"v1.PersistentVolumeClaimList": {
"id": "v1.PersistentVolumeClaimList",
"description": "PersistentVolumeClaimList is a list of PersistentVolumeClaim items.",
@@ -19271,7 +19321,7 @@
"description": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. Provisioned by an admin."
},
"cinder": {
"$ref": "v1.CinderVolumeSource",
"$ref": "v1.CinderPersistentVolumeSource",
"description": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"cephfs": {
@@ -19601,8 +19651,8 @@
}
}
},
"v1.CinderVolumeSource": {
"id": "v1.CinderVolumeSource",
"v1.CinderPersistentVolumeSource": {
"id": "v1.CinderPersistentVolumeSource",
"description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
"required": [
"volumeID"
@@ -19619,6 +19669,10 @@
"readOnly": {
"type": "boolean",
"description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"secretRef": {
"$ref": "v1.SecretReference",
"description": "Optional: points to a secret object containing parameters used to connect to OpenStack."
}
}
},
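A sketch of a PersistentVolume using the renamed CinderPersistentVolumeSource together with the new secretRef; the volume ID and secret coordinates are placeholders:

{
  "apiVersion": "v1",
  "kind": "PersistentVolume",
  "metadata": {"name": "cinder-pv"},
  "spec": {
    "capacity": {"storage": "10Gi"},
    "accessModes": ["ReadWriteOnce"],
    "cinder": {
      "volumeID": "90d6900d-0000-0000-0000-000000000000",
      "fsType": "ext4",
      "secretRef": {"name": "cinder-credentials", "namespace": "kube-system"}
    }
  }
}

Note that secretRef here is a v1.SecretReference (name plus namespace), whereas the pod-level v1.CinderVolumeSource added further down in this diff uses a v1.LocalObjectReference.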
@@ -19949,14 +20003,14 @@
},
"v1.LocalVolumeSource": {
"id": "v1.LocalVolumeSource",
"description": "Local represents directly-attached storage with node affinity",
"description": "Local represents directly-attached storage with node affinity (Beta feature)",
"required": [
"path"
],
"properties": {
"path": {
"type": "string",
"description": "The full path to the volume on the node For alpha, this path must be a directory Once block as a source is supported, then this path can point to a block device"
"description": "The full path to the volume on the node. It can be either a directory or block device (disk, partition, ...). Directories can be represented only by PersistentVolume with VolumeMode=Filesystem. Block devices can be represented only by VolumeMode=Block, which also requires the BlockVolume alpha feature gate to be enabled."
}
}
},
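A sketch of a local PersistentVolume under the updated description; the path, node name, and storage class are placeholders, and local PVs also carry node affinity so the scheduler knows where the disk lives:

{
  "apiVersion": "v1",
  "kind": "PersistentVolume",
  "metadata": {"name": "local-pv"},
  "spec": {
    "capacity": {"storage": "100Gi"},
    "accessModes": ["ReadWriteOnce"],
    "storageClassName": "local-storage",
    "volumeMode": "Filesystem",
    "local": {"path": "/mnt/disks/ssd1"},
    "nodeAffinity": {
      "required": {
        "nodeSelectorTerms": [
          {"matchExpressions": [{"key": "kubernetes.io/hostname", "operator": "In", "values": ["node-1"]}]}
        ]
      }
    }
  }
}

Pointing path at a block device instead requires volumeMode "Block" and, per the description, the BlockVolume alpha feature gate.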
@@ -20008,7 +20062,7 @@
},
"fsType": {
"type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified."
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\"."
},
"volumeAttributes": {
"type": "object",
@@ -20056,17 +20110,21 @@
},
"v1.NodeSelectorTerm": {
"id": "v1.NodeSelectorTerm",
"description": "A null or empty node selector term matches no objects.",
"required": [
"matchExpressions"
],
"description": "A null or empty node selector term matches no objects. The requirements of them are ANDed. The TopologySelectorTerm type implements a subset of the NodeSelectorTerm.",
"properties": {
"matchExpressions": {
"type": "array",
"items": {
"$ref": "v1.NodeSelectorRequirement"
},
"description": "Required. A list of node selector requirements. The requirements are ANDed."
"description": "A list of node selector requirements by node's labels."
},
"matchFields": {
"type": "array",
"items": {
"$ref": "v1.NodeSelectorRequirement"
},
"description": "A list of node selector requirements by node's fields."
}
}
},
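The new matchFields selector can pin a pod to a node by a field rather than a label; metadata.name is the field supported at this point. A hedged sketch (node name and image are placeholders):

{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "pinned-pod"},
  "spec": {
    "affinity": {
      "nodeAffinity": {
        "requiredDuringSchedulingIgnoredDuringExecution": {
          "nodeSelectorTerms": [
            {"matchFields": [{"key": "metadata.name", "operator": "In", "values": ["node-1"]}]}
          ]
        }
      }
    },
    "containers": [{"name": "app", "image": "k8s.gcr.io/pause:3.1"}]
  }
}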
@@ -20302,6 +20360,13 @@
"dnsConfig": {
"$ref": "v1.PodDNSConfig",
"description": "Specifies the DNS parameters of a pod. Parameters specified here will be merged to the generated DNS configuration based on DNSPolicy."
},
"readinessGates": {
"type": "array",
"items": {
"$ref": "v1.PodReadinessGate"
},
"description": "If specified, all readiness gates will be evaluated for pod readiness. A pod is ready when all its containers are ready AND all conditions specified in the readiness gates have status equal to \"True\" More info: https://github.com/kubernetes/community/blob/master/keps/sig-network/0007-pod-ready%2B%2B.md"
}
}
},
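A sketch of the new readinessGates field in use; the condition type is a placeholder chosen by whatever controller will set it:

{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "gated-pod"},
  "spec": {
    "readinessGates": [{"conditionType": "example.com/feature-ready"}],
    "containers": [{"name": "app", "image": "k8s.gcr.io/pause:3.1"}]
  }
}

The pod is only reported Ready once its containers are ready and an external agent has patched a condition of type example.com/feature-ready with status "True" into status.conditions.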
@@ -20334,7 +20399,7 @@
},
"gitRepo": {
"$ref": "v1.GitRepoVolumeSource",
"description": "GitRepo represents a git repository at a particular revision."
"description": "GitRepo represents a git repository at a particular revision. DEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container."
},
"secret": {
"$ref": "v1.SecretVolumeSource",
@@ -20442,7 +20507,7 @@
},
"v1.GitRepoVolumeSource": {
"id": "v1.GitRepoVolumeSource",
"description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.",
"description": "Represents a volume that is populated with the contents of a git repository. Git repo volumes do not support ownership management. Git repo volumes support SELinux relabeling.\n\nDEPRECATED: GitRepo is deprecated. To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones the repo using git, then mount the EmptyDir into the Pod's container.",
"required": [
"repository"
],
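The replacement pattern named in the deprecation notice, sketched as a manifest; the git image, repository URL, and paths are placeholders:

{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "git-clone-example"},
  "spec": {
    "volumes": [{"name": "repo", "emptyDir": {}}],
    "initContainers": [
      {
        "name": "clone",
        "image": "alpine/git",
        "args": ["clone", "--single-branch", "--", "https://github.com/example/repo.git", "/repo"],
        "volumeMounts": [{"name": "repo", "mountPath": "/repo"}]
      }
    ],
    "containers": [
      {
        "name": "app",
        "image": "busybox",
        "command": ["sh", "-c", "ls /repo && sleep 3600"],
        "volumeMounts": [{"name": "repo", "mountPath": "/repo"}]
      }
    ]
  }
}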
@@ -20670,6 +20735,31 @@
}
}
},
"v1.CinderVolumeSource": {
"id": "v1.CinderVolumeSource",
"description": "Represents a cinder volume resource in Openstack. A Cinder volume must exist before mounting to a container. The volume must also be in the same region as the kubelet. Cinder volumes support ownership management and SELinux relabeling.",
"required": [
"volumeID"
],
"properties": {
"volumeID": {
"type": "string",
"description": "volume id used to identify the volume in cinder More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"fsType": {
"type": "string",
"description": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Examples: \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"readOnly": {
"type": "boolean",
"description": "Optional: Defaults to false (read/write). ReadOnly here will force the ReadOnly setting in VolumeMounts. More info: https://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md"
},
"secretRef": {
"$ref": "v1.LocalObjectReference",
"description": "Optional: points to a secret object containing parameters used to connect to OpenStack."
}
}
},
"v1.CephFSVolumeSource": {
"id": "v1.CephFSVolumeSource",
"description": "Represents a Ceph Filesystem mount that lasts the lifetime of a pod Cephfs volumes do not support ownership management or SELinux relabeling.",
@@ -20872,6 +20962,10 @@
"configMap": {
"$ref": "v1.ConfigMapProjection",
"description": "information about the configMap data to project"
},
"serviceAccountToken": {
"$ref": "v1.ServiceAccountTokenProjection",
"description": "information about the serviceAccountToken data to project"
}
}
},
@@ -20930,6 +21024,28 @@
}
}
},
"v1.ServiceAccountTokenProjection": {
"id": "v1.ServiceAccountTokenProjection",
"description": "ServiceAccountTokenProjection represents a projected service account token volume. This projection can be used to insert a service account token into the pods runtime filesystem for use against APIs (Kubernetes API Server or otherwise).",
"required": [
"path"
],
"properties": {
"audience": {
"type": "string",
"description": "Audience is the intended audience of the token. A recipient of a token must identify itself with an identifier specified in the audience of the token, and otherwise should reject the token. The audience defaults to the identifier of the apiserver."
},
"expirationSeconds": {
"type": "integer",
"format": "int64",
"description": "ExpirationSeconds is the requested duration of validity of the service account token. As the token approaches expiration, the kubelet volume plugin will proactively rotate the service account token. The kubelet will start trying to rotate the token if the token is older than 80 percent of its time to live or if the token is older than 24 hours.Defaults to 1 hour and must be at least 10 minutes."
},
"path": {
"type": "string",
"description": "Path is the path relative to the mount point of the file to project the token into."
}
}
},
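A sketch of a projected volume carrying a service account token; the audience, expiry, and paths are placeholders, and the alpha TokenRequestProjection feature gate has to be enabled for the kubelet to honor this source:

{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "token-projection-example"},
  "spec": {
    "containers": [
      {
        "name": "app",
        "image": "k8s.gcr.io/pause:3.1",
        "volumeMounts": [{"name": "vault-token", "mountPath": "/var/run/secrets/tokens"}]
      }
    ],
    "volumes": [
      {
        "name": "vault-token",
        "projected": {
          "sources": [
            {
              "serviceAccountToken": {
                "audience": "vault",
                "expirationSeconds": 7200,
                "path": "vault-token"
              }
            }
          ]
        }
      }
    ]
  }
}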
"v1.ScaleIOVolumeSource": {
"id": "v1.ScaleIOVolumeSource",
"description": "ScaleIOVolumeSource represents a persistent ScaleIO volume",
@@ -21309,7 +21425,7 @@
},
"mountPropagation": {
"$ref": "v1.MountPropagationMode",
"description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationHostToContainer is used. This field is alpha in 1.8 and can be reworked or removed in a future release."
"description": "mountPropagation determines how mounts are propagated from the host to container and the other way around. When not set, MountPropagationNone is used. This field is beta in 1.10."
}
}
},
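A pod.spec fragment showing the now-beta mountPropagation field on a volumeMount; HostToContainer makes mounts created later on the host under /mnt visible inside the container, while Bidirectional additionally propagates container mounts back to the host and requires a privileged container. The path and image are placeholders:

{
  "containers": [
    {
      "name": "app",
      "image": "busybox",
      "command": ["sh", "-c", "sleep 3600"],
      "volumeMounts": [
        {"name": "host-mnt", "mountPath": "/mnt", "mountPropagation": "HostToContainer"}
      ]
    }
  ],
  "volumes": [
    {"name": "host-mnt", "hostPath": {"path": "/mnt"}}
  ]
}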
@@ -21511,6 +21627,11 @@
"format": "int64",
"description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."
},
"runAsGroup": {
"type": "integer",
"format": "int64",
"description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."
},
"runAsNonRoot": {
"type": "boolean",
"description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in PodSecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."
@@ -21584,6 +21705,11 @@
"format": "int64",
"description": "The UID to run the entrypoint of the container process. Defaults to user specified in image metadata if unspecified. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."
},
"runAsGroup": {
"type": "integer",
"format": "int64",
"description": "The GID to run the entrypoint of the container process. Uses runtime default if unset. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence for that container."
},
"runAsNonRoot": {
"type": "boolean",
"description": "Indicates that the container must run as a non-root user. If true, the Kubelet will validate the image at runtime to ensure that it does not run as UID 0 (root) and fail to start the container if it does. If unset or false, no such validation will be performed. May also be set in SecurityContext. If set in both SecurityContext and PodSecurityContext, the value specified in SecurityContext takes precedence."
@@ -21599,6 +21725,31 @@
"type": "integer",
"format": "int64",
"description": "A special supplemental group that applies to all containers in a pod. Some volume types allow the Kubelet to change the ownership of that volume to be owned by the pod:\n\n1. The owning GID will be the FSGroup 2. The setgid bit is set (new files created in the volume will be owned by FSGroup) 3. The permission bits are OR'd with rw-rw "
},
"sysctls": {
"type": "array",
"items": {
"$ref": "v1.Sysctl"
},
"description": "Sysctls hold a list of namespaced sysctls used for the pod. Pods with unsupported sysctls (by the container runtime) might fail to launch."
}
}
},
"v1.Sysctl": {
"id": "v1.Sysctl",
"description": "Sysctl defines a kernel parameter to be set",
"required": [
"name",
"value"
],
"properties": {
"name": {
"type": "string",
"description": "Name of a property to set"
},
"value": {
"type": "string",
"description": "Value of a property to set"
}
}
},
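A sketch of the new pod-level sysctls field; kernel.shm_rmid_forced is one of the sysctls treated as safe, whereas unsafe ones must be explicitly allowed on the kubelet:

{
  "apiVersion": "v1",
  "kind": "Pod",
  "metadata": {"name": "sysctl-example"},
  "spec": {
    "securityContext": {
      "sysctls": [
        {"name": "kernel.shm_rmid_forced", "value": "1"}
      ]
    },
    "containers": [{"name": "app", "image": "k8s.gcr.io/pause:3.1"}]
  }
}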
@@ -21823,13 +21974,26 @@
}
}
},
"v1.PodReadinessGate": {
"id": "v1.PodReadinessGate",
"description": "PodReadinessGate contains the reference to a pod condition",
"required": [
"conditionType"
],
"properties": {
"conditionType": {
"type": "string",
"description": "ConditionType refers to a condition in the pod's condition list with matching type."
}
}
},
"v1.PodStatus": {
"id": "v1.PodStatus",
"description": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system.",
"description": "PodStatus represents information about the status of a pod. Status may trail the actual state of a system, especially if the node that hosts the pod cannot contact the control plane.",
"properties": {
"phase": {
"type": "string",
"description": "Current condition of the pod. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase"
"description": "The phase of a Pod is a simple, high-level summary of where the Pod is in its lifecycle. The conditions array, the reason and message fields, and the individual container status arrays contain more detail about the pod's status. There are five possible phase values:\n\nPending: The pod has been accepted by the Kubernetes system, but one or more of the container images has not been created. This includes time before being scheduled as well as time spent downloading images over the network, which could take a while. Running: The pod has been bound to a node, and all of the containers have been created. At least one container is still running, or is in the process of starting or restarting. Succeeded: All containers in the pod have terminated in success, and will not be restarted. Failed: All containers in the pod have terminated, and at least one container has terminated in failure. The container either exited with non-zero status or was terminated by the system. Unknown: For some reason the state of the pod could not be obtained, typically due to an error in communicating with the host of the pod.\n\nMore info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-phase"
},
"conditions": {
"type": "array",
@@ -21892,7 +22056,7 @@
"properties": {
"type": {
"type": "string",
"description": "Type is the type of the condition. Currently only Ready. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions"
"description": "Type is the type of the condition. More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#pod-conditions"
},
"status": {
"type": "string",
@@ -22393,7 +22557,7 @@
"properties": {
"hard": {
"type": "object",
"description": "Hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/"
"description": "hard is the set of desired hard limits for each named resource. More info: https://kubernetes.io/docs/concepts/policy/resource-quotas/"
},
"scopes": {
"type": "array",
@@ -22401,6 +22565,10 @@
"$ref": "v1.ResourceQuotaScope"
},
"description": "A collection of filters that must match each object tracked by a quota. If not specified, the quota matches all objects."
},
"scopeSelector": {
"$ref": "v1.ScopeSelector",
"description": "scopeSelector is also a collection of filters like scopes that must match each object tracked by a quota but expressed using ScopeSelectorOperator in combination with possible values. For a resource to match, both scopes AND scopeSelector (if specified in spec), must be matched."
}
}
},
@@ -22408,6 +22576,44 @@
"id": "v1.ResourceQuotaScope",
"properties": {}
},
"v1.ScopeSelector": {
"id": "v1.ScopeSelector",
"description": "A scope selector represents the AND of the selectors represented by the scoped-resource selector requirements.",
"properties": {
"matchExpressions": {
"type": "array",
"items": {
"$ref": "v1.ScopedResourceSelectorRequirement"
},
"description": "A list of scope selector requirements by scope of the resources."
}
}
},
"v1.ScopedResourceSelectorRequirement": {
"id": "v1.ScopedResourceSelectorRequirement",
"description": "A scoped-resource selector requirement is a selector that contains values, a scope name, and an operator that relates the scope name and values.",
"required": [
"scopeName",
"operator"
],
"properties": {
"scopeName": {
"type": "string",
"description": "The name of the scope that the selector applies to."
},
"operator": {
"type": "string",
"description": "Represents a scope's relationship to a set of values. Valid operators are In, NotIn, Exists, DoesNotExist."
},
"values": {
"type": "array",
"items": {
"type": "string"
},
"description": "An array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch."
}
}
},
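A sketch of a ResourceQuota using the new scopeSelector to scope limits to pods of a given priority class; the quota values and priority class name are placeholders:

{
  "apiVersion": "v1",
  "kind": "ResourceQuota",
  "metadata": {"name": "high-priority-quota"},
  "spec": {
    "hard": {"pods": "10"},
    "scopeSelector": {
      "matchExpressions": [
        {"scopeName": "PriorityClass", "operator": "In", "values": ["high"]}
      ]
    }
  }
}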
"v1.ResourceQuotaStatus": {
"id": "v1.ResourceQuotaStatus",
"description": "ResourceQuotaStatus defines the enforced hard limits and observed use.",
@@ -22658,7 +22864,7 @@
},
"publishNotReadyAddresses": {
"type": "boolean",
"description": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery. This field will replace the service.alpha.kubernetes.io/tolerate-unready-endpoints when that annotation is deprecated and all clients have been converted to use this field."
"description": "publishNotReadyAddresses, when set to true, indicates that DNS implementations must publish the notReadyAddresses of subsets for the Endpoints associated with the Service. The default value is false. The primary use case for setting this field is to use a StatefulSet's Headless Service to propagate SRV records for its Pods without respect to their readiness for purpose of peer discovery."
},
"sessionAffinityConfig": {
"$ref": "v1.SessionAffinityConfig",