Merge pull request #31 from sbezverk/v0.2.0

Refactor CSI RBD plugin to V0.2.0
This commit is contained in:
Huamin Chen 2018-03-08 13:23:13 -05:00 committed by GitHub
commit d7ebb18122
5870 changed files with 248749 additions and 120008 deletions

3
.gitignore vendored
View File

@ -6,6 +6,9 @@
# docker build # docker build
/deploy/docker/rbdplugin /deploy/docker/rbdplugin
# rbdplugin executable
rbdplugin
# Emacs save files # Emacs save files
*~ *~
\#*\# \#*\#

View File

@ -7,6 +7,6 @@ RUN yum install -y centos-release-ceph && \
yum install -y ceph-common e2fsprogs && \ yum install -y ceph-common e2fsprogs && \
yum clean all yum clean all
COPY rbdplugin /rbdplugin COPY _output/rbdplugin /rbdplugin
RUN chmod +x /rbdplugin RUN chmod +x /rbdplugin
ENTRYPOINT ["/rbdplugin"] ENTRYPOINT ["/rbdplugin"]

112
Gopkg.lock generated
View File

@ -2,10 +2,10 @@
[[projects]] [[projects]]
branch = "master"
name = "github.com/container-storage-interface/spec" name = "github.com/container-storage-interface/spec"
packages = ["lib/go/csi"] packages = ["lib/go/csi/v0"]
revision = "7ab01a90da87f9fef3ee1de0494962fdefaf7db7" revision = "35d9f9d77954980e449e52c3f3e43c21bd8171f5"
version = "v0.2.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -14,16 +14,22 @@
revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998"
[[projects]] [[projects]]
branch = "master"
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] packages = [
revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845" "proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/kubernetes-csi/drivers" name = "github.com/kubernetes-csi/drivers"
packages = ["pkg/csi-common"] packages = ["pkg/csi-common"]
revision = "d1ab787ad5510df08a3a98a091a41adeae4647b4" revision = "1853bd0038cd634f277efda5c6548766a2a51ff3"
[[projects]] [[projects]]
name = "github.com/pborman/uuid" name = "github.com/pborman/uuid"
@ -34,54 +40,110 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"] packages = [
revision = "42fe2e1c20de1054d3d30f82cc9fb5b41e2e3767" "context",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "d25186b37f34ebdbbea8f488ef055638dfab272d"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = ["unix"]
revision = "1792d66dc88e503d3cb2400578221cdf1f7fe26f" revision = "dd2ff4accc098aceecb86b36eaa7829b2a17b1c9"
[[projects]] [[projects]]
branch = "master"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] packages = [
revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3" "collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "google.golang.org/genproto" name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"] packages = ["googleapis/rpc/status"]
revision = "a8101f21cf983e773d0c1133ebc5424792003214" revision = "df60624c1e9b9d2973e889c7a1cff73155da81c4"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"] packages = [
revision = "f3955b8e9e244dd4dd4bc4f7b7a23a8445400a76" ".",
version = "v1.9.0" "balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
version = "v1.10.0"
[[projects]] [[projects]]
branch = "release-1.9"
name = "k8s.io/apimachinery" name = "k8s.io/apimachinery"
packages = ["pkg/util/runtime","pkg/util/sets","pkg/util/wait"] packages = [
revision = "68f9c3a1feb3140df59c67ced62d3a5df8e6c9c2" "pkg/util/runtime",
"pkg/util/sets",
"pkg/util/wait"
]
revision = "302974c03f7e50f16561ba237db776ab93594ef6"
version = "kubernetes-1.10.0-beta.1"
[[projects]] [[projects]]
name = "k8s.io/kubernetes" name = "k8s.io/kubernetes"
packages = ["pkg/util/io","pkg/util/keymutex","pkg/util/mount","pkg/util/nsenter"] packages = [
revision = "3a1c9449a956b6026f075fa3134ff92f7d55f812" "pkg/util/io",
version = "v1.9.1" "pkg/util/keymutex",
"pkg/util/mount",
"pkg/util/nsenter"
]
revision = "37555e6d24c2f951c40660ea59a80fa251982005"
version = "v1.10.0-beta.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "k8s.io/utils" name = "k8s.io/utils"
packages = ["exec"] packages = ["exec"]
revision = "a99a3e11a96751670db62ba77c6d278d1136931e" revision = "258e2a2fa64568210fbd6267cf1d8fd87c3cb86e"
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "8908f89154f277d98fd83b22edf73652d4c4e37bbd827bf11d9605c58ae3fd0e" inputs-digest = "d409396ee410b5443abac732df725cb70ef9e0c940c1b807c0ff2698bab5d102"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1

View File

@ -1,23 +1,31 @@
[[constraint]]
name = "github.com/container-storage-interface/spec"
version = "~0.2.0"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
name = "github.com/container-storage-interface/spec" name = "github.com/kubernetes-csi/drivers"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
name = "github.com/golang/glog" name = "github.com/golang/glog"
[[constraint]] [[override]]
name = "google.golang.org/grpc" revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d"
version = "1.7.2" name = "github.com/docker/distribution"
[[constraint]] [[constraint]]
name = "github.com/docker/distribution" name = "google.golang.org/grpc"
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" version = "1.10.0"
[[constraint]]
version = "kubernetes-1.10.0-beta.1"
name = "k8s.io/apimachinery"
[[constraint]] [[constraint]]
name = "k8s.io/kubernetes" name = "k8s.io/kubernetes"
version = "v1.9.1" version = "v1.10.0-beta.1"
[[constraint]] [[override]]
name = "k8s.io/apimachinery" version = "kubernetes-1.10.0-beta.1"
version = "kubernetes-1.9.1" name = "k8s.io/api"

View File

@ -28,12 +28,10 @@ rbdplugin:
CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./rbd CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/rbdplugin ./rbd
container: rbdplugin container: rbdplugin
cp _output/rbdplugin deploy/docker docker build -t $(IMAGE_NAME):$(IMAGE_VERSION) .
docker build -t $(IMAGE_NAME):$(IMAGE_VERSION) deploy/docker
push-container: container push-container: container
docker push $(IMAGE_NAME):$(IMAGE_VERSION) docker push $(IMAGE_NAME):$(IMAGE_VERSION)
clean: clean:
go clean -r -x go clean -r -x
rm -f deploy/docker/rbdplugin
-rm -rf _output -rm -rf _output

View File

@ -110,7 +110,7 @@ The following output should be displayed:
``` ```
NAMESPACE NAME READY STATUS RESTARTS AGE NAMESPACE NAME READY STATUS RESTARTS AGE
default csi-attacher-0 1/1 Running 0 1d default csi-attacher-0 1/1 Running 0 1d
default csi-nodeplugin-rbdplugin-qxqtl 2/2 Running 0 1d default csi-rbdplugin-qxqtl 2/2 Running 0 1d
default csi-provisioner-0 1/1 Running 0 1d default csi-provisioner-0 1/1 Running 0 1d
``` ```

View File

@ -12,6 +12,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: external-attacher-runner name: external-attacher-runner
rules: rules:
- apiGroups: [""]
resources: ["events"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: [""] - apiGroups: [""]
resources: ["persistentvolumes"] resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "update"] verbs: ["get", "list", "watch", "update"]
@ -66,19 +69,19 @@ spec:
serviceAccount: csi-attacher serviceAccount: csi-attacher
containers: containers:
- name: csi-attacher - name: csi-attacher
image: docker.io/k8scsi/csi-attacher:latest image: quay.io/k8scsi/csi-attacher:v0.2.0
args: args:
- "--v=5" - "--v=5"
- "--csi-address=$(ADDRESS)" - "--csi-address=$(ADDRESS)"
env: env:
- name: ADDRESS - name: ADDRESS
value: /var/lib/kubelet/plugins/rbdplugin/csi.sock value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent" imagePullPolicy: "IfNotPresent"
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/kubelet/plugins/rbdplugin mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes: volumes:
- name: socket-dir - name: socket-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins/rbdplugin path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate type: DirectoryOrCreate

View File

@ -18,6 +18,9 @@ apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: external-provisioner-runner name: external-provisioner-runner
rules: rules:
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""] - apiGroups: [""]
resources: ["persistentvolumes"] resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"] verbs: ["get", "list", "watch", "create", "delete"]
@ -75,20 +78,20 @@ spec:
serviceAccount: csi-provisioner serviceAccount: csi-provisioner
containers: containers:
- name: csi-provisioner - name: csi-provisioner
image: quay.io/k8scsi/csi-provisioner:latest image: quay.io/k8scsi/csi-provisioner:v0.2.0
args: args:
- "--provisioner=rbdplugin" - "--provisioner=csi-rbdplugin"
- "--csi-address=$(ADDRESS)" - "--csi-address=$(ADDRESS)"
- "--v=5" - "--v=5"
env: env:
- name: ADDRESS - name: ADDRESS
value: /var/lib/kubelet/plugins/rbdplugin/csi.sock value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent" imagePullPolicy: "IfNotPresent"
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/kubelet/plugins/rbdplugin mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
volumes: volumes:
- name: socket-dir - name: socket-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins/rbdplugin path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate type: DirectoryOrCreate

View File

@ -1,19 +1,10 @@
apiVersion: v1 apiVersion: v1
kind: Secret kind: Secret
metadata: metadata:
name: ceph-secret-admin name: csi-ceph-secret
namespace: kube-system namespace: default
type: "kubernetes.io/rbd"
data: data:
#Please note this value is base64 encoded. #Please note this value is base64 encoded.
key: QVFDZUhPMVpJTFBQRFJBQTd6dzNkNzZicGxrdlR3em9vc3lidkE9PQo= # Key value corresponds to a user name defined in ceph cluster
type: kubernetes.io/rbd admin: QVFDZUhPMVpJTFBQRFJBQTd6dzNkNzZicGxrdlR3em9vc3lidkE9PQo=
--- kubernetes: QVFDZDR1MVoxSDI0QnhBQWFxdmZIRnFuMSs0RFZlK1pRZ0ZmUEE9PQo=
apiVersion: v1
kind: Secret
metadata:
name: ceph-secret-user
type: "kubernetes.io/rbd"
data:
#Please note this value is base64 encoded.
key: QVFDZDR1MVoxSDI0QnhBQWFxdmZIRnFuMSs0RFZlK1pRZ0ZmUEE9PQo=

View File

@ -1,13 +1,11 @@
apiVersion: storage.k8s.io/v1 apiVersion: storage.k8s.io/v1
kind: StorageClass kind: StorageClass
metadata: metadata:
name: rbd name: csi-rbd
provisioner: rbdplugin provisioner: csi-rbdplugin
parameters: parameters:
monitors: 192.168.80.233:6789 monitors: 192.168.80.233:6789
pool: kubernetes pool: kubernetes
adminID: admin csiProvisionerSecretName: csi-ceph-secret
adminSecret: AQAmsGBap4EoBhAAET/Hc7fBqAZj/cy7cDcoQA== csiProvisionerSecretNamespace: default
userID: kube
userSecret: AQAMgXhVwBCeDhAA9nlPaFyfUSatGD4drFWDvQ==
reclaimPolicy: Delete reclaimPolicy: Delete

View File

@ -3,20 +3,17 @@
apiVersion: v1 apiVersion: v1
kind: ServiceAccount kind: ServiceAccount
metadata: metadata:
name: csi-nodeplugin name: csi-rbdplugin
--- ---
kind: ClusterRole kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: csi-nodeplugin name: csi-rbdplugin
rules: rules:
- apiGroups: [""] - apiGroups: [""]
resources: ["nodes"] resources: ["nodes"]
verbs: ["get", "list", "update"] verbs: ["get", "list", "update"]
- apiGroups: [""]
resources: ["secrets"]
verbs: ["get", "list"]
- apiGroups: [""] - apiGroups: [""]
resources: ["namespaces"] resources: ["namespaces"]
verbs: ["get", "list"] verbs: ["get", "list"]
@ -30,14 +27,14 @@ rules:
kind: ClusterRoleBinding kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1 apiVersion: rbac.authorization.k8s.io/v1
metadata: metadata:
name: csi-nodeplugin name: csi-rbdplugin
subjects: subjects:
- kind: ServiceAccount - kind: ServiceAccount
name: csi-nodeplugin name: csi-rbdplugin
namespace: default namespace: default
roleRef: roleRef:
kind: ClusterRole kind: ClusterRole
name: csi-nodeplugin name: csi-rbdplugin
apiGroup: rbac.authorization.k8s.io apiGroup: rbac.authorization.k8s.io
--- ---
@ -47,35 +44,35 @@ roleRef:
kind: DaemonSet kind: DaemonSet
apiVersion: apps/v1beta2 apiVersion: apps/v1beta2
metadata: metadata:
name: csi-nodeplugin-rbdplugin name: csi-rbdplugin
spec: spec:
selector: selector:
matchLabels: matchLabels:
app: csi-nodeplugin-rbdplugin app: csi-rbdplugin
template: template:
metadata: metadata:
labels: labels:
app: csi-nodeplugin-rbdplugin app: csi-rbdplugin
spec: spec:
serviceAccount: csi-nodeplugin serviceAccount: csi-rbdplugin
hostNetwork: true hostNetwork: true
containers: containers:
- name: driver-registrar - name: driver-registrar
image: docker.io/k8scsi/driver-registrar:latest image: quay.io/k8scsi/driver-registrar:v0.2.0
args: args:
- "--v=5" - "--v=5"
- "--csi-address=$(ADDRESS)" - "--csi-address=$(ADDRESS)"
env: env:
- name: ADDRESS - name: ADDRESS
value: /var/lib/kubelet/plugins/rbdplugin/csi.sock value: /var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
- name: KUBE_NODE_NAME - name: KUBE_NODE_NAME
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
volumeMounts: volumeMounts:
- name: socket-dir - name: socket-dir
mountPath: /var/lib/kubelet/plugins/rbdplugin mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
- name: rbdplugin - name: csi-rbdplugin
securityContext: securityContext:
privileged: true privileged: true
capabilities: capabilities:
@ -86,18 +83,18 @@ spec:
- "--nodeid=$(NODE_ID)" - "--nodeid=$(NODE_ID)"
- "--endpoint=$(CSI_ENDPOINT)" - "--endpoint=$(CSI_ENDPOINT)"
- "--v=5" - "--v=5"
- "--drivername=rbdplugin" - "--drivername=csi-rbdplugin"
env: env:
- name: NODE_ID - name: NODE_ID
valueFrom: valueFrom:
fieldRef: fieldRef:
fieldPath: spec.nodeName fieldPath: spec.nodeName
- name: CSI_ENDPOINT - name: CSI_ENDPOINT
value: unix://var/lib/kubelet/plugins/rbdplugin/csi.sock value: unix://var/lib/kubelet/plugins/csi-rbdplugin/csi.sock
imagePullPolicy: "IfNotPresent" imagePullPolicy: "IfNotPresent"
volumeMounts: volumeMounts:
- name: plugin-dir - name: plugin-dir
mountPath: /var/lib/kubelet/plugins/rbdplugin mountPath: /var/lib/kubelet/plugins/csi-rbdplugin
- name: pods-mount-dir - name: pods-mount-dir
mountPath: /var/lib/kubelet/pods mountPath: /var/lib/kubelet/pods
mountPropagation: "Bidirectional" mountPropagation: "Bidirectional"
@ -111,7 +108,7 @@ spec:
volumes: volumes:
- name: plugin-dir - name: plugin-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins/rbdplugin path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate type: DirectoryOrCreate
- name: pods-mount-dir - name: pods-mount-dir
hostPath: hostPath:
@ -119,7 +116,7 @@ spec:
type: Directory type: Directory
- name: socket-dir - name: socket-dir
hostPath: hostPath:
path: /var/lib/kubelet/plugins/rbdplugin path: /var/lib/kubelet/plugins/csi-rbdplugin
type: DirectoryOrCreate type: DirectoryOrCreate
- name: host-dev - name: host-dev
hostPath: hostPath:
@ -129,4 +126,4 @@ spec:
path: /sys path: /sys
- name: lib-modules - name: lib-modules
hostPath: hostPath:
path: /lib/modules path: /lib/modules

View File

@ -20,12 +20,13 @@ import (
"fmt" "fmt"
"path" "path"
"github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
"github.com/pborman/uuid" "github.com/pborman/uuid"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes"
"github.com/container-storage-interface/spec/lib/go/csi" "google.golang.org/grpc/status"
"github.com/kubernetes-csi/drivers/pkg/csi-common"
) )
const ( const (
@ -36,17 +37,42 @@ type controllerServer struct {
*csicommon.DefaultControllerServer *csicommon.DefaultControllerServer
} }
func GetVersionString(ver *csi.Version) string {
return fmt.Sprintf("%d.%d.%d", ver.Major, ver.Minor, ver.Patch)
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req) glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err return nil, err
} }
// Check sanity of request Name, Volume Capabilities
if len(req.Name) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume Name cannot be empty")
}
if req.VolumeCapabilities == nil {
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities cannot be empty")
}
volOptions, err := getRBDVolumeOptions(req.GetParameters()) // Need to check for already existing volume name, and if found
// check for the requested capacity and already allocated capacity
if exVol, err := getRBDVolumeByName(req.GetName()); err == nil {
// Since err is nil, it means the volume with the same name already exists
// need to check if the size of the existing volume is the same as in the new
// request
if exVol.VolSize >= int64(req.GetCapacityRange().GetRequiredBytes()) {
// existing volume is compatible with new request and should be reused.
// TODO (sbezverk) Do I need to make sure that RBD volume still exists?
return &csi.CreateVolumeResponse{
Volume: &csi.Volume{
Id: exVol.VolID,
CapacityBytes: int64(exVol.VolSize),
Attributes: req.GetParameters(),
},
}, nil
}
return nil, status.Error(codes.AlreadyExists, fmt.Sprintf("Volume with the same name: %s but with different size already exists", req.GetName()))
}
// TODO (sbezverk) Last check for not exceeding total storage capacity
rbdVol, err := getRBDVolumeOptions(req.GetParameters())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -55,21 +81,23 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
volName := req.GetName() volName := req.GetName()
uniqueID := uuid.NewUUID().String() uniqueID := uuid.NewUUID().String()
if len(volName) == 0 { if len(volName) == 0 {
volName = volOptions.Pool + "-dynamic-pvc-" + uniqueID volName = rbdVol.Pool + "-dynamic-pvc-" + uniqueID
} }
volOptions.VolName = volName rbdVol.VolName = volName
volumeID := "csi-rbd-" + uniqueID volumeID := "csi-rbd-" + uniqueID
rbdVol.VolID = volumeID
// Volume Size - Default is 1 GiB // Volume Size - Default is 1 GiB
volSizeBytes := int64(oneGB) volSizeBytes := int64(oneGB)
if req.GetCapacityRange() != nil { if req.GetCapacityRange() != nil {
volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes()) volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes())
} }
rbdVol.VolSize = volSizeBytes
volSizeGB := int(volSizeBytes / 1024 / 1024 / 1024) volSizeGB := int(volSizeBytes / 1024 / 1024 / 1024)
// Check if there is already RBD image with requested name // Check if there is already RBD image with requested name
found, _, _ := rbdStatus(volOptions) found, _, _ := rbdStatus(rbdVol, req.GetControllerCreateSecrets())
if !found { if !found {
if err := createRBDImage(volOptions, volSizeGB); err != nil { if err := createRBDImage(rbdVol, volSizeGB, req.GetControllerCreateSecrets()); err != nil {
if err != nil { if err != nil {
glog.Warningf("failed to create volume: %v", err) glog.Warningf("failed to create volume: %v", err)
return nil, err return nil, err
@ -77,12 +105,11 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
} }
glog.V(4).Infof("create volume %s", volName) glog.V(4).Infof("create volume %s", volName)
} }
// Storing volInfo into a persistent file, will need info to delete rbd image // Storing volInfo into a persistent file.
// in ControllerUnpublishVolume if err := persistVolInfo(volumeID, path.Join(PluginFolder, "controller"), rbdVol); err != nil {
if err := persistVolInfo(volumeID, path.Join(PluginFolder, "controller"), volOptions); err != nil {
glog.Warningf("rbd: failed to store volInfo with error: %v", err) glog.Warningf("rbd: failed to store volInfo with error: %v", err)
} }
rbdVolumes[volumeID] = *rbdVol
return &csi.CreateVolumeResponse{ return &csi.CreateVolumeResponse{
Volume: &csi.Volume{ Volume: &csi.Volume{
Id: volumeID, Id: volumeID,
@ -93,23 +120,21 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
} }
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.Warningf("invalid delete volume req: %v", req) glog.Warningf("invalid delete volume req: %v", req)
return nil, err return nil, err
} }
// For now the image get unconditionally deleted, but here retention policy can be checked // For now the image get unconditionally deleted, but here retention policy can be checked
volumeID := req.GetVolumeId() volumeID := req.GetVolumeId()
volOptions := &rbdVolumeOptions{} rbdVol := &rbdVolume{}
if err := loadVolInfo(volumeID, path.Join(PluginFolder, "controller"), volOptions); err != nil { if err := loadVolInfo(volumeID, path.Join(PluginFolder, "controller"), rbdVol); err != nil {
return nil, err return nil, err
} }
volName := rbdVol.VolName
volName := volOptions.VolName
// Deleting rbd image // Deleting rbd image
glog.V(4).Infof("deleting volume %s", volName) glog.V(4).Infof("deleting volume %s", volName)
if err := deleteRBDImage(volOptions); err != nil { if err := deleteRBDImage(rbdVol, req.GetControllerDeleteSecrets()); err != nil {
glog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", volOptions.Pool, volName, err) glog.V(3).Infof("failed to delete rbd image: %s/%s with error: %v", rbdVol.Pool, volName, err)
return nil, err return nil, err
} }
// Removing persistent storage file for the unmapped volume // Removing persistent storage file for the unmapped volume
@ -117,6 +142,7 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
return nil, err return nil, err
} }
delete(rbdVolumes, volumeID)
return &csi.DeleteVolumeResponse{}, nil return &csi.DeleteVolumeResponse{}, nil
} }
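For reference, CSI v0.2.0 drops the per-RPC `Version` field and renames the `*_credentials` maps to `*_secrets`, which is why `CreateVolume`/`DeleteVolume` above now read `req.GetControllerCreateSecrets()` and `req.GetControllerDeleteSecrets()`. A minimal sketch (not part of this diff) of a CO-side call against the new surface; the socket path, pool and monitors come from the deploy files in this PR, and the secret value is a placeholder:

```go
// Sketch of a CO-side CreateVolume call against the v0.2.0 API this PR moves to.
// Pool/monitors mirror the sample StorageClass; the secret value is a placeholder.
package main

import (
	"context"
	"log"
	"net"
	"time"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
	"google.golang.org/grpc"
)

func main() {
	// Dial the plugin's unix socket (path from the deploy manifests above).
	sock := "/var/lib/kubelet/plugins/csi-rbdplugin/csi.sock"
	conn, err := grpc.Dial(sock, grpc.WithInsecure(),
		grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
			return net.DialTimeout("unix", addr, timeout)
		}))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	resp, err := csi.NewControllerClient(conn).CreateVolume(context.Background(), &csi.CreateVolumeRequest{
		Name:          "pvc-example",
		CapacityRange: &csi.CapacityRange{RequiredBytes: 1 << 30}, // 1 GiB
		VolumeCapabilities: []*csi.VolumeCapability{{
			AccessType: &csi.VolumeCapability_Mount{Mount: &csi.VolumeCapability_MountVolume{FsType: "ext4"}},
			AccessMode: &csi.VolumeCapability_AccessMode{Mode: csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER},
		}},
		Parameters: map[string]string{"monitors": "192.168.80.233:6789", "pool": "kubernetes"},
		// v0.2.0: secrets replace the old controller_create_credentials field;
		// the key must match the ceph user name expected by the plugin ("admin").
		ControllerCreateSecrets: map[string]string{"admin": "<decoded ceph admin key>"},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("created volume %s", resp.GetVolume().GetId())
}
```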

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -67,7 +67,7 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
} }
volOptions.VolName = volName volOptions.VolName = volName
// Mapping RBD image // Mapping RBD image
devicePath, err := attachRBDImage(volOptions) devicePath, err := attachRBDImage(volOptions, req.GetNodePublishSecrets())
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -130,3 +130,19 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
func (ns *nodeServer) NodeStageVolume(
ctx context.Context,
req *csi.NodeStageVolumeRequest) (
*csi.NodeStageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}
func (ns *nodeServer) NodeUnstageVolume(
ctx context.Context,
req *csi.NodeUnstageVolumeRequest) (
*csi.NodeUnstageVolumeResponse, error) {
return nil, status.Error(codes.Unimplemented, "")
}

View File

@ -17,15 +17,25 @@ limitations under the License.
package rbd package rbd
import ( import (
"encoding/json"
"io/ioutil"
"os"
"path"
"strings"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/kubernetes-csi/drivers/pkg/csi-common"
) )
// PluginFolder defines the location of rbdplugin // PluginFolder defines the location of rbdplugin
const ( const (
PluginFolder = "/var/lib/kubelet/plugins/rbdplugin" PluginFolder = "/var/lib/kubelet/plugins/csi-rbdplugin"
// RBDUserID is used as a key in the credentials map to extract the key which is
// passed by the provisioner; the value of RBDUserID must match the key used
// in the Secret object.
RBDUserID = "admin"
) )
type rbd struct { type rbd struct {
@ -41,13 +51,54 @@ type rbd struct {
var ( var (
rbdDriver *rbd rbdDriver *rbd
version = csi.Version{ version = "0.2.0"
Minor: 2,
}
) )
func GetSupportedVersions() []*csi.Version { var rbdVolumes map[string]rbdVolume
return []*csi.Version{&version}
// Init checks for the persistent volume file and loads all found volumes
// into a memory structure
func init() {
rbdVolumes = map[string]rbdVolume{}
if _, err := os.Stat(path.Join(PluginFolder, "controller")); os.IsNotExist(err) {
glog.Infof("rbd: folder %s not found. Creating... \n", path.Join(PluginFolder, "controller"))
if err := os.Mkdir(path.Join(PluginFolder, "controller"), 0755); err != nil {
glog.Fatalf("Failed to create a controller's volumes folder with error: %v\n", err)
}
return
}
// Since the "controller" folder exists, the rbdplugin has already been running; there might be
// some volumes left that must be re-inserted into the rbdVolumes map
loadExVolumes()
}
// loadExVolumes checks for any *.json files in the PluginFolder/controller folder
// and loads them into the rbdVolumes map
func loadExVolumes() {
rbdVol := rbdVolume{}
files, err := ioutil.ReadDir(path.Join(PluginFolder, "controller"))
if err != nil {
glog.Infof("rbd: failed to read controller's volumes folder: %s error:%v", path.Join(PluginFolder, "controller"), err)
return
}
for _, f := range files {
if !strings.HasSuffix(f.Name(), ".json") {
continue
}
fp, err := os.Open(path.Join(PluginFolder, "controller", f.Name()))
if err != nil {
glog.Infof("rbd: open file: %s err %%v", f.Name(), err)
continue
}
decoder := json.NewDecoder(fp)
if err = decoder.Decode(&rbdVol); err != nil {
glog.Infof("rbd: decode file: %s err: %v", f.Name(), err)
fp.Close()
continue
}
rbdVolumes[rbdVol.VolID] = rbdVol
}
glog.Infof("rbd: Loaded %d volumes from %s", len(rbdVolumes), path.Join(PluginFolder, "controller"))
} }
func GetRBDDriver() *rbd { func GetRBDDriver() *rbd {
@ -73,10 +124,10 @@ func NewNodeServer(d *csicommon.CSIDriver) *nodeServer {
} }
func (rbd *rbd) Run(driverName, nodeID, endpoint string) { func (rbd *rbd) Run(driverName, nodeID, endpoint string) {
glog.Infof("Driver: %v version: %v", driverName, GetVersionString(&version)) glog.Infof("Driver: %v version: %v", driverName, version)
// Initialize default library driver // Initialize default library driver
rbd.driver = csicommon.NewCSIDriver(driverName, &version, GetSupportedVersions(), nodeID) rbd.driver = csicommon.NewCSIDriver(driverName, version, nodeID)
if rbd.driver == nil { if rbd.driver == nil {
glog.Fatalln("Failed to initialize CSI Driver.") glog.Fatalln("Failed to initialize CSI Driver.")
} }

View File

@ -19,15 +19,16 @@ package rbd
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"github.com/golang/glog"
"io/ioutil" "io/ioutil"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/keymutex"
"os" "os"
"os/exec" "os/exec"
"path" "path"
"strings" "strings"
"time" "time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/kubernetes/pkg/util/keymutex"
) )
const ( const (
@ -44,22 +45,29 @@ const (
rbdImageWatcherSteps = 10 rbdImageWatcherSteps = 10
) )
type rbdVolumeOptions struct { type rbdVolume struct {
VolName string `json:"volName"` VolName string `json:"volName"`
Monitors string `json:"monitors"` VolID string `json:"volID"`
Pool string `json:"pool"` Monitors string `json:"monitors"`
AdminID string `json:"adminID"` Pool string `json:"pool"`
AdminSecret string `json:"adminSecret"` ImageFormat string `json:"imageFormat"`
UserID string `json:"userID"` // TODO (sbezverk) check if it is used and how
UserSecret string `json:"userSecret"`
ImageFormat string `json:"imageFormat"`
ImageFeatures []string `json:"imageFeatures"` ImageFeatures []string `json:"imageFeatures"`
VolSize int64 `json:"volSize"`
} }
var attachdetachMutex = keymutex.NewKeyMutex() var attachdetachMutex = keymutex.NewKeyMutex()
func getRBDKey(id string, credentials map[string]string) (string, error) {
if key, ok := credentials[id]; ok {
return key, nil
}
return "", fmt.Errorf("RBD key for ID: %s not found", id)
}
// CreateImage creates a new ceph image with provision and volume options. // CreateImage creates a new ceph image with provision and volume options.
func createRBDImage(pOpts *rbdVolumeOptions, volSz int) error { func createRBDImage(pOpts *rbdVolume, volSz int, credentials map[string]string) error {
var output []byte var output []byte
var err error var err error
@ -68,12 +76,16 @@ func createRBDImage(pOpts *rbdVolumeOptions, volSz int) error {
image := pOpts.VolName image := pOpts.VolName
volSzGB := fmt.Sprintf("%dG", volSz) volSzGB := fmt.Sprintf("%dG", volSz)
if pOpts.ImageFormat == rbdImageFormat2 { key, err := getRBDKey(RBDUserID, credentials)
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool, pOpts.AdminID, pOpts.AdminSecret) if err != nil {
} else { return err
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool, pOpts.AdminID, pOpts.AdminSecret)
} }
args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", pOpts.AdminID, "-m", mon, "--key=" + pOpts.AdminSecret, "--image-format", pOpts.ImageFormat} if pOpts.ImageFormat == rbdImageFormat2 {
glog.V(4).Infof("rbd: create %s size %s format %s (features: %s) using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, pOpts.ImageFeatures, mon, pOpts.Pool, RBDUserID, key)
} else {
glog.V(4).Infof("rbd: create %s size %s format %s using mon %s, pool %s id %s key %s", image, volSzGB, pOpts.ImageFormat, mon, pOpts.Pool, RBDUserID, key)
}
args := []string{"create", image, "--size", volSzGB, "--pool", pOpts.Pool, "--id", RBDUserID, "-m", mon, "--key=" + key, "--image-format", pOpts.ImageFormat}
if pOpts.ImageFormat == rbdImageFormat2 { if pOpts.ImageFormat == rbdImageFormat2 {
// if no image features is provided, it results in empty string // if no image features is provided, it results in empty string
// which disable all RBD image format 2 features as we expected // which disable all RBD image format 2 features as we expected
@ -91,22 +103,20 @@ func createRBDImage(pOpts *rbdVolumeOptions, volSz int) error {
// rbdStatus checks if there is a watcher on the image. // rbdStatus checks if there is a watcher on the image.
// It returns true if there is a watcher on the image, otherwise returns false. // It returns true if there is a watcher on the image, otherwise returns false.
func rbdStatus(b *rbdVolumeOptions) (bool, string, error) { func rbdStatus(pOpts *rbdVolume, credentials map[string]string) (bool, string, error) {
var err error var err error
var output string var output string
var cmd []byte var cmd []byte
image := b.VolName image := pOpts.VolName
// If we don't have admin id/secret (e.g. attaching), fallback to user id/secret. // If we don't have admin id/secret (e.g. attaching), fallback to user id/secret.
id := b.AdminID key, err := getRBDKey(RBDUserID, credentials)
secret := b.AdminSecret if err != nil {
if id == "" { return false, "", err
id = b.UserID
secret = b.UserSecret
} }
glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, b.Monitors, b.Pool, id, secret) glog.V(4).Infof("rbd: status %s using mon %s, pool %s id %s key %s", image, pOpts.Monitors, pOpts.Pool, RBDUserID, key)
args := []string{"status", image, "--pool", b.Pool, "-m", b.Monitors, "--id", id, "--key=" + secret} args := []string{"status", image, "--pool", pOpts.Pool, "-m", pOpts.Monitors, "--id", RBDUserID, "--key=" + key}
cmd, err = execCommand("rbd", args) cmd, err = execCommand("rbd", args)
output = string(cmd) output = string(cmd)
@ -133,10 +143,10 @@ func rbdStatus(b *rbdVolumeOptions) (bool, string, error) {
} }
// DeleteImage deletes a ceph image with provision and volume options. // DeleteImage deletes a ceph image with provision and volume options.
func deleteRBDImage(b *rbdVolumeOptions) error { func deleteRBDImage(pOpts *rbdVolume, credentials map[string]string) error {
var output []byte var output []byte
image := b.VolName image := pOpts.VolName
found, _, err := rbdStatus(b) found, _, err := rbdStatus(pOpts, credentials)
if err != nil { if err != nil {
return err return err
} }
@ -144,15 +154,13 @@ func deleteRBDImage(b *rbdVolumeOptions) error {
glog.Info("rbd is still being used ", image) glog.Info("rbd is still being used ", image)
return fmt.Errorf("rbd %s is still being used", image) return fmt.Errorf("rbd %s is still being used", image)
} }
id := b.AdminID key, err := getRBDKey(RBDUserID, credentials)
secret := b.AdminSecret if err != nil {
if id == "" { return err
id = b.UserID
secret = b.UserSecret
} }
glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, b.Monitors, b.Pool, id, secret) glog.V(4).Infof("rbd: rm %s using mon %s, pool %s id %s key %s", image, pOpts.Monitors, pOpts.Pool, RBDUserID, key)
args := []string{"rm", image, "--pool", b.Pool, "--id", id, "-m", b.Monitors, "--key=" + secret} args := []string{"rm", image, "--pool", pOpts.Pool, "--id", RBDUserID, "-m", pOpts.Monitors, "--key=" + key}
output, err = execCommand("rbd", args) output, err = execCommand("rbd", args)
if err == nil { if err == nil {
return nil return nil
@ -166,42 +174,26 @@ func execCommand(command string, args []string) ([]byte, error) {
return cmd.CombinedOutput() return cmd.CombinedOutput()
} }
func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolumeOptions, error) { func getRBDVolumeOptions(volOptions map[string]string) (*rbdVolume, error) {
rbdVolume := &rbdVolumeOptions{}
var ok bool var ok bool
rbdVolume.AdminID, ok = volOptions["adminID"] rbdVol := &rbdVolume{}
if !ok { rbdVol.Pool, ok = volOptions["pool"]
return nil, fmt.Errorf("Missing required parameter adminID")
}
rbdVolume.AdminSecret, ok = volOptions["adminSecret"]
if !ok {
return nil, fmt.Errorf("Missing required parameter adminSecret")
}
rbdVolume.Pool, ok = volOptions["pool"]
if !ok { if !ok {
return nil, fmt.Errorf("Missing required parameter pool") return nil, fmt.Errorf("Missing required parameter pool")
} }
rbdVolume.Monitors, ok = volOptions["monitors"] rbdVol.Monitors, ok = volOptions["monitors"]
if !ok { if !ok {
return nil, fmt.Errorf("Missing required parameter monitors") return nil, fmt.Errorf("Missing required parameter monitors")
} }
rbdVolume.UserID, ok = volOptions["userID"] rbdVol.ImageFormat, ok = volOptions["imageFormat"]
if !ok { if !ok {
return nil, fmt.Errorf("Missing required parameter userID") rbdVol.ImageFormat = "2"
}
rbdVolume.UserSecret, ok = volOptions["userSecret"]
if !ok {
return nil, fmt.Errorf("Missing required parameter userSecret")
}
rbdVolume.ImageFormat, ok = volOptions["imageFormat"]
if !ok {
rbdVolume.ImageFormat = "2"
} }
return rbdVolume, nil return rbdVol, nil
} }
func attachRBDImage(volOptions *rbdVolumeOptions) (string, error) { func attachRBDImage(volOptions *rbdVolume, credentials map[string]string) (string, error) {
var err error var err error
var output []byte var output []byte
@ -222,7 +214,7 @@ func attachRBDImage(volOptions *rbdVolumeOptions) (string, error) {
Steps: rbdImageWatcherSteps, Steps: rbdImageWatcherSteps,
} }
err := wait.ExponentialBackoff(backoff, func() (bool, error) { err := wait.ExponentialBackoff(backoff, func() (bool, error) {
used, rbdOutput, err := rbdStatus(volOptions) used, rbdOutput, err := rbdStatus(volOptions, credentials)
if err != nil { if err != nil {
return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput) return false, fmt.Errorf("fail to check rbd image status with: (%v), rbd output: (%s)", err, rbdOutput)
} }
@ -238,11 +230,13 @@ func attachRBDImage(volOptions *rbdVolumeOptions) (string, error) {
} }
glog.V(1).Infof("rbd: map mon %s", volOptions.Monitors) glog.V(1).Infof("rbd: map mon %s", volOptions.Monitors)
id := volOptions.UserID key, err := getRBDKey(RBDUserID, credentials)
secret := volOptions.UserSecret if err != nil {
return "", err
}
output, err = execCommand("rbd", []string{ output, err = execCommand("rbd", []string{
"map", image, "--pool", volOptions.Pool, "--id", id, "-m", volOptions.Monitors, "--key=" + secret}) "map", image, "--pool", volOptions.Pool, "--id", RBDUserID, "-m", volOptions.Monitors, "--key=" + key})
if err != nil { if err != nil {
glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output)) glog.V(1).Infof("rbd: map error %v, rbd output: %s", err, string(output))
return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output)) return "", fmt.Errorf("rbd: map failed %v, rbd output: %s", err, string(output))
@ -322,23 +316,24 @@ func waitForPath(pool, image string, maxRetries int) (string, bool) {
return "", false return "", false
} }
func persistVolInfo(image string, persistentStoragePath string, volInfo *rbdVolumeOptions) error { func persistVolInfo(image string, persistentStoragePath string, volInfo *rbdVolume) error {
file := path.Join(persistentStoragePath, image+".json") file := path.Join(persistentStoragePath, image+".json")
fp, err := os.Create(file) fp, err := os.Create(file)
if err != nil { if err != nil {
glog.Errorf("rbd: failed to create persistent storage file %s with error: %v\n", file, err)
return fmt.Errorf("rbd: create err %s/%s", file, err) return fmt.Errorf("rbd: create err %s/%s", file, err)
} }
defer fp.Close() defer fp.Close()
encoder := json.NewEncoder(fp) encoder := json.NewEncoder(fp)
if err = encoder.Encode(volInfo); err != nil { if err = encoder.Encode(volInfo); err != nil {
return fmt.Errorf("rbd: encode err: %v.", err) glog.Errorf("rbd: failed to encode volInfo: %+v for file: %s with error: %v\n", volInfo, file, err)
return fmt.Errorf("rbd: encode err: %v", err)
} }
glog.Infof("rbd: successfully saved volInfo: %+v into file: %s\n", volInfo, file)
return nil return nil
} }
func loadVolInfo(image string, persistentStoragePath string, volInfo *rbdVolumeOptions) error { func loadVolInfo(image string, persistentStoragePath string, volInfo *rbdVolume) error {
file := path.Join(persistentStoragePath, image+".json") file := path.Join(persistentStoragePath, image+".json")
fp, err := os.Open(file) fp, err := os.Open(file)
if err != nil { if err != nil {
@ -356,11 +351,28 @@ func loadVolInfo(image string, persistentStoragePath string, volInfo *rbdVolumeO
func deleteVolInfo(image string, persistentStoragePath string) error { func deleteVolInfo(image string, persistentStoragePath string) error {
file := path.Join(persistentStoragePath, image+".json") file := path.Join(persistentStoragePath, image+".json")
glog.Infof("rbd: Deleting file for Volume: %s at: %s resulting path: %+v\n", image, persistentStoragePath, file)
err := os.Remove(file) err := os.Remove(file)
if err != nil { if err != nil {
if err != os.ErrNotExist { if err != os.ErrNotExist {
return fmt.Errorf("rbd: open err %s/%s", file, err) return fmt.Errorf("rbd: error removing file: %s/%s", file, err)
} }
} }
return nil return nil
} }
func getRBDVolumeByID(volumeID string) (rbdVolume, error) {
if rbdVol, ok := rbdVolumes[volumeID]; ok {
return rbdVol, nil
}
return rbdVolume{}, fmt.Errorf("volume id %s does not exit in the volumes list", volumeID)
}
func getRBDVolumeByName(volName string) (rbdVolume, error) {
for _, rbdVol := range rbdVolumes {
if rbdVol.VolName == volName {
return rbdVol, nil
}
}
return rbdVolume{}, fmt.Errorf("volume name %s does not exit in the volumes list", volName)
}
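For reference, the secrets flow assumed by `getRBDKey`: the CO base64-decodes the `csi-ceph-secret` data and hands it to the plugin as a plain string map keyed by the data field names ("admin", "kubernetes" in the sample secret), and the plugin picks the entry for `RBDUserID`. A minimal sketch with a hypothetical helper inside the `rbd` package; the key values are placeholders:

```go
// Sketch: how the secrets map handed to createRBDImage/rbdStatus/deleteRBDImage
// is expected to look. The CO decodes the Kubernetes Secret, so the values here
// are the raw ceph keyring keys (placeholders below), keyed by the ceph user name.
func exampleKeyLookup() error {
	credentials := map[string]string{
		"admin":      "<decoded client.admin key>",
		"kubernetes": "<decoded client.kubernetes key>",
	}
	key, err := getRBDKey(RBDUserID, credentials) // RBDUserID == "admin"
	if err != nil {
		return err // the Secret is missing an entry for the expected ceph user
	}
	_ = key // handed to the rbd CLI as: --id admin --key=<key>
	return nil
}
```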

View File

@ -31,7 +31,7 @@ func init() {
var ( var (
endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint") endpoint = flag.String("endpoint", "unix://tmp/csi.sock", "CSI endpoint")
driverName = flag.String("drivername", "rbdplugin", "name of the driver") driverName = flag.String("drivername", "csi-rbdplugin", "name of the driver")
nodeID = flag.String("nodeid", "", "node id") nodeID = flag.String("nodeid", "", "node id")
) )

View File

@ -1,2 +1,3 @@
/csi.proto.tmp *.tmp
.DS_Store .DS_Store
.build

View File

@ -8,9 +8,7 @@ CSI_PROTO := csi.proto
# The temporary file is not versioned, and thus will always be # The temporary file is not versioned, and thus will always be
# built on Travis-CI. # built on Travis-CI.
$(CSI_PROTO).tmp: $(CSI_SPEC) $(CSI_PROTO).tmp: $(CSI_SPEC)
cat $? | \ cat $? | sed -n -e '/```protobuf$$/,/^```$$/ p' | sed '/^```/d' > "$@"
sed -n -e '/```protobuf$$/,/```$$/ p' | \
sed -e 's@^```.*$$@////////@g' > $@
# This is the target for building the CSI protobuf file. # This is the target for building the CSI protobuf file.
# #
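The new rule keeps only the lines between the ```protobuf fences and drops the fences themselves, instead of replacing fence lines with //////// markers as before (hence the //////// lines disappearing from csi.proto below). A rough Go equivalent of the extraction, assuming the spec markdown lives in spec.md:

```go
// Rough Go equivalent of: sed -n -e '/```protobuf$/,/^```$/ p' | sed '/^```/d'
// Reads the CSI spec markdown and prints only the protobuf blocks, without fences.
package main

import (
	"bufio"
	"fmt"
	"log"
	"os"
	"strings"
)

func main() {
	f, err := os.Open("spec.md") // path assumed; the Makefile uses $(CSI_SPEC)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	in := false
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := sc.Text()
		switch {
		case !in && strings.HasSuffix(line, "```protobuf"):
			in = true // entering a protobuf block; the opening fence is dropped
		case in && line == "```":
			in = false // closing fence, also dropped
		case in:
			fmt.Println(line)
		}
	}
	if err := sc.Err(); err != nil {
		log.Fatal(err)
	}
}
```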

View File

@ -1,14 +1,16 @@
////////
syntax = "proto3"; syntax = "proto3";
package csi; package csi.v0;
////////
////////
service Identity {
rpc GetSupportedVersions (GetSupportedVersionsRequest)
returns (GetSupportedVersionsResponse) {}
option go_package = "csi";
service Identity {
rpc GetPluginInfo(GetPluginInfoRequest) rpc GetPluginInfo(GetPluginInfoRequest)
returns (GetPluginInfoResponse) {} returns (GetPluginInfoResponse) {}
rpc GetPluginCapabilities(GetPluginCapabilitiesRequest)
returns (GetPluginCapabilitiesResponse) {}
rpc Probe (ProbeRequest)
returns (ProbeResponse) {}
} }
service Controller { service Controller {
@ -33,14 +35,17 @@ service Controller {
rpc GetCapacity (GetCapacityRequest) rpc GetCapacity (GetCapacityRequest)
returns (GetCapacityResponse) {} returns (GetCapacityResponse) {}
rpc ControllerProbe (ControllerProbeRequest)
returns (ControllerProbeResponse) {}
rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest) rpc ControllerGetCapabilities (ControllerGetCapabilitiesRequest)
returns (ControllerGetCapabilitiesResponse) {} returns (ControllerGetCapabilitiesResponse) {}
} }
service Node { service Node {
rpc NodeStageVolume (NodeStageVolumeRequest)
returns (NodeStageVolumeResponse) {}
rpc NodeUnstageVolume (NodeUnstageVolumeRequest)
returns (NodeUnstageVolumeResponse) {}
rpc NodePublishVolume (NodePublishVolumeRequest) rpc NodePublishVolume (NodePublishVolumeRequest)
returns (NodePublishVolumeResponse) {} returns (NodePublishVolumeResponse) {}
@ -50,38 +55,10 @@ service Node {
rpc NodeGetId (NodeGetIdRequest) rpc NodeGetId (NodeGetIdRequest)
returns (NodeGetIdResponse) {} returns (NodeGetIdResponse) {}
rpc NodeProbe (NodeProbeRequest)
returns (NodeProbeResponse) {}
rpc NodeGetCapabilities (NodeGetCapabilitiesRequest) rpc NodeGetCapabilities (NodeGetCapabilitiesRequest)
returns (NodeGetCapabilitiesResponse) {} returns (NodeGetCapabilitiesResponse) {}
} }
////////
////////
message GetSupportedVersionsRequest {
}
message GetSupportedVersionsResponse {
// All the CSI versions that the Plugin supports. This field is
// REQUIRED.
repeated Version supported_versions = 1;
}
// Specifies a version in Semantic Version 2.0 format.
// (http://semver.org/spec/v2.0.0.html)
message Version {
// The value of this field MUST NOT be negative.
int32 major = 1; // This field is REQUIRED.
// The value of this field MUST NOT be negative.
int32 minor = 2; // This field is REQUIRED.
// The value of this field MUST NOT be negative.
int32 patch = 3; // This field is REQUIRED.
}
////////
////////
message GetPluginInfoRequest { message GetPluginInfoRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
} }
message GetPluginInfoResponse { message GetPluginInfoResponse {
@ -100,12 +77,46 @@ message GetPluginInfoResponse {
// This field is OPTIONAL. Values are opaque to the CO. // This field is OPTIONAL. Values are opaque to the CO.
map<string, string> manifest = 3; map<string, string> manifest = 3;
} }
//////// message GetPluginCapabilitiesRequest {
//////// }
message CreateVolumeRequest {
// The API version assumed by the CO. This field is REQUIRED.
Version version = 1;
message GetPluginCapabilitiesResponse {
// All the capabilities that the controller service supports. This
// field is OPTIONAL.
repeated PluginCapability capabilities = 2;
}
// Specifies a capability of the plugin.
message PluginCapability {
message Service {
enum Type {
UNKNOWN = 0;
// CONTROLLER_SERVICE indicates that the Plugin provides RPCs for
// the ControllerService. Plugins SHOULD provide this capability.
// In rare cases certain plugins may wish to omit the
// ControllerService entirely from their implementation, but such
// SHOULD NOT be the common case.
// The presence of this capability determines whether the CO will
// attempt to invoke the REQUIRED ControllerService RPCs, as well
// as specific RPCs as indicated by ControllerGetCapabilities.
CONTROLLER_SERVICE = 1;
}
Type type = 1;
}
oneof type {
// Service that the plugin supports.
Service service = 1;
}
}
message ProbeRequest {
}
message ProbeResponse {
// Intentionally empty.
}
message CreateVolumeRequest {
// The suggested name for the storage space. This field is REQUIRED. // The suggested name for the storage space. This field is REQUIRED.
// It serves two purposes: // It serves two purposes:
// 1) Idempotency - This name is generated by the CO to achieve // 1) Idempotency - This name is generated by the CO to achieve
@ -121,12 +132,12 @@ message CreateVolumeRequest {
// an identifier by which to refer to the newly provisioned // an identifier by which to refer to the newly provisioned
// storage. If a storage system supports this, it can optionally // storage. If a storage system supports this, it can optionally
// use this name as the identifier for the new volume. // use this name as the identifier for the new volume.
string name = 2; string name = 1;
// This field is OPTIONAL. This allows the CO to specify the capacity // This field is OPTIONAL. This allows the CO to specify the capacity
// requirement of the volume to be provisioned. If not specified, the // requirement of the volume to be provisioned. If not specified, the
// Plugin MAY choose an implementation-defined capacity range. // Plugin MAY choose an implementation-defined capacity range.
CapacityRange capacity_range = 3; CapacityRange capacity_range = 2;
// The capabilities that the provisioned volume MUST have: the Plugin // The capabilities that the provisioned volume MUST have: the Plugin
// MUST provision a volume that could satisfy ALL of the // MUST provision a volume that could satisfy ALL of the
@ -136,25 +147,31 @@ message CreateVolumeRequest {
// early validation: if ANY of the specified volume capabilities are // early validation: if ANY of the specified volume capabilities are
// not supported by the Plugin, the call SHALL fail. This field is // not supported by the Plugin, the call SHALL fail. This field is
// REQUIRED. // REQUIRED.
repeated VolumeCapability volume_capabilities = 4; repeated VolumeCapability volume_capabilities = 3;
// Plugin specific parameters passed in as opaque key-value pairs. // Plugin specific parameters passed in as opaque key-value pairs.
// This field is OPTIONAL. The Plugin is responsible for parsing and // This field is OPTIONAL. The Plugin is responsible for parsing and
// validating these parameters. COs will treat these as opaque. // validating these parameters. COs will treat these as opaque.
map<string, string> parameters = 5; map<string, string> parameters = 4;
// Credentials used by Controller plugin to authenticate/authorize // Secrets required by plugin to complete volume creation request.
// volume creation request. // A secret is a string to string map where the key identifies the
// This field contains credential data, for example username and // name of the secret (e.g. "username" or "password"), and the value
// password. Each key must consist of alphanumeric characters, '-', // contains the secret data (e.g. "bob" or "abc123").
// '_' or '.'. Each value MUST contain a valid string. An SP MAY // Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
// choose to accept binary (non-string) data by using a binary-to-text // Each value MUST contain a valid string. An SP MAY choose to accept
// encoding scheme, like base64. An SP SHALL advertise the // binary (non-string) data by using a binary-to-text encoding scheme,
// requirements for credentials in documentation. COs SHALL permit // like base64.
// passing through the required credentials. This information is // An SP SHALL advertise the requirements for required secret keys and
// sensitive and MUST be treated as such (not logged, etc.) by the CO. // values in documentation.
// CO SHALL permit passing through the required secrets.
// A CO MAY pass the same secrets to all RPCs, therefore the keys for
// all unique secrets that an SP expects must be unique across all CSI
// operations.
// This information is sensitive and MUST be treated as such (not
// logged, etc.) by the CO.
// This field is OPTIONAL. // This field is OPTIONAL.
map<string, string> controller_create_credentials = 6; map<string, string> controller_create_secrets = 5;
} }
message CreateVolumeResponse { message CreateVolumeResponse {
@ -263,126 +280,131 @@ message Volume {
// be passed to volume validation and publishing calls. // be passed to volume validation and publishing calls.
map<string,string> attributes = 3; map<string,string> attributes = 3;
} }
////////
////////
message DeleteVolumeRequest { message DeleteVolumeRequest {
// The API version assumed by the CO. This field is REQUIRED.
Version version = 1;
// The ID of the volume to be deprovisioned. // The ID of the volume to be deprovisioned.
// This field is REQUIRED. // This field is REQUIRED.
string volume_id = 2; string volume_id = 1;
// Credentials used by Controller plugin to authenticate/authorize // Secrets required by plugin to complete volume deletion request.
// volume deletion request. // A secret is a string to string map where the key identifies the
// This field contains credential data, for example username and // name of the secret (e.g. "username" or "password"), and the value
// password. Each key must consist of alphanumeric characters, '-', // contains the secret data (e.g. "bob" or "abc123").
// '_' or '.'. Each value MUST contain a valid string. An SP MAY // Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
// choose to accept binary (non-string) data by using a binary-to-text // Each value MUST contain a valid string. An SP MAY choose to accept
// encoding scheme, like base64. An SP SHALL advertise the // binary (non-string) data by using a binary-to-text encoding scheme,
// requirements for credentials in documentation. COs SHALL permit // like base64.
// passing through the required credentials. This information is // An SP SHALL advertise the requirements for required secret keys and
// sensitive and MUST be treated as such (not logged, etc.) by the CO. // values in documentation.
// CO SHALL permit passing through the required secrets.
// A CO MAY pass the same secrets to all RPCs, therefore the keys for
// all unique secrets that an SP expects must be unique across all CSI
// operations.
// This information is sensitive and MUST be treated as such (not
// logged, etc.) by the CO.
// This field is OPTIONAL. // This field is OPTIONAL.
map<string, string> controller_delete_credentials = 3; map<string, string> controller_delete_secrets = 2;
} }
message DeleteVolumeResponse {} message DeleteVolumeResponse {
//////// }
////////
message ControllerPublishVolumeRequest { message ControllerPublishVolumeRequest {
// The API version assumed by the CO. This field is REQUIRED.
Version version = 1;
// The ID of the volume to be used on a node. // The ID of the volume to be used on a node.
// This field is REQUIRED. // This field is REQUIRED.
string volume_id = 2; string volume_id = 1;
// The ID of the node. This field is REQUIRED. The CO SHALL set this // The ID of the node. This field is REQUIRED. The CO SHALL set this
// field to match the node ID returned by `NodeGetId`. // field to match the node ID returned by `NodeGetId`.
string node_id = 3; string node_id = 2;
// The capability of the volume the CO expects the volume to have. // The capability of the volume the CO expects the volume to have.
// This is a REQUIRED field. // This is a REQUIRED field.
VolumeCapability volume_capability = 4; VolumeCapability volume_capability = 3;
// Whether to publish the volume in readonly mode. This field is // Whether to publish the volume in readonly mode. This field is
// REQUIRED. // REQUIRED.
bool readonly = 5; bool readonly = 4;
// Credentials used by Controller plugin to authenticate/authorize // Secrets required by plugin to complete controller publish volume
// controller publish request. // request.
// This field contains credential data, for example username and // A secret is a string to string map where the key identifies the
// password. Each key must consist of alphanumeric characters, '-', // name of the secret (e.g. "username" or "password"), and the value
// '_' or '.'. Each value MUST contain a valid string. An SP MAY // contains the secret data (e.g. "bob" or "abc123").
// choose to accept binary (non-string) data by using a binary-to-text // Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
// encoding scheme, like base64. An SP SHALL advertise the // Each value MUST contain a valid string. An SP MAY choose to accept
// requirements for credentials in documentation. COs SHALL permit // binary (non-string) data by using a binary-to-text encoding scheme,
// passing through the required credentials. This information is // like base64.
// sensitive and MUST be treated as such (not logged, etc.) by the CO. // An SP SHALL advertise the requirements for required secret keys and
// values in documentation.
// CO SHALL permit passing through the required secrets.
// A CO MAY pass the same secrets to all RPCs, therefore the keys for
// all unique secrets that an SP expects must be unique across all CSI
// operations.
// This information is sensitive and MUST be treated as such (not
// logged, etc.) by the CO.
// This field is OPTIONAL. // This field is OPTIONAL.
map<string, string> controller_publish_credentials = 6; map<string, string> controller_publish_secrets = 5;
// Attributes of the volume to be used on a node. This field is // Attributes of the volume to be used on a node. This field is
// OPTIONAL and MUST match the attributes of the Volume identified // OPTIONAL and MUST match the attributes of the Volume identified
// by `volume_id`. // by `volume_id`.
map<string,string> volume_attributes = 7; map<string,string> volume_attributes = 6;
} }
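
As a rough sketch of the renamed secrets field in use, the snippet below builds a ControllerPublishVolumeRequest with the v0.2 Go bindings vendored by this PR. The generated field names (VolumeId, ControllerPublishSecrets, ...) follow the usual protoc-gen-go mapping, the IDs and secret values are purely illustrative, and volume_capability is omitted for brevity even though the spec requires it.

package main

import (
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

func main() {
	// Secret keys may only use alphanumerics, '-', '_' and '.'; values must
	// be valid strings, so binary data would need a text encoding such as
	// base64 before being placed here.
	req := &csi.ControllerPublishVolumeRequest{
		VolumeId: "vol-1", // illustrative IDs
		NodeId:   "node-1",
		Readonly: false,
		ControllerPublishSecrets: map[string]string{
			"username": "bob",
			"password": "abc123",
		},
	}
	fmt.Println(req.GetControllerPublishSecrets()["username"])
}
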
message ControllerPublishVolumeResponse { message ControllerPublishVolumeResponse {
// The SP specific information that will be passed to the Plugin in // The SP specific information that will be passed to the Plugin in
// the subsequent `NodePublishVolume` call for the given volume. // the subsequent `NodeStageVolume` or `NodePublishVolume` calls
// for the given volume.
// This information is opaque to the CO. This field is OPTIONAL. // This information is opaque to the CO. This field is OPTIONAL.
map<string, string> publish_info = 1; map<string, string> publish_info = 1;
} }
////////
////////
message ControllerUnpublishVolumeRequest { message ControllerUnpublishVolumeRequest {
// The API version assumed by the CO. This field is REQUIRED.
Version version = 1;
// The ID of the volume. This field is REQUIRED. // The ID of the volume. This field is REQUIRED.
string volume_id = 2; string volume_id = 1;
// The ID of the node. This field is OPTIONAL. The CO SHOULD set this // The ID of the node. This field is OPTIONAL. The CO SHOULD set this
// field to match the node ID returned by `NodeGetId` or leave it // field to match the node ID returned by `NodeGetId` or leave it
// unset. If the value is set, the SP MUST unpublish the volume from // unset. If the value is set, the SP MUST unpublish the volume from
// the specified node. If the value is unset, the SP MUST unpublish // the specified node. If the value is unset, the SP MUST unpublish
// the volume from all nodes it is published to. // the volume from all nodes it is published to.
string node_id = 3; string node_id = 2;
// Credentials used by Controller plugin to authenticate/authorize // Secrets required by plugin to complete controller unpublish volume
// controller unpublish request. // request. This SHOULD be the same secrets passed to the
// This field contains credential data, for example username and // ControllerPublishVolume
// password. Each key must consist of alphanumeric characters, '-', // call for the specified volume.
// '_' or '.'. Each value MUST contain a valid string. An SP MAY // A secret is a string to string map where the key identifies the
// choose to accept binary (non-string) data by using a binary-to-text // name of the secret (e.g. "username" or "password"), and the value
// encoding scheme, like base64. An SP SHALL advertise the // contains the secret data (e.g. "bob" or "abc123").
// requirements for credentials in documentation. COs SHALL permit // Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
// passing through the required credentials. This information is // Each value MUST contain a valid string. An SP MAY choose to accept
// sensitive and MUST be treated as such (not logged, etc.) by the CO. // binary (non-string) data by using a binary-to-text encoding scheme,
// like base64.
// An SP SHALL advertise the requirements for required secret keys and
// values in documentation.
// CO SHALL permit passing through the required secrets.
// A CO MAY pass the same secrets to all RPCs, therefore the keys for
// all unique secrets that an SP expects must be unique across all CSI
// operations.
// This information is sensitive and MUST be treated as such (not
// logged, etc.) by the CO.
// This field is OPTIONAL. // This field is OPTIONAL.
map<string, string> controller_unpublish_credentials = 4; map<string, string> controller_unpublish_secrets = 3;
} }
message ControllerUnpublishVolumeResponse {} message ControllerUnpublishVolumeResponse {
//////// }
////////
message ValidateVolumeCapabilitiesRequest { message ValidateVolumeCapabilitiesRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
// The ID of the volume to check. This field is REQUIRED. // The ID of the volume to check. This field is REQUIRED.
string volume_id = 2; string volume_id = 1;
// The capabilities that the CO wants to check for the volume. This // The capabilities that the CO wants to check for the volume. This
// call SHALL return "supported" only if all the volume capabilities // call SHALL return "supported" only if all the volume capabilities
// specified below are supported. This field is REQUIRED. // specified below are supported. This field is REQUIRED.
repeated VolumeCapability volume_capabilities = 3; repeated VolumeCapability volume_capabilities = 2;
// Attributes of the volume to check. This field is OPTIONAL and MUST // Attributes of the volume to check. This field is OPTIONAL and MUST
// match the attributes of the Volume identified by `volume_id`. // match the attributes of the Volume identified by `volume_id`.
map<string,string> volume_attributes = 4; map<string,string> volume_attributes = 3;
} }
message ValidateVolumeCapabilitiesResponse { message ValidateVolumeCapabilitiesResponse {
@ -395,12 +417,7 @@ message ValidateVolumeCapabilitiesResponse {
// An empty string is equal to an unspecified field value. // An empty string is equal to an unspecified field value.
string message = 2; string message = 2;
} }
////////
////////
message ListVolumesRequest { message ListVolumesRequest {
// The API version assumed by the CO. This field is REQUIRED.
Version version = 1;
// If specified (non-zero value), the Plugin MUST NOT return more // If specified (non-zero value), the Plugin MUST NOT return more
// entries than this number in the response. If the actual number of // entries than this number in the response. If the actual number of
// entries is more than this number, the Plugin MUST set `next_token` // entries is more than this number, the Plugin MUST set `next_token`
@ -409,13 +426,13 @@ message ListVolumesRequest {
// not specified (zero value), it means there is no restriction on the // not specified (zero value), it means there is no restriction on the
// number of entries that can be returned. // number of entries that can be returned.
// The value of this field MUST NOT be negative. // The value of this field MUST NOT be negative.
int32 max_entries = 2; int32 max_entries = 1;
// A token to specify where to start paginating. Set this field to // A token to specify where to start paginating. Set this field to
// `next_token` returned by a previous `ListVolumes` call to get the // `next_token` returned by a previous `ListVolumes` call to get the
// next page of entries. This field is OPTIONAL. // next page of entries. This field is OPTIONAL.
// An empty string is equal to an unspecified field value. // An empty string is equal to an unspecified field value.
string starting_token = 3; string starting_token = 2;
} }
message ListVolumesResponse { message ListVolumesResponse {
@ -433,24 +450,19 @@ message ListVolumesResponse {
// An empty string is equal to an unspecified field value. // An empty string is equal to an unspecified field value.
string next_token = 2; string next_token = 2;
} }
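
The max_entries / starting_token / next_token trio above gives ListVolumes cursor-style pagination. A hedged sketch of the resulting client loop, assuming the generated csi.ControllerClient interface and an already-established gRPC connection:

package csisketch

import (
	"context"
	"fmt"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

// listAllVolumes pages through ListVolumes until next_token comes back empty.
func listAllVolumes(ctx context.Context, c csi.ControllerClient) error {
	token := "" // empty string means "start at the beginning"
	for {
		resp, err := c.ListVolumes(ctx, &csi.ListVolumesRequest{
			MaxEntries:    100, // the plugin may return fewer, never more
			StartingToken: token,
		})
		if err != nil {
			return err
		}
		for _, e := range resp.GetEntries() {
			fmt.Println(e.GetVolume().GetId())
		}
		token = resp.GetNextToken()
		if token == "" {
			return nil // no more pages
		}
	}
}
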
////////
////////
message GetCapacityRequest { message GetCapacityRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
// If specified, the Plugin SHALL report the capacity of the storage // If specified, the Plugin SHALL report the capacity of the storage
// that can be used to provision volumes that satisfy ALL of the // that can be used to provision volumes that satisfy ALL of the
// specified `volume_capabilities`. These are the same // specified `volume_capabilities`. These are the same
// `volume_capabilities` the CO will use in `CreateVolumeRequest`. // `volume_capabilities` the CO will use in `CreateVolumeRequest`.
// This field is OPTIONAL. // This field is OPTIONAL.
repeated VolumeCapability volume_capabilities = 2; repeated VolumeCapability volume_capabilities = 1;
// If specified, the Plugin SHALL report the capacity of the storage // If specified, the Plugin SHALL report the capacity of the storage
// that can be used to provision volumes with the given Plugin // that can be used to provision volumes with the given Plugin
// specific `parameters`. These are the same `parameters` the CO will // specific `parameters`. These are the same `parameters` the CO will
// use in `CreateVolumeRequest`. This field is OPTIONAL. // use in `CreateVolumeRequest`. This field is OPTIONAL.
map<string, string> parameters = 3; map<string, string> parameters = 2;
} }
message GetCapacityResponse { message GetCapacityResponse {
@ -462,19 +474,7 @@ message GetCapacityResponse {
// The value of this field MUST NOT be negative. // The value of this field MUST NOT be negative.
int64 available_capacity = 1; int64 available_capacity = 1;
} }
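
GetCapacity now takes the capability and parameter filters directly, with no version field. A small client-side sketch; the helper name and arguments are illustrative:

package csisketch

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

// reportedCapacity asks the plugin how many bytes could still be provisioned
// for volumes matching the given capabilities and parameters; both filters
// are optional and may be nil.
func reportedCapacity(ctx context.Context, c csi.ControllerClient, caps []*csi.VolumeCapability, params map[string]string) (int64, error) {
	resp, err := c.GetCapacity(ctx, &csi.GetCapacityRequest{
		VolumeCapabilities: caps,
		Parameters:         params,
	})
	if err != nil {
		return 0, err
	}
	return resp.GetAvailableCapacity(), nil
}
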
////////
////////
message ControllerProbeRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
}
message ControllerProbeResponse {}
////////
////////
message ControllerGetCapabilitiesRequest { message ControllerGetCapabilitiesRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
} }
message ControllerGetCapabilitiesResponse { message ControllerGetCapabilitiesResponse {
@ -502,21 +502,85 @@ message ControllerServiceCapability {
RPC rpc = 1; RPC rpc = 1;
} }
} }
//////// message NodeStageVolumeRequest {
////////
message NodePublishVolumeRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
// The ID of the volume to publish. This field is REQUIRED. // The ID of the volume to publish. This field is REQUIRED.
string volume_id = 2; string volume_id = 1;
// The CO SHALL set this field to the value returned by // The CO SHALL set this field to the value returned by
// `ControllerPublishVolume` if the corresponding Controller Plugin // `ControllerPublishVolume` if the corresponding Controller Plugin
// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be // has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
// left unset if the corresponding Controller Plugin does not have // left unset if the corresponding Controller Plugin does not have
// this capability. This is an OPTIONAL field. // this capability. This is an OPTIONAL field.
map<string, string> publish_info = 3; map<string, string> publish_info = 2;
// The path to which the volume will be published. It MUST be an
// absolute path in the root filesystem of the process serving this
// request. The CO SHALL ensure that there is only one
// staging_target_path per volume.
// This is a REQUIRED field.
string staging_target_path = 3;
// The capability of the volume the CO expects the volume to have.
// This is a REQUIRED field.
VolumeCapability volume_capability = 4;
// Secrets required by plugin to complete node stage volume request.
// A secret is a string to string map where the key identifies the
// name of the secret (e.g. "username" or "password"), and the value
// contains the secret data (e.g. "bob" or "abc123").
// Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
// Each value MUST contain a valid string. An SP MAY choose to accept
// binary (non-string) data by using a binary-to-text encoding scheme,
// like base64.
// An SP SHALL advertise the requirements for required secret keys and
// values in documentation.
// CO SHALL permit passing through the required secrets.
// A CO MAY pass the same secrets to all RPCs, therefore the keys for
// all unique secrets that an SP expects must be unique across all CSI
// operations.
// This information is sensitive and MUST be treated as such (not
// logged, etc.) by the CO.
// This field is OPTIONAL.
map<string, string> node_stage_secrets = 5;
// Attributes of the volume to publish. This field is OPTIONAL and
// MUST match the attributes of the VolumeInfo identified by
// `volume_id`.
map<string,string> volume_attributes = 6;
}
message NodeStageVolumeResponse {
}
message NodeUnstageVolumeRequest {
// The ID of the volume. This field is REQUIRED.
string volume_id = 1;
// The path at which the volume was published. It MUST be an absolute
// path in the root filesystem of the process serving this request.
// This is a REQUIRED field.
string staging_target_path = 2;
}
message NodeUnstageVolumeResponse {
}
message NodePublishVolumeRequest {
// The ID of the volume to publish. This field is REQUIRED.
string volume_id = 1;
// The CO SHALL set this field to the value returned by
// `ControllerPublishVolume` if the corresponding Controller Plugin
// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
// left unset if the corresponding Controller Plugin does not have
// this capability. This is an OPTIONAL field.
map<string, string> publish_info = 2;
// The path to which the device was mounted by `NodeStageVolume`.
// It MUST be an absolute path in the root filesystem of the process
// serving this request.
// It MUST be set if the Node Plugin implements the
// `STAGE_UNSTAGE_VOLUME` node capability.
// This is an OPTIONAL field.
string staging_target_path = 3;
// The path to which the volume will be published. It MUST be an // The path to which the volume will be published. It MUST be an
// absolute path in the root filesystem of the process serving this // absolute path in the root filesystem of the process serving this
@ -534,18 +598,24 @@ message NodePublishVolumeRequest {
// REQUIRED. // REQUIRED.
bool readonly = 6; bool readonly = 6;
// Credentials used by Node plugin to authenticate/authorize node // Secrets required by plugin to complete node publish volume request.
// publish request. // A secret is a string to string map where the key identifies the
// This field contains credential data, for example username and // name of the secret (e.g. "username" or "password"), and the value
// password. Each key must consist of alphanumeric characters, '-', // contains the secret data (e.g. "bob" or "abc123").
// '_' or '.'. Each value MUST contain a valid string. An SP MAY // Each key MUST consist of alphanumeric characters, '-', '_' or '.'.
// choose to accept binary (non-string) data by using a binary-to-text // Each value MUST contain a valid string. An SP MAY choose to accept
// encoding scheme, like base64. An SP SHALL advertise the // binary (non-string) data by using a binary-to-text encoding scheme,
// requirements for credentials in documentation. COs SHALL permit // like base64.
// passing through the required credentials. This information is // An SP SHALL advertise the requirements for required secret keys and
// sensitive and MUST be treated as such (not logged, etc.) by the CO. // values in documentation.
// CO SHALL permit passing through the required secrets.
// A CO MAY pass the same secrets to all RPCs, therefore the keys for
// all unique secrets that an SP expects must be unique across all CSI
// operations.
// This information is sensitive and MUST be treated as such (not
// logged, etc.) by the CO.
// This field is OPTIONAL. // This field is OPTIONAL.
map<string, string> node_publish_credentials = 7; map<string, string> node_publish_secrets = 7;
// Attributes of the volume to publish. This field is OPTIONAL and // Attributes of the volume to publish. This field is OPTIONAL and
// MUST match the attributes of the Volume identified by // MUST match the attributes of the Volume identified by
@ -553,41 +623,21 @@ message NodePublishVolumeRequest {
map<string,string> volume_attributes = 8; map<string,string> volume_attributes = 8;
} }
message NodePublishVolumeResponse {} message NodePublishVolumeResponse {
//////// }
////////
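
Taken together, the new staging fields split node-side attachment into two steps: NodeStageVolume mounts the device once per node under staging_target_path, and NodePublishVolume then exposes it at each workload's target_path. A sketch of the call order a CO follows when the plugin reports STAGE_UNSTAGE_VOLUME; paths, IDs and the capability value are placeholders:

package csisketch

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

// stageThenPublish stages a volume once on the node, then publishes it for
// a single workload; unstaging/unpublishing would mirror these calls.
func stageThenPublish(ctx context.Context, n csi.NodeClient, vc *csi.VolumeCapability) error {
	staging := "/var/lib/csi/staging/vol-1" // one staging path per volume
	if _, err := n.NodeStageVolume(ctx, &csi.NodeStageVolumeRequest{
		VolumeId:          "vol-1",
		StagingTargetPath: staging,
		VolumeCapability:  vc,
	}); err != nil {
		return err
	}
	_, err := n.NodePublishVolume(ctx, &csi.NodePublishVolumeRequest{
		VolumeId:          "vol-1",
		StagingTargetPath: staging, // set because STAGE_UNSTAGE_VOLUME is supported
		TargetPath:        "/var/lib/csi/pods/pod-1/vol-1",
		VolumeCapability:  vc,
		Readonly:          false,
	})
	return err
}
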
message NodeUnpublishVolumeRequest { message NodeUnpublishVolumeRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
// The ID of the volume. This field is REQUIRED. // The ID of the volume. This field is REQUIRED.
string volume_id = 2; string volume_id = 1;
// The path at which the volume was published. It MUST be an absolute // The path at which the volume was published. It MUST be an absolute
// path in the root filesystem of the process serving this request. // path in the root filesystem of the process serving this request.
// This is a REQUIRED field. // This is a REQUIRED field.
string target_path = 3; string target_path = 2;
// Credentials used by Node plugin to authenticate/authorize node
// unpublish request.
// This field contains credential data, for example username and
// password. Each key must consist of alphanumeric characters, '-',
// '_' or '.'. Each value MUST contain a valid string. An SP MAY
// choose to accept binary (non-string) data by using a binary-to-text
// encoding scheme, like base64. An SP SHALL advertise the
// requirements for credentials in documentation. COs SHALL permit
// passing through the required credentials. This information is
// sensitive and MUST be treated as such (not logged, etc.) by the CO.
// This field is OPTIONAL.
map<string, string> node_unpublish_credentials = 4;
} }
message NodeUnpublishVolumeResponse {} message NodeUnpublishVolumeResponse {
//////// }
////////
message NodeGetIdRequest { message NodeGetIdRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
} }
message NodeGetIdResponse { message NodeGetIdResponse {
@ -596,19 +646,7 @@ message NodeGetIdResponse {
// This is a REQUIRED field. // This is a REQUIRED field.
string node_id = 1; string node_id = 1;
} }
////////
////////
message NodeProbeRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
}
message NodeProbeResponse {}
////////
////////
message NodeGetCapabilitiesRequest { message NodeGetCapabilitiesRequest {
// The API version assumed by the CO. This is a REQUIRED field.
Version version = 1;
} }
message NodeGetCapabilitiesResponse { message NodeGetCapabilitiesResponse {
@ -622,6 +660,7 @@ message NodeServiceCapability {
message RPC { message RPC {
enum Type { enum Type {
UNKNOWN = 0; UNKNOWN = 0;
STAGE_UNSTAGE_VOLUME = 1;
} }
Type type = 1; Type type = 1;
@ -632,4 +671,3 @@ message NodeServiceCapability {
RPC rpc = 1; RPC rpc = 1;
} }
} }
////////
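
A Node Plugin opts into that staged flow by advertising the new enum value. A minimal NodeGetCapabilities handler sketch, assuming the generated v0.2 Go types; the nodeServer receiver is a placeholder:

package csisketch

import (
	"context"

	csi "github.com/container-storage-interface/spec/lib/go/csi/v0"
)

type nodeServer struct{}

// NodeGetCapabilities advertises STAGE_UNSTAGE_VOLUME so the CO will call
// NodeStageVolume/NodeUnstageVolume around NodePublishVolume.
func (ns *nodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
	return &csi.NodeGetCapabilitiesResponse{
		Capabilities: []*csi.NodeServiceCapability{{
			Type: &csi.NodeServiceCapability_Rpc{
				Rpc: &csi.NodeServiceCapability_RPC{
					Type: csi.NodeServiceCapability_RPC_STAGE_UNSTAGE_VOLUME,
				},
			},
		}},
	}, nil
}
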


@ -1,4 +1,3 @@
/protoc /protoc
/protoc-gen-go /protoc-gen-go
/csi.a /csi.a
/csi/.build/


@ -76,10 +76,11 @@ export PATH := $(shell pwd):$(PATH)
######################################################################## ########################################################################
## BUILD ## ## BUILD ##
######################################################################## ########################################################################
CSI_GO := csi/csi.pb.go
CSI_A := csi.a
CSI_PROTO := ../../csi.proto CSI_PROTO := ../../csi.proto
CSI_GO_TMP := csi/.build/csi.pb.go CSI_PKG := $(shell cat $(CSI_PROTO) | sed -n -e 's/^package.\([^;]*\);$$/\1/p'|tr '.' '/')
CSI_GO := $(CSI_PKG)/csi.pb.go
CSI_A := csi.a
CSI_GO_TMP := $(CSI_PKG)/.build/csi.pb.go
# This recipe generates the go language bindings to a temp area. # This recipe generates the go language bindings to a temp area.
$(CSI_GO_TMP): $(CSI_PROTO) | $(PROTOC) $(PROTOC_GEN_GO) $(CSI_GO_TMP): $(CSI_PROTO) | $(PROTOC) $(PROTOC_GEN_GO)
@ -104,8 +105,8 @@ endif
# 3. Build the archive file. # 3. Build the archive file.
$(CSI_A): $(CSI_GO) $(CSI_A): $(CSI_GO)
go get -d ./... go get -d ./...
go install ./csi go install ./$(CSI_PKG)
go build -o "$@" ./csi go build -o "$@" ./$(CSI_PKG)
build: $(CSI_A) build: $(CSI_A)

File diff suppressed because it is too large


@ -193,7 +193,8 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeU
// "Generated output always contains 3, 6, or 9 fractional digits, // "Generated output always contains 3, 6, or 9 fractional digits,
// depending on required precision." // depending on required precision."
s, ns := s.Field(0).Int(), s.Field(1).Int() s, ns := s.Field(0).Int(), s.Field(1).Int()
x := fmt.Sprintf("%d.%09d", s, ns) d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond
x := fmt.Sprintf("%.9f", d.Seconds())
x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000") x = strings.TrimSuffix(x, "000")
out.write(`"`) out.write(`"`)
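
The rewritten formatting converts seconds and nanos into a single time.Duration, prints its float seconds with nine decimals, and trims trailing "000" groups so the JSON output keeps 3, 6 or 9 fractional digits. Below is a standalone illustration of the same logic, not the library code itself; note that float64 cannot represent 100000000.000000001 exactly, which is presumably why the very-large-duration test case is dropped a little further down.

package main

import (
	"fmt"
	"strings"
	"time"
)

// formatDuration mirrors the fixed jsonpb logic for proto Duration values.
func formatDuration(secs, nanos int64) string {
	d := time.Duration(secs)*time.Second + time.Duration(nanos)*time.Nanosecond
	x := fmt.Sprintf("%.9f", d.Seconds())
	x = strings.TrimSuffix(x, "000")
	x = strings.TrimSuffix(x, "000")
	return x + "s"
}

func main() {
	fmt.Println(formatDuration(3, 0))         // "3.000s", matching the kept test case
	fmt.Println(formatDuration(1, 500000000)) // "1.500s"
}
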


@ -407,7 +407,6 @@ var marshalingTests = []struct {
{"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON},
{"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON},
{"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, {"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 3}}, `{"dur":"3.000s"}`},
{"Duration", marshaler, &pb.KnownTypes{Dur: &durpb.Duration{Seconds: 100000000, Nanos: 1}}, `{"dur":"100000000.000000001s"}`},
{"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{ {"Struct", marshaler, &pb.KnownTypes{St: &stpb.Struct{
Fields: map[string]*stpb.Value{ Fields: map[string]*stpb.Value{
"one": {Kind: &stpb.Value_StringValue{"loneliest number"}}, "one": {Kind: &stpb.Value_StringValue{"loneliest number"}},

vendor/github.com/golang/protobuf/proto/discard.go (new generated, vendored file; 151 lines added)

@ -0,0 +1,151 @@
// Go support for Protocol Buffers - Google's data interchange format
//
// Copyright 2017 The Go Authors. All rights reserved.
// https://github.com/golang/protobuf
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package proto
import (
"fmt"
"reflect"
"strings"
)
// DiscardUnknown recursively discards all unknown fields from this message
// and all embedded messages.
//
// When unmarshaling a message with unrecognized fields, the tags and values
// of such fields are preserved in the Message. This allows a later call to
// marshal to be able to produce a message that continues to have those
// unrecognized fields. To avoid this, DiscardUnknown is used to
// explicitly clear the unknown fields after unmarshaling.
//
// For proto2 messages, the unknown fields of message extensions are only
// discarded from messages that have been accessed via GetExtension.
func DiscardUnknown(m Message) {
discardLegacy(m)
}
func discardLegacy(m Message) {
v := reflect.ValueOf(m)
if v.Kind() != reflect.Ptr || v.IsNil() {
return
}
v = v.Elem()
if v.Kind() != reflect.Struct {
return
}
t := v.Type()
for i := 0; i < v.NumField(); i++ {
f := t.Field(i)
if strings.HasPrefix(f.Name, "XXX_") {
continue
}
vf := v.Field(i)
tf := f.Type
// Unwrap tf to get its most basic type.
var isPointer, isSlice bool
if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 {
isSlice = true
tf = tf.Elem()
}
if tf.Kind() == reflect.Ptr {
isPointer = true
tf = tf.Elem()
}
if isPointer && isSlice && tf.Kind() != reflect.Struct {
panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name))
}
switch tf.Kind() {
case reflect.Struct:
switch {
case !isPointer:
panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name))
case isSlice: // E.g., []*pb.T
for j := 0; j < vf.Len(); j++ {
discardLegacy(vf.Index(j).Interface().(Message))
}
default: // E.g., *pb.T
discardLegacy(vf.Interface().(Message))
}
case reflect.Map:
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name))
default: // E.g., map[K]V
tv := vf.Type().Elem()
if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T)
for _, key := range vf.MapKeys() {
val := vf.MapIndex(key)
discardLegacy(val.Interface().(Message))
}
}
}
case reflect.Interface:
// Must be oneof field.
switch {
case isPointer || isSlice:
panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name))
default: // E.g., test_proto.isCommunique_Union interface
if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" {
vf = vf.Elem() // E.g., *test_proto.Communique_Msg
if !vf.IsNil() {
vf = vf.Elem() // E.g., test_proto.Communique_Msg
vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value
if vf.Kind() == reflect.Ptr {
discardLegacy(vf.Interface().(Message))
}
}
}
}
}
}
if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() {
if vf.Type() != reflect.TypeOf([]byte{}) {
panic("expected XXX_unrecognized to be of type []byte")
}
vf.Set(reflect.ValueOf([]byte(nil)))
}
// For proto2 messages, only discard unknown fields in message extensions
// that have been accessed via GetExtension.
if em, ok := extendable(m); ok {
// Ignore lock since discardLegacy is not concurrency safe.
emm, _ := em.extensionsRead()
for _, mx := range emm {
if m, ok := mx.value.(Message); ok {
discardLegacy(m)
}
}
}
}
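
Typical use of the new helper, as a hedged sketch: unmarshal into a generated message, then discard whatever fields the current schema did not recognize so a later re-marshal will not carry them forward.

package csisketch

import (
	"github.com/golang/protobuf/proto"
)

// dropUnknowns unmarshals data into msg and strips unrecognized fields.
func dropUnknowns(data []byte, msg proto.Message) error {
	if err := proto.Unmarshal(data, msg); err != nil {
		return err
	}
	proto.DiscardUnknown(msg)
	return nil
}
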


@ -7,7 +7,7 @@ go_import_path: github.com/kubernetes-csi/drivers
install: install:
- go get -u github.com/golang/dep/cmd/dep - go get -u github.com/golang/dep/cmd/dep
- dep ensure -vendor-only - dep ensure -vendor-only
- make - make hostpath
script: script:
- go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0 - go fmt $(go list ./... | grep -v vendor) | wc -l | grep 0
- go vet $(go list ./... | grep -v vendor) - go vet $(go list ./... | grep -v vendor)


@ -8,10 +8,10 @@
revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9"
[[projects]] [[projects]]
branch = "master"
name = "github.com/container-storage-interface/spec" name = "github.com/container-storage-interface/spec"
packages = ["lib/go/csi"] packages = ["lib/go/csi/v0"]
revision = "7ab01a90da87f9fef3ee1de0494962fdefaf7db7" revision = "35d9f9d77954980e449e52c3f3e43c21bd8171f5"
version = "v0.2.0-rc1"
[[projects]] [[projects]]
name = "github.com/davecgh/go-spew" name = "github.com/davecgh/go-spew"
@ -21,8 +21,11 @@
[[projects]] [[projects]]
name = "github.com/docker/distribution" name = "github.com/docker/distribution"
packages = ["digestset","reference"] packages = [
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" "digestset",
"reference"
]
revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d"
[[projects]] [[projects]]
name = "github.com/ghodss/yaml" name = "github.com/ghodss/yaml"
@ -32,7 +35,10 @@
[[projects]] [[projects]]
name = "github.com/gogo/protobuf" name = "github.com/gogo/protobuf"
packages = ["proto","sortkeys"] packages = [
"proto",
"sortkeys"
]
revision = "1adfc126b41513cc696b209667c8656ea7aac67c" revision = "1adfc126b41513cc696b209667c8656ea7aac67c"
version = "v1.0.0" version = "v1.0.0"
@ -50,7 +56,13 @@
[[projects]] [[projects]]
name = "github.com/golang/protobuf" name = "github.com/golang/protobuf"
packages = ["proto","ptypes","ptypes/any","ptypes/duration","ptypes/timestamp"] packages = [
"proto",
"ptypes",
"ptypes/any",
"ptypes/duration",
"ptypes/timestamp"
]
revision = "925541529c1fa6821df4e44ce2723319eb2be768" revision = "925541529c1fa6821df4e44ce2723319eb2be768"
version = "v1.0.0" version = "v1.0.0"
@ -68,26 +80,46 @@
[[projects]] [[projects]]
name = "github.com/googleapis/gnostic" name = "github.com/googleapis/gnostic"
packages = ["OpenAPIv2","compiler","extensions"] packages = [
"OpenAPIv2",
"compiler",
"extensions"
]
revision = "ee43cbb60db7bd22502942cccbc39059117352ab" revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
version = "v0.1.0" version = "v0.1.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/gophercloud/gophercloud" name = "github.com/gophercloud/gophercloud"
packages = [".","openstack","openstack/blockstorage/v3/volumes","openstack/compute/v2/extensions/volumeattach","openstack/identity/v2/tenants","openstack/identity/v2/tokens","openstack/identity/v3/tokens","openstack/utils","pagination"] packages = [
revision = "6da026c32e2d622cc242d32984259c77237aefe1" ".",
"openstack",
"openstack/blockstorage/v3/volumes",
"openstack/compute/v2/extensions/volumeattach",
"openstack/identity/v2/tenants",
"openstack/identity/v2/tokens",
"openstack/identity/v3/tokens",
"openstack/utils",
"pagination"
]
revision = "afbf0422412f5dc726fa12be280fa0c3cb31fcbd"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/gregjones/httpcache" name = "github.com/gregjones/httpcache"
packages = [".","diskcache"] packages = [
".",
"diskcache"
]
revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229" revision = "2bcd89a1743fd4b373f7370ce8ddc14dfbd18229"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/hashicorp/golang-lru" name = "github.com/hashicorp/golang-lru"
packages = [".","simplelru"] packages = [
".",
"simplelru"
]
revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3" revision = "0fb14efe8c47ae851c0034ed7a448854d3d34cf3"
[[projects]] [[projects]]
@ -99,8 +131,8 @@
[[projects]] [[projects]]
name = "github.com/json-iterator/go" name = "github.com/json-iterator/go"
packages = ["."] packages = ["."]
revision = "28452fcdec4e44348d2af0d91d1e9e38da3a9e0a" revision = "3353055b2a1a5ae1b6a8dfde887a524e7088f3a2"
version = "1.0.5" version = "1.1.2"
[[projects]] [[projects]]
name = "github.com/juju/ratelimit" name = "github.com/juju/ratelimit"
@ -114,6 +146,18 @@
revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" revision = "3247c84500bff8d9fb6d579d800f20b3e091582c"
version = "v1.0.0" version = "v1.0.0"
[[projects]]
name = "github.com/modern-go/concurrent"
packages = ["."]
revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a"
version = "1.0.0"
[[projects]]
name = "github.com/modern-go/reflect2"
packages = ["."]
revision = "1df9eeb2bb81f327b96228865c5687bc2194af3f"
version = "1.0.0"
[[projects]] [[projects]]
name = "github.com/opencontainers/go-digest" name = "github.com/opencontainers/go-digest"
packages = ["."] packages = ["."]
@ -159,14 +203,23 @@
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/prometheus/common" name = "github.com/prometheus/common"
packages = ["expfmt","internal/bitbucket.org/ww/goautoneg","model"] packages = [
revision = "89604d197083d4781071d3c65855d24ecfb0a563" "expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model"
]
revision = "6fb6fce6f8b75884b92e1889c150403fc0872c5e"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "github.com/prometheus/procfs" name = "github.com/prometheus/procfs"
packages = [".","internal/util","nfs","xfs"] packages = [
revision = "cb4147076ac75738c9a7d279075a253c0cc5acbd" ".",
"internal/util",
"nfs",
"xfs"
]
revision = "d274e363d5759d1c916232217be421f1cc89c5fe"
[[projects]] [[projects]]
name = "github.com/spf13/cobra" name = "github.com/spf13/cobra"
@ -188,49 +241,107 @@
[[projects]] [[projects]]
name = "github.com/stretchr/testify" name = "github.com/stretchr/testify"
packages = ["assert","mock"] packages = [
"assert",
"mock"
]
revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71"
version = "v1.2.1" version = "v1.2.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/crypto" name = "golang.org/x/crypto"
packages = ["ed25519","ed25519/internal/edwards25519"] packages = [
revision = "5119cf507ed5294cc409c092980c7497ee5d6fd2" "ed25519",
"ed25519/internal/edwards25519"
]
revision = "91a49db82a88618983a78a06c1cbd4e00ab749ab"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/net" name = "golang.org/x/net"
packages = ["context","http2","http2/hpack","idna","internal/timeseries","lex/httplex","trace"] packages = [
revision = "f5dfe339be1d06f81b22525fe34671ee7d2c8904" "context",
"http2",
"http2/hpack",
"idna",
"internal/timeseries",
"lex/httplex",
"trace"
]
revision = "cbe0f9307d0156177f9dd5dc85da1a31abc5f2fb"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "golang.org/x/sys" name = "golang.org/x/sys"
packages = ["unix"] packages = ["unix"]
revision = "37707fdb30a5b38865cfb95e5aab41707daec7fd" revision = "f6cff0780e542efa0c8e864dc8fa522808f6a598"
[[projects]] [[projects]]
branch = "master"
name = "golang.org/x/text" name = "golang.org/x/text"
packages = ["collate","collate/build","internal/colltab","internal/gen","internal/tag","internal/triegen","internal/ucd","language","secure/bidirule","transform","unicode/bidi","unicode/cldr","unicode/norm","unicode/rangetable"] packages = [
revision = "4e4a3210bb54bb31f6ab2cdca2edcc0b50c420c1" "collate",
"collate/build",
"internal/colltab",
"internal/gen",
"internal/tag",
"internal/triegen",
"internal/ucd",
"language",
"secure/bidirule",
"transform",
"unicode/bidi",
"unicode/cldr",
"unicode/norm",
"unicode/rangetable"
]
revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0"
version = "v0.3.0"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "google.golang.org/genproto" name = "google.golang.org/genproto"
packages = ["googleapis/rpc/status"] packages = ["googleapis/rpc/status"]
revision = "2b5a72b8730b0b16380010cfe5286c42108d88e7" revision = "2d9486acae19cf9bd0c093d7dc236a323726a9e4"
[[projects]] [[projects]]
name = "google.golang.org/grpc" name = "google.golang.org/grpc"
packages = [".","balancer","balancer/base","balancer/roundrobin","codes","connectivity","credentials","encoding","grpclb/grpc_lb_v1/messages","grpclog","internal","keepalive","metadata","naming","peer","resolver","resolver/dns","resolver/passthrough","stats","status","tap","transport"] packages = [
revision = "6b51017f791ae1cfbec89c52efdf444b13b550ef" ".",
version = "v1.9.2" "balancer",
"balancer/base",
"balancer/roundrobin",
"codes",
"connectivity",
"credentials",
"encoding",
"encoding/proto",
"grpclb/grpc_lb_v1/messages",
"grpclog",
"internal",
"keepalive",
"metadata",
"naming",
"peer",
"resolver",
"resolver/dns",
"resolver/passthrough",
"stats",
"status",
"tap",
"transport"
]
revision = "8e4536a86ab602859c20df5ebfd0bd4228d08655"
version = "v1.10.0"
[[projects]] [[projects]]
name = "gopkg.in/gcfg.v1" name = "gopkg.in/gcfg.v1"
packages = [".","scanner","token","types"] packages = [
".",
"scanner",
"token",
"types"
]
revision = "298b7a6a3838f79debfaee8bd3bfb2b8d779e756" revision = "298b7a6a3838f79debfaee8bd3bfb2b8d779e756"
version = "v1.2.1" version = "v1.2.1"
@ -242,9 +353,14 @@
[[projects]] [[projects]]
name = "gopkg.in/square/go-jose.v2" name = "gopkg.in/square/go-jose.v2"
packages = [".","cipher","json","jwt"] packages = [
revision = "f8f38de21b4dcd69d0413faf231983f5fd6634b1" ".",
version = "v2.1.3" "cipher",
"json",
"jwt"
]
revision = "6ee92191fea850cdcab9a18867abf5f521cdbadb"
version = "v2.1.4"
[[projects]] [[projects]]
name = "gopkg.in/warnings.v0" name = "gopkg.in/warnings.v0"
@ -253,38 +369,232 @@
version = "v0.1.2" version = "v0.1.2"
[[projects]] [[projects]]
branch = "v2"
name = "gopkg.in/yaml.v2" name = "gopkg.in/yaml.v2"
packages = ["."] packages = ["."]
revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4" revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
version = "v2.1.1"
[[projects]] [[projects]]
branch = "master"
name = "k8s.io/api" name = "k8s.io/api"
packages = ["admissionregistration/v1alpha1","admissionregistration/v1beta1","apps/v1","apps/v1beta1","apps/v1beta2","authentication/v1","authentication/v1beta1","authorization/v1","authorization/v1beta1","autoscaling/v1","autoscaling/v2beta1","batch/v1","batch/v1beta1","batch/v2alpha1","certificates/v1beta1","core/v1","events/v1beta1","extensions/v1beta1","networking/v1","policy/v1beta1","rbac/v1","rbac/v1alpha1","rbac/v1beta1","scheduling/v1alpha1","settings/v1alpha1","storage/v1","storage/v1alpha1","storage/v1beta1"] packages = [
revision = "2694da5be9c4ab4f3fd826112d4c3f71b8bf4b23" "admissionregistration/v1alpha1",
"admissionregistration/v1beta1",
"apps/v1",
"apps/v1beta1",
"apps/v1beta2",
"authentication/v1",
"authentication/v1beta1",
"authorization/v1",
"authorization/v1beta1",
"autoscaling/v1",
"autoscaling/v2beta1",
"batch/v1",
"batch/v1beta1",
"batch/v2alpha1",
"certificates/v1beta1",
"core/v1",
"events/v1beta1",
"extensions/v1beta1",
"networking/v1",
"policy/v1beta1",
"rbac/v1",
"rbac/v1alpha1",
"rbac/v1beta1",
"scheduling/v1alpha1",
"settings/v1alpha1",
"storage/v1",
"storage/v1alpha1",
"storage/v1beta1"
]
revision = "7aac3e00a1b32fa476b83078cebaaca606b2fb48"
version = "kubernetes-1.10.0-beta.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "k8s.io/apiextensions-apiserver" name = "k8s.io/apiextensions-apiserver"
packages = ["pkg/features"] packages = ["pkg/features"]
revision = "d64d645c52b769d38dbfa509fcba27186c212117" revision = "cfb732a3dd26c3e6349d0954e1209c9d5c093d1f"
[[projects]] [[projects]]
branch = "master"
name = "k8s.io/apimachinery" name = "k8s.io/apimachinery"
packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/api/validation","pkg/apimachinery","pkg/apimachinery/announced","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1/validation","pkg/apis/meta/v1beta1","pkg/conversion","pkg/conversion/queryparams","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/reflect"] packages = [
revision = "616b23029fa3dc3e0ccefd47963f5651a6543d94" "pkg/api/equality",
"pkg/api/errors",
"pkg/api/meta",
"pkg/api/resource",
"pkg/api/validation",
"pkg/apimachinery",
"pkg/apimachinery/announced",
"pkg/apimachinery/registered",
"pkg/apis/meta/internalversion",
"pkg/apis/meta/v1",
"pkg/apis/meta/v1/unstructured",
"pkg/apis/meta/v1/validation",
"pkg/apis/meta/v1beta1",
"pkg/conversion",
"pkg/conversion/queryparams",
"pkg/fields",
"pkg/labels",
"pkg/runtime",
"pkg/runtime/schema",
"pkg/runtime/serializer",
"pkg/runtime/serializer/json",
"pkg/runtime/serializer/protobuf",
"pkg/runtime/serializer/recognizer",
"pkg/runtime/serializer/streaming",
"pkg/runtime/serializer/versioning",
"pkg/selection",
"pkg/types",
"pkg/util/cache",
"pkg/util/clock",
"pkg/util/diff",
"pkg/util/errors",
"pkg/util/framer",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/reflect"
]
revision = "302974c03f7e50f16561ba237db776ab93594ef6"
version = "kubernetes-1.10.0-beta.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
name = "k8s.io/apiserver" name = "k8s.io/apiserver"
packages = ["pkg/authentication/authenticator","pkg/authentication/serviceaccount","pkg/authentication/user","pkg/features","pkg/util/feature"] packages = [
revision = "16f07649a010bed64bb9c7986bd9988f68ce0421" "pkg/authentication/authenticator",
"pkg/authentication/serviceaccount",
"pkg/authentication/user",
"pkg/features",
"pkg/util/feature"
]
revision = "74a8a89814a24637e718e271b747a717c24da88f"
[[projects]] [[projects]]
name = "k8s.io/client-go" name = "k8s.io/client-go"
packages = ["discovery","informers","informers/admissionregistration","informers/admissionregistration/v1alpha1","informers/admissionregistration/v1beta1","informers/apps","informers/apps/v1","informers/apps/v1beta1","informers/apps/v1beta2","informers/autoscaling","informers/autoscaling/v1","informers/autoscaling/v2beta1","informers/batch","informers/batch/v1","informers/batch/v1beta1","informers/batch/v2alpha1","informers/certificates","informers/certificates/v1beta1","informers/core","informers/core/v1","informers/events","informers/events/v1beta1","informers/extensions","informers/extensions/v1beta1","informers/internalinterfaces","informers/networking","informers/networking/v1","informers/policy","informers/policy/v1beta1","informers/rbac","informers/rbac/v1","informers/rbac/v1alpha1","informers/rbac/v1beta1","informers/scheduling","informers/scheduling/v1alpha1","informers/settings","informers/settings/v1alpha1","informers/storage","informers/storage/v1","informers/storage/v1alpha1","informers/storage/v1beta1","kubernetes","kubernetes/scheme","kubernetes/typed/admissionregistration/v1alpha1","kubernetes/typed/admissionregistration/v1beta1","kubernetes/typed/apps/v1","kubernetes/typed/apps/v1beta1","kubernetes/typed/apps/v1beta2","kubernetes/typed/authentication/v1","kubernetes/typed/authentication/v1beta1","kubernetes/typed/authorization/v1","kubernetes/typed/authorization/v1beta1","kubernetes/typed/autoscaling/v1","kubernetes/typed/autoscaling/v2beta1","kubernetes/typed/batch/v1","kubernetes/typed/batch/v1beta1","kubernetes/typed/batch/v2alpha1","kubernetes/typed/certificates/v1beta1","kubernetes/typed/core/v1","kubernetes/typed/events/v1beta1","kubernetes/typed/extensions/v1beta1","kubernetes/typed/networking/v1","kubernetes/typed/policy/v1beta1","kubernetes/typed/rbac/v1","kubernetes/typed/rbac/v1alpha1","kubernetes/typed/rbac/v1beta1","kubernetes/typed/scheduling/v1alpha1","kubernetes/typed/settings/v1alpha1","kubernetes/typed/storage/v1","kubernetes/typed/storage/v1alpha1","kubernetes/typed/storage/v1beta1","listers/admissionregistration/v1alpha1","listers/admissionregistration/v1beta1","listers/apps/v1","listers/apps/v1beta1","listers/apps/v1beta2","listers/autoscaling/v1","listers/autoscaling/v2beta1","listers/batch/v1","listers/batch/v1beta1","listers/batch/v2alpha1","listers/certificates/v1beta1","listers/core/v1","listers/events/v1beta1","listers/extensions/v1beta1","listers/networking/v1","listers/policy/v1beta1","listers/rbac/v1","listers/rbac/v1alpha1","listers/rbac/v1beta1","listers/scheduling/v1alpha1","listers/settings/v1alpha1","listers/storage/v1","listers/storage/v1alpha1","listers/storage/v1beta1","pkg/version","rest","rest/watch","tools/cache","tools/clientcmd/api","tools/metrics","tools/pager","tools/record","tools/reference","transport","util/buffer","util/cert","util/flowcontrol","util/integer","util/retry"] packages = [
"discovery",
"informers",
"informers/admissionregistration",
"informers/admissionregistration/v1alpha1",
"informers/admissionregistration/v1beta1",
"informers/apps",
"informers/apps/v1",
"informers/apps/v1beta1",
"informers/apps/v1beta2",
"informers/autoscaling",
"informers/autoscaling/v1",
"informers/autoscaling/v2beta1",
"informers/batch",
"informers/batch/v1",
"informers/batch/v1beta1",
"informers/batch/v2alpha1",
"informers/certificates",
"informers/certificates/v1beta1",
"informers/core",
"informers/core/v1",
"informers/events",
"informers/events/v1beta1",
"informers/extensions",
"informers/extensions/v1beta1",
"informers/internalinterfaces",
"informers/networking",
"informers/networking/v1",
"informers/policy",
"informers/policy/v1beta1",
"informers/rbac",
"informers/rbac/v1",
"informers/rbac/v1alpha1",
"informers/rbac/v1beta1",
"informers/scheduling",
"informers/scheduling/v1alpha1",
"informers/settings",
"informers/settings/v1alpha1",
"informers/storage",
"informers/storage/v1",
"informers/storage/v1alpha1",
"informers/storage/v1beta1",
"kubernetes",
"kubernetes/scheme",
"kubernetes/typed/admissionregistration/v1alpha1",
"kubernetes/typed/admissionregistration/v1beta1",
"kubernetes/typed/apps/v1",
"kubernetes/typed/apps/v1beta1",
"kubernetes/typed/apps/v1beta2",
"kubernetes/typed/authentication/v1",
"kubernetes/typed/authentication/v1beta1",
"kubernetes/typed/authorization/v1",
"kubernetes/typed/authorization/v1beta1",
"kubernetes/typed/autoscaling/v1",
"kubernetes/typed/autoscaling/v2beta1",
"kubernetes/typed/batch/v1",
"kubernetes/typed/batch/v1beta1",
"kubernetes/typed/batch/v2alpha1",
"kubernetes/typed/certificates/v1beta1",
"kubernetes/typed/core/v1",
"kubernetes/typed/events/v1beta1",
"kubernetes/typed/extensions/v1beta1",
"kubernetes/typed/networking/v1",
"kubernetes/typed/policy/v1beta1",
"kubernetes/typed/rbac/v1",
"kubernetes/typed/rbac/v1alpha1",
"kubernetes/typed/rbac/v1beta1",
"kubernetes/typed/scheduling/v1alpha1",
"kubernetes/typed/settings/v1alpha1",
"kubernetes/typed/storage/v1",
"kubernetes/typed/storage/v1alpha1",
"kubernetes/typed/storage/v1beta1",
"listers/admissionregistration/v1alpha1",
"listers/admissionregistration/v1beta1",
"listers/apps/v1",
"listers/apps/v1beta1",
"listers/apps/v1beta2",
"listers/autoscaling/v1",
"listers/autoscaling/v2beta1",
"listers/batch/v1",
"listers/batch/v1beta1",
"listers/batch/v2alpha1",
"listers/certificates/v1beta1",
"listers/core/v1",
"listers/events/v1beta1",
"listers/extensions/v1beta1",
"listers/networking/v1",
"listers/policy/v1beta1",
"listers/rbac/v1",
"listers/rbac/v1alpha1",
"listers/rbac/v1beta1",
"listers/scheduling/v1alpha1",
"listers/settings/v1alpha1",
"listers/storage/v1",
"listers/storage/v1alpha1",
"listers/storage/v1beta1",
"pkg/version",
"rest",
"rest/watch",
"tools/cache",
"tools/clientcmd/api",
"tools/metrics",
"tools/pager",
"tools/record",
"tools/reference",
"transport",
"util/buffer",
"util/cert",
"util/flowcontrol",
"util/integer",
"util/retry"
]
revision = "78700dec6369ba22221b72770783300f143df150" revision = "78700dec6369ba22221b72770783300f143df150"
version = "v6.0.0" version = "v6.0.0"
@ -292,13 +602,52 @@
branch = "master" branch = "master"
name = "k8s.io/kube-openapi" name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"] packages = ["pkg/util/proto"]
revision = "275e2ce91dec4c05a4094a7b1daee5560b555ac9" revision = "50ae88d24ede7b8bad68e23c805b5d3da5c8abaf"
[[projects]] [[projects]]
branch = "master"
name = "k8s.io/kubernetes" name = "k8s.io/kubernetes"
packages = ["pkg/api/legacyscheme","pkg/api/service","pkg/api/v1/pod","pkg/apis/autoscaling","pkg/apis/core","pkg/apis/core/helper","pkg/apis/core/install","pkg/apis/core/pods","pkg/apis/core/v1","pkg/apis/core/v1/helper","pkg/apis/core/validation","pkg/apis/extensions","pkg/apis/networking","pkg/capabilities","pkg/cloudprovider","pkg/controller","pkg/features","pkg/fieldpath","pkg/kubelet/apis","pkg/kubelet/types","pkg/master/ports","pkg/security/apparmor","pkg/serviceaccount","pkg/util/file","pkg/util/hash","pkg/util/io","pkg/util/mount","pkg/util/net/sets","pkg/util/nsenter","pkg/util/parsers","pkg/util/pointer","pkg/util/taints","pkg/volume","pkg/volume/util"] packages = [
revision = "19829a24f1c1ee608e1862bfe9828f4630f484cd" "pkg/api/legacyscheme",
"pkg/api/service",
"pkg/api/v1/pod",
"pkg/apis/autoscaling",
"pkg/apis/core",
"pkg/apis/core/helper",
"pkg/apis/core/install",
"pkg/apis/core/pods",
"pkg/apis/core/v1",
"pkg/apis/core/v1/helper",
"pkg/apis/core/validation",
"pkg/apis/extensions",
"pkg/apis/networking",
"pkg/capabilities",
"pkg/cloudprovider",
"pkg/controller",
"pkg/features",
"pkg/fieldpath",
"pkg/kubelet/apis",
"pkg/kubelet/types",
"pkg/master/ports",
"pkg/scheduler/api",
"pkg/security/apparmor",
"pkg/serviceaccount",
"pkg/util/file",
"pkg/util/hash",
"pkg/util/io",
"pkg/util/mount",
"pkg/util/net/sets",
"pkg/util/nsenter",
"pkg/util/parsers",
"pkg/util/pointer",
"pkg/util/taints",
"pkg/volume",
"pkg/volume/util",
"pkg/volume/util/fs",
"pkg/volume/util/recyclerclient",
"pkg/volume/util/types"
]
revision = "37555e6d24c2f951c40660ea59a80fa251982005"
version = "v1.10.0-beta.1"
[[projects]] [[projects]]
branch = "master" branch = "master"
@ -309,6 +658,6 @@
[solve-meta] [solve-meta]
analyzer-name = "dep" analyzer-name = "dep"
analyzer-version = 1 analyzer-version = 1
inputs-digest = "ed3e1732651d25ef2fd3f1c44d70fb9a0c067dbb6a6c8e7bbc66088751bbf96c" inputs-digest = "52850d29ee1bd54bcd87d2d39fec1bfd9ee33d5d3611184f8442ce71687e1d1c"
solver-name = "gps-cdcl" solver-name = "gps-cdcl"
solver-version = 1 solver-version = 1


@ -23,32 +23,52 @@
[[constraint]] [[constraint]]
name = "github.com/container-storage-interface/spec" name = "github.com/container-storage-interface/spec"
branch = "master" version = "~0.2.0"
[[constraint]] [[constraint]]
branch = "master" branch = "master"
name = "github.com/golang/glog" name = "github.com/golang/glog"
[[constraint]] [[override]]
name = "google.golang.org/grpc" revision = "5db89f0ca68677abc5eefce8f2a0a772c98ba52d"
version = "1.7.2" name = "github.com/docker/distribution"
[[constraint]] [[constraint]]
name = "github.com/docker/distribution" branch = "master"
revision = "edc3ab29cdff8694dd6feb85cfeb4b5f1b38ed9c" name = "github.com/gophercloud/gophercloud"
[[constraint]]
name = "github.com/pborman/uuid"
version = "1.1.0"
[[constraint]]
name = "github.com/spf13/cobra"
version = "0.0.1"
[[constraint]]
name = "github.com/stretchr/testify"
version = "1.2.1"
[[constraint]]
branch = "master"
name = "golang.org/x/net"
[[constraint]]
name = "google.golang.org/grpc"
version = "1.10.0"
[[constraint]]
name = "gopkg.in/gcfg.v1"
version = "1.2.1"
[[constraint]]
version = "kubernetes-1.10.0-beta.1"
name = "k8s.io/apimachinery"
[[constraint]] [[constraint]]
name = "k8s.io/kubernetes" name = "k8s.io/kubernetes"
branch = "master" version = "v1.10.0-beta.1"
[[constraint]] [[override]]
branch = "master" version = "kubernetes-1.10.0-beta.1"
name = "github.com/kubernetes-csi/csi-test"
[[constraint]]
name = "k8s.io/apimachinery"
branch = "master"
[[constraint]]
name = "k8s.io/api" name = "k8s.io/api"
branch = "master"


@ -12,6 +12,9 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
REGISTRY_NAME = quay.io/k8scsi
IMAGE_VERSION = v0.2.0
.PHONY: all flexadapter nfs hostpath iscsi cinder clean .PHONY: all flexadapter nfs hostpath iscsi cinder clean
all: flexadapter nfs hostpath iscsi cinder all: flexadapter nfs hostpath iscsi cinder
@ -22,19 +25,24 @@ test:
flexadapter: flexadapter:
if [ ! -d ./vendor ]; then dep ensure; fi if [ ! -d ./vendor ]; then dep ensure; fi
go build -i -o _output/flexadapter ./app/flexadapter CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/flexadapter ./app/flexadapter
nfs: nfs:
if [ ! -d ./vendor ]; then dep ensure; fi if [ ! -d ./vendor ]; then dep ensure; fi
go build -i -o _output/nfsplugin ./app/nfsplugin CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/nfsplugin ./app/nfsplugin
hostpath: hostpath:
if [ ! -d ./vendor ]; then dep ensure; fi if [ ! -d ./vendor ]; then dep ensure; fi
go build -i -o _output/hostpathplugin ./app/hostpathplugin CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/hostpathplugin ./app/hostpathplugin
hostpath-container: hostpath
cp _output/hostpathplugin pkg/hostpath/extras/docker
docker build -t $(REGISTRY_NAME)/hostpathplugin:$(IMAGE_VERSION) ./pkg/hostpath/extras/docker
iscsi: iscsi:
if [ ! -d ./vendor ]; then dep ensure; fi if [ ! -d ./vendor ]; then dep ensure; fi
go build -i -o _output/iscsiplugin ./app/iscsiplugin CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/iscsiplugin ./app/iscsiplugin
cinder: cinder:
if [ ! -d ./vendor ]; then dep ensure; fi if [ ! -d ./vendor ]; then dep ensure; fi
go build -i -o _output/cinderplugin ./app/cinderplugin CGO_ENABLED=0 GOOS=linux go build -a -ldflags '-extldflags "-static"' -o _output/cinderplugin ./app/cinderplugin
clean: clean:
go clean -r -x go clean -r -x


@ -3,7 +3,7 @@
These drivers are provided purely for illustrative purposes, and should not be used for production workloads. These drivers are provided purely for illustrative purposes, and should not be used for production workloads.
## Other sample drivers ## Other sample drivers
Please read [Drivers](https://github.com/kubernetes-csi/docs/wiki/Drivers) for more information Please read [Drivers](https://kubernetes-csi.github.io/docs/Drivers.html) for more information
## Adding new sample drivers ## Adding new sample drivers
Please, DO NOT submit PRs to add new drivers here unless they are just examples. Real CSI drivers are to be housed on their own repo separate from this one. You are then welcomed to send a PR to https://github.com/kubernetes-csi/docs to add the [Driver](https://github.com/kubernetes-csi/docs/wiki/Drivers) page. Please, DO NOT submit PRs to add new drivers here unless they are just examples. Real CSI drivers are to be housed on their own repo separate from this one. You are then welcomed to send a PR to https://github.com/kubernetes-csi/docs to add the [Driver](https://github.com/kubernetes-csi/docs/wiki/Drivers) page.


@ -13,6 +13,7 @@ CSI_MOUNTPOINT="/mnt"
APP=hostpathplugin APP=hostpathplugin
SKIP="WithCapacity" SKIP="WithCapacity"
SKIP=""
if [ x${TRAVIS} = x"true" ] ; then if [ x${TRAVIS} = x"true" ] ; then
SKIP="WithCapacity|NodeUnpublishVolume|NodePublishVolume" SKIP="WithCapacity|NodeUnpublishVolume|NodePublishVolume"
fi fi


@ -17,13 +17,13 @@ limitations under the License.
package cinder package cinder
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/cinder/openstack" "github.com/kubernetes-csi/drivers/pkg/cinder/openstack"
csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common" csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
"github.com/pborman/uuid" "github.com/pborman/uuid"
"golang.org/x/net/context" "golang.org/x/net/context"
"k8s.io/kubernetes/pkg/volume" "k8s.io/kubernetes/pkg/volume/util"
) )
type controllerServer struct { type controllerServer struct {
@ -43,7 +43,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
if req.GetCapacityRange() != nil { if req.GetCapacityRange() != nil {
volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes()) volSizeBytes = int64(req.GetCapacityRange().GetRequiredBytes())
} }
volSizeGB := int(volume.RoundUpSize(volSizeBytes, 1024*1024*1024)) volSizeGB := int(util.RoundUpSize(volSizeBytes, 1024*1024*1024))
// Volume Type // Volume Type
volType := req.GetParameters()["type"] volType := req.GetParameters()["type"]
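
util.RoundUpSize here turns the requested byte count into whole GiB, rounding up so the provisioned Cinder volume is never smaller than what was asked for. A self-contained sketch of the same arithmetic; the helper name is illustrative:

package main

import "fmt"

// roundUpGiB mirrors util.RoundUpSize(volSizeBytes, 1024*1024*1024).
func roundUpGiB(volSizeBytes int64) int64 {
	const gib = 1024 * 1024 * 1024
	return (volSizeBytes + gib - 1) / gib
}

func main() {
	fmt.Println(roundUpGiB(1))                  // any non-zero request -> at least 1 GiB
	fmt.Println(roundUpGiB(1536 * 1024 * 1024)) // 1.5 GiB request -> 2 GiB
}
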


@ -19,7 +19,7 @@ package cinder
import ( import (
"testing" "testing"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/kubernetes-csi/drivers/pkg/cinder/openstack" "github.com/kubernetes-csi/drivers/pkg/cinder/openstack"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
@ -49,7 +49,6 @@ func TestCreateVolume(t *testing.T) {
// Fake request // Fake request
fakeReq := &csi.CreateVolumeRequest{ fakeReq := &csi.CreateVolumeRequest{
Version: &version,
Name: fakeVolName, Name: fakeVolName,
VolumeCapabilities: nil, VolumeCapabilities: nil,
} }
@ -82,7 +81,6 @@ func TestDeleteVolume(t *testing.T) {
// Fake request // Fake request
fakeReq := &csi.DeleteVolumeRequest{ fakeReq := &csi.DeleteVolumeRequest{
Version: &version,
VolumeId: fakeVolID, VolumeId: fakeVolID,
} }
@ -117,7 +115,6 @@ func TestControllerPublishVolume(t *testing.T) {
// Fake request // Fake request
fakeReq := &csi.ControllerPublishVolumeRequest{ fakeReq := &csi.ControllerPublishVolumeRequest{
Version: &version,
VolumeId: fakeVolID, VolumeId: fakeVolID,
NodeId: fakeNodeID, NodeId: fakeNodeID,
VolumeCapability: nil, VolumeCapability: nil,
@ -157,7 +154,6 @@ func TestControllerUnpublishVolume(t *testing.T) {
// Fake request // Fake request
fakeReq := &csi.ControllerUnpublishVolumeRequest{ fakeReq := &csi.ControllerUnpublishVolumeRequest{
Version: &version,
VolumeId: fakeVolID, VolumeId: fakeVolID,
NodeId: fakeNodeID, NodeId: fakeNodeID,
} }

View File

@ -17,7 +17,7 @@ limitations under the License.
package cinder package cinder
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/cinder/openstack" "github.com/kubernetes-csi/drivers/pkg/cinder/openstack"
@ -42,24 +42,18 @@ const (
) )
var ( var (
version = csi.Version{ version = "0.2.0"
Minor: 1,
}
) )
func GetSupportedVersions() []*csi.Version {
return []*csi.Version{&version}
}
func NewDriver(nodeID, endpoint string, cloudconfig string) *driver { func NewDriver(nodeID, endpoint string, cloudconfig string) *driver {
glog.Infof("Driver: %v version: %v", driverName, csicommon.GetVersionString(&version)) glog.Infof("Driver: %v version: %v", driverName, version)
d := &driver{} d := &driver{}
d.endpoint = endpoint d.endpoint = endpoint
d.cloudconfig = cloudconfig d.cloudconfig = cloudconfig
csiDriver := csicommon.NewCSIDriver(driverName, &version, GetSupportedVersions(), nodeID) csiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)
csiDriver.AddControllerServiceCapabilities( csiDriver.AddControllerServiceCapabilities(
[]csi.ControllerServiceCapability_RPC_Type{ []csi.ControllerServiceCapability_RPC_Type{
csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,

View File

@ -17,7 +17,7 @@ limitations under the License.
package cinder package cinder
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
@ -130,3 +130,11 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return &csi.NodeStageVolumeResponse{}, nil
}

View File

@ -19,7 +19,7 @@ package cinder
import ( import (
"testing" "testing"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/kubernetes-csi/drivers/pkg/cinder/mount" "github.com/kubernetes-csi/drivers/pkg/cinder/mount"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock" "github.com/stretchr/testify/mock"
@ -53,9 +53,7 @@ func TestNodeGetId(t *testing.T) {
} }
// Fake request // Fake request
fakeReq := &csi.NodeGetIdRequest{ fakeReq := &csi.NodeGetIdRequest{}
Version: &version,
}
// Invoke NodeGetId // Invoke NodeGetId
actualRes, err := fakeNs.NodeGetId(fakeCtx, fakeReq) actualRes, err := fakeNs.NodeGetId(fakeCtx, fakeReq)
@ -88,7 +86,6 @@ func TestNodePublishVolume(t *testing.T) {
// Fake request // Fake request
fakeReq := &csi.NodePublishVolumeRequest{ fakeReq := &csi.NodePublishVolumeRequest{
Version: &version,
VolumeId: fakeVolID, VolumeId: fakeVolID,
PublishInfo: map[string]string{"DevicePath": fakeDevicePath}, PublishInfo: map[string]string{"DevicePath": fakeDevicePath},
TargetPath: fakeTargetPath, TargetPath: fakeTargetPath,
@ -126,7 +123,6 @@ func TestNodeUnpublishVolume(t *testing.T) {
// Fake request // Fake request
fakeReq := &csi.NodeUnpublishVolumeRequest{ fakeReq := &csi.NodeUnpublishVolumeRequest{
Version: &version,
VolumeId: fakeVolID, VolumeId: fakeVolID,
TargetPath: fakeTargetPath, TargetPath: fakeTargetPath,
} }

View File

@ -17,7 +17,7 @@ limitations under the License.
package csicommon package csicommon
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
@ -76,25 +76,11 @@ func (cs *DefaultControllerServer) GetCapacity(ctx context.Context, req *csi.Get
return nil, status.Error(codes.Unimplemented, "") return nil, status.Error(codes.Unimplemented, "")
} }
func (cs *DefaultControllerServer) ControllerProbe(ctx context.Context, req *csi.ControllerProbeRequest) (*csi.ControllerProbeResponse, error) {
glog.V(5).Infof("Using default ControllerProbe")
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_UNKNOWN); err != nil {
return nil, err
}
return &csi.ControllerProbeResponse{}, nil
}
// ControllerGetCapabilities implements the default GRPC callout. // ControllerGetCapabilities implements the default GRPC callout.
// Default supports all capabilities // Default supports all capabilities
func (cs *DefaultControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) { func (cs *DefaultControllerServer) ControllerGetCapabilities(ctx context.Context, req *csi.ControllerGetCapabilitiesRequest) (*csi.ControllerGetCapabilitiesResponse, error) {
glog.V(5).Infof("Using default ControllerGetCapabilities") glog.V(5).Infof("Using default ControllerGetCapabilities")
// Check arguments
if req.GetVersion() == nil {
return nil, status.Error(codes.InvalidArgument, "Version missing in request")
}
return &csi.ControllerGetCapabilitiesResponse{ return &csi.ControllerGetCapabilitiesResponse{
Capabilities: cs.Driver.cap, Capabilities: cs.Driver.cap,
}, nil }, nil

View File

@ -17,25 +17,25 @@ limitations under the License.
package csicommon package csicommon
import ( import (
"fmt"
"github.com/golang/glog" "github.com/golang/glog"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
) )
type CSIDriver struct { type CSIDriver struct {
name string name string
nodeID string nodeID string
version *csi.Version version string
supVers []*csi.Version
cap []*csi.ControllerServiceCapability cap []*csi.ControllerServiceCapability
vc []*csi.VolumeCapability_AccessMode vc []*csi.VolumeCapability_AccessMode
} }
// Creates a NewCSIDriver object. Assumes vendor version is equal to driver version & // Creates a NewCSIDriver object. Assumes vendor version is equal to driver version &
// does not support optional driver plugin info manifest field. Refer to CSI spec for more details. // does not support optional driver plugin info manifest field. Refer to CSI spec for more details.
func NewCSIDriver(name string, v *csi.Version, supVers []*csi.Version, nodeID string) *CSIDriver { func NewCSIDriver(name string, v string, nodeID string) *CSIDriver {
if name == "" { if name == "" {
glog.Errorf("Driver name missing") glog.Errorf("Driver name missing")
return nil return nil
@ -45,57 +45,22 @@ func NewCSIDriver(name string, v *csi.Version, supVers []*csi.Version, nodeID st
glog.Errorf("NodeID missing") glog.Errorf("NodeID missing")
return nil return nil
} }
// TODO version format and validation
if v == nil { if len(v) == 0 {
glog.Errorf("Version argument missing") glog.Errorf("Version argument missing")
return nil return nil
} }
found := false
for _, sv := range supVers {
if sv.GetMajor() == v.GetMajor() && sv.GetMinor() == v.GetMinor() && sv.GetPatch() == v.GetPatch() {
found = true
}
}
if !found {
supVers = append(supVers, v)
}
driver := CSIDriver{ driver := CSIDriver{
name: name, name: name,
version: v, version: v,
supVers: supVers,
nodeID: nodeID, nodeID: nodeID,
} }
return &driver return &driver
} }
func (d *CSIDriver) CheckVersion(v *csi.Version) error { func (d *CSIDriver) ValidateControllerServiceRequest(c csi.ControllerServiceCapability_RPC_Type) error {
if v == nil {
return status.Error(codes.InvalidArgument, "Version missing")
}
// Assumes always backward compatible
for _, sv := range d.supVers {
if v.Major == sv.Major && v.Minor <= sv.Minor {
return nil
}
}
return status.Error(codes.InvalidArgument, "Unsupported version: "+GetVersionString(v))
}
func (d *CSIDriver) ValidateControllerServiceRequest(v *csi.Version, c csi.ControllerServiceCapability_RPC_Type) error {
if v == nil {
return status.Error(codes.InvalidArgument, "Version not specified")
}
if err := d.CheckVersion(v); err != nil {
return status.Error(codes.InvalidArgument, "Unsupported version")
}
if c == csi.ControllerServiceCapability_RPC_UNKNOWN { if c == csi.ControllerServiceCapability_RPC_UNKNOWN {
return nil return nil
} }
@ -105,8 +70,7 @@ func (d *CSIDriver) ValidateControllerServiceRequest(v *csi.Version, c csi.Contr
return nil return nil
} }
} }
return status.Error(codes.InvalidArgument, fmt.Sprintf("%s", c))
return status.Error(codes.InvalidArgument, "Unsupported version: "+GetVersionString(v))
} }
func (d *CSIDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) { func (d *CSIDriver) AddControllerServiceCapabilities(cl []csi.ControllerServiceCapability_RPC_Type) {
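To show how the refactored constructor and capability check are meant to be called, here is a hedged sketch (not code from this commit); the driver name "csi-example" and the node ID are placeholders.

```go
package main

import (
	"github.com/container-storage-interface/spec/lib/go/csi/v0"
	"github.com/golang/glog"
	csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)

func main() {
	// The version is now a plain string instead of *csi.Version.
	d := csicommon.NewCSIDriver("csi-example", "0.2.0", "example-node-1")
	if d == nil {
		glog.Fatalln("failed to initialize CSI driver")
	}
	d.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{
		csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME,
	})
	// Per-request validation drops the *csi.Version argument entirely.
	if err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
		glog.Fatalf("capability not supported: %v", err)
	}
}
```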

View File

@ -19,7 +19,7 @@ package csicommon
import ( import (
"testing" "testing"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -31,93 +31,22 @@ const (
) )
var ( var (
fakeVersion = csi.Version{Major: 5, Minor: 2, Patch: 0} vendorVersion = "0.2.0"
fakeVersionsSupported = []*csi.Version{
{
Major: 4, Minor: 0, Patch: 0,
},
{
Major: 4, Minor: 1, Patch: 0,
},
}
) )
func NewFakeDriver() *CSIDriver { func NewFakeDriver() *CSIDriver {
fakeVersion = csi.Version{Major: 5, Minor: 2, Patch: 0}
fakeVersionsSupported = []*csi.Version{
{
Major: 4, Minor: 0, Patch: 0,
},
{
Major: 4, Minor: 1, Patch: 0,
},
}
driver := NewCSIDriver(fakeDriverName, &fakeVersion, fakeVersionsSupported, fakeNodeID) driver := NewCSIDriver(fakeDriverName, vendorVersion, fakeNodeID)
return driver return driver
} }
func TestNewFakeDriver(t *testing.T) { func TestNewFakeDriver(t *testing.T) {
// Test New fake driver with invalid arguments. // Test New fake driver with invalid arguments.
d := NewCSIDriver("", &fakeVersion, fakeVersionsSupported, fakeNodeID) d := NewCSIDriver("", vendorVersion, fakeNodeID)
assert.Nil(t, d) assert.Nil(t, d)
} }
func TestCheckVersion(t *testing.T) {
driver := NewFakeDriver()
// Exact version
v := csi.Version{
Major: 5,
Minor: 1,
Patch: 0,
}
err := driver.CheckVersion(&v)
assert.NoError(t, err)
//Supported version
v = csi.Version{
Major: 4,
Minor: 0,
Patch: 0,
}
err = driver.CheckVersion(&v)
assert.NoError(t, err)
// Unsupported version
v = csi.Version{
Major: 6,
Minor: 0,
Patch: 0,
}
err = driver.CheckVersion(&v)
s, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Supported minor version
v = csi.Version{
Major: 5,
Minor: 1,
Patch: 0,
}
err = driver.CheckVersion(&v)
assert.NoError(t, err)
// Unsupported minor version
v = csi.Version{
Major: 5,
Minor: 3,
Patch: 0,
}
err = driver.CheckVersion(&v)
s, ok = status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
}
func TestGetVolumeCapabilityAccessModes(t *testing.T) { func TestGetVolumeCapabilityAccessModes(t *testing.T) {
d := NewFakeDriver() d := NewFakeDriver()
@ -137,18 +66,12 @@ func TestGetVolumeCapabilityAccessModes(t *testing.T) {
func TestValidateControllerServiceRequest(t *testing.T) { func TestValidateControllerServiceRequest(t *testing.T) {
d := NewFakeDriver() d := NewFakeDriver()
v := csi.Version{
Major: 5,
Minor: 0,
Patch: 0,
}
// Valid requests which require no capabilities // Valid requests which require no capabilities
err := d.ValidateControllerServiceRequest(&v, csi.ControllerServiceCapability_RPC_UNKNOWN) err := d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_UNKNOWN)
assert.NoError(t, err) assert.NoError(t, err)
// Test controller service publish/unpublish not supported // Test controller service publish/unpublish not supported
err = d.ValidateControllerServiceRequest(&v, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) err = d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME)
s, ok := status.FromError(err) s, ok := status.FromError(err)
assert.True(t, ok) assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument) assert.Equal(t, s.Code(), codes.InvalidArgument)
@ -163,19 +86,19 @@ func TestValidateControllerServiceRequest(t *testing.T) {
}) })
// Test controller service publish/unpublish is supported // Test controller service publish/unpublish is supported
err = d.ValidateControllerServiceRequest(&v, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME) err = d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME)
assert.NoError(t, err) assert.NoError(t, err)
// Test controller service create/delete is supported // Test controller service create/delete is supported
err = d.ValidateControllerServiceRequest(&v, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME) err = d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME)
assert.NoError(t, err) assert.NoError(t, err)
// Test controller service list volumes is supported // Test controller service list volumes is supported
err = d.ValidateControllerServiceRequest(&v, csi.ControllerServiceCapability_RPC_LIST_VOLUMES) err = d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_LIST_VOLUMES)
assert.NoError(t, err) assert.NoError(t, err)
// Test controller service get capacity is supported // Test controller service get capacity is supported
err = d.ValidateControllerServiceRequest(&v, csi.ControllerServiceCapability_RPC_GET_CAPACITY) err = d.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_GET_CAPACITY)
assert.NoError(t, err) assert.NoError(t, err)
} }

View File

@ -17,7 +17,7 @@ limitations under the License.
package csicommon package csicommon
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
@ -28,15 +28,6 @@ type DefaultIdentityServer struct {
Driver *CSIDriver Driver *CSIDriver
} }
//GetSupportedVersions(context.Context, *GetSupportedVersionsRequest) (*GetSupportedVersionsResponse, error)
//GetPluginInfo(context.Context, *GetPluginInfoRequest) (*GetPluginInfoResponse, error)
func (ids *DefaultIdentityServer) GetSupportedVersions(ctx context.Context, req *csi.GetSupportedVersionsRequest) (*csi.GetSupportedVersionsResponse, error) {
glog.V(5).Infof("Using default GetSupportedVersions")
return &csi.GetSupportedVersionsResponse{
SupportedVersions: ids.Driver.supVers,
}, nil
}
func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) { func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.GetPluginInfoRequest) (*csi.GetPluginInfoResponse, error) {
glog.V(5).Infof("Using default GetPluginInnfo") glog.V(5).Infof("Using default GetPluginInnfo")
@ -44,15 +35,31 @@ func (ids *DefaultIdentityServer) GetPluginInfo(ctx context.Context, req *csi.Ge
return nil, status.Error(codes.Unavailable, "Driver name not configured") return nil, status.Error(codes.Unavailable, "Driver name not configured")
} }
err := ids.Driver.CheckVersion(req.GetVersion()) if ids.Driver.version == "" {
if err != nil { return nil, status.Error(codes.Unavailable, "Driver is missing version")
return nil, err
} }
version := GetVersionString(ids.Driver.version)
return &csi.GetPluginInfoResponse{ return &csi.GetPluginInfoResponse{
Name: ids.Driver.name, Name: ids.Driver.name,
VendorVersion: version, VendorVersion: ids.Driver.version,
}, nil
}
func (ids *DefaultIdentityServer) Probe(ctx context.Context, req *csi.ProbeRequest) (*csi.ProbeResponse, error) {
return &csi.ProbeResponse{}, nil
}
func (ids *DefaultIdentityServer) GetPluginCapabilities(ctx context.Context, req *csi.GetPluginCapabilitiesRequest) (*csi.GetPluginCapabilitiesResponse, error) {
glog.V(5).Infof("Using default capabilities")
return &csi.GetPluginCapabilitiesResponse{
Capabilities: []*csi.PluginCapability{
{
Type: &csi.PluginCapability_Service_{
Service: &csi.PluginCapability_Service{
Type: csi.PluginCapability_Service_UNKNOWN,
},
},
},
},
}, nil }, nil
} }
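A hedged sketch of the slimmed-down identity service from a caller's point of view; it assumes `NewDefaultIdentityServer` (exercised in the tests further below) simply wraps the `*CSIDriver`, and the driver name and version are placeholders.

```go
package main

import (
	"context"
	"fmt"

	"github.com/container-storage-interface/spec/lib/go/csi/v0"
	csicommon "github.com/kubernetes-csi/drivers/pkg/csi-common"
)

func main() {
	d := csicommon.NewCSIDriver("csi-example", "0.2.0", "example-node-1")
	ids := csicommon.NewDefaultIdentityServer(d)

	// GetPluginInfo no longer carries or checks a Version field on the request.
	info, err := ids.GetPluginInfo(context.Background(), &csi.GetPluginInfoRequest{})
	if err == nil {
		fmt.Println(info.GetName(), info.GetVendorVersion()) // csi-example 0.2.0
	}

	// Probe and GetPluginCapabilities are now served by the default identity server.
	_, _ = ids.Probe(context.Background(), &csi.ProbeRequest{})
	caps, _ := ids.GetPluginCapabilities(context.Background(), &csi.GetPluginCapabilitiesRequest{})
	fmt.Println(len(caps.GetCapabilities())) // 1: the UNKNOWN service capability shown above
}
```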

View File

@ -20,49 +20,18 @@ import (
"context" "context"
"testing" "testing"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
) )
func TestGetSupportedVersions(t *testing.T) {
d := NewFakeDriver()
ids := NewDefaultIdentityServer(d)
req := csi.GetSupportedVersionsRequest{}
// Test Get supported versions are valid.
resp, err := ids.GetSupportedVersions(context.Background(), &req)
assert.NoError(t, err)
for _, fv := range fakeVersionsSupported {
found := false
for _, rv := range resp.GetSupportedVersions() {
if fv.GetMajor() == rv.GetMajor() && fv.GetMinor() == rv.GetMinor() && fv.GetPatch() == rv.GetPatch() {
found = true
}
}
assert.True(t, found)
}
}
func TestGetPluginInfo(t *testing.T) { func TestGetPluginInfo(t *testing.T) {
d := NewFakeDriver() d := NewFakeDriver()
ids := NewDefaultIdentityServer(d) ids := NewDefaultIdentityServer(d)
// Test invalid request
req := csi.GetPluginInfoRequest{} req := csi.GetPluginInfoRequest{}
resp, err := ids.GetPluginInfo(context.Background(), &req) resp, err := ids.GetPluginInfo(context.Background(), &req)
s, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Test valid request
req.Version = &fakeVersion
resp, err = ids.GetPluginInfo(context.Background(), &req)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, resp.GetName(), fakeDriverName) assert.Equal(t, resp.GetName(), fakeDriverName)
assert.Equal(t, resp.GetVendorVersion(), vendorVersion)
} }

View File

@ -17,7 +17,7 @@ limitations under the License.
package csicommon package csicommon
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
@ -29,53 +29,24 @@ type DefaultNodeServer struct {
} }
func (ns *DefaultNodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { func (ns *DefaultNodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
err := ns.Driver.CheckVersion(req.GetVersion())
if err != nil {
return nil, err
}
return nil, status.Error(codes.Unimplemented, "") return nil, status.Error(codes.Unimplemented, "")
} }
func (ns *DefaultNodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { func (ns *DefaultNodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
err := ns.Driver.CheckVersion(req.GetVersion())
if err != nil {
return nil, err
}
return nil, status.Error(codes.Unimplemented, "") return nil, status.Error(codes.Unimplemented, "")
} }
func (ns *DefaultNodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest) (*csi.NodeGetIdResponse, error) { func (ns *DefaultNodeServer) NodeGetId(ctx context.Context, req *csi.NodeGetIdRequest) (*csi.NodeGetIdResponse, error) {
glog.V(5).Infof("Using default NodeGetId") glog.V(5).Infof("Using default NodeGetId")
err := ns.Driver.CheckVersion(req.GetVersion())
if err != nil {
return nil, err
}
return &csi.NodeGetIdResponse{ return &csi.NodeGetIdResponse{
NodeId: ns.Driver.nodeID, NodeId: ns.Driver.nodeID,
}, nil }, nil
} }
func (ns *DefaultNodeServer) NodeProbe(ctx context.Context, req *csi.NodeProbeRequest) (*csi.NodeProbeResponse, error) {
glog.V(5).Infof("Using default NodeProbe")
err := ns.Driver.CheckVersion(req.GetVersion())
if err != nil {
return nil, err
}
return &csi.NodeProbeResponse{}, nil
}
func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) { func (ns *DefaultNodeServer) NodeGetCapabilities(ctx context.Context, req *csi.NodeGetCapabilitiesRequest) (*csi.NodeGetCapabilitiesResponse, error) {
glog.V(5).Infof("Using default NodeGetCapabilities") glog.V(5).Infof("Using default NodeGetCapabilities")
err := ns.Driver.CheckVersion(req.GetVersion())
if err != nil {
return nil, err
}
return &csi.NodeGetCapabilitiesResponse{ return &csi.NodeGetCapabilitiesResponse{
Capabilities: []*csi.NodeServiceCapability{ Capabilities: []*csi.NodeServiceCapability{
{ {

View File

@ -20,7 +20,7 @@ import (
"context" "context"
"testing" "testing"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -31,15 +31,8 @@ func TestNodeGetId(t *testing.T) {
ns := NewDefaultNodeServer(d) ns := NewDefaultNodeServer(d)
// Test invalid request
req := csi.NodeGetIdRequest{}
_, err := ns.NodeGetId(context.Background(), &req)
s, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Test valid request // Test valid request
req.Version = &fakeVersion req := csi.NodeGetIdRequest{}
resp, err := ns.NodeGetId(context.Background(), &req) resp, err := ns.NodeGetId(context.Background(), &req)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equal(t, resp.GetNodeId(), fakeNodeID) assert.Equal(t, resp.GetNodeId(), fakeNodeID)
@ -50,34 +43,9 @@ func TestNodeGetCapabilities(t *testing.T) {
ns := NewDefaultNodeServer(d) ns := NewDefaultNodeServer(d)
// Test invalid request // Test valid request
req := csi.NodeGetCapabilitiesRequest{} req := csi.NodeGetCapabilitiesRequest{}
_, err := ns.NodeGetCapabilities(context.Background(), &req) _, err := ns.NodeGetCapabilities(context.Background(), &req)
s, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Test valid request
req.Version = &fakeVersion
_, err = ns.NodeGetCapabilities(context.Background(), &req)
assert.NoError(t, err)
}
func TestNodeProbe(t *testing.T) {
d := NewFakeDriver()
ns := NewDefaultNodeServer(d)
// Test invalid request
req := csi.NodeProbeRequest{}
_, err := ns.NodeProbe(context.Background(), &req)
s, ok := status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Test valid request
req.Version = &fakeVersion
_, err = ns.NodeProbe(context.Background(), &req)
assert.NoError(t, err) assert.NoError(t, err)
} }
@ -91,13 +59,6 @@ func TestNodePublishVolume(t *testing.T) {
_, err := ns.NodePublishVolume(context.Background(), &req) _, err := ns.NodePublishVolume(context.Background(), &req)
s, ok := status.FromError(err) s, ok := status.FromError(err)
assert.True(t, ok) assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Test valid node publish request
req.Version = &fakeVersion
_, err = ns.NodePublishVolume(context.Background(), &req)
s, ok = status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.Unimplemented) assert.Equal(t, s.Code(), codes.Unimplemented)
} }
@ -111,12 +72,5 @@ func TestNodeUnpublishVolume(t *testing.T) {
_, err := ns.NodeUnpublishVolume(context.Background(), &req) _, err := ns.NodeUnpublishVolume(context.Background(), &req)
s, ok := status.FromError(err) s, ok := status.FromError(err)
assert.True(t, ok) assert.True(t, ok)
assert.Equal(t, s.Code(), codes.InvalidArgument)
// Test valid node publish request
req.Version = &fakeVersion
_, err = ns.NodeUnpublishVolume(context.Background(), &req)
s, ok = status.FromError(err)
assert.True(t, ok)
assert.Equal(t, s.Code(), codes.Unimplemented) assert.Equal(t, s.Code(), codes.Unimplemented)
} }

View File

@ -24,7 +24,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"google.golang.org/grpc" "google.golang.org/grpc"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
) )
// Defines Non blocking GRPC server interfaces // Defines Non blocking GRPC server interfaces

View File

@ -20,7 +20,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc" "google.golang.org/grpc"
@ -36,28 +36,6 @@ func ParseEndpoint(ep string) (string, string, error) {
return "", "", fmt.Errorf("Invalid endpoint: %v", ep) return "", "", fmt.Errorf("Invalid endpoint: %v", ep)
} }
func GetVersionString(v *csi.Version) string {
return fmt.Sprintf("%d.%d.%d", v.Major, v.Minor, v.Patch)
}
func GetVersionFromString(v string) (*csi.Version, error) {
var major, minor, patch int32
n, err := fmt.Sscanf(v, "%d.%d.%d", &major, &minor, &patch)
if err != nil {
return nil, err
}
if n != 3 {
return nil, fmt.Errorf("Invalid format. Specify version in x.y.z format")
}
return &csi.Version{
Major: major,
Minor: minor,
Patch: patch,
}, nil
}
func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode { func NewVolumeCapabilityAccessMode(mode csi.VolumeCapability_AccessMode_Mode) *csi.VolumeCapability_AccessMode {
return &csi.VolumeCapability_AccessMode{Mode: mode} return &csi.VolumeCapability_AccessMode{Mode: mode}
} }

View File

@ -19,39 +19,9 @@ package csicommon
import ( import (
"testing" "testing"
"github.com/container-storage-interface/spec/lib/go/csi"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
) )
func TestGetVersionFromString(t *testing.T) {
//Invalid version
_, err := GetVersionFromString("")
assert.NotNil(t, err)
v, err := GetVersionFromString("1.2.3")
assert.NoError(t, err)
assert.Equal(t, v.GetMajor(), int32(1))
assert.Equal(t, v.GetMinor(), int32(2))
assert.Equal(t, v.GetPatch(), int32(3))
// Invalid version
_, err = GetVersionFromString("1.2")
assert.NotNil(t, err)
}
func TestGetVersionString(t *testing.T) {
v := &csi.Version{
Major: 1,
Minor: 0,
Patch: 0,
}
//Invalid version
vStr := GetVersionString(v)
assert.Equal(t, vStr, "1.0.0")
}
func TestParseEndpoint(t *testing.T) { func TestParseEndpoint(t *testing.T) {
//Valid unix domain socket endpoint //Valid unix domain socket endpoint

View File

@ -17,9 +17,7 @@ limitations under the License.
package flexadapter package flexadapter
import ( import (
"fmt" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/container-storage-interface/spec/lib/go/csi"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -36,12 +34,8 @@ type controllerServer struct {
*csicommon.DefaultControllerServer *csicommon.DefaultControllerServer
} }
func GetVersionString(ver *csi.Version) string {
return fmt.Sprintf("%d.%d.%d", ver.Major, ver.Minor, ver.Patch)
}
func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) { func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *csi.ControllerPublishVolumeRequest) (*csi.ControllerPublishVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME); err != nil {
return nil, err return nil, err
} }
@ -73,7 +67,7 @@ func (cs *controllerServer) ControllerPublishVolume(ctx context.Context, req *cs
} }
func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) { func (cs *controllerServer) ControllerUnpublishVolume(ctx context.Context, req *csi.ControllerUnpublishVolumeRequest) (*csi.ControllerUnpublishVolumeResponse, error) {
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME); err != nil {
return nil, err return nil, err
} }

View File

@ -19,7 +19,7 @@ package flexadapter
import ( import (
"os" "os"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -38,15 +38,9 @@ type flexAdapter struct {
} }
var ( var (
version = csi.Version{ version = "0.2.0"
Minor: 1,
}
) )
func GetSupportedVersions() []*csi.Version {
return []*csi.Version{&version}
}
func New() *flexAdapter { func New() *flexAdapter {
return &flexAdapter{} return &flexAdapter{}
} }
@ -68,7 +62,7 @@ func NewNodeServer(d *csicommon.CSIDriver, f *flexVolumeDriver) *nodeServer {
func (f *flexAdapter) Run(driverName, driverPath, nodeID, endpoint string) { func (f *flexAdapter) Run(driverName, driverPath, nodeID, endpoint string) {
var err error var err error
glog.Infof("Driver: %v version: %v", driverName, GetVersionString(&version)) glog.Infof("Driver: %v version: %v", driverName, version)
// Create flex volume driver // Create flex volume driver
f.flexDriver, err = NewFlexVolumeDriver(driverName, driverPath) f.flexDriver, err = NewFlexVolumeDriver(driverName, driverPath)
@ -78,7 +72,7 @@ func (f *flexAdapter) Run(driverName, driverPath, nodeID, endpoint string) {
} }
// Initialize default library driver // Initialize default library driver
f.driver = csicommon.NewCSIDriver(driverName, &version, GetSupportedVersions(), nodeID) f.driver = csicommon.NewCSIDriver(driverName, version, nodeID)
if f.flexDriver.capabilities.Attach { if f.flexDriver.capabilities.Attach {
f.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME}) f.driver.AddControllerServiceCapabilities([]csi.ControllerServiceCapability_RPC_Type{csi.ControllerServiceCapability_RPC_PUBLISH_UNPUBLISH_VOLUME})
} }

View File

@ -19,7 +19,7 @@ package flexadapter
import ( import (
"os" "os"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -160,3 +160,11 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
// WaitForDetach is ignored in current K8S plugins // WaitForDetach is ignored in current K8S plugins
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return &csi.NodeStageVolumeResponse{}, nil
}

View File

@ -17,7 +17,6 @@ limitations under the License.
package hostpath package hostpath
import ( import (
"fmt"
"os" "os"
"github.com/golang/glog" "github.com/golang/glog"
@ -26,7 +25,7 @@ import (
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/kubernetes-csi/drivers/pkg/csi-common"
) )
@ -39,16 +38,9 @@ type controllerServer struct {
*csicommon.DefaultControllerServer *csicommon.DefaultControllerServer
} }
func GetVersionString(ver *csi.Version) string {
return fmt.Sprintf("%d.%d.%d", ver.Major, ver.Minor, ver.Patch)
}
func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) { func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVolumeRequest) (*csi.CreateVolumeResponse, error) {
// Check arguments // Check arguments
if req.GetVersion() == nil {
return nil, status.Error(codes.InvalidArgument, "Version missing in request")
}
if len(req.GetName()) == 0 { if len(req.GetName()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Name missing in request") return nil, status.Error(codes.InvalidArgument, "Name missing in request")
} }
@ -56,7 +48,7 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request") return nil, status.Error(codes.InvalidArgument, "Volume Capabilities missing in request")
} }
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid create volume req: %v", req) glog.V(3).Infof("invalid create volume req: %v", req)
return nil, err return nil, err
} }
@ -70,7 +62,8 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
glog.V(4).Infof("create volume %s", path) glog.V(4).Infof("create volume %s", path)
return &csi.CreateVolumeResponse{ return &csi.CreateVolumeResponse{
Volume: &csi.Volume{ Volume: &csi.Volume{
Id: volumeId, Id: volumeId,
CapacityBytes: req.GetCapacityRange().GetRequiredBytes(),
}, },
}, nil }, nil
} }
@ -78,14 +71,11 @@ func (cs *controllerServer) CreateVolume(ctx context.Context, req *csi.CreateVol
func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) { func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVolumeRequest) (*csi.DeleteVolumeResponse, error) {
// Check arguments // Check arguments
if req.GetVersion() == nil {
return nil, status.Error(codes.InvalidArgument, "Version missing in request")
}
if len(req.GetVolumeId()) == 0 { if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request") return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
} }
if err := cs.Driver.ValidateControllerServiceRequest(req.Version, csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil { if err := cs.Driver.ValidateControllerServiceRequest(csi.ControllerServiceCapability_RPC_CREATE_DELETE_VOLUME); err != nil {
glog.V(3).Infof("invalid delete volume req: %v", req) glog.V(3).Infof("invalid delete volume req: %v", req)
return nil, err return nil, err
} }
@ -100,9 +90,6 @@ func (cs *controllerServer) DeleteVolume(ctx context.Context, req *csi.DeleteVol
func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) { func (cs *controllerServer) ValidateVolumeCapabilities(ctx context.Context, req *csi.ValidateVolumeCapabilitiesRequest) (*csi.ValidateVolumeCapabilitiesResponse, error) {
// Check arguments // Check arguments
if req.GetVersion() == nil {
return nil, status.Error(codes.InvalidArgument, "Version missing in request")
}
if len(req.GetVolumeId()) == 0 { if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request") return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
} }

View File

@ -17,7 +17,7 @@ limitations under the License.
package hostpath package hostpath
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -36,15 +36,9 @@ type hostPath struct {
var ( var (
hostPathDriver *hostPath hostPathDriver *hostPath
version = csi.Version{ vendorVersion = "0.2.0"
Minor: 2,
}
) )
func GetSupportedVersions() []*csi.Version {
return []*csi.Version{&version}
}
func GetHostPathDriver() *hostPath { func GetHostPathDriver() *hostPath {
return &hostPath{} return &hostPath{}
} }
@ -68,10 +62,10 @@ func NewNodeServer(d *csicommon.CSIDriver) *nodeServer {
} }
func (hp *hostPath) Run(driverName, nodeID, endpoint string) { func (hp *hostPath) Run(driverName, nodeID, endpoint string) {
glog.Infof("Driver: %v version: %v", driverName, GetVersionString(&version)) glog.Infof("Driver: %v ", driverName)
// Initialize default library driver // Initialize default library driver
hp.driver = csicommon.NewCSIDriver(driverName, &version, GetSupportedVersions(), nodeID) hp.driver = csicommon.NewCSIDriver(driverName, vendorVersion, nodeID)
if hp.driver == nil { if hp.driver == nil {
glog.Fatalln("Failed to initialize CSI Driver.") glog.Fatalln("Failed to initialize CSI Driver.")
} }

View File

@ -22,7 +22,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"golang.org/x/net/context" "golang.org/x/net/context"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
@ -37,9 +37,6 @@ type nodeServer struct {
func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) { func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublishVolumeRequest) (*csi.NodePublishVolumeResponse, error) {
// Check arguments // Check arguments
if req.GetVersion() == nil {
return nil, status.Error(codes.InvalidArgument, "Version missing in request")
}
if req.GetVolumeCapability() == nil { if req.GetVolumeCapability() == nil {
return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request") return nil, status.Error(codes.InvalidArgument, "Volume capability missing in request")
} }
@ -98,15 +95,47 @@ func (ns *nodeServer) NodePublishVolume(ctx context.Context, req *csi.NodePublis
func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) { func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpublishVolumeRequest) (*csi.NodeUnpublishVolumeResponse, error) {
// Check arguments // Check arguments
if req.GetVersion() == nil {
return nil, status.Error(codes.InvalidArgument, "Version missing in request")
}
if len(req.GetVolumeId()) == 0 { if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request") return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
} }
if len(req.GetTargetPath()) == 0 { if len(req.GetTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request") return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
} }
targetPath := req.GetTargetPath()
volumeID := req.GetVolumeId()
// Unmounting the image
err := mount.New("").Unmount(req.GetTargetPath())
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
glog.V(4).Infof("hostpath: volume %s/%s has been unmounted.", targetPath, volumeID)
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
return &csi.NodeStageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
// Check arguments
if len(req.GetVolumeId()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Volume ID missing in request")
}
if len(req.GetStagingTargetPath()) == 0 {
return nil, status.Error(codes.InvalidArgument, "Target path missing in request")
}
return &csi.NodeUnstageVolumeResponse{}, nil
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package iscsi package iscsi
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -39,23 +39,17 @@ const (
) )
var ( var (
version = csi.Version{ version = "0.2.0"
Minor: 1,
}
) )
func GetSupportedVersions() []*csi.Version {
return []*csi.Version{&version}
}
func NewDriver(nodeID, endpoint string) *driver { func NewDriver(nodeID, endpoint string) *driver {
glog.Infof("Driver: %v version: %v", driverName, csicommon.GetVersionString(&version)) glog.Infof("Driver: %v version: %v", driverName, version)
d := &driver{} d := &driver{}
d.endpoint = endpoint d.endpoint = endpoint
csiDriver := csicommon.NewCSIDriver(driverName, &version, GetSupportedVersions(), nodeID) csiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)
csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER}) csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_SINGLE_NODE_WRITER})
d.csiDriver = csiDriver d.csiDriver = csiDriver

View File

@ -21,7 +21,7 @@ import (
"fmt" "fmt"
"strings" "strings"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/util/mount"
"k8s.io/kubernetes/pkg/volume/util" "k8s.io/kubernetes/pkg/volume/util"
) )

View File

@ -17,7 +17,7 @@ limitations under the License.
package iscsi package iscsi
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -57,3 +57,11 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return &csi.NodeStageVolumeResponse{}, nil
}

View File

@ -17,7 +17,7 @@ limitations under the License.
package nfs package nfs
import ( import (
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/kubernetes-csi/drivers/pkg/csi-common" "github.com/kubernetes-csi/drivers/pkg/csi-common"
@ -39,23 +39,17 @@ const (
) )
var ( var (
version = csi.Version{ version = "0.2.0"
Minor: 1,
}
) )
func GetSupportedVersions() []*csi.Version {
return []*csi.Version{&version}
}
func NewDriver(nodeID, endpoint string) *driver { func NewDriver(nodeID, endpoint string) *driver {
glog.Infof("Driver: %v version: %v", driverName, csicommon.GetVersionString(&version)) glog.Infof("Driver: %v version: %v", driverName, version)
d := &driver{} d := &driver{}
d.endpoint = endpoint d.endpoint = endpoint
csiDriver := csicommon.NewCSIDriver(driverName, &version, GetSupportedVersions(), nodeID) csiDriver := csicommon.NewCSIDriver(driverName, version, nodeID)
csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER}) csiDriver.AddVolumeCapabilityAccessModes([]csi.VolumeCapability_AccessMode_Mode{csi.VolumeCapability_AccessMode_MULTI_NODE_MULTI_WRITER})
d.csiDriver = csiDriver d.csiDriver = csiDriver

View File

@ -21,7 +21,7 @@ import (
"os" "os"
"strings" "strings"
"github.com/container-storage-interface/spec/lib/go/csi" "github.com/container-storage-interface/spec/lib/go/csi/v0"
"golang.org/x/net/context" "golang.org/x/net/context"
"google.golang.org/grpc/codes" "google.golang.org/grpc/codes"
"google.golang.org/grpc/status" "google.golang.org/grpc/status"
@ -99,3 +99,11 @@ func (ns *nodeServer) NodeUnpublishVolume(ctx context.Context, req *csi.NodeUnpu
return &csi.NodeUnpublishVolumeResponse{}, nil return &csi.NodeUnpublishVolumeResponse{}, nil
} }
func (ns *nodeServer) NodeUnstageVolume(ctx context.Context, req *csi.NodeUnstageVolumeRequest) (*csi.NodeUnstageVolumeResponse, error) {
return &csi.NodeUnstageVolumeResponse{}, nil
}
func (ns *nodeServer) NodeStageVolume(ctx context.Context, req *csi.NodeStageVolumeRequest) (*csi.NodeStageVolumeResponse, error) {
return &csi.NodeStageVolumeResponse{}, nil
}

View File

@ -198,7 +198,7 @@ func (a LoadConstant) Assemble() (RawInstruction, error) {
return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val) return assembleLoad(a.Dst, 4, opAddrModeImmediate, a.Val)
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a LoadConstant) String() string { func (a LoadConstant) String() string {
switch a.Dst { switch a.Dst {
case RegA: case RegA:
@ -224,7 +224,7 @@ func (a LoadScratch) Assemble() (RawInstruction, error) {
return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N)) return assembleLoad(a.Dst, 4, opAddrModeScratch, uint32(a.N))
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a LoadScratch) String() string { func (a LoadScratch) String() string {
switch a.Dst { switch a.Dst {
case RegA: case RegA:
@ -248,7 +248,7 @@ func (a LoadAbsolute) Assemble() (RawInstruction, error) {
return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off) return assembleLoad(RegA, a.Size, opAddrModeAbsolute, a.Off)
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a LoadAbsolute) String() string { func (a LoadAbsolute) String() string {
switch a.Size { switch a.Size {
case 1: // byte case 1: // byte
@ -277,7 +277,7 @@ func (a LoadIndirect) Assemble() (RawInstruction, error) {
return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off) return assembleLoad(RegA, a.Size, opAddrModeIndirect, a.Off)
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a LoadIndirect) String() string { func (a LoadIndirect) String() string {
switch a.Size { switch a.Size {
case 1: // byte case 1: // byte
@ -306,7 +306,7 @@ func (a LoadMemShift) Assemble() (RawInstruction, error) {
return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off) return assembleLoad(RegX, 1, opAddrModeMemShift, a.Off)
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a LoadMemShift) String() string { func (a LoadMemShift) String() string {
return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off) return fmt.Sprintf("ldx 4*([%d]&0xf)", a.Off)
} }
@ -325,7 +325,7 @@ func (a LoadExtension) Assemble() (RawInstruction, error) {
return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num)) return assembleLoad(RegA, 4, opAddrModeAbsolute, uint32(extOffset+a.Num))
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a LoadExtension) String() string { func (a LoadExtension) String() string {
switch a.Num { switch a.Num {
case ExtLen: case ExtLen:
@ -392,7 +392,7 @@ func (a StoreScratch) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a StoreScratch) String() string { func (a StoreScratch) String() string {
switch a.Src { switch a.Src {
case RegA: case RegA:
@ -418,7 +418,7 @@ func (a ALUOpConstant) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a ALUOpConstant) String() string { func (a ALUOpConstant) String() string {
switch a.Op { switch a.Op {
case ALUOpAdd: case ALUOpAdd:
@ -458,7 +458,7 @@ func (a ALUOpX) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a ALUOpX) String() string { func (a ALUOpX) String() string {
switch a.Op { switch a.Op {
case ALUOpAdd: case ALUOpAdd:
@ -496,7 +496,7 @@ func (a NegateA) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a NegateA) String() string { func (a NegateA) String() string {
return fmt.Sprintf("neg") return fmt.Sprintf("neg")
} }
@ -514,7 +514,7 @@ func (a Jump) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a Jump) String() string { func (a Jump) String() string {
return fmt.Sprintf("ja %d", a.Skip) return fmt.Sprintf("ja %d", a.Skip)
} }
@ -566,7 +566,7 @@ func (a JumpIf) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a JumpIf) String() string { func (a JumpIf) String() string {
switch a.Cond { switch a.Cond {
// K == A // K == A
@ -621,7 +621,7 @@ func (a RetA) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a RetA) String() string { func (a RetA) String() string {
return fmt.Sprintf("ret a") return fmt.Sprintf("ret a")
} }
@ -639,7 +639,7 @@ func (a RetConstant) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a RetConstant) String() string { func (a RetConstant) String() string {
return fmt.Sprintf("ret #%d", a.Val) return fmt.Sprintf("ret #%d", a.Val)
} }
@ -654,7 +654,7 @@ func (a TXA) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a TXA) String() string { func (a TXA) String() string {
return fmt.Sprintf("txa") return fmt.Sprintf("txa")
} }
@ -669,7 +669,7 @@ func (a TAX) Assemble() (RawInstruction, error) {
}, nil }, nil
} }
// String returns the the instruction in assembler notation. // String returns the instruction in assembler notation.
func (a TAX) String() string { func (a TAX) String() string {
return fmt.Sprintf("tax") return fmt.Sprintf("tax")
} }
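Since this hunk only fixes the doubled "the the" in the `String()` helpers of the vendored golang.org/x/net/bpf package, here is a short sketch of how those assembler-notation strings surface in practice (illustrative; the exact output format is the package's own).

```go
package main

import (
	"fmt"

	"golang.org/x/net/bpf"
)

func main() {
	// A tiny classic-BPF program: load the 16-bit EtherType at offset 12,
	// then return a constant snap length.
	prog := []bpf.Instruction{
		bpf.LoadAbsolute{Off: 12, Size: 2},
		bpf.RetConstant{Val: 262144},
	}
	for _, ins := range prog {
		fmt.Println(ins) // each instruction prints in assembler notation via String()
	}
	if _, err := bpf.Assemble(prog); err != nil {
		fmt.Println("assemble error:", err)
	}
}
```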

View File

@ -37,20 +37,20 @@ func ExampleParser() {
}, },
Answers: []dnsmessage.Resource{ Answers: []dnsmessage.Resource{
{ {
dnsmessage.ResourceHeader{ Header: dnsmessage.ResourceHeader{
Name: mustNewName("foo.bar.example.com."), Name: mustNewName("foo.bar.example.com."),
Type: dnsmessage.TypeA, Type: dnsmessage.TypeA,
Class: dnsmessage.ClassINET, Class: dnsmessage.ClassINET,
}, },
&dnsmessage.AResource{[4]byte{127, 0, 0, 1}}, Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 1}},
}, },
{ {
dnsmessage.ResourceHeader{ Header: dnsmessage.ResourceHeader{
Name: mustNewName("bar.example.com."), Name: mustNewName("bar.example.com."),
Type: dnsmessage.TypeA, Type: dnsmessage.TypeA,
Class: dnsmessage.ClassINET, Class: dnsmessage.ClassINET,
}, },
&dnsmessage.AResource{[4]byte{127, 0, 0, 2}}, Body: &dnsmessage.AResource{A: [4]byte{127, 0, 0, 2}},
}, },
}, },
} }

View File

@ -13,7 +13,7 @@ import (
"errors" "errors"
) )
// Packet formats // Message formats
// A Type is a type of DNS request and response. // A Type is a type of DNS request and response.
type Type uint16 type Type uint16
@ -273,25 +273,25 @@ type Resource struct {
// A ResourceBody is a DNS resource record minus the header. // A ResourceBody is a DNS resource record minus the header.
type ResourceBody interface { type ResourceBody interface {
// pack packs a Resource except for its header. // pack packs a Resource except for its header.
pack(msg []byte, compression map[string]int) ([]byte, error) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error)
// realType returns the actual type of the Resource. This is used to // realType returns the actual type of the Resource. This is used to
// fill in the header Type field. // fill in the header Type field.
realType() Type realType() Type
} }
func (r *Resource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *Resource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
if r.Body == nil { if r.Body == nil {
return msg, errNilResouceBody return msg, errNilResouceBody
} }
oldMsg := msg oldMsg := msg
r.Header.Type = r.Body.realType() r.Header.Type = r.Body.realType()
msg, length, err := r.Header.pack(msg, compression) msg, length, err := r.Header.pack(msg, compression, compressionOff)
if err != nil { if err != nil {
return msg, &nestedError{"ResourceHeader", err} return msg, &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
msg, err = r.Body.pack(msg, compression) msg, err = r.Body.pack(msg, compression, compressionOff)
if err != nil { if err != nil {
return msg, &nestedError{"content", err} return msg, &nestedError{"content", err}
} }
@ -436,7 +436,13 @@ func (p *Parser) Question() (Question, error) {
// AllQuestions parses all Questions. // AllQuestions parses all Questions.
func (p *Parser) AllQuestions() ([]Question, error) { func (p *Parser) AllQuestions() ([]Question, error) {
qs := make([]Question, 0, p.header.questions) // Multiple questions are valid according to the spec,
// but servers don't actually support them. There will
// be at most one question here.
//
// Do not pre-allocate based on info in p.header, since
// the data is untrusted.
qs := []Question{}
for { for {
q, err := p.Question() q, err := p.Question()
if err == ErrSectionDone { if err == ErrSectionDone {
@ -492,7 +498,16 @@ func (p *Parser) Answer() (Resource, error) {
// AllAnswers parses all Answer Resources. // AllAnswers parses all Answer Resources.
func (p *Parser) AllAnswers() ([]Resource, error) { func (p *Parser) AllAnswers() ([]Resource, error) {
as := make([]Resource, 0, p.header.answers) // The most common query is for A/AAAA, which usually returns
// a handful of IPs.
//
// Pre-allocate up to a certain limit, since p.header is
// untrusted data.
n := int(p.header.answers)
if n > 20 {
n = 20
}
as := make([]Resource, 0, n)
for { for {
a, err := p.Answer() a, err := p.Answer()
if err == ErrSectionDone { if err == ErrSectionDone {
@ -533,7 +548,16 @@ func (p *Parser) Authority() (Resource, error) {
// AllAuthorities parses all Authority Resources. // AllAuthorities parses all Authority Resources.
func (p *Parser) AllAuthorities() ([]Resource, error) { func (p *Parser) AllAuthorities() ([]Resource, error) {
as := make([]Resource, 0, p.header.authorities) // Authorities contains SOA in case of NXDOMAIN and friends,
// otherwise it is empty.
//
// Pre-allocate up to a certain limit, since p.header is
// untrusted data.
n := int(p.header.authorities)
if n > 10 {
n = 10
}
as := make([]Resource, 0, n)
for { for {
a, err := p.Authority() a, err := p.Authority()
if err == ErrSectionDone { if err == ErrSectionDone {
@ -574,7 +598,16 @@ func (p *Parser) Additional() (Resource, error) {
// AllAdditionals parses all Additional Resources. // AllAdditionals parses all Additional Resources.
func (p *Parser) AllAdditionals() ([]Resource, error) { func (p *Parser) AllAdditionals() ([]Resource, error) {
as := make([]Resource, 0, p.header.additionals) // Additionals usually contain OPT, and sometimes A/AAAA
// glue records.
//
// Pre-allocate up to a certain limit, since p.header is
// untrusted data.
n := int(p.header.additionals)
if n > 10 {
n = 10
}
as := make([]Resource, 0, n)
for { for {
a, err := p.Additional() a, err := p.Additional()
if err == ErrSectionDone { if err == ErrSectionDone {
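The AllQuestions, AllAnswers, AllAuthorities and AllAdditionals changes above all follow one rule: never size an allocation directly from a count read off the wire, because a forged header can advertise thousands of records while carrying none. A rough sketch of the capping pattern; the limit value is illustrative only:

package main

import "fmt"

// cappedCap trusts the advertised record count only up to a small fixed
// limit, so untrusted header data cannot force a large allocation before any
// records have actually been parsed.
func cappedCap(advertised uint16, limit int) int {
    n := int(advertised)
    if n > limit {
        n = limit
    }
    return n
}

func main() {
    records := make([]string, 0, cappedCap(65535, 20)) // attacker-controlled count
    fmt.Println(cap(records))                          // 20, not 65535
}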
@ -819,6 +852,7 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) {
h.authorities = uint16(len(m.Authorities)) h.authorities = uint16(len(m.Authorities))
h.additionals = uint16(len(m.Additionals)) h.additionals = uint16(len(m.Additionals))
compressionOff := len(b)
msg := h.pack(b) msg := h.pack(b)
// RFC 1035 allows (but does not require) compression for packing. RFC // RFC 1035 allows (but does not require) compression for packing. RFC
@ -826,32 +860,32 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) {
// unconditionally enabling it is fine. // unconditionally enabling it is fine.
// //
// DNS lookups are typically done over UDP, and RFC 1035 states that UDP // DNS lookups are typically done over UDP, and RFC 1035 states that UDP
// DNS packets can be a maximum of 512 bytes long. Without compression, // DNS messages can be a maximum of 512 bytes long. Without compression,
// many DNS response packets are over this limit, so enabling // many DNS response messages are over this limit, so enabling
// compression will help ensure compliance. // compression will help ensure compliance.
compression := map[string]int{} compression := map[string]int{}
for i := range m.Questions { for i := range m.Questions {
var err error var err error
if msg, err = m.Questions[i].pack(msg, compression); err != nil { if msg, err = m.Questions[i].pack(msg, compression, compressionOff); err != nil {
return nil, &nestedError{"packing Question", err} return nil, &nestedError{"packing Question", err}
} }
} }
for i := range m.Answers { for i := range m.Answers {
var err error var err error
if msg, err = m.Answers[i].pack(msg, compression); err != nil { if msg, err = m.Answers[i].pack(msg, compression, compressionOff); err != nil {
return nil, &nestedError{"packing Answer", err} return nil, &nestedError{"packing Answer", err}
} }
} }
for i := range m.Authorities { for i := range m.Authorities {
var err error var err error
if msg, err = m.Authorities[i].pack(msg, compression); err != nil { if msg, err = m.Authorities[i].pack(msg, compression, compressionOff); err != nil {
return nil, &nestedError{"packing Authority", err} return nil, &nestedError{"packing Authority", err}
} }
} }
for i := range m.Additionals { for i := range m.Additionals {
var err error var err error
if msg, err = m.Additionals[i].pack(msg, compression); err != nil { if msg, err = m.Additionals[i].pack(msg, compression, compressionOff); err != nil {
return nil, &nestedError{"packing Additional", err} return nil, &nestedError{"packing Additional", err}
} }
} }
@ -860,36 +894,69 @@ func (m *Message) AppendPack(b []byte) ([]byte, error) {
} }
// A Builder allows incrementally packing a DNS message. // A Builder allows incrementally packing a DNS message.
//
// Example usage:
// buf := make([]byte, 2, 514)
// b := NewBuilder(buf, Header{...})
// b.EnableCompression()
// // Optionally start a section and add things to that section.
// // Repeat adding sections as necessary.
// buf, err := b.Finish()
// // If err is nil, buf[2:] will contain the built bytes.
type Builder struct { type Builder struct {
msg []byte // msg is the storage for the message being built.
header header msg []byte
section section
// section keeps track of the current section being built.
section section
// header keeps track of what should go in the header when Finish is
// called.
header header
// start is the starting index of the bytes allocated in msg for header.
start int
// compression is a mapping from name suffixes to their starting index
// in msg.
compression map[string]int compression map[string]int
} }
// Start initializes the builder. // NewBuilder creates a new builder with compression disabled.
// //
// buf is optional (nil is fine), but if provided, Start takes ownership of buf. // Note: Most users will want to immediately enable compression with the
func (b *Builder) Start(buf []byte, h Header) { // EnableCompression method. See that method's comment for why you may or may
b.StartWithoutCompression(buf, h) // not want to enable compression.
b.compression = map[string]int{} //
// The DNS message is appended to the provided initial buffer buf (which may be
// nil) as it is built. The final message is returned by the (*Builder).Finish
// method, which may return the same underlying array if there was sufficient
// capacity in the slice.
func NewBuilder(buf []byte, h Header) Builder {
if buf == nil {
buf = make([]byte, 0, packStartingCap)
}
b := Builder{msg: buf, start: len(buf)}
b.header.id, b.header.bits = h.pack()
var hb [headerLen]byte
b.msg = append(b.msg, hb[:]...)
b.section = sectionHeader
return b
} }
// StartWithoutCompression initializes the builder with compression disabled. // EnableCompression enables compression in the Builder.
// //
// This avoids compression related allocations, but can result in larger message // Leaving compression disabled avoids compression related allocations, but can
// sizes. Be careful with this mode as it can cause messages to exceed the UDP // result in larger message sizes. Be careful with this mode as it can cause
// size limit. // messages to exceed the UDP size limit.
// //
// buf is optional (nil is fine), but if provided, Start takes ownership of buf. // According to RFC 1035, section 4.1.4, the use of compression is optional, but
func (b *Builder) StartWithoutCompression(buf []byte, h Header) { // all implementations must accept both compressed and uncompressed DNS
*b = Builder{msg: buf} // messages.
b.header.id, b.header.bits = h.pack() //
if cap(b.msg) < headerLen { // Compression should be enabled before any sections are added for best results.
b.msg = make([]byte, 0, packStartingCap) func (b *Builder) EnableCompression() {
} b.compression = map[string]int{}
b.msg = b.msg[:headerLen]
b.section = sectionHeader
} }
func (b *Builder) startCheck(s section) error { func (b *Builder) startCheck(s section) error {
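Assuming the import path golang.org/x/net/dns/dnsmessage at this revision, a sketch of the NewBuilder/EnableCompression flow described in the doc comment above, reserving a 2-byte prefix as in the example:

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/dns/dnsmessage"
)

func main() {
    // Reserve two bytes in front of the message (e.g. a DNS-over-TCP length
    // prefix); NewBuilder appends the header and sections after them.
    buf := make([]byte, 2, 514)
    b := dnsmessage.NewBuilder(buf, dnsmessage.Header{ID: 1, RecursionDesired: true})
    b.EnableCompression()

    if err := b.StartQuestions(); err != nil {
        log.Fatal(err)
    }
    name, err := dnsmessage.NewName("example.com.")
    if err != nil {
        log.Fatal(err)
    }
    q := dnsmessage.Question{Name: name, Type: dnsmessage.TypeA, Class: dnsmessage.ClassINET}
    if err := b.Question(q); err != nil {
        log.Fatal(err)
    }

    out, err := b.Finish()
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(len(out) > 2) // true: out[2:] holds the packed message
}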
@ -970,7 +1037,7 @@ func (b *Builder) Question(q Question) error {
if b.section > sectionQuestions { if b.section > sectionQuestions {
return ErrSectionDone return ErrSectionDone
} }
msg, err := q.pack(b.msg, b.compression) msg, err := q.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return err return err
} }
@ -997,12 +1064,12 @@ func (b *Builder) CNAMEResource(h ResourceHeader, r CNAMEResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"CNAMEResource body", err} return &nestedError{"CNAMEResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1021,12 +1088,12 @@ func (b *Builder) MXResource(h ResourceHeader, r MXResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"MXResource body", err} return &nestedError{"MXResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1045,12 +1112,12 @@ func (b *Builder) NSResource(h ResourceHeader, r NSResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"NSResource body", err} return &nestedError{"NSResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1069,12 +1136,12 @@ func (b *Builder) PTRResource(h ResourceHeader, r PTRResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"PTRResource body", err} return &nestedError{"PTRResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1093,12 +1160,12 @@ func (b *Builder) SOAResource(h ResourceHeader, r SOAResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"SOAResource body", err} return &nestedError{"SOAResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1117,12 +1184,12 @@ func (b *Builder) TXTResource(h ResourceHeader, r TXTResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"TXTResource body", err} return &nestedError{"TXTResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1141,12 +1208,12 @@ func (b *Builder) SRVResource(h ResourceHeader, r SRVResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"SRVResource body", err} return &nestedError{"SRVResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1165,12 +1232,12 @@ func (b *Builder) AResource(h ResourceHeader, r AResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"AResource body", err} return &nestedError{"AResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1189,12 +1256,12 @@ func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error {
return err return err
} }
h.Type = r.realType() h.Type = r.realType()
msg, length, err := h.pack(b.msg, b.compression) msg, length, err := h.pack(b.msg, b.compression, b.start)
if err != nil { if err != nil {
return &nestedError{"ResourceHeader", err} return &nestedError{"ResourceHeader", err}
} }
preLen := len(msg) preLen := len(msg)
if msg, err = r.pack(msg, b.compression); err != nil { if msg, err = r.pack(msg, b.compression, b.start); err != nil {
return &nestedError{"AAAAResource body", err} return &nestedError{"AAAAResource body", err}
} }
if err := h.fixLen(msg, length, preLen); err != nil { if err := h.fixLen(msg, length, preLen); err != nil {
@ -1207,13 +1274,14 @@ func (b *Builder) AAAAResource(h ResourceHeader, r AAAAResource) error {
return nil return nil
} }
// Finish ends message building and generates a binary packet. // Finish ends message building and generates a binary message.
func (b *Builder) Finish() ([]byte, error) { func (b *Builder) Finish() ([]byte, error) {
if b.section < sectionHeader { if b.section < sectionHeader {
return nil, ErrNotStarted return nil, ErrNotStarted
} }
b.section = sectionDone b.section = sectionDone
b.header.pack(b.msg[:0]) // Space for the header was allocated in NewBuilder.
b.header.pack(b.msg[b.start:b.start])
return b.msg, nil return b.msg, nil
} }
@ -1246,9 +1314,9 @@ type ResourceHeader struct {
// pack packs all of the fields in a ResourceHeader except for the length. The // pack packs all of the fields in a ResourceHeader except for the length. The
// length bytes are returned as a slice so they can be filled in after the rest // length bytes are returned as a slice so they can be filled in after the rest
// of the Resource has been packed. // of the Resource has been packed.
func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int) (msg []byte, length []byte, err error) { func (h *ResourceHeader) pack(oldMsg []byte, compression map[string]int, compressionOff int) (msg []byte, length []byte, err error) {
msg = oldMsg msg = oldMsg
if msg, err = h.Name.pack(msg, compression); err != nil { if msg, err = h.Name.pack(msg, compression, compressionOff); err != nil {
return oldMsg, nil, &nestedError{"Name", err} return oldMsg, nil, &nestedError{"Name", err}
} }
msg = packType(msg, h.Type) msg = packType(msg, h.Type)
@ -1473,7 +1541,7 @@ func (n Name) String() string {
// //
// The compression map will be updated with new domain suffixes. If compression // The compression map will be updated with new domain suffixes. If compression
// is nil, compression will not be used. // is nil, compression will not be used.
func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) { func (n *Name) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
oldMsg := msg oldMsg := msg
// Add a trailing dot to canonicalize name. // Add a trailing dot to canonicalize name.
@ -1525,7 +1593,7 @@ func (n *Name) pack(msg []byte, compression map[string]int) ([]byte, error) {
// Miss. Add the suffix to the compression table if the // Miss. Add the suffix to the compression table if the
// offset can be stored in the available 14 bytes. // offset can be stored in the available 14 bytes.
if len(msg) <= int(^uint16(0)>>2) { if len(msg) <= int(^uint16(0)>>2) {
compression[string(n.Data[i:])] = len(msg) compression[string(n.Data[i:])] = len(msg) - compressionOff
} }
} }
} }
@ -1648,8 +1716,8 @@ type Question struct {
Class Class Class Class
} }
func (q *Question) pack(msg []byte, compression map[string]int) ([]byte, error) { func (q *Question) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
msg, err := q.Name.pack(msg, compression) msg, err := q.Name.pack(msg, compression, compressionOff)
if err != nil { if err != nil {
return msg, &nestedError{"Name", err} return msg, &nestedError{"Name", err}
} }
@ -1728,8 +1796,8 @@ func (r *CNAMEResource) realType() Type {
return TypeCNAME return TypeCNAME
} }
func (r *CNAMEResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *CNAMEResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
return r.CNAME.pack(msg, compression) return r.CNAME.pack(msg, compression, compressionOff)
} }
func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) { func unpackCNAMEResource(msg []byte, off int) (CNAMEResource, error) {
@ -1750,10 +1818,10 @@ func (r *MXResource) realType() Type {
return TypeMX return TypeMX
} }
func (r *MXResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *MXResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
oldMsg := msg oldMsg := msg
msg = packUint16(msg, r.Pref) msg = packUint16(msg, r.Pref)
msg, err := r.MX.pack(msg, compression) msg, err := r.MX.pack(msg, compression, compressionOff)
if err != nil { if err != nil {
return oldMsg, &nestedError{"MXResource.MX", err} return oldMsg, &nestedError{"MXResource.MX", err}
} }
@ -1781,8 +1849,8 @@ func (r *NSResource) realType() Type {
return TypeNS return TypeNS
} }
func (r *NSResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *NSResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
return r.NS.pack(msg, compression) return r.NS.pack(msg, compression, compressionOff)
} }
func unpackNSResource(msg []byte, off int) (NSResource, error) { func unpackNSResource(msg []byte, off int) (NSResource, error) {
@ -1802,8 +1870,8 @@ func (r *PTRResource) realType() Type {
return TypePTR return TypePTR
} }
func (r *PTRResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *PTRResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
return r.PTR.pack(msg, compression) return r.PTR.pack(msg, compression, compressionOff)
} }
func unpackPTRResource(msg []byte, off int) (PTRResource, error) { func unpackPTRResource(msg []byte, off int) (PTRResource, error) {
@ -1833,13 +1901,13 @@ func (r *SOAResource) realType() Type {
return TypeSOA return TypeSOA
} }
func (r *SOAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *SOAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
oldMsg := msg oldMsg := msg
msg, err := r.NS.pack(msg, compression) msg, err := r.NS.pack(msg, compression, compressionOff)
if err != nil { if err != nil {
return oldMsg, &nestedError{"SOAResource.NS", err} return oldMsg, &nestedError{"SOAResource.NS", err}
} }
msg, err = r.MBox.pack(msg, compression) msg, err = r.MBox.pack(msg, compression, compressionOff)
if err != nil { if err != nil {
return oldMsg, &nestedError{"SOAResource.MBox", err} return oldMsg, &nestedError{"SOAResource.MBox", err}
} }
@ -1892,7 +1960,7 @@ func (r *TXTResource) realType() Type {
return TypeTXT return TypeTXT
} }
func (r *TXTResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *TXTResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
return packText(msg, r.Txt), nil return packText(msg, r.Txt), nil
} }
@ -1926,12 +1994,12 @@ func (r *SRVResource) realType() Type {
return TypeSRV return TypeSRV
} }
func (r *SRVResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *SRVResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
oldMsg := msg oldMsg := msg
msg = packUint16(msg, r.Priority) msg = packUint16(msg, r.Priority)
msg = packUint16(msg, r.Weight) msg = packUint16(msg, r.Weight)
msg = packUint16(msg, r.Port) msg = packUint16(msg, r.Port)
msg, err := r.Target.pack(msg, nil) msg, err := r.Target.pack(msg, nil, compressionOff)
if err != nil { if err != nil {
return oldMsg, &nestedError{"SRVResource.Target", err} return oldMsg, &nestedError{"SRVResource.Target", err}
} }
@ -1967,7 +2035,7 @@ func (r *AResource) realType() Type {
return TypeA return TypeA
} }
func (r *AResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *AResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
return packBytes(msg, r.A[:]), nil return packBytes(msg, r.A[:]), nil
} }
@ -1988,7 +2056,7 @@ func (r *AAAAResource) realType() Type {
return TypeAAAA return TypeAAAA
} }
func (r *AAAAResource) pack(msg []byte, compression map[string]int) ([]byte, error) { func (r *AAAAResource) pack(msg []byte, compression map[string]int, compressionOff int) ([]byte, error) {
return packBytes(msg, r.AAAA[:]), nil return packBytes(msg, r.AAAA[:]), nil
} }

View File

@ -62,7 +62,7 @@ func TestQuestionPackUnpack(t *testing.T) {
Type: TypeA, Type: TypeA,
Class: ClassINET, Class: ClassINET,
} }
buf, err := want.pack(make([]byte, 1, 50), map[string]int{}) buf, err := want.pack(make([]byte, 1, 50), map[string]int{}, 1)
if err != nil { if err != nil {
t.Fatal("Packing failed:", err) t.Fatal("Packing failed:", err)
} }
@ -129,7 +129,7 @@ func TestNamePackUnpack(t *testing.T) {
for _, test := range tests { for _, test := range tests {
in := mustNewName(test.in) in := mustNewName(test.in)
want := mustNewName(test.want) want := mustNewName(test.want)
buf, err := in.pack(make([]byte, 0, 30), map[string]int{}) buf, err := in.pack(make([]byte, 0, 30), map[string]int{}, 0)
if err != test.err { if err != test.err {
t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err) t.Errorf("Packing of %q: got err = %v, want err = %v", test.in, err, test.err)
continue continue
@ -248,6 +248,40 @@ func TestDNSPackUnpack(t *testing.T) {
} }
} }
func TestDNSAppendPackUnpack(t *testing.T) {
wants := []Message{
{
Questions: []Question{
{
Name: mustNewName("."),
Type: TypeAAAA,
Class: ClassINET,
},
},
Answers: []Resource{},
Authorities: []Resource{},
Additionals: []Resource{},
},
largeTestMsg(),
}
for i, want := range wants {
b := make([]byte, 2, 514)
b, err := want.AppendPack(b)
if err != nil {
t.Fatalf("%d: packing failed: %v", i, err)
}
b = b[2:]
var got Message
err = got.Unpack(b)
if err != nil {
t.Fatalf("%d: unpacking failed: %v", i, err)
}
if !reflect.DeepEqual(got, want) {
t.Errorf("%d: got = %+v, want = %+v", i, &got, &want)
}
}
}
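TestDNSAppendPackUnpack exercises the same prefix-preserving behaviour through Message.AppendPack; a hedged usage sketch (import path assumed as golang.org/x/net/dns/dnsmessage):

package main

import (
    "fmt"
    "log"

    "golang.org/x/net/dns/dnsmessage"
)

func main() {
    name, err := dnsmessage.NewName("example.com.")
    if err != nil {
        log.Fatal(err)
    }
    m := dnsmessage.Message{
        Header: dnsmessage.Header{Response: true, Authoritative: true},
        Questions: []dnsmessage.Question{{
            Name:  name,
            Type:  dnsmessage.TypeA,
            Class: dnsmessage.ClassINET,
        }},
    }

    // Append the packed message after a 2-byte placeholder, as a
    // DNS-over-TCP sender would before filling in the length prefix.
    buf := make([]byte, 2, 514)
    buf, err = m.AppendPack(buf)
    if err != nil {
        log.Fatal(err)
    }

    var got dnsmessage.Message
    if err := got.Unpack(buf[2:]); err != nil { // skip the prefix before unpacking
        log.Fatal(err)
    }
    fmt.Println(len(got.Questions)) // 1
}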
func TestSkipAll(t *testing.T) { func TestSkipAll(t *testing.T) {
msg := largeTestMsg() msg := largeTestMsg()
buf, err := msg.Pack() buf, err := msg.Pack()
@ -412,7 +446,7 @@ func TestVeryLongTxt(t *testing.T) {
}, },
&TXTResource{loremIpsum}, &TXTResource{loremIpsum},
} }
buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}) buf, err := want.pack(make([]byte, 0, 8000), map[string]int{}, 0)
if err != nil { if err != nil {
t.Fatal("Packing failed:", err) t.Fatal("Packing failed:", err)
} }
@ -434,6 +468,26 @@ func TestVeryLongTxt(t *testing.T) {
} }
} }
func TestStartAppends(t *testing.T) {
buf := make([]byte, 2, 514)
wantBuf := []byte{4, 44}
copy(buf, wantBuf)
b := NewBuilder(buf, Header{})
b.EnableCompression()
buf, err := b.Finish()
if err != nil {
t.Fatal("Building failed:", err)
}
if got, want := len(buf), headerLen+2; got != want {
t.Errorf("Got len(buf} = %d, want = %d", got, want)
}
if string(buf[:2]) != string(wantBuf) {
t.Errorf("Original data not preserved, got = %v, want = %v", buf[:2], wantBuf)
}
}
func TestStartError(t *testing.T) { func TestStartError(t *testing.T) {
tests := []struct { tests := []struct {
name string name string
@ -514,8 +568,8 @@ func TestBuilder(t *testing.T) {
t.Fatal("Packing without builder:", err) t.Fatal("Packing without builder:", err)
} }
var b Builder b := NewBuilder(nil, msg.Header)
b.Start(nil, msg.Header) b.EnableCompression()
if err := b.StartQuestions(); err != nil { if err := b.StartQuestions(); err != nil {
t.Fatal("b.StartQuestions():", err) t.Fatal("b.StartQuestions():", err)
@ -653,9 +707,7 @@ func TestResourcePack(t *testing.T) {
} }
} }
func BenchmarkParsing(b *testing.B) { func benchmarkParsingSetup() ([]byte, error) {
b.ReportAllocs()
name := mustNewName("foo.bar.example.com.") name := mustNewName("foo.bar.example.com.")
msg := Message{ msg := Message{
Header: Header{Response: true, Authoritative: true}, Header: Header{Response: true, Authoritative: true},
@ -700,111 +752,148 @@ func BenchmarkParsing(b *testing.B) {
buf, err := msg.Pack() buf, err := msg.Pack()
if err != nil { if err != nil {
b.Fatal("msg.Pack():", err) return nil, fmt.Errorf("msg.Pack(): %v", err)
}
return buf, nil
}
func benchmarkParsing(tb testing.TB, buf []byte) {
var p Parser
if _, err := p.Start(buf); err != nil {
tb.Fatal("p.Start(buf):", err)
} }
for i := 0; i < b.N; i++ { for {
var p Parser _, err := p.Question()
if _, err := p.Start(buf); err != nil { if err == ErrSectionDone {
b.Fatal("p.Start(buf):", err) break
}
if err != nil {
tb.Fatal("p.Question():", err)
}
}
for {
h, err := p.AnswerHeader()
if err == ErrSectionDone {
break
}
if err != nil {
panic(err)
} }
for { switch h.Type {
_, err := p.Question() case TypeA:
if err == ErrSectionDone { if _, err := p.AResource(); err != nil {
break tb.Fatal("p.AResource():", err)
} }
if err != nil { case TypeAAAA:
b.Fatal("p.Question():", err) if _, err := p.AAAAResource(); err != nil {
tb.Fatal("p.AAAAResource():", err)
} }
} case TypeCNAME:
if _, err := p.CNAMEResource(); err != nil {
for { tb.Fatal("p.CNAMEResource():", err)
h, err := p.AnswerHeader()
if err == ErrSectionDone {
break
} }
if err != nil { case TypeNS:
panic(err) if _, err := p.NSResource(); err != nil {
} tb.Fatal("p.NSResource():", err)
switch h.Type {
case TypeA:
if _, err := p.AResource(); err != nil {
b.Fatal("p.AResource():", err)
}
case TypeAAAA:
if _, err := p.AAAAResource(); err != nil {
b.Fatal("p.AAAAResource():", err)
}
case TypeCNAME:
if _, err := p.CNAMEResource(); err != nil {
b.Fatal("p.CNAMEResource():", err)
}
case TypeNS:
if _, err := p.NSResource(); err != nil {
b.Fatal("p.NSResource():", err)
}
default:
b.Fatalf("unknown type: %T", h)
} }
default:
tb.Fatalf("unknown type: %T", h)
} }
} }
} }
func BenchmarkBuilding(b *testing.B) { func BenchmarkParsing(b *testing.B) {
b.ReportAllocs() buf, err := benchmarkParsingSetup()
if err != nil {
b.Fatal(err)
}
b.ReportAllocs()
for i := 0; i < b.N; i++ {
benchmarkParsing(b, buf)
}
}
func TestParsingAllocs(t *testing.T) {
buf, err := benchmarkParsingSetup()
if err != nil {
t.Fatal(err)
}
if allocs := testing.AllocsPerRun(100, func() { benchmarkParsing(t, buf) }); allocs > 0.5 {
t.Errorf("Allocations during parsing: got = %f, want ~0", allocs)
}
}
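The benchmark refactor splits setup out of the measured body so the same body can feed both BenchmarkParsing and an AllocsPerRun regression test. A generic sketch of that pattern; the names here are illustrative, not the package's:

package example

import "testing"

// doWork stands in for the code whose allocation count is being guarded.
func doWork(buf []byte) int {
    total := 0
    for _, b := range buf {
        total += int(b)
    }
    return total
}

func BenchmarkWork(b *testing.B) {
    buf := make([]byte, 512) // setup stays outside the measured loop
    b.ReportAllocs()
    for i := 0; i < b.N; i++ {
        doWork(buf)
    }
}

func TestWorkAllocs(t *testing.T) {
    buf := make([]byte, 512)
    // AllocsPerRun averages allocations over 100 runs; anything above ~0
    // means the hot path has started allocating again.
    if allocs := testing.AllocsPerRun(100, func() { doWork(buf) }); allocs > 0.5 {
        t.Errorf("allocations in doWork: got %f, want ~0", allocs)
    }
}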
func benchmarkBuildingSetup() (Name, []byte) {
name := mustNewName("foo.bar.example.com.") name := mustNewName("foo.bar.example.com.")
buf := make([]byte, 0, packStartingCap) buf := make([]byte, 0, packStartingCap)
return name, buf
}
func benchmarkBuilding(tb testing.TB, name Name, buf []byte) {
bld := NewBuilder(buf, Header{Response: true, Authoritative: true})
if err := bld.StartQuestions(); err != nil {
tb.Fatal("bld.StartQuestions():", err)
}
q := Question{
Name: name,
Type: TypeA,
Class: ClassINET,
}
if err := bld.Question(q); err != nil {
tb.Fatalf("bld.Question(%+v): %v", q, err)
}
hdr := ResourceHeader{
Name: name,
Class: ClassINET,
}
if err := bld.StartAnswers(); err != nil {
tb.Fatal("bld.StartQuestions():", err)
}
ar := AResource{[4]byte{}}
if err := bld.AResource(hdr, ar); err != nil {
tb.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err)
}
aaar := AAAAResource{[16]byte{}}
if err := bld.AAAAResource(hdr, aaar); err != nil {
tb.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err)
}
cnr := CNAMEResource{name}
if err := bld.CNAMEResource(hdr, cnr); err != nil {
tb.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err)
}
nsr := NSResource{name}
if err := bld.NSResource(hdr, nsr); err != nil {
tb.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err)
}
if _, err := bld.Finish(); err != nil {
tb.Fatal("bld.Finish():", err)
}
}
func BenchmarkBuilding(b *testing.B) {
name, buf := benchmarkBuildingSetup()
b.ReportAllocs()
for i := 0; i < b.N; i++ { for i := 0; i < b.N; i++ {
var bld Builder benchmarkBuilding(b, name, buf)
bld.StartWithoutCompression(buf, Header{Response: true, Authoritative: true}) }
}
if err := bld.StartQuestions(); err != nil { func TestBuildingAllocs(t *testing.T) {
b.Fatal("bld.StartQuestions():", err) name, buf := benchmarkBuildingSetup()
} if allocs := testing.AllocsPerRun(100, func() { benchmarkBuilding(t, name, buf) }); allocs > 0.5 {
q := Question{ t.Errorf("Allocations during building: got = %f, want ~0", allocs)
Name: name,
Type: TypeA,
Class: ClassINET,
}
if err := bld.Question(q); err != nil {
b.Fatalf("bld.Question(%+v): %v", q, err)
}
hdr := ResourceHeader{
Name: name,
Class: ClassINET,
}
if err := bld.StartAnswers(); err != nil {
b.Fatal("bld.StartQuestions():", err)
}
ar := AResource{[4]byte{}}
if err := bld.AResource(hdr, ar); err != nil {
b.Fatalf("bld.AResource(%+v, %+v): %v", hdr, ar, err)
}
aaar := AAAAResource{[16]byte{}}
if err := bld.AAAAResource(hdr, aaar); err != nil {
b.Fatalf("bld.AAAAResource(%+v, %+v): %v", hdr, aaar, err)
}
cnr := CNAMEResource{name}
if err := bld.CNAMEResource(hdr, cnr); err != nil {
b.Fatalf("bld.CNAMEResource(%+v, %+v): %v", hdr, cnr, err)
}
nsr := NSResource{name}
if err := bld.NSResource(hdr, nsr); err != nil {
b.Fatalf("bld.NSResource(%+v, %+v): %v", hdr, nsr, err)
}
if _, err := bld.Finish(); err != nil {
b.Fatal("bld.Finish():", err)
}
} }
} }

View File

@ -665,6 +665,7 @@ var eventHandlers = []string{
// extra are ad-hoc values not covered by any of the lists above. // extra are ad-hoc values not covered by any of the lists above.
var extra = []string{ var extra = []string{
"acronym",
"align", "align",
"annotation", "annotation",
"annotation-xml", "annotation-xml",

File diff suppressed because it is too large

View File

@ -10,6 +10,7 @@ var testAtomList = []string{
"accept", "accept",
"accept-charset", "accept-charset",
"accesskey", "accesskey",
"acronym",
"action", "action",
"address", "address",
"align", "align",

View File

@ -4,7 +4,7 @@
package html package html
// Section 12.2.3.2 of the HTML5 specification says "The following elements // Section 12.2.4.2 of the HTML5 specification says "The following elements
// have varying levels of special parsing rules". // have varying levels of special parsing rules".
// https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements // https://html.spec.whatwg.org/multipage/syntax.html#the-stack-of-open-elements
var isSpecialElementMap = map[string]bool{ var isSpecialElementMap = map[string]bool{

View File

@ -67,7 +67,7 @@ func mathMLTextIntegrationPoint(n *Node) bool {
return false return false
} }
// Section 12.2.5.5. // Section 12.2.6.5.
var breakout = map[string]bool{ var breakout = map[string]bool{
"b": true, "b": true,
"big": true, "big": true,
@ -115,7 +115,7 @@ var breakout = map[string]bool{
"var": true, "var": true,
} }
// Section 12.2.5.5. // Section 12.2.6.5.
var svgTagNameAdjustments = map[string]string{ var svgTagNameAdjustments = map[string]string{
"altglyph": "altGlyph", "altglyph": "altGlyph",
"altglyphdef": "altGlyphDef", "altglyphdef": "altGlyphDef",
@ -155,7 +155,7 @@ var svgTagNameAdjustments = map[string]string{
"textpath": "textPath", "textpath": "textPath",
} }
// Section 12.2.5.1 // Section 12.2.6.1
var mathMLAttributeAdjustments = map[string]string{ var mathMLAttributeAdjustments = map[string]string{
"definitionurl": "definitionURL", "definitionurl": "definitionURL",
} }

View File

@ -21,9 +21,10 @@ const (
scopeMarkerNode scopeMarkerNode
) )
// Section 12.2.3.3 says "scope markers are inserted when entering applet // Section 12.2.4.3 says "The markers are inserted when entering applet,
// elements, buttons, object elements, marquees, table cells, and table // object, marquee, template, td, th, and caption elements, and are used
// captions, and are used to prevent formatting from 'leaking'". // to prevent formatting from "leaking" into applet, object, marquee,
// template, td, th, and caption elements".
var scopeMarker = Node{Type: scopeMarkerNode} var scopeMarker = Node{Type: scopeMarkerNode}
// A Node consists of a NodeType and some Data (tag name for element nodes, // A Node consists of a NodeType and some Data (tag name for element nodes,

View File

@ -25,12 +25,12 @@ type parser struct {
hasSelfClosingToken bool hasSelfClosingToken bool
// doc is the document root element. // doc is the document root element.
doc *Node doc *Node
// The stack of open elements (section 12.2.3.2) and active formatting // The stack of open elements (section 12.2.4.2) and active formatting
// elements (section 12.2.3.3). // elements (section 12.2.4.3).
oe, afe nodeStack oe, afe nodeStack
// Element pointers (section 12.2.3.4). // Element pointers (section 12.2.4.4).
head, form *Node head, form *Node
// Other parsing state flags (section 12.2.3.5). // Other parsing state flags (section 12.2.4.5).
scripting, framesetOK bool scripting, framesetOK bool
// im is the current insertion mode. // im is the current insertion mode.
im insertionMode im insertionMode
@ -38,7 +38,7 @@ type parser struct {
// or inTableText insertion mode. // or inTableText insertion mode.
originalIM insertionMode originalIM insertionMode
// fosterParenting is whether new elements should be inserted according to // fosterParenting is whether new elements should be inserted according to
// the foster parenting rules (section 12.2.5.3). // the foster parenting rules (section 12.2.6.1).
fosterParenting bool fosterParenting bool
// quirks is whether the parser is operating in "quirks mode." // quirks is whether the parser is operating in "quirks mode."
quirks bool quirks bool
@ -56,7 +56,7 @@ func (p *parser) top() *Node {
return p.doc return p.doc
} }
// Stop tags for use in popUntil. These come from section 12.2.3.2. // Stop tags for use in popUntil. These come from section 12.2.4.2.
var ( var (
defaultScopeStopTags = map[string][]a.Atom{ defaultScopeStopTags = map[string][]a.Atom{
"": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template}, "": {a.Applet, a.Caption, a.Html, a.Table, a.Td, a.Th, a.Marquee, a.Object, a.Template},
@ -79,7 +79,7 @@ const (
// popUntil pops the stack of open elements at the highest element whose tag // popUntil pops the stack of open elements at the highest element whose tag
// is in matchTags, provided there is no higher element in the scope's stop // is in matchTags, provided there is no higher element in the scope's stop
// tags (as defined in section 12.2.3.2). It returns whether or not there was // tags (as defined in section 12.2.4.2). It returns whether or not there was
// such an element. If there was not, popUntil leaves the stack unchanged. // such an element. If there was not, popUntil leaves the stack unchanged.
// //
// For example, the set of stop tags for table scope is: "html", "table". If // For example, the set of stop tags for table scope is: "html", "table". If
@ -234,7 +234,7 @@ func (p *parser) shouldFosterParent() bool {
} }
// fosterParent adds a child node according to the foster parenting rules. // fosterParent adds a child node according to the foster parenting rules.
// Section 12.2.5.3, "foster parenting". // Section 12.2.6.1, "foster parenting".
func (p *parser) fosterParent(n *Node) { func (p *parser) fosterParent(n *Node) {
var table, parent, prev *Node var table, parent, prev *Node
var i int var i int
@ -304,7 +304,7 @@ func (p *parser) addElement() {
}) })
} }
// Section 12.2.3.3. // Section 12.2.4.3.
func (p *parser) addFormattingElement() { func (p *parser) addFormattingElement() {
tagAtom, attr := p.tok.DataAtom, p.tok.Attr tagAtom, attr := p.tok.DataAtom, p.tok.Attr
p.addElement() p.addElement()
@ -351,7 +351,7 @@ findIdenticalElements:
p.afe = append(p.afe, p.top()) p.afe = append(p.afe, p.top())
} }
// Section 12.2.3.3. // Section 12.2.4.3.
func (p *parser) clearActiveFormattingElements() { func (p *parser) clearActiveFormattingElements() {
for { for {
n := p.afe.pop() n := p.afe.pop()
@ -361,7 +361,7 @@ func (p *parser) clearActiveFormattingElements() {
} }
} }
// Section 12.2.3.3. // Section 12.2.4.3.
func (p *parser) reconstructActiveFormattingElements() { func (p *parser) reconstructActiveFormattingElements() {
n := p.afe.top() n := p.afe.top()
if n == nil { if n == nil {
@ -390,12 +390,12 @@ func (p *parser) reconstructActiveFormattingElements() {
} }
} }
// Section 12.2.4. // Section 12.2.5.
func (p *parser) acknowledgeSelfClosingTag() { func (p *parser) acknowledgeSelfClosingTag() {
p.hasSelfClosingToken = false p.hasSelfClosingToken = false
} }
// An insertion mode (section 12.2.3.1) is the state transition function from // An insertion mode (section 12.2.4.1) is the state transition function from
// a particular state in the HTML5 parser's state machine. It updates the // a particular state in the HTML5 parser's state machine. It updates the
// parser's fields depending on parser.tok (where ErrorToken means EOF). // parser's fields depending on parser.tok (where ErrorToken means EOF).
// It returns whether the token was consumed. // It returns whether the token was consumed.
@ -403,7 +403,7 @@ type insertionMode func(*parser) bool
// setOriginalIM sets the insertion mode to return to after completing a text or // setOriginalIM sets the insertion mode to return to after completing a text or
// inTableText insertion mode. // inTableText insertion mode.
// Section 12.2.3.1, "using the rules for". // Section 12.2.4.1, "using the rules for".
func (p *parser) setOriginalIM() { func (p *parser) setOriginalIM() {
if p.originalIM != nil { if p.originalIM != nil {
panic("html: bad parser state: originalIM was set twice") panic("html: bad parser state: originalIM was set twice")
@ -411,7 +411,7 @@ func (p *parser) setOriginalIM() {
p.originalIM = p.im p.originalIM = p.im
} }
// Section 12.2.3.1, "reset the insertion mode". // Section 12.2.4.1, "reset the insertion mode".
func (p *parser) resetInsertionMode() { func (p *parser) resetInsertionMode() {
for i := len(p.oe) - 1; i >= 0; i-- { for i := len(p.oe) - 1; i >= 0; i-- {
n := p.oe[i] n := p.oe[i]
@ -452,7 +452,7 @@ func (p *parser) resetInsertionMode() {
const whitespace = " \t\r\n\f" const whitespace = " \t\r\n\f"
// Section 12.2.5.4.1. // Section 12.2.6.4.1.
func initialIM(p *parser) bool { func initialIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -479,7 +479,7 @@ func initialIM(p *parser) bool {
return false return false
} }
// Section 12.2.5.4.2. // Section 12.2.6.4.2.
func beforeHTMLIM(p *parser) bool { func beforeHTMLIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case DoctypeToken: case DoctypeToken:
@ -517,7 +517,7 @@ func beforeHTMLIM(p *parser) bool {
return false return false
} }
// Section 12.2.5.4.3. // Section 12.2.6.4.3.
func beforeHeadIM(p *parser) bool { func beforeHeadIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -560,7 +560,7 @@ func beforeHeadIM(p *parser) bool {
return false return false
} }
// Section 12.2.5.4.4. // Section 12.2.6.4.4.
func inHeadIM(p *parser) bool { func inHeadIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -622,7 +622,7 @@ func inHeadIM(p *parser) bool {
return false return false
} }
// Section 12.2.5.4.6. // Section 12.2.6.4.6.
func afterHeadIM(p *parser) bool { func afterHeadIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -697,7 +697,7 @@ func copyAttributes(dst *Node, src Token) {
} }
} }
// Section 12.2.5.4.7. // Section 12.2.6.4.7.
func inBodyIM(p *parser) bool { func inBodyIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -1160,7 +1160,7 @@ func (p *parser) inBodyEndTagFormatting(tagAtom a.Atom) {
} }
// inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM. // inBodyEndTagOther performs the "any other end tag" algorithm for inBodyIM.
// "Any other end tag" handling from 12.2.5.5 The rules for parsing tokens in foreign content // "Any other end tag" handling from 12.2.6.5 The rules for parsing tokens in foreign content
// https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign // https://html.spec.whatwg.org/multipage/syntax.html#parsing-main-inforeign
func (p *parser) inBodyEndTagOther(tagAtom a.Atom) { func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
for i := len(p.oe) - 1; i >= 0; i-- { for i := len(p.oe) - 1; i >= 0; i-- {
@ -1174,7 +1174,7 @@ func (p *parser) inBodyEndTagOther(tagAtom a.Atom) {
} }
} }
// Section 12.2.5.4.8. // Section 12.2.6.4.8.
func textIM(p *parser) bool { func textIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case ErrorToken: case ErrorToken:
@ -1203,7 +1203,7 @@ func textIM(p *parser) bool {
return p.tok.Type == EndTagToken return p.tok.Type == EndTagToken
} }
// Section 12.2.5.4.9. // Section 12.2.6.4.9.
func inTableIM(p *parser) bool { func inTableIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case ErrorToken: case ErrorToken:
@ -1309,7 +1309,7 @@ func inTableIM(p *parser) bool {
return inBodyIM(p) return inBodyIM(p)
} }
// Section 12.2.5.4.11. // Section 12.2.6.4.11.
func inCaptionIM(p *parser) bool { func inCaptionIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case StartTagToken: case StartTagToken:
@ -1355,7 +1355,7 @@ func inCaptionIM(p *parser) bool {
return inBodyIM(p) return inBodyIM(p)
} }
// Section 12.2.5.4.12. // Section 12.2.6.4.12.
func inColumnGroupIM(p *parser) bool { func inColumnGroupIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -1408,7 +1408,7 @@ func inColumnGroupIM(p *parser) bool {
return true return true
} }
// Section 12.2.5.4.13. // Section 12.2.6.4.13.
func inTableBodyIM(p *parser) bool { func inTableBodyIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case StartTagToken: case StartTagToken:
@ -1460,7 +1460,7 @@ func inTableBodyIM(p *parser) bool {
return inTableIM(p) return inTableIM(p)
} }
// Section 12.2.5.4.14. // Section 12.2.6.4.14.
func inRowIM(p *parser) bool { func inRowIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case StartTagToken: case StartTagToken:
@ -1511,7 +1511,7 @@ func inRowIM(p *parser) bool {
return inTableIM(p) return inTableIM(p)
} }
// Section 12.2.5.4.15. // Section 12.2.6.4.15.
func inCellIM(p *parser) bool { func inCellIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case StartTagToken: case StartTagToken:
@ -1560,7 +1560,7 @@ func inCellIM(p *parser) bool {
return inBodyIM(p) return inBodyIM(p)
} }
// Section 12.2.5.4.16. // Section 12.2.6.4.16.
func inSelectIM(p *parser) bool { func inSelectIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case ErrorToken: case ErrorToken:
@ -1632,7 +1632,7 @@ func inSelectIM(p *parser) bool {
return true return true
} }
// Section 12.2.5.4.17. // Section 12.2.6.4.17.
func inSelectInTableIM(p *parser) bool { func inSelectInTableIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case StartTagToken, EndTagToken: case StartTagToken, EndTagToken:
@ -1650,7 +1650,7 @@ func inSelectInTableIM(p *parser) bool {
return inSelectIM(p) return inSelectIM(p)
} }
// Section 12.2.5.4.18. // Section 12.2.6.4.19.
func afterBodyIM(p *parser) bool { func afterBodyIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case ErrorToken: case ErrorToken:
@ -1688,7 +1688,7 @@ func afterBodyIM(p *parser) bool {
return false return false
} }
// Section 12.2.5.4.19. // Section 12.2.6.4.20.
func inFramesetIM(p *parser) bool { func inFramesetIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case CommentToken: case CommentToken:
@ -1738,7 +1738,7 @@ func inFramesetIM(p *parser) bool {
return true return true
} }
// Section 12.2.5.4.20. // Section 12.2.6.4.21.
func afterFramesetIM(p *parser) bool { func afterFramesetIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case CommentToken: case CommentToken:
@ -1777,7 +1777,7 @@ func afterFramesetIM(p *parser) bool {
return true return true
} }
// Section 12.2.5.4.21. // Section 12.2.6.4.22.
func afterAfterBodyIM(p *parser) bool { func afterAfterBodyIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case ErrorToken: case ErrorToken:
@ -1806,7 +1806,7 @@ func afterAfterBodyIM(p *parser) bool {
return false return false
} }
// Section 12.2.5.4.22. // Section 12.2.6.4.23.
func afterAfterFramesetIM(p *parser) bool { func afterAfterFramesetIM(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case CommentToken: case CommentToken:
@ -1844,7 +1844,7 @@ func afterAfterFramesetIM(p *parser) bool {
const whitespaceOrNUL = whitespace + "\x00" const whitespaceOrNUL = whitespace + "\x00"
// Section 12.2.5.5. // Section 12.2.6.5
func parseForeignContent(p *parser) bool { func parseForeignContent(p *parser) bool {
switch p.tok.Type { switch p.tok.Type {
case TextToken: case TextToken:
@ -1924,7 +1924,7 @@ func parseForeignContent(p *parser) bool {
return true return true
} }
// Section 12.2.5. // Section 12.2.6.
func (p *parser) inForeignContent() bool { func (p *parser) inForeignContent() bool {
if len(p.oe) == 0 { if len(p.oe) == 0 {
return false return false

View File

@ -1161,8 +1161,8 @@ func (z *Tokenizer) TagAttr() (key, val []byte, moreAttr bool) {
return nil, nil, false return nil, nil, false
} }
// Token returns the next Token. The result's Data and Attr values remain valid // Token returns the current Token. The result's Data and Attr values remain
// after subsequent Next calls. // valid after subsequent Next calls.
func (z *Tokenizer) Token() Token { func (z *Tokenizer) Token() Token {
t := Token{Type: z.tt} t := Token{Type: z.tt}
switch z.tt { switch z.tt {
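The Token doc fix clarifies that Token describes the token most recently scanned by Next rather than advancing the stream itself. A small usage sketch, assuming the import path golang.org/x/net/html:

package main

import (
    "fmt"
    "strings"

    "golang.org/x/net/html"
)

func main() {
    z := html.NewTokenizer(strings.NewReader(`<p class="x">hi</p>`))
    for {
        tt := z.Next() // advances to the next token
        if tt == html.ErrorToken {
            return // io.EOF ends the stream
        }
        t := z.Token() // describes the current token; Data and Attr stay valid after later Next calls
        fmt.Println(tt, t.Data)
    }
}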

View File

@ -5,7 +5,7 @@
package http2 package http2
// A list of the possible cipher suite ids. Taken from // A list of the possible cipher suite ids. Taken from
// http://www.iana.org/assignments/tls-parameters/tls-parameters.txt // https://www.iana.org/assignments/tls-parameters/tls-parameters.txt
const ( const (
cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000

View File

@ -73,7 +73,7 @@ type noDialH2RoundTripper struct{ t *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
res, err := rt.t.RoundTrip(req) res, err := rt.t.RoundTrip(req)
if err == ErrNoCachedConn { if isNoCachedConnError(err) {
return nil, http.ErrSkipAltProtocol return nil, http.ErrSkipAltProtocol
} }
return res, err return res, err
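The switch from comparing against ErrNoCachedConn to calling isNoCachedConnError matters when the error value the caller sees is not the exact sentinel (for example when a bundled copy of the package defines its own). A generic sketch of classifying an error by a marker method instead of identity; these types are illustrative, not the http2 package's own:

package main

import (
    "errors"
    "fmt"
)

// noCachedConnError identifies itself through a marker method, so callers do
// not need access to the exact sentinel value to recognize it.
type noCachedConnError struct{}

func (noCachedConnError) Error() string        { return "http2: no cached connection was available" }
func (noCachedConnError) IsNoCachedConnError() {}

func isNoCachedConnError(err error) bool {
    _, ok := err.(interface{ IsNoCachedConnError() })
    return ok
}

var errNoCachedConn = errors.New("no cached connection was available") // legacy sentinel

func main() {
    err := error(noCachedConnError{})
    fmt.Println(err == errNoCachedConn)   // false: not the same sentinel value
    fmt.Println(isNoCachedConnError(err)) // true: the marker method is enough
}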

View File

@ -3,3 +3,4 @@ h2demo.linux
client-id.dat client-id.dat
client-secret.dat client-secret.dat
token.dat token.dat
ca-certificates.crt

11
vendor/golang.org/x/net/http2/h2demo/Dockerfile generated vendored Normal file
View File

@ -0,0 +1,11 @@
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
FROM scratch
LABEL maintainer "golang-dev@googlegroups.com"
COPY ca-certificates.crt /etc/ssl/certs/
COPY h2demo /
ENTRYPOINT ["/h2demo", "-prod"]

134
vendor/golang.org/x/net/http2/h2demo/Dockerfile.0 generated vendored Normal file
View File

@ -0,0 +1,134 @@
# Copyright 2018 The Go Authors. All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
FROM golang:1.9
LABEL maintainer "golang-dev@googlegroups.com"
ENV CGO_ENABLED=0
# BEGIN deps (run `make update-deps` to update)
# Repo cloud.google.com/go at 1d0c2da (2018-01-30)
ENV REV=1d0c2da40456a9b47f5376165f275424acc15c09
RUN go get -d cloud.google.com/go/compute/metadata `#and 6 other pkgs` &&\
(cd /go/src/cloud.google.com/go && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo github.com/golang/protobuf at 9255415 (2018-01-25)
ENV REV=925541529c1fa6821df4e44ce2723319eb2be768
RUN go get -d github.com/golang/protobuf/proto `#and 6 other pkgs` &&\
(cd /go/src/github.com/golang/protobuf && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo github.com/googleapis/gax-go at 317e000 (2017-09-15)
ENV REV=317e0006254c44a0ac427cc52a0e083ff0b9622f
RUN go get -d github.com/googleapis/gax-go &&\
(cd /go/src/github.com/googleapis/gax-go && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo go4.org at 034d17a (2017-05-25)
ENV REV=034d17a462f7b2dcd1a4a73553ec5357ff6e6c6e
RUN go get -d go4.org/syncutil/singleflight &&\
(cd /go/src/go4.org && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/build at 8aa9ee0 (2018-02-01)
ENV REV=8aa9ee0e557fd49c14113e5ba106e13a5b455460
RUN go get -d golang.org/x/build/autocertcache &&\
(cd /go/src/golang.org/x/build && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/crypto at 1875d0a (2018-01-27)
ENV REV=1875d0a70c90e57f11972aefd42276df65e895b9
RUN go get -d golang.org/x/crypto/acme `#and 2 other pkgs` &&\
(cd /go/src/golang.org/x/crypto && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/oauth2 at 30785a2 (2018-01-04)
ENV REV=30785a2c434e431ef7c507b54617d6a951d5f2b4
RUN go get -d golang.org/x/oauth2 `#and 5 other pkgs` &&\
(cd /go/src/golang.org/x/oauth2 && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo golang.org/x/text at e19ae14 (2017-12-27)
ENV REV=e19ae1496984b1c655b8044a65c0300a3c878dd3
RUN go get -d golang.org/x/text/secure/bidirule `#and 4 other pkgs` &&\
(cd /go/src/golang.org/x/text && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo google.golang.org/api at 7d0e2d3 (2018-01-30)
ENV REV=7d0e2d350555821bef5a5b8aecf0d12cc1def633
RUN go get -d google.golang.org/api/gensupport `#and 9 other pkgs` &&\
(cd /go/src/google.golang.org/api && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo google.golang.org/genproto at 4eb30f4 (2018-01-25)
ENV REV=4eb30f4778eed4c258ba66527a0d4f9ec8a36c45
RUN go get -d google.golang.org/genproto/googleapis/api/annotations `#and 3 other pkgs` &&\
(cd /go/src/google.golang.org/genproto && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Repo google.golang.org/grpc at 0bd008f (2018-01-25)
ENV REV=0bd008f5fadb62d228f12b18d016709e8139a7af
RUN go get -d google.golang.org/grpc `#and 23 other pkgs` &&\
(cd /go/src/google.golang.org/grpc && (git cat-file -t $REV 2>/dev/null || git fetch -q origin $REV) && git reset --hard $REV)
# Optimization to speed up iterative development, not necessary for correctness:
RUN go install cloud.google.com/go/compute/metadata \
cloud.google.com/go/iam \
cloud.google.com/go/internal \
cloud.google.com/go/internal/optional \
cloud.google.com/go/internal/version \
cloud.google.com/go/storage \
github.com/golang/protobuf/proto \
github.com/golang/protobuf/protoc-gen-go/descriptor \
github.com/golang/protobuf/ptypes \
github.com/golang/protobuf/ptypes/any \
github.com/golang/protobuf/ptypes/duration \
github.com/golang/protobuf/ptypes/timestamp \
github.com/googleapis/gax-go \
go4.org/syncutil/singleflight \
golang.org/x/build/autocertcache \
golang.org/x/crypto/acme \
golang.org/x/crypto/acme/autocert \
golang.org/x/oauth2 \
golang.org/x/oauth2/google \
golang.org/x/oauth2/internal \
golang.org/x/oauth2/jws \
golang.org/x/oauth2/jwt \
golang.org/x/text/secure/bidirule \
golang.org/x/text/transform \
golang.org/x/text/unicode/bidi \
golang.org/x/text/unicode/norm \
google.golang.org/api/gensupport \
google.golang.org/api/googleapi \
google.golang.org/api/googleapi/internal/uritemplates \
google.golang.org/api/googleapi/transport \
google.golang.org/api/internal \
google.golang.org/api/iterator \
google.golang.org/api/option \
google.golang.org/api/storage/v1 \
google.golang.org/api/transport/http \
google.golang.org/genproto/googleapis/api/annotations \
google.golang.org/genproto/googleapis/iam/v1 \
google.golang.org/genproto/googleapis/rpc/status \
google.golang.org/grpc \
google.golang.org/grpc/balancer \
google.golang.org/grpc/balancer/base \
google.golang.org/grpc/balancer/roundrobin \
google.golang.org/grpc/codes \
google.golang.org/grpc/connectivity \
google.golang.org/grpc/credentials \
google.golang.org/grpc/encoding \
google.golang.org/grpc/encoding/proto \
google.golang.org/grpc/grpclb/grpc_lb_v1/messages \
google.golang.org/grpc/grpclog \
google.golang.org/grpc/internal \
google.golang.org/grpc/keepalive \
google.golang.org/grpc/metadata \
google.golang.org/grpc/naming \
google.golang.org/grpc/peer \
google.golang.org/grpc/resolver \
google.golang.org/grpc/resolver/dns \
google.golang.org/grpc/resolver/passthrough \
google.golang.org/grpc/stats \
google.golang.org/grpc/status \
google.golang.org/grpc/tap \
google.golang.org/grpc/transport
# END deps
COPY . /go/src/golang.org/x/net/
RUN go install -tags "h2demo netgo" -ldflags "-linkmode=external -extldflags '-static -pthread'" golang.org/x/net/http2/h2demo

View File

@ -1,8 +1,55 @@
h2demo.linux: h2demo.go # Copyright 2018 The Go Authors. All rights reserved.
GOOS=linux go build --tags=h2demo -o h2demo.linux . # Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
MUTABLE_VERSION ?= latest
VERSION ?= $(shell git rev-parse --short HEAD)
IMAGE_STAGING := gcr.io/go-dashboard-dev/h2demo
IMAGE_PROD := gcr.io/symbolic-datum-552/h2demo
DOCKER_IMAGE_build0=build0/h2demo:latest
DOCKER_CTR_build0=h2demo-build0
build0: *.go Dockerfile.0
docker build --force-rm -f Dockerfile.0 --tag=$(DOCKER_IMAGE_build0) ../..
h2demo: build0
docker create --name $(DOCKER_CTR_build0) $(DOCKER_IMAGE_build0)
docker cp $(DOCKER_CTR_build0):/go/bin/$@ $@
docker rm $(DOCKER_CTR_build0)
ca-certificates.crt:
docker create --name $(DOCKER_CTR_build0) $(DOCKER_IMAGE_build0)
docker cp $(DOCKER_CTR_build0):/etc/ssl/certs/$@ $@
docker rm $(DOCKER_CTR_build0)
update-deps:
go install golang.org/x/build/cmd/gitlock
gitlock --update=Dockerfile.0 --ignore=golang.org/x/net --tags=h2demo golang.org/x/net/http2/h2demo
docker-prod: Dockerfile h2demo ca-certificates.crt
docker build --force-rm --tag=$(IMAGE_PROD):$(VERSION) .
docker tag $(IMAGE_PROD):$(VERSION) $(IMAGE_PROD):$(MUTABLE_VERSION)
docker-staging: Dockerfile h2demo ca-certificates.crt
docker build --force-rm --tag=$(IMAGE_STAGING):$(VERSION) .
docker tag $(IMAGE_STAGING):$(VERSION) $(IMAGE_STAGING):$(MUTABLE_VERSION)
push-prod: docker-prod
gcloud docker -- push $(IMAGE_PROD):$(MUTABLE_VERSION)
gcloud docker -- push $(IMAGE_PROD):$(VERSION)
push-staging: docker-staging
gcloud docker -- push $(IMAGE_STAGING):$(MUTABLE_VERSION)
gcloud docker -- push $(IMAGE_STAGING):$(VERSION)
deploy-prod: push-prod
kubectl set image deployment/h2demo-deployment h2demo=$(IMAGE_PROD):$(VERSION)
deploy-staging: push-staging
kubectl set image deployment/h2demo-deployment h2demo=$(IMAGE_STAGING):$(VERSION)
.PHONY: clean
clean:
$(RM) h2demo
$(RM) ca-certificates.crt
FORCE: FORCE:
upload: FORCE
go install golang.org/x/build/cmd/upload
upload --verbose --osarch=linux-amd64 --tags=h2demo --file=go:golang.org/x/net/http2/h2demo --public http2-demo-server-tls/h2demo

View File

@ -0,0 +1,28 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: h2demo-deployment
spec:
replicas: 1
template:
metadata:
labels:
app: h2demo
annotations:
container.seccomp.security.alpha.kubernetes.io/h2demo: docker/default
container.apparmor.security.beta.kubernetes.io/h2demo: runtime/default
spec:
containers:
- name: h2demo
image: gcr.io/symbolic-datum-552/h2demo:latest
imagePullPolicy: Always
command: ["/h2demo", "-prod"]
ports:
- containerPort: 80
- containerPort: 443
resources:
requests:
cpu: "1"
memory: "1Gi"
limits:
memory: "2Gi"

View File

@ -8,6 +8,7 @@ package main
import ( import (
"bytes" "bytes"
"context"
"crypto/tls" "crypto/tls"
"flag" "flag"
"fmt" "fmt"
@ -19,7 +20,6 @@ import (
"log" "log"
"net" "net"
"net/http" "net/http"
"os"
"path" "path"
"regexp" "regexp"
"runtime" "runtime"
@ -28,7 +28,9 @@ import (
"sync" "sync"
"time" "time"
"cloud.google.com/go/storage"
"go4.org/syncutil/singleflight" "go4.org/syncutil/singleflight"
"golang.org/x/build/autocertcache"
"golang.org/x/crypto/acme/autocert" "golang.org/x/crypto/acme/autocert"
"golang.org/x/net/http2" "golang.org/x/net/http2"
) )
@ -426,19 +428,10 @@ func httpHost() string {
} }
} }
func serveProdTLS() error { func serveProdTLS(autocertManager *autocert.Manager) error {
const cacheDir = "/var/cache/autocert"
if err := os.MkdirAll(cacheDir, 0700); err != nil {
return err
}
m := autocert.Manager{
Cache: autocert.DirCache(cacheDir),
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist("http2.golang.org"),
}
srv := &http.Server{ srv := &http.Server{
TLSConfig: &tls.Config{ TLSConfig: &tls.Config{
GetCertificate: m.GetCertificate, GetCertificate: autocertManager.GetCertificate,
}, },
} }
http2.ConfigureServer(srv, &http2.Server{ http2.ConfigureServer(srv, &http2.Server{
@ -468,9 +461,21 @@ func (ln tcpKeepAliveListener) Accept() (c net.Conn, err error) {
} }
func serveProd() error { func serveProd() error {
log.Printf("running in production mode")
storageClient, err := storage.NewClient(context.Background())
if err != nil {
log.Fatalf("storage.NewClient: %v", err)
}
autocertManager := &autocert.Manager{
Prompt: autocert.AcceptTOS,
HostPolicy: autocert.HostWhitelist("http2.golang.org"),
Cache: autocertcache.NewGoogleCloudStorageCache(storageClient, "golang-h2demo-autocert"),
}
errc := make(chan error, 2) errc := make(chan error, 2)
go func() { errc <- http.ListenAndServe(":80", nil) }() go func() { errc <- http.ListenAndServe(":80", autocertManager.HTTPHandler(http.DefaultServeMux)) }()
go func() { errc <- serveProdTLS() }() go func() { errc <- serveProdTLS(autocertManager) }()
return <-errc return <-errc
} }
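
Note on the serveProd/serveProdTLS change above: the demo now shares one autocert.Manager between the plain-HTTP listener (which answers ACME HTTP-01 challenges) and the TLS server, and caches certificates in Google Cloud Storage via autocertcache instead of a local directory. A minimal hedged sketch of the same wiring, using a local DirCache rather than GCS; the host name and cache path are illustrative, not taken from this commit:

package main

import (
	"crypto/tls"
	"log"
	"net/http"

	"golang.org/x/crypto/acme/autocert"
)

func main() {
	// One manager is shared by both listeners, as in the patched serveProd.
	m := &autocert.Manager{
		Prompt:     autocert.AcceptTOS,
		HostPolicy: autocert.HostWhitelist("example.com"), // illustrative host
		Cache:      autocert.DirCache("/var/cache/autocert"),
	}

	// Port 80 answers ACME HTTP-01 challenges; the patched code passes
	// http.DefaultServeMux as the fallback handler, nil here just redirects to HTTPS.
	go func() {
		log.Fatal(http.ListenAndServe(":80", m.HTTPHandler(nil)))
	}()

	// Port 443 obtains certificates on demand through the same manager.
	srv := &http.Server{
		Addr:      ":443",
		TLSConfig: &tls.Config{GetCertificate: m.GetCertificate},
	}
	log.Fatal(srv.ListenAndServeTLS("", ""))
}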

16
vendor/golang.org/x/net/http2/h2demo/service.yaml generated vendored Normal file
View File

@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: h2demo
spec:
ports:
- port: 80
targetPort: 80
name: http
- port: 443
targetPort: 443
name: https
selector:
app: h2demo
type: LoadBalancer
loadBalancerIP: 130.211.116.44

View File

@ -206,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte {
} }
// appendHpackString appends s, as encoded in "String Literal" // appendHpackString appends s, as encoded in "String Literal"
// representation, to dst and returns the the extended buffer. // representation, to dst and returns the extended buffer.
// //
// s will be encoded in Huffman codes only when it produces strictly // s will be encoded in Huffman codes only when it produces strictly
// shorter byte string. // shorter byte string.

View File

@ -312,7 +312,7 @@ func mustUint31(v int32) uint32 {
} }
// bodyAllowedForStatus reports whether a given response status code // bodyAllowedForStatus reports whether a given response status code
// permits a body. See RFC 2616, section 4.4. // permits a body. See RFC 7230, section 3.3.
func bodyAllowedForStatus(status int) bool { func bodyAllowedForStatus(status int) bool {
switch { switch {
case status >= 100 && status <= 199: case status >= 100 && status <= 199:

View File

@ -406,7 +406,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
// addresses during development. // addresses during development.
// //
// TODO: optionally enforce? Or enforce at the time we receive // TODO: optionally enforce? Or enforce at the time we receive
// a new request, and verify the the ServerName matches the :authority? // a new request, and verify the ServerName matches the :authority?
// But that precludes proxy situations, perhaps. // But that precludes proxy situations, perhaps.
// //
// So for now, do nothing here again. // So for now, do nothing here again.
@ -2285,7 +2285,7 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) !=
func (rws *responseWriterState) declareTrailer(k string) { func (rws *responseWriterState) declareTrailer(k string) {
k = http.CanonicalHeaderKey(k) k = http.CanonicalHeaderKey(k)
if !ValidTrailerHeader(k) { if !ValidTrailerHeader(k) {
// Forbidden by RFC 2616 14.40. // Forbidden by RFC 7230, section 4.1.2.
rws.conn.logf("ignoring invalid trailer %q", k) rws.conn.logf("ignoring invalid trailer %q", k)
return return
} }
@ -2406,7 +2406,7 @@ const TrailerPrefix = "Trailer:"
// after the header has already been flushed. Because the Go // after the header has already been flushed. Because the Go
// ResponseWriter interface has no way to set Trailers (only the // ResponseWriter interface has no way to set Trailers (only the
// Header), and because we didn't want to expand the ResponseWriter // Header), and because we didn't want to expand the ResponseWriter
// interface, and because nobody used trailers, and because RFC 2616 // interface, and because nobody used trailers, and because RFC 7230
// says you SHOULD (but not must) predeclare any trailers in the // says you SHOULD (but not must) predeclare any trailers in the
// header, the official ResponseWriter rules said trailers in Go must // header, the official ResponseWriter rules said trailers in Go must
// be predeclared, and then we reuse the same ResponseWriter.Header() // be predeclared, and then we reuse the same ResponseWriter.Header()
@ -2790,7 +2790,7 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
} }
// foreachHeaderElement splits v according to the "#rule" construction // foreachHeaderElement splits v according to the "#rule" construction
// in RFC 2616 section 2.1 and calls fn for each non-empty element. // in RFC 7230 section 7 and calls fn for each non-empty element.
func foreachHeaderElement(v string, fn func(string)) { func foreachHeaderElement(v string, fn func(string)) {
v = textproto.TrimString(v) v = textproto.TrimString(v)
if v == "" { if v == "" {

View File

@ -2877,9 +2877,9 @@ func testServerWritesTrailers(t *testing.T, withFlush bool) {
w.Header().Set("Trailer:post-header-trailer2", "hi2") w.Header().Set("Trailer:post-header-trailer2", "hi2")
w.Header().Set("Trailer:Range", "invalid") w.Header().Set("Trailer:Range", "invalid")
w.Header().Set("Trailer:Foo\x01Bogus", "invalid") w.Header().Set("Trailer:Foo\x01Bogus", "invalid")
w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 2616 14.40") w.Header().Set("Transfer-Encoding", "should not be included; Forbidden by RFC 7230 4.1.2")
w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 2616 14.40") w.Header().Set("Content-Length", "should not be included; Forbidden by RFC 7230 4.1.2")
w.Header().Set("Trailer", "should not be included; Forbidden by RFC 2616 14.40") w.Header().Set("Trailer", "should not be included; Forbidden by RFC 7230 4.1.2")
return nil return nil
}, func(st *serverTester) { }, func(st *serverTester) {
getSlash(st) getSlash(st)
@ -2971,7 +2971,7 @@ func BenchmarkServerGets(b *testing.B) {
defer st.Close() defer st.Close()
st.greet() st.greet()
// Give the server quota to reply. (plus it has the the 64KB) // Give the server quota to reply. (plus it has the 64KB)
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -3009,7 +3009,7 @@ func BenchmarkServerPosts(b *testing.B) {
defer st.Close() defer st.Close()
st.greet() st.greet()
// Give the server quota to reply. (plus it has the the 64KB) // Give the server quota to reply. (plus it has the 64KB)
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -3316,7 +3316,7 @@ func BenchmarkServer_GetRequest(b *testing.B) {
defer st.Close() defer st.Close()
st.greet() st.greet()
// Give the server quota to reply. (plus it has the the 64KB) // Give the server quota to reply. (plus it has the 64KB)
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err) b.Fatal(err)
} }
@ -3347,7 +3347,7 @@ func BenchmarkServer_PostRequest(b *testing.B) {
}) })
defer st.Close() defer st.Close()
st.greet() st.greet()
// Give the server quota to reply. (plus it has the the 64KB) // Give the server quota to reply. (plus it has the 64KB)
if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil { if err := st.fr.WriteWindowUpdate(0, uint32(b.N*len(msg))); err != nil {
b.Fatal(err) b.Fatal(err)
} }

View File

@ -306,7 +306,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) {
return return
} }
var ErrNoCachedConn = errors.New("http2: no cached connection was available") // noCachedConnError is the concrete type of ErrNoCachedConn, which
// needs to be detected by net/http regardless of whether it's its
// bundled version (in h2_bundle.go with a rewritten type name) or
// from a user's x/net/http2. As such, it has a unique method name
// (IsHTTP2NoCachedConnError) that net/http sniffs for via func
// isNoCachedConnError.
type noCachedConnError struct{}
func (noCachedConnError) IsHTTP2NoCachedConnError() {}
func (noCachedConnError) Error() string { return "http2: no cached connection was available" }
// isNoCachedConnError reports whether err is of type noCachedConnError
// or its equivalent renamed type in net/http2's h2_bundle.go. Both types
// may coexist in the same running program.
func isNoCachedConnError(err error) bool {
_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
return ok
}
var ErrNoCachedConn error = noCachedConnError{}
// RoundTripOpt are options for the Transport.RoundTripOpt method. // RoundTripOpt are options for the Transport.RoundTripOpt method.
type RoundTripOpt struct { type RoundTripOpt struct {
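
Aside on the noCachedConnError change above: net/http cannot reference this type directly (it may come from the bundled h2_bundle.go copy or from a user's x/net/http2), so it detects the error by asserting on an anonymous interface carrying the unique method name. A hedged sketch of that caller-side check; the function name is illustrative, not part of this commit:

// detectNoCachedConn reports whether err came from an http2 transport
// (bundled or external) that had no cached connection available.
// It mirrors the sniffing pattern described in the comment above.
func detectNoCachedConn(err error) bool {
	_, ok := err.(interface{ IsHTTP2NoCachedConnError() })
	return ok
}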

View File

@ -1693,7 +1693,7 @@ func TestTransportChecksResponseHeaderListSize(t *testing.T) {
ct.run() ct.run()
} }
// Test that the the Transport returns a typed error from Response.Body.Read calls // Test that the Transport returns a typed error from Response.Body.Read calls
// when the server sends an error. (here we use a panic, since that should generate // when the server sends an error. (here we use a panic, since that should generate
// a stream error, but others like cancel should be similar) // a stream error, but others like cancel should be similar)
func TestTransportBodyReadErrorType(t *testing.T) { func TestTransportBodyReadErrorType(t *testing.T) {

View File

@ -28,15 +28,15 @@ var registries = []struct {
parse func(io.Writer, io.Reader) error parse func(io.Writer, io.Reader) error
}{ }{
{ {
"http://www.iana.org/assignments/dscp-registry/dscp-registry.xml", "https://www.iana.org/assignments/dscp-registry/dscp-registry.xml",
parseDSCPRegistry, parseDSCPRegistry,
}, },
{ {
"http://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml", "https://www.iana.org/assignments/ipv4-tos-byte/ipv4-tos-byte.xml",
parseTOSTCByte, parseTOSTCByte,
}, },
{ {
"http://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml", "https://www.iana.org/assignments/protocol-numbers/protocol-numbers.xml",
parseProtocolNumbers, parseProtocolNumbers,
}, },
} }

View File

@ -72,7 +72,7 @@ var registries = []struct {
parse func(io.Writer, io.Reader) error parse func(io.Writer, io.Reader) error
}{ }{
{ {
"http://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml", "https://www.iana.org/assignments/icmp-parameters/icmp-parameters.xml",
parseICMPv4Parameters, parseICMPv4Parameters,
}, },
} }

View File

@ -72,7 +72,7 @@ var registries = []struct {
parse func(io.Writer, io.Reader) error parse func(io.Writer, io.Reader) error
}{ }{
{ {
"http://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml", "https://www.iana.org/assignments/icmpv6-parameters/icmpv6-parameters.xml",
parseICMPv6Parameters, parseICMPv6Parameters,
}, },
} }

File diff suppressed because it is too large

View File

@ -302,32 +302,78 @@ var rules = [...]string{
"bo", "bo",
"com.bo", "com.bo",
"edu.bo", "edu.bo",
"gov.bo",
"gob.bo", "gob.bo",
"int.bo", "int.bo",
"org.bo", "org.bo",
"net.bo", "net.bo",
"mil.bo", "mil.bo",
"tv.bo", "tv.bo",
"web.bo",
"academia.bo",
"agro.bo",
"arte.bo",
"blog.bo",
"bolivia.bo",
"ciencia.bo",
"cooperativa.bo",
"democracia.bo",
"deporte.bo",
"ecologia.bo",
"economia.bo",
"empresa.bo",
"indigena.bo",
"industria.bo",
"info.bo",
"medicina.bo",
"movimiento.bo",
"musica.bo",
"natural.bo",
"nombre.bo",
"noticias.bo",
"patria.bo",
"politica.bo",
"profesional.bo",
"plurinacional.bo",
"pueblo.bo",
"revista.bo",
"salud.bo",
"tecnologia.bo",
"tksat.bo",
"transporte.bo",
"wiki.bo",
"br", "br",
"9guacu.br",
"abc.br",
"adm.br", "adm.br",
"adv.br", "adv.br",
"agr.br", "agr.br",
"aju.br",
"am.br", "am.br",
"anani.br",
"aparecida.br",
"arq.br", "arq.br",
"art.br", "art.br",
"ato.br", "ato.br",
"b.br", "b.br",
"belem.br", "belem.br",
"bhz.br",
"bio.br", "bio.br",
"blog.br", "blog.br",
"bmd.br", "bmd.br",
"boavista.br",
"bsb.br",
"campinagrande.br",
"campinas.br",
"caxias.br",
"cim.br", "cim.br",
"cng.br", "cng.br",
"cnt.br", "cnt.br",
"com.br", "com.br",
"contagem.br",
"coop.br", "coop.br",
"cri.br", "cri.br",
"cuiaba.br",
"curitiba.br",
"def.br", "def.br",
"ecn.br", "ecn.br",
"eco.br", "eco.br",
@ -338,14 +384,18 @@ var rules = [...]string{
"etc.br", "etc.br",
"eti.br", "eti.br",
"far.br", "far.br",
"feira.br",
"flog.br", "flog.br",
"floripa.br", "floripa.br",
"fm.br", "fm.br",
"fnd.br", "fnd.br",
"fortal.br",
"fot.br", "fot.br",
"foz.br",
"fst.br", "fst.br",
"g12.br", "g12.br",
"ggf.br", "ggf.br",
"goiania.br",
"gov.br", "gov.br",
"ac.gov.br", "ac.gov.br",
"al.gov.br", "al.gov.br",
@ -374,42 +424,72 @@ var rules = [...]string{
"se.gov.br", "se.gov.br",
"sp.gov.br", "sp.gov.br",
"to.gov.br", "to.gov.br",
"gru.br",
"imb.br", "imb.br",
"ind.br", "ind.br",
"inf.br", "inf.br",
"jab.br",
"jampa.br", "jampa.br",
"jdf.br",
"joinville.br",
"jor.br", "jor.br",
"jus.br", "jus.br",
"leg.br", "leg.br",
"lel.br", "lel.br",
"londrina.br",
"macapa.br",
"maceio.br",
"manaus.br",
"maringa.br",
"mat.br", "mat.br",
"med.br", "med.br",
"mil.br", "mil.br",
"morena.br",
"mp.br", "mp.br",
"mus.br", "mus.br",
"natal.br",
"net.br", "net.br",
"niteroi.br",
"*.nom.br", "*.nom.br",
"not.br", "not.br",
"ntr.br", "ntr.br",
"odo.br", "odo.br",
"org.br", "org.br",
"osasco.br",
"palmas.br",
"poa.br", "poa.br",
"ppg.br", "ppg.br",
"pro.br", "pro.br",
"psc.br", "psc.br",
"psi.br", "psi.br",
"pvh.br",
"qsl.br", "qsl.br",
"radio.br", "radio.br",
"rec.br", "rec.br",
"recife.br", "recife.br",
"ribeirao.br",
"rio.br",
"riobranco.br",
"riopreto.br",
"salvador.br",
"sampa.br",
"santamaria.br",
"santoandre.br",
"saobernardo.br",
"saogonca.br",
"sjc.br",
"slg.br", "slg.br",
"slz.br",
"sorocaba.br",
"srv.br", "srv.br",
"taxi.br", "taxi.br",
"teo.br", "teo.br",
"the.br",
"tmp.br", "tmp.br",
"trd.br", "trd.br",
"tur.br", "tur.br",
"tv.br", "tv.br",
"udi.br",
"vet.br", "vet.br",
"vix.br", "vix.br",
"vlog.br", "vlog.br",
@ -3114,7 +3194,16 @@ var rules = [...]string{
"uenohara.yamanashi.jp", "uenohara.yamanashi.jp",
"yamanakako.yamanashi.jp", "yamanakako.yamanashi.jp",
"yamanashi.yamanashi.jp", "yamanashi.yamanashi.jp",
"*.ke", "ke",
"ac.ke",
"co.ke",
"go.ke",
"info.ke",
"me.ke",
"mobi.ke",
"ne.ke",
"or.ke",
"sc.ke",
"kg", "kg",
"org.kg", "org.kg",
"net.kg", "net.kg",
@ -6168,7 +6257,6 @@ var rules = [...]string{
"chat", "chat",
"cheap", "cheap",
"chintai", "chintai",
"chloe",
"christmas", "christmas",
"chrome", "chrome",
"chrysler", "chrysler",
@ -6459,7 +6547,6 @@ var rules = [...]string{
"house", "house",
"how", "how",
"hsbc", "hsbc",
"htc",
"hughes", "hughes",
"hyatt", "hyatt",
"hyundai", "hyundai",
@ -6611,8 +6698,6 @@ var rules = [...]string{
"maserati", "maserati",
"mattel", "mattel",
"mba", "mba",
"mcd",
"mcdonalds",
"mckinsey", "mckinsey",
"med", "med",
"media", "media",
@ -6643,7 +6728,6 @@ var rules = [...]string{
"monash", "monash",
"money", "money",
"monster", "monster",
"montblanc",
"mopar", "mopar",
"mormon", "mormon",
"mortgage", "mortgage",
@ -6721,7 +6805,6 @@ var rules = [...]string{
"ott", "ott",
"ovh", "ovh",
"page", "page",
"pamperedchef",
"panasonic", "panasonic",
"panerai", "panerai",
"paris", "paris",
@ -7195,10 +7278,13 @@ var rules = [...]string{
"cc.ua", "cc.ua",
"inf.ua", "inf.ua",
"ltd.ua", "ltd.ua",
"1password.ca",
"1password.com",
"1password.eu",
"beep.pl", "beep.pl",
"*.compute.estate", "*.compute.estate",
"*.alces.network", "*.alces.network",
"*.alwaysdata.net", "alwaysdata.net",
"cloudfront.net", "cloudfront.net",
"*.compute.amazonaws.com", "*.compute.amazonaws.com",
"*.compute-1.amazonaws.com", "*.compute-1.amazonaws.com",
@ -7215,6 +7301,7 @@ var rules = [...]string{
"eu-central-1.elasticbeanstalk.com", "eu-central-1.elasticbeanstalk.com",
"eu-west-1.elasticbeanstalk.com", "eu-west-1.elasticbeanstalk.com",
"eu-west-2.elasticbeanstalk.com", "eu-west-2.elasticbeanstalk.com",
"eu-west-3.elasticbeanstalk.com",
"sa-east-1.elasticbeanstalk.com", "sa-east-1.elasticbeanstalk.com",
"us-east-1.elasticbeanstalk.com", "us-east-1.elasticbeanstalk.com",
"us-east-2.elasticbeanstalk.com", "us-east-2.elasticbeanstalk.com",
@ -7233,6 +7320,7 @@ var rules = [...]string{
"s3-eu-central-1.amazonaws.com", "s3-eu-central-1.amazonaws.com",
"s3-eu-west-1.amazonaws.com", "s3-eu-west-1.amazonaws.com",
"s3-eu-west-2.amazonaws.com", "s3-eu-west-2.amazonaws.com",
"s3-eu-west-3.amazonaws.com",
"s3-external-1.amazonaws.com", "s3-external-1.amazonaws.com",
"s3-fips-us-gov-west-1.amazonaws.com", "s3-fips-us-gov-west-1.amazonaws.com",
"s3-sa-east-1.amazonaws.com", "s3-sa-east-1.amazonaws.com",
@ -7246,6 +7334,7 @@ var rules = [...]string{
"s3.ca-central-1.amazonaws.com", "s3.ca-central-1.amazonaws.com",
"s3.eu-central-1.amazonaws.com", "s3.eu-central-1.amazonaws.com",
"s3.eu-west-2.amazonaws.com", "s3.eu-west-2.amazonaws.com",
"s3.eu-west-3.amazonaws.com",
"s3.us-east-2.amazonaws.com", "s3.us-east-2.amazonaws.com",
"s3.dualstack.ap-northeast-1.amazonaws.com", "s3.dualstack.ap-northeast-1.amazonaws.com",
"s3.dualstack.ap-northeast-2.amazonaws.com", "s3.dualstack.ap-northeast-2.amazonaws.com",
@ -7256,6 +7345,7 @@ var rules = [...]string{
"s3.dualstack.eu-central-1.amazonaws.com", "s3.dualstack.eu-central-1.amazonaws.com",
"s3.dualstack.eu-west-1.amazonaws.com", "s3.dualstack.eu-west-1.amazonaws.com",
"s3.dualstack.eu-west-2.amazonaws.com", "s3.dualstack.eu-west-2.amazonaws.com",
"s3.dualstack.eu-west-3.amazonaws.com",
"s3.dualstack.sa-east-1.amazonaws.com", "s3.dualstack.sa-east-1.amazonaws.com",
"s3.dualstack.us-east-1.amazonaws.com", "s3.dualstack.us-east-1.amazonaws.com",
"s3.dualstack.us-east-2.amazonaws.com", "s3.dualstack.us-east-2.amazonaws.com",
@ -7272,6 +7362,7 @@ var rules = [...]string{
"s3-website.ca-central-1.amazonaws.com", "s3-website.ca-central-1.amazonaws.com",
"s3-website.eu-central-1.amazonaws.com", "s3-website.eu-central-1.amazonaws.com",
"s3-website.eu-west-2.amazonaws.com", "s3-website.eu-west-2.amazonaws.com",
"s3-website.eu-west-3.amazonaws.com",
"s3-website.us-east-2.amazonaws.com", "s3-website.us-east-2.amazonaws.com",
"t3l3p0rt.net", "t3l3p0rt.net",
"tele.amune.org", "tele.amune.org",
@ -7363,6 +7454,8 @@ var rules = [...]string{
"cloudns.us", "cloudns.us",
"co.nl", "co.nl",
"co.no", "co.no",
"webhosting.be",
"hosting-cluster.nl",
"dyn.cosidns.de", "dyn.cosidns.de",
"dynamisches-dns.de", "dynamisches-dns.de",
"dnsupdater.de", "dnsupdater.de",
@ -7863,6 +7956,8 @@ var rules = [...]string{
"fedorainfracloud.org", "fedorainfracloud.org",
"fedorapeople.org", "fedorapeople.org",
"cloud.fedoraproject.org", "cloud.fedoraproject.org",
"app.os.fedoraproject.org",
"app.os.stg.fedoraproject.org",
"filegear.me", "filegear.me",
"firebaseapp.com", "firebaseapp.com",
"flynnhub.com", "flynnhub.com",
@ -7873,7 +7968,6 @@ var rules = [...]string{
"fbxos.fr", "fbxos.fr",
"freebox-os.fr", "freebox-os.fr",
"freeboxos.fr", "freeboxos.fr",
"myfusion.cloud",
"*.futurecms.at", "*.futurecms.at",
"futurehosting.at", "futurehosting.at",
"futuremailing.at", "futuremailing.at",
@ -8049,6 +8143,7 @@ var rules = [...]string{
"netlify.com", "netlify.com",
"4u.com", "4u.com",
"ngrok.io", "ngrok.io",
"nh-serv.co.uk",
"nfshost.com", "nfshost.com",
"nsupdate.info", "nsupdate.info",
"nerdpol.ovh", "nerdpol.ovh",
@ -8214,6 +8309,8 @@ var rules = [...]string{
"rackmaze.com", "rackmaze.com",
"rackmaze.net", "rackmaze.net",
"rhcloud.com", "rhcloud.com",
"resindevice.io",
"devices.resinstaging.io",
"hzc.io", "hzc.io",
"wellbeingzone.eu", "wellbeingzone.eu",
"ptplus.fit", "ptplus.fit",
@ -8221,6 +8318,7 @@ var rules = [...]string{
"sandcats.io", "sandcats.io",
"logoip.de", "logoip.de",
"logoip.com", "logoip.com",
"scrysec.com",
"firewall-gateway.com", "firewall-gateway.com",
"firewall-gateway.de", "firewall-gateway.de",
"my-gateway.de", "my-gateway.de",
@ -8231,6 +8329,7 @@ var rules = [...]string{
"my-firewall.org", "my-firewall.org",
"myfirewall.org", "myfirewall.org",
"spdns.org", "spdns.org",
"*.s5y.io",
"*.sensiosite.cloud", "*.sensiosite.cloud",
"biz.ua", "biz.ua",
"co.ua", "co.ua",
@ -8591,7 +8690,6 @@ var nodeLabels = [...]string{
"chat", "chat",
"cheap", "cheap",
"chintai", "chintai",
"chloe",
"christmas", "christmas",
"chrome", "chrome",
"chrysler", "chrysler",
@ -8942,7 +9040,6 @@ var nodeLabels = [...]string{
"hr", "hr",
"hsbc", "hsbc",
"ht", "ht",
"htc",
"hu", "hu",
"hughes", "hughes",
"hyatt", "hyatt",
@ -9136,8 +9233,6 @@ var nodeLabels = [...]string{
"mattel", "mattel",
"mba", "mba",
"mc", "mc",
"mcd",
"mcdonalds",
"mckinsey", "mckinsey",
"md", "md",
"me", "me",
@ -9179,7 +9274,6 @@ var nodeLabels = [...]string{
"monash", "monash",
"money", "money",
"monster", "monster",
"montblanc",
"mopar", "mopar",
"mormon", "mormon",
"mortgage", "mortgage",
@ -9287,7 +9381,6 @@ var nodeLabels = [...]string{
"ovh", "ovh",
"pa", "pa",
"page", "page",
"pamperedchef",
"panasonic", "panasonic",
"panerai", "panerai",
"paris", "paris",
@ -10138,6 +10231,7 @@ var nodeLabels = [...]string{
"ac", "ac",
"blogspot", "blogspot",
"transurl", "transurl",
"webhosting",
"gov", "gov",
"0", "0",
"1", "1",
@ -10208,33 +10302,79 @@ var nodeLabels = [...]string{
"gov", "gov",
"net", "net",
"org", "org",
"academia",
"agro",
"arte",
"blog",
"bolivia",
"ciencia",
"com", "com",
"cooperativa",
"democracia",
"deporte",
"ecologia",
"economia",
"edu", "edu",
"empresa",
"gob", "gob",
"gov", "indigena",
"industria",
"info",
"int", "int",
"medicina",
"mil", "mil",
"movimiento",
"musica",
"natural",
"net", "net",
"nombre",
"noticias",
"org", "org",
"patria",
"plurinacional",
"politica",
"profesional",
"pueblo",
"revista",
"salud",
"tecnologia",
"tksat",
"transporte",
"tv", "tv",
"web",
"wiki",
"9guacu",
"abc",
"adm", "adm",
"adv", "adv",
"agr", "agr",
"aju",
"am", "am",
"anani",
"aparecida",
"arq", "arq",
"art", "art",
"ato", "ato",
"b", "b",
"belem", "belem",
"bhz",
"bio", "bio",
"blog", "blog",
"bmd", "bmd",
"boavista",
"bsb",
"campinagrande",
"campinas",
"caxias",
"cim", "cim",
"cng", "cng",
"cnt", "cnt",
"com", "com",
"contagem",
"coop", "coop",
"cri", "cri",
"cuiaba",
"curitiba",
"def", "def",
"ecn", "ecn",
"eco", "eco",
@ -10245,51 +10385,85 @@ var nodeLabels = [...]string{
"etc", "etc",
"eti", "eti",
"far", "far",
"feira",
"flog", "flog",
"floripa", "floripa",
"fm", "fm",
"fnd", "fnd",
"fortal",
"fot", "fot",
"foz",
"fst", "fst",
"g12", "g12",
"ggf", "ggf",
"goiania",
"gov", "gov",
"gru",
"imb", "imb",
"ind", "ind",
"inf", "inf",
"jab",
"jampa", "jampa",
"jdf",
"joinville",
"jor", "jor",
"jus", "jus",
"leg", "leg",
"lel", "lel",
"londrina",
"macapa",
"maceio",
"manaus",
"maringa",
"mat", "mat",
"med", "med",
"mil", "mil",
"morena",
"mp", "mp",
"mus", "mus",
"natal",
"net", "net",
"niteroi",
"nom", "nom",
"not", "not",
"ntr", "ntr",
"odo", "odo",
"org", "org",
"osasco",
"palmas",
"poa", "poa",
"ppg", "ppg",
"pro", "pro",
"psc", "psc",
"psi", "psi",
"pvh",
"qsl", "qsl",
"radio", "radio",
"rec", "rec",
"recife", "recife",
"ribeirao",
"rio",
"riobranco",
"riopreto",
"salvador",
"sampa",
"santamaria",
"santoandre",
"saobernardo",
"saogonca",
"sjc",
"slg", "slg",
"slz",
"sorocaba",
"srv", "srv",
"taxi", "taxi",
"teo", "teo",
"the",
"tmp", "tmp",
"trd", "trd",
"tur", "tur",
"tv", "tv",
"udi",
"vet", "vet",
"vix", "vix",
"vlog", "vlog",
@ -10376,6 +10550,7 @@ var nodeLabels = [...]string{
"nym", "nym",
"org", "org",
"za", "za",
"1password",
"ab", "ab",
"awdev", "awdev",
"bc", "bc",
@ -10434,7 +10609,6 @@ var nodeLabels = [...]string{
"mil", "mil",
"nom", "nom",
"magentosite", "magentosite",
"myfusion",
"sensiosite", "sensiosite",
"statics", "statics",
"trafficplex", "trafficplex",
@ -10512,6 +10686,7 @@ var nodeLabels = [...]string{
"blogspot", "blogspot",
"0emm", "0emm",
"1kapp", "1kapp",
"1password",
"3utilities", "3utilities",
"4u", "4u",
"africa", "africa",
@ -10759,6 +10934,7 @@ var nodeLabels = [...]string{
"ru", "ru",
"sa", "sa",
"saves-the-whales", "saves-the-whales",
"scrysec",
"se", "se",
"securitytactics", "securitytactics",
"selfip", "selfip",
@ -10812,6 +10988,7 @@ var nodeLabels = [...]string{
"eu-central-1", "eu-central-1",
"eu-west-1", "eu-west-1",
"eu-west-2", "eu-west-2",
"eu-west-3",
"s3", "s3",
"s3-ap-northeast-1", "s3-ap-northeast-1",
"s3-ap-northeast-2", "s3-ap-northeast-2",
@ -10822,6 +10999,7 @@ var nodeLabels = [...]string{
"s3-eu-central-1", "s3-eu-central-1",
"s3-eu-west-1", "s3-eu-west-1",
"s3-eu-west-2", "s3-eu-west-2",
"s3-eu-west-3",
"s3-external-1", "s3-external-1",
"s3-fips-us-gov-west-1", "s3-fips-us-gov-west-1",
"s3-sa-east-1", "s3-sa-east-1",
@ -10870,6 +11048,10 @@ var nodeLabels = [...]string{
"s3", "s3",
"dualstack", "dualstack",
"s3", "s3",
"s3-website",
"s3",
"dualstack",
"s3",
"dualstack", "dualstack",
"s3", "s3",
"dualstack", "dualstack",
@ -10887,6 +11069,7 @@ var nodeLabels = [...]string{
"eu-central-1", "eu-central-1",
"eu-west-1", "eu-west-1",
"eu-west-2", "eu-west-2",
"eu-west-3",
"sa-east-1", "sa-east-1",
"us-east-1", "us-east-1",
"us-east-2", "us-east-2",
@ -11077,6 +11260,7 @@ var nodeLabels = [...]string{
"name", "name",
"net", "net",
"org", "org",
"1password",
"barsy", "barsy",
"cloudns", "cloudns",
"diskstation", "diskstation",
@ -11360,6 +11544,9 @@ var nodeLabels = [...]string{
"nodum", "nodum",
"pantheonsite", "pantheonsite",
"protonet", "protonet",
"resindevice",
"resinstaging",
"s5y",
"sandcats", "sandcats",
"shiftedit", "shiftedit",
"spacekit", "spacekit",
@ -11370,6 +11557,7 @@ var nodeLabels = [...]string{
"customer", "customer",
"apps", "apps",
"stage", "stage",
"devices",
"dev", "dev",
"disrec", "disrec",
"prod", "prod",
@ -13572,7 +13760,15 @@ var nodeLabels = [...]string{
"yamanakako", "yamanakako",
"yamanashi", "yamanashi",
"city", "city",
"ac",
"co", "co",
"go",
"info",
"me",
"mobi",
"ne",
"or",
"sc",
"blogspot", "blogspot",
"com", "com",
"edu", "edu",
@ -14590,6 +14786,7 @@ var nodeLabels = [...]string{
"cistron", "cistron",
"co", "co",
"demon", "demon",
"hosting-cluster",
"transurl", "transurl",
"virtueeldomein", "virtueeldomein",
"aa", "aa",
@ -15560,6 +15757,11 @@ var nodeLabels = [...]string{
"uk", "uk",
"us", "us",
"cloud", "cloud",
"os",
"stg",
"app",
"os",
"app",
"nerdpol", "nerdpol",
"abo", "abo",
"ac", "ac",
@ -16393,6 +16595,7 @@ var nodeLabels = [...]string{
"police", "police",
"sch", "sch",
"blogspot", "blogspot",
"nh-serv",
"no-ip", "no-ip",
"wellbeingzone", "wellbeingzone",
"homeoffice", "homeoffice",

25
vendor/golang.org/x/sys/plan9/asm_plan9_arm.s generated vendored Normal file
View File

@ -0,0 +1,25 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
#include "textflag.h"
// System call support for plan9 on arm
// Just jump to package syscall's implementation for all these functions.
// The runtime may know about them.
TEXT ·Syscall(SB),NOSPLIT,$0-32
JMP syscall·Syscall(SB)
TEXT ·Syscall6(SB),NOSPLIT,$0-44
JMP syscall·Syscall6(SB)
TEXT ·RawSyscall(SB),NOSPLIT,$0-28
JMP syscall·RawSyscall(SB)
TEXT ·RawSyscall6(SB),NOSPLIT,$0-40
JMP syscall·RawSyscall6(SB)
TEXT ·seek(SB),NOSPLIT,$0-36
JMP syscall·exit(SB)

View File

@ -25,3 +25,7 @@ func Clearenv() {
func Environ() []string { func Environ() []string {
return syscall.Environ() return syscall.Environ()
} }
func Unsetenv(key string) error {
return syscall.Unsetenv(key)
}

View File

@ -1,14 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build go1.4
package plan9
import "syscall"
func Unsetenv(key string) error {
// This was added in Go 1.4.
return syscall.Unsetenv(key)
}

284
vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go generated vendored Normal file
View File

@ -0,0 +1,284 @@
// mksyscall.pl -l32 -plan9 -tags plan9,arm syscall_plan9.go
// MACHINE GENERATED BY THE COMMAND ABOVE; DO NOT EDIT
// +build plan9,arm
package plan9
import "unsafe"
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func fd2path(fd int, buf []byte) (err error) {
var _p0 unsafe.Pointer
if len(buf) > 0 {
_p0 = unsafe.Pointer(&buf[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_FD2PATH, uintptr(fd), uintptr(_p0), uintptr(len(buf)))
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func pipe(p *[2]int32) (err error) {
r0, _, e1 := Syscall(SYS_PIPE, uintptr(unsafe.Pointer(p)), 0, 0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func await(s []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(s) > 0 {
_p0 = unsafe.Pointer(&s[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_AWAIT, uintptr(_p0), uintptr(len(s)), 0)
n = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func open(path string, mode int) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_OPEN, uintptr(unsafe.Pointer(_p0)), uintptr(mode), 0)
fd = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func create(path string, mode int, perm uint32) (fd int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_CREATE, uintptr(unsafe.Pointer(_p0)), uintptr(mode), uintptr(perm))
fd = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func remove(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_REMOVE, uintptr(unsafe.Pointer(_p0)), 0, 0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func stat(path string, edir []byte) (n int, err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(edir) > 0 {
_p1 = unsafe.Pointer(&edir[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_STAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
n = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func bind(name string, old string, flag int) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(name)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(old)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_BIND, uintptr(unsafe.Pointer(_p0)), uintptr(unsafe.Pointer(_p1)), uintptr(flag))
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func mount(fd int, afd int, old string, flag int, aname string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(old)
if err != nil {
return
}
var _p1 *byte
_p1, err = BytePtrFromString(aname)
if err != nil {
return
}
r0, _, e1 := Syscall6(SYS_MOUNT, uintptr(fd), uintptr(afd), uintptr(unsafe.Pointer(_p0)), uintptr(flag), uintptr(unsafe.Pointer(_p1)), 0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func wstat(path string, edir []byte) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
var _p1 unsafe.Pointer
if len(edir) > 0 {
_p1 = unsafe.Pointer(&edir[0])
} else {
_p1 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_WSTAT, uintptr(unsafe.Pointer(_p0)), uintptr(_p1), uintptr(len(edir)))
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func chdir(path string) (err error) {
var _p0 *byte
_p0, err = BytePtrFromString(path)
if err != nil {
return
}
r0, _, e1 := Syscall(SYS_CHDIR, uintptr(unsafe.Pointer(_p0)), 0, 0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Dup(oldfd int, newfd int) (fd int, err error) {
r0, _, e1 := Syscall(SYS_DUP, uintptr(oldfd), uintptr(newfd), 0)
fd = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pread(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PREAD, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
n = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Pwrite(fd int, p []byte, offset int64) (n int, err error) {
var _p0 unsafe.Pointer
if len(p) > 0 {
_p0 = unsafe.Pointer(&p[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall6(SYS_PWRITE, uintptr(fd), uintptr(_p0), uintptr(len(p)), uintptr(offset), uintptr(offset>>32), 0)
n = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Close(fd int) (err error) {
r0, _, e1 := Syscall(SYS_CLOSE, uintptr(fd), 0, 0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fstat(fd int, edir []byte) (n int, err error) {
var _p0 unsafe.Pointer
if len(edir) > 0 {
_p0 = unsafe.Pointer(&edir[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_FSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
n = int(r0)
if int32(r0) == -1 {
err = e1
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func Fwstat(fd int, edir []byte) (err error) {
var _p0 unsafe.Pointer
if len(edir) > 0 {
_p0 = unsafe.Pointer(&edir[0])
} else {
_p0 = unsafe.Pointer(&_zero)
}
r0, _, e1 := Syscall(SYS_FWSTAT, uintptr(fd), uintptr(_p0), uintptr(len(edir)))
if int32(r0) == -1 {
err = e1
}
return
}

View File

@ -16,7 +16,7 @@ const cpuSetSize = _CPU_SETSIZE / _NCPUBITS
type CPUSet [cpuSetSize]cpuMask type CPUSet [cpuSetSize]cpuMask
func schedAffinity(trap uintptr, pid int, set *CPUSet) error { func schedAffinity(trap uintptr, pid int, set *CPUSet) error {
_, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(set)), uintptr(unsafe.Pointer(set))) _, _, e := RawSyscall(trap, uintptr(pid), uintptr(unsafe.Sizeof(*set)), uintptr(unsafe.Pointer(set)))
if e != 0 { if e != 0 {
return errnoErr(e) return errnoErr(e)
} }
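
Note on the schedAffinity fix above: unsafe.Sizeof(set) is the size of the *CPUSet pointer (8 bytes on 64-bit Linux), while sched_getaffinity/sched_setaffinity expect the size of the mask itself, so unsafe.Sizeof(*set) is required. A small illustrative sketch; the array length here is made up for demonstration, the real one is _CPU_SETSIZE / _NCPUBITS:

package main

import (
	"fmt"
	"unsafe"
)

type cpuMask uint64
type CPUSet [16]cpuMask // illustrative length only

func main() {
	var set CPUSet
	p := &set
	fmt.Println(unsafe.Sizeof(p))  // 8 on 64-bit: size of the pointer (the old, buggy argument)
	fmt.Println(unsafe.Sizeof(*p)) // 128: size of the whole mask (what the kernel expects)
}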

View File

@ -35,7 +35,6 @@ TEXT ·SyscallNoError(SB),NOSPLIT,$0-24
BL runtime·exitsyscall(SB) BL runtime·exitsyscall(SB)
RET RET
TEXT ·RawSyscall(SB),NOSPLIT,$0-28 TEXT ·RawSyscall(SB),NOSPLIT,$0-28
B syscall·RawSyscall(SB) B syscall·RawSyscall(SB)
@ -53,5 +52,5 @@ TEXT ·RawSyscallNoError(SB),NOSPLIT,$0-24
MOVW R0, r2+20(FP) MOVW R0, r2+20(FP)
RET RET
TEXT ·seek(SB),NOSPLIT,$0-32 TEXT ·seek(SB),NOSPLIT,$0-28
B syscall·seek(SB) B syscall·seek(SB)

View File

@ -6,97 +6,12 @@
package unix package unix
import "unsafe" import "syscall"
// readInt returns the size-bytes unsigned integer in native byte order at offset off.
func readInt(b []byte, off, size uintptr) (u uint64, ok bool) {
if len(b) < int(off+size) {
return 0, false
}
if isBigEndian {
return readIntBE(b[off:], size), true
}
return readIntLE(b[off:], size), true
}
func readIntBE(b []byte, size uintptr) uint64 {
switch size {
case 1:
return uint64(b[0])
case 2:
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[1]) | uint64(b[0])<<8
case 4:
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[3]) | uint64(b[2])<<8 | uint64(b[1])<<16 | uint64(b[0])<<24
case 8:
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[7]) | uint64(b[6])<<8 | uint64(b[5])<<16 | uint64(b[4])<<24 |
uint64(b[3])<<32 | uint64(b[2])<<40 | uint64(b[1])<<48 | uint64(b[0])<<56
default:
panic("syscall: readInt with unsupported size")
}
}
func readIntLE(b []byte, size uintptr) uint64 {
switch size {
case 1:
return uint64(b[0])
case 2:
_ = b[1] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8
case 4:
_ = b[3] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24
case 8:
_ = b[7] // bounds check hint to compiler; see golang.org/issue/14808
return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
default:
panic("syscall: readInt with unsupported size")
}
}
// ParseDirent parses up to max directory entries in buf, // ParseDirent parses up to max directory entries in buf,
// appending the names to names. It returns the number of // appending the names to names. It returns the number of
// bytes consumed from buf, the number of entries added // bytes consumed from buf, the number of entries added
// to names, and the new names slice. // to names, and the new names slice.
func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) { func ParseDirent(buf []byte, max int, names []string) (consumed int, count int, newnames []string) {
origlen := len(buf) return syscall.ParseDirent(buf, max, names)
count = 0
for max != 0 && len(buf) > 0 {
reclen, ok := direntReclen(buf)
if !ok || reclen > uint64(len(buf)) {
return origlen, count, names
}
rec := buf[:reclen]
buf = buf[reclen:]
ino, ok := direntIno(rec)
if !ok {
break
}
if ino == 0 { // File absent in directory.
continue
}
const namoff = uint64(unsafe.Offsetof(Dirent{}.Name))
namlen, ok := direntNamlen(rec)
if !ok || namoff+namlen > uint64(len(rec)) {
break
}
name := rec[namoff : namoff+namlen]
for i, c := range name {
if c == 0 {
name = name[:i]
break
}
}
// Check for useless names before allocating a string.
if string(name) == "." || string(name) == ".." {
continue
}
max--
count++
names = append(names, string(name))
}
return origlen - len(buf), count, names
} }
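
Note on the dirent.go change above: ParseDirent now delegates to syscall.ParseDirent while keeping the contract described in its comment. A hedged usage sketch on Linux, pairing it with Getdents; the buffer size and the max = -1 "no limit" convention are illustrative choices:

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	fd, err := unix.Open(".", unix.O_RDONLY|unix.O_DIRECTORY, 0)
	if err != nil {
		log.Fatal(err)
	}
	defer unix.Close(fd)

	buf := make([]byte, 4096)
	var names []string
	for {
		n, err := unix.Getdents(fd, buf)
		if err != nil {
			log.Fatal(err)
		}
		if n == 0 {
			break
		}
		// max = -1: parse every entry in the buffer; names accumulates across calls.
		_, _, names = unix.ParseDirent(buf[:n], -1, names)
	}
	fmt.Println(names)
}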

View File

@ -11,9 +11,19 @@ import "syscall"
// We can't use the gc-syntax .s files for gccgo. On the plus side // We can't use the gc-syntax .s files for gccgo. On the plus side
// much of the functionality can be written directly in Go. // much of the functionality can be written directly in Go.
//extern gccgoRealSyscallNoError
func realSyscallNoError(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r uintptr)
//extern gccgoRealSyscall //extern gccgoRealSyscall
func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr) func realSyscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r, errno uintptr)
func SyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
syscall.Entersyscall()
r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
syscall.Exitsyscall()
return r, 0
}
func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { func Syscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
syscall.Entersyscall() syscall.Entersyscall()
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
@ -35,6 +45,11 @@ func Syscall9(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr,
return r, 0, syscall.Errno(errno) return r, 0, syscall.Errno(errno)
} }
func RawSyscallNoError(trap, a1, a2, a3 uintptr) (r1, r2 uintptr) {
r := realSyscallNoError(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
return r, 0
}
func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) { func RawSyscall(trap, a1, a2, a3 uintptr) (r1, r2 uintptr, err syscall.Errno) {
r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0) r, errno := realSyscall(trap, a1, a2, a3, 0, 0, 0, 0, 0, 0)
return r, 0, syscall.Errno(errno) return r, 0, syscall.Errno(errno)

View File

@ -31,6 +31,12 @@ gccgoRealSyscall(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintp
return r; return r;
} }
uintptr_t
gccgoRealSyscallNoError(uintptr_t trap, uintptr_t a1, uintptr_t a2, uintptr_t a3, uintptr_t a4, uintptr_t a5, uintptr_t a6, uintptr_t a7, uintptr_t a8, uintptr_t a9)
{
return syscall(trap, a1, a2, a3, a4, a5, a6, a7, a8, a9);
}
// Define the use function in C so that it is not inlined. // Define the use function in C so that it is not inlined.
extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline)); extern void use(void *) __asm__ (GOSYM_PREFIX GOPKGPATH ".use") __attribute__((noinline));

View File

@ -1,7 +1,4 @@
FROM ubuntu:16.04 FROM ubuntu:17.10
# Use the most recent ubuntu sources
RUN echo 'deb http://en.archive.ubuntu.com/ubuntu/ artful main universe' >> /etc/apt/sources.list
# Dependencies to get the git sources and go binaries # Dependencies to get the git sources and go binaries
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
@ -12,15 +9,15 @@ RUN apt-get update && apt-get install -y \
# Get the git sources. If not cached, this takes O(5 minutes). # Get the git sources. If not cached, this takes O(5 minutes).
WORKDIR /git WORKDIR /git
RUN git config --global advice.detachedHead false RUN git config --global advice.detachedHead false
# Linux Kernel: Released 03 Sep 2017 # Linux Kernel: Released 28 Jan 2018
RUN git clone --branch v4.13 --depth 1 https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux RUN git clone --branch v4.15 --depth 1 https://kernel.googlesource.com/pub/scm/linux/kernel/git/torvalds/linux
# GNU C library: Released 02 Aug 2017 (we should try to get a secure way to clone this) # GNU C library: Released 01 Feb 2018 (we should try to get a secure way to clone this)
RUN git clone --branch glibc-2.26 --depth 1 git://sourceware.org/git/glibc.git RUN git clone --branch glibc-2.27 --depth 1 git://sourceware.org/git/glibc.git
# Get Go 1.9.2 # Get Go 1.10
ENV GOLANG_VERSION 1.9.2 ENV GOLANG_VERSION 1.10
ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz ENV GOLANG_DOWNLOAD_URL https://golang.org/dl/go$GOLANG_VERSION.linux-amd64.tar.gz
ENV GOLANG_DOWNLOAD_SHA256 de874549d9a8d8d8062be05808509c09a88a248e77ec14eb77453530829ac02b ENV GOLANG_DOWNLOAD_SHA256 b5a64335f1490277b585832d1f6c7f8c6c11206cba5cd3f771dcb87b98ad1a33
RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \ RUN curl -fsSL "$GOLANG_DOWNLOAD_URL" -o golang.tar.gz \
&& echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \ && echo "$GOLANG_DOWNLOAD_SHA256 golang.tar.gz" | sha256sum -c - \
@ -31,7 +28,7 @@ ENV PATH /usr/local/go/bin:$PATH
# Linux and Glibc build dependencies # Linux and Glibc build dependencies
RUN apt-get update && apt-get install -y \ RUN apt-get update && apt-get install -y \
gawk make python \ bison gawk make python \
gcc gcc-multilib \ gcc gcc-multilib \
gettext texinfo \ gettext texinfo \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*

Some files were not shown because too many files have changed in this diff