Mirror of https://github.com/ceph/ceph-csi.git (synced 2025-01-19 03:09:30 +00:00)

Merge pull request #68 from ceph/devel: Sync rhs/ceph-csi:devel with ceph/ceph-csi:devel

Commit a8ea28f185

.mergify.yml (40 lines changed)
@@ -175,6 +175,46 @@ pull_request_rules:
         name: default
       dismiss_reviews: {}
       delete_head_branch: {}
+  - name: backport patches to release-v3.5 branch
+    conditions:
+      - base=devel
+      - label=backport-to-release-v3.5
+    actions:
+      backport:
+        branches:
+          - release-v3.5
+  # automerge backports if CI successfully ran
+  - name: automerge backport release-v3.5
+    conditions:
+      - author=mergify[bot]
+      - base=release-v3.5
+      - label!=DNM
+      - "#approved-reviews-by>=2"
+      - "approved-reviews-by=@ceph/ceph-csi-contributors"
+      - "approved-reviews-by=@ceph/ceph-csi-maintainers"
+      - "#changes-requested-reviews-by=0"
+      - "status-success=codespell"
+      - "status-success=multi-arch-build"
+      - "status-success=go-test"
+      - "status-success=commitlint"
+      - "status-success=golangci-lint"
+      - "status-success=mod-check"
+      - "status-success=lint-extras"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.21"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.22"
+      - "status-success=ci/centos/mini-e2e-helm/k8s-1.23"
+      - "status-success=ci/centos/mini-e2e/k8s-1.21"
+      - "status-success=ci/centos/mini-e2e/k8s-1.22"
+      - "status-success=ci/centos/mini-e2e/k8s-1.23"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.21"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.22"
+      - "status-success=ci/centos/k8s-e2e-external-storage/1.23"
+      - "status-success=ci/centos/upgrade-tests-cephfs"
+      - "status-success=ci/centos/upgrade-tests-rbd"
+      - "status-success=DCO"
+    actions:
+      queue:
+        name: default
   - name: backport patches to release-v3.4 branch
     conditions:
       - base=devel
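A quick, hedged illustration of how the new rule is exercised: adding the `backport-to-release-v3.5` label to a merged `devel` pull request is what makes Mergify open the corresponding backport PR against `release-v3.5`. The PR number below is hypothetical.

```bash
# Hypothetical PR number; the label name matches the rule above.
gh pr edit 2875 --repo ceph/ceph-csi --add-label backport-to-release-v3.5
```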

README.md (33 lines changed)

@@ -54,8 +54,8 @@ environments.
 | Ceph CSI Version | Container Orchestrator Name | Version Tested|
 | -----------------| --------------------------- | --------------|
+| v3.5.0 | Kubernetes | v1.21, v1.22, v1.23|
 | v3.4.0 | Kubernetes | v1.20, v1.21, v1.22|
-| v3.3.0 | Kubernetes | v1.20, v1.21, v1.22|
 
 There is work in progress to make this CO independent and thus
 support other orchestration environments (Nomad, Mesos..etc) in the future.

@@ -65,8 +65,8 @@ NOTE:
 The supported window of Ceph CSI versions is known as "N.(x-1)":
 (N (Latest major release) . (x (Latest minor release) - 1)).
 
-For example, if Ceph CSI latest major version is `3.4.0` today, support is
-provided for the versions above `3.3.0`. If users are running an unsupported
+For example, if Ceph CSI latest major version is `3.5.0` today, support is
+provided for the versions above `3.4.0`. If users are running an unsupported
 Ceph CSI version, they will be asked to upgrade when requesting support for the
 cluster.
 

@@ -88,22 +88,22 @@ for its support details.
 | | Provision File Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.16.0 |
 | | Provision Block Mode ROX volume from snapshot | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.17.0 |
 | | Provision Block Mode ROX volume from another volume | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.16.0 |
-| | Creating and deleting snapshot | Beta | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.17.0 |
-| | Provision volume from snapshot | Beta | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.17.0 |
-| | Provision volume from another volume | Beta | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.16.0 |
+| | Creating and deleting snapshot | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.17.0 |
+| | Provision volume from snapshot | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.17.0 |
+| | Provision volume from another volume | GA | >= v1.0.0 | >= v1.0.0 | Nautilus (>=14.0.0) | >= v1.16.0 |
 | | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Nautilus (>=14.0.0) | >= v1.15.0 |
-| | Volume/PV Metrics of File Mode Volume | Beta | >= v1.2.0 | >= v1.1.0 | Nautilus (>=14.0.0) | >= v1.15.0 |
-| | Volume/PV Metrics of Block Mode Volume | Beta | >= v1.2.0 | >= v1.1.0 | Nautilus (>=14.0.0) | >= v1.21.0 |
+| | Volume/PV Metrics of File Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Nautilus (>=14.0.0) | >= v1.15.0 |
+| | Volume/PV Metrics of Block Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Nautilus (>=14.0.0) | >= v1.21.0 |
 | | Topology Aware Provisioning Support | Alpha | >= v2.1.0 | >= v1.1.0 | Nautilus (>=14.0.0) | >= v1.14.0 |
-| CephFS | Dynamically provision, de-provision File mode RWO volume | Beta | >= v1.1.0 | >= v1.0.0 | Nautilus (>=14.2.2) | >= v1.14.0 |
-| | Dynamically provision, de-provision File mode RWX volume | Beta | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
+| CephFS | Dynamically provision, de-provision File mode RWO volume | GA | >= v1.1.0 | >= v1.0.0 | Nautilus (>=14.2.2) | >= v1.14.0 |
+| | Dynamically provision, de-provision File mode RWX volume | GA | >= v1.1.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
 | | Dynamically provision, de-provision File mode ROX volume | Alpha | >= v3.0.0 | >= v1.0.0 | Nautilus (>=v14.2.2) | >= v1.14.0 |
 | | Dynamically provision, de-provision File mode RWOP volume | Alpha | >= v3.5.0 | >= v1.5.0 | Nautilus (>=14.0.0) | >= v1.22.0 |
-| | Creating and deleting snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
-| | Provision volume from snapshot | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
-| | Provision volume from another volume | Beta | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.16.0 |
+| | Creating and deleting snapshot | GA | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
+| | Provision volume from snapshot | GA | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.17.0 |
+| | Provision volume from another volume | GA | >= v3.1.0 | >= v1.0.0 | Octopus (>=v15.2.4) | >= v1.16.0 |
 | | Expand volume | Beta | >= v2.0.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
-| | Volume/PV Metrics of File Mode Volume | Beta | >= v1.2.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
+| | Volume/PV Metrics of File Mode Volume | GA | >= v1.2.0 | >= v1.1.0 | Nautilus (>=v14.2.2) | >= v1.15.0 |
 
 `NOTE`: The `Alpha` status reflects possible non-backward
 compatible changes in the future, and is thus not recommended

@@ -119,12 +119,13 @@ in the Kubernetes documentation.
 | Ceph CSI Release/Branch | Container image name | Image Tag |
 | ----------------------- | ---------------------------- | --------- |
 | devel (Branch) | quay.io/cephcsi/cephcsi | canary |
+| v3.5.0 (Release) | quay.io/cephcsi/cephcsi | v3.5.0 |
 | v3.4.0 (Release) | quay.io/cephcsi/cephcsi | v3.4.0 |
-| v3.3.1 (Release) | quay.io/cephcsi/cephcsi | v3.3.1 |
-| v3.3.0 (Release) | quay.io/cephcsi/cephcsi | v3.3.0 |
 
 | Deprecated Ceph CSI Release/Branch | Container image name | Image Tag |
 | ----------------------- | --------------------------------| --------- |
+| v3.3.1 (Release) | quay.io/cephcsi/cephcsi | v3.3.1 |
+| v3.3.0 (Release) | quay.io/cephcsi/cephcsi | v3.3.0 |
 | v3.2.2 (Release) | quay.io/cephcsi/cephcsi | v3.2.2 |
 | v3.2.1 (Release) | quay.io/cephcsi/cephcsi | v3.2.1 |
 | v3.2.0 (Release) | quay.io/cephcsi/cephcsi | v3.2.0 |
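As a quick check that the newly listed tag is published, the release image can be pulled straight from the registry shown in the table; a minimal sketch (substitute `docker` for `podman` if that is what you run):

```bash
podman pull quay.io/cephcsi/cephcsi:v3.5.0
```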

@@ -48,11 +48,11 @@ ROOK_VERSION=v1.6.2
 ROOK_CEPH_CLUSTER_IMAGE=docker.io/ceph/ceph:v16
 
 # CSI sidecar version
-CSI_ATTACHER_VERSION=v3.3.0
+CSI_ATTACHER_VERSION=v3.4.0
 CSI_SNAPSHOTTER_VERSION=v4.2.0
-CSI_PROVISIONER_VERSION=v3.0.0
+CSI_PROVISIONER_VERSION=v3.1.0
 CSI_RESIZER_VERSION=v1.3.0
-CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.3.0
+CSI_NODE_DRIVER_REGISTRAR_VERSION=v2.4.0
 
 # e2e settings
 # - enable CEPH_CSI_RUN_ALL_TESTS when running tests with if it has root
@ -80,7 +80,7 @@ nodeplugin:
|
|||||||
registrar:
|
registrar:
|
||||||
image:
|
image:
|
||||||
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
|
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
|
||||||
tag: v2.3.0
|
tag: v2.4.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
@ -161,7 +161,7 @@ provisioner:
|
|||||||
provisioner:
|
provisioner:
|
||||||
image:
|
image:
|
||||||
repository: k8s.gcr.io/sig-storage/csi-provisioner
|
repository: k8s.gcr.io/sig-storage/csi-provisioner
|
||||||
tag: v3.0.0
|
tag: v3.1.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
@ -170,7 +170,7 @@ provisioner:
|
|||||||
enabled: true
|
enabled: true
|
||||||
image:
|
image:
|
||||||
repository: k8s.gcr.io/sig-storage/csi-attacher
|
repository: k8s.gcr.io/sig-storage/csi-attacher
|
||||||
tag: v3.3.0
|
tag: v3.4.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
|
@ -102,7 +102,7 @@ nodeplugin:
|
|||||||
registrar:
|
registrar:
|
||||||
image:
|
image:
|
||||||
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
|
repository: k8s.gcr.io/sig-storage/csi-node-driver-registrar
|
||||||
tag: v2.3.0
|
tag: v2.4.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
@ -198,7 +198,7 @@ provisioner:
|
|||||||
provisioner:
|
provisioner:
|
||||||
image:
|
image:
|
||||||
repository: k8s.gcr.io/sig-storage/csi-provisioner
|
repository: k8s.gcr.io/sig-storage/csi-provisioner
|
||||||
tag: v3.0.0
|
tag: v3.1.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
@ -207,7 +207,7 @@ provisioner:
|
|||||||
enabled: true
|
enabled: true
|
||||||
image:
|
image:
|
||||||
repository: k8s.gcr.io/sig-storage/csi-attacher
|
repository: k8s.gcr.io/sig-storage/csi-attacher
|
||||||
tag: v3.3.0
|
tag: v3.4.0
|
||||||
pullPolicy: IfNotPresent
|
pullPolicy: IfNotPresent
|
||||||
resources: {}
|
resources: {}
|
||||||
|
|
||||||
|
@@ -43,7 +43,7 @@ spec:
       priorityClassName: system-cluster-critical
       containers:
         - name: csi-provisioner
-          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
+          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
           args:
             - "--csi-address=$(ADDRESS)"
             - "--v=5"

@@ -90,7 +90,7 @@ spec:
             - name: socket-dir
               mountPath: /csi
         - name: csi-cephfsplugin-attacher
-          image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
+          image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
           args:
             - "--v=5"
             - "--csi-address=$(ADDRESS)"

@@ -25,7 +25,7 @@ spec:
           # created by privileged CSI driver container.
           securityContext:
             privileged: true
-          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
+          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
           args:
             - "--v=5"
            - "--csi-address=/csi/csi.sock"

@@ -47,7 +47,7 @@ spec:
      priorityClassName: system-cluster-critical
      containers:
        - name: csi-provisioner
-          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.0.0
+          image: k8s.gcr.io/sig-storage/csi-provisioner:v3.1.0
          args:
            - "--csi-address=$(ADDRESS)"
            - "--v=5"

@@ -81,7 +81,7 @@ spec:
            - name: socket-dir
              mountPath: /csi
        - name: csi-attacher
-          image: k8s.gcr.io/sig-storage/csi-attacher:v3.3.0
+          image: k8s.gcr.io/sig-storage/csi-attacher:v3.4.0
          args:
            - "--v=5"
            - "--csi-address=$(ADDRESS)"

@@ -28,7 +28,7 @@ spec:
          # created by privileged CSI driver container.
          securityContext:
            privileged: true
-          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.3.0
+          image: k8s.gcr.io/sig-storage/csi-node-driver-registrar:v2.4.0
          args:
            - "--v=5"
            - "--csi-address=/csi/csi.sock"

@@ -6,6 +6,7 @@
 - [Snapshot API version support matrix](#snapshot-api-version-support-matrix)
 - [Upgrading from v3.2 to v3.3](#upgrading-from-v32-to-v33)
 - [Upgrading from v3.3 to v3.4](#upgrading-from-v33-to-v34)
+- [Upgrading from v3.4 to v3.5](#upgrading-from-v34-to-v35)
 - [Upgrading CephFS](#upgrading-cephfs)
 - [1. Upgrade CephFS Provisioner resources](#1-upgrade-cephfs-provisioner-resources)
 - [1.1 Update the CephFS Provisioner RBAC](#11-update-the-cephfs-provisioner-rbac)

@@ -43,7 +44,7 @@ To avoid this issue in future upgrades, we recommend that you do not use the
 fuse client as of now.
 
 This guide will walk you through the steps to upgrade the software in a cluster
-from v3.3 to v3.4
+from v3.4 to v3.5
 
 ### Snapshot-controller and snapshot crd
 

@@ -67,6 +68,11 @@ to upgrade from cephcsi v3.2 to v3.3
 
 ## Upgrading from v3.3 to v3.4
 
+Refer [upgrade-from-v3.3-v3.4](https://github.com/ceph/ceph-csi/blob/v3.4.0/docs/ceph-csi-upgrade.md)
+to upgrade from cephcsi v3.3 to v3.4
+
+## Upgrading from v3.4 to v3.5
+
 **Ceph-csi releases from devel are expressly unsupported.** It is strongly
 recommended that you use [official
 releases](https://github.com/ceph/ceph-csi/releases) of Ceph-csi. Unreleased

@@ -75,23 +81,23 @@ that will not be supported in the official releases. Builds from the devel
 branch can have functionality changed and even removed at any time without
 compatibility support and without prior notice.
 
-**Also, we do not recommend any direct upgrades to 3.4 except from 3.3 to 3.4.**
-For example, upgrading from 3.2 to 3.4 is not recommended.
+**Also, we do not recommend any direct upgrades to 3.5 except from 3.4 to 3.5.**
+For example, upgrading from 3.3 to 3.5 is not recommended.
 
-git checkout v3.4.0 tag
+git checkout v3.5.0 tag
 
 ```bash
 git clone https://github.com/ceph/ceph-csi.git
 cd ./ceph-csi
-git checkout v3.4.0
+git checkout v3.5.0
 ```
 
-**Note:** While upgrading please Ignore warning messages from kubectl output
-
 ```console
 Warning: kubectl apply should be used on resource created by either kubectl create --save-config or kubectl apply
 ```
 
+**Note:** While upgrading please Ignore above warning messages from kubectl output
+
 ### Upgrading CephFS
 
 Upgrading cephfs csi includes upgrade of cephfs driver and as well as
@@ -208,7 +214,7 @@ For each node:
 - The pod deletion causes the pods to be restarted and updated automatically
   on the node.
 
-we have successfully upgraded cephfs csi from v3.3 to v3.4
+we have successfully upgraded cephfs csi from v3.4 to v3.5
 
 ### Upgrading RBD
 
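A sketch of the per-node pod deletion described above; the namespace, the `app=csi-cephfsplugin` label and the node name are assumptions about a default deployment and should be adjusted to the cluster at hand:

```bash
# Delete the CephFS node-plugin pod on one node; the DaemonSet recreates it
# with the upgraded image (namespace, label and node name are illustrative).
kubectl -n default delete pod -l app=csi-cephfsplugin \
  --field-selector spec.nodeName=worker-1
```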
@@ -274,7 +280,7 @@ daemonset.apps/csi-rbdplugin configured
 service/csi-metrics-rbdplugin configured
 ```
 
-we have successfully upgraded RBD csi from v3.3 to v3.4
+we have successfully upgraded RBD csi from v3.4 to v3.5
 
 ### CSI Sidecar containers consideration
 

docs/intree-migrate.md (new file, 143 lines):

# In-tree storage plugin to CSI Driver Migration

This document covers the example usage of in-tree RBD storage plugin to CSI
migration feature which can be enabled in a Kubernetes cluster. At present, this
feature is only supported for RBD in-tree plugin. Once this feature is enabled,
the in-tree volume requests (`kubernetes.io/rbd`) will be redirected to a
corresponding CSI (`rbd.csi.ceph.com`) driver.

## RBD

- [Prerequisite](#prerequisite)
- [Volume operations after enabling CSI migration](#volume-operations-after-enabling-csi-migration)
  - [Create volume](#create-volume)
  - [Mount volume to a POD](#mount-volume-to-a-pod)
  - [Resize volume](#resize-volume)
  - [Unmount volume](#unmount-volume)
  - [Delete volume](#delete-volume)
- [References](#additional-references)

### Prerequisite

For in-tree RBD migration to CSI driver to be supported for your Kubernetes
cluster, the Kubernetes version running in your cluster should be >= v1.23. We
also need sidecar controllers (`csi-provisioner` and `csi-resizer`) which are
compatible with the Kubernetes version v1.23 to be available in your cluster.
You can enable the migration with a couple of feature gates in your Kubernetes
cluster. These feature gates are alpha in Kubernetes 1.23 release.

- `CSIMigrationRBD`: when enabled, it will redirect traffic from in-tree rbd
  plugin (`kubernetes.io/rbd`) to CSI driver (`rbd.csi.ceph.com`), default
  to `false` now.

- `IntreePluginRBDUnregister`: Disables the RBD in-tree driver

To enable feature gates, refer [feature gates](https://kubernetes.io/docs/reference/command-line-tools-reference/feature-gates/)

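Purely as an illustration (not part of the new document): these gates are switched on through the standard `--feature-gates` option of the kube-apiserver, kube-controller-manager and kubelet; how that flag is wired in depends on your installer, so treat the snippet below as a sketch.

```bash
# Sketch for a kubeadm-provisioned node; the file location varies by distro and
# the same gates must also be passed to kube-apiserver and kube-controller-manager.
# IntreePluginRBDUnregister removes the in-tree plugin, so enable it only after
# the CSI driver is installed.
# /etc/default/kubelet
KUBELET_EXTRA_ARGS="--feature-gates=CSIMigrationRBD=true,IntreePluginRBDUnregister=true"
```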
As a Kubernetes cluster operator that administers storage, here are the
prerequisites that you must complete before you attempt migration to the RBD CSI
driver:

- You must install the Ceph CSI driver (`rbd.csi.ceph.com`), v3.5.0 or above,
  into your Kubernetes cluster.
- Configure `clusterID` field in the configmap as
  discussed [here](https://github.com/ceph/ceph-csi/blob/devel/docs/design/proposals/intree-migrate.md#clusterid-field-in-the-migration-request)
- Configure migration secret as
  discussed [here](https://github.com/ceph/ceph-csi/blob/devel/docs/design/proposals/intree-migrate.md#migration-secret-for-csi-operations)

In below examples, `fast-rbd` is in-tree storageclass with provisioner
referencing in-tree provisioner `Kubernetes.io/rbd`.

```console
$ kubectl describe sc fast-rbd |grep -i provisioner
Provisioner: Kubernetes.io/rbd
```

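The `pvc.yaml` used in the examples below is not part of this change; a minimal sketch of such a claim, assuming the `fast-rbd` StorageClass shown above, could be written out like this:

```console
$ cat > pvc.yaml <<EOF
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: testpvc
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
  storageClassName: fast-rbd
EOF
```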
### Volume operations after enabling CSI migration

This section covers the operations on volumes provisioned after enabling CSI
migration in a cluster.

#### Create Volume

```console
$ kubectl create -f pvc.yaml
persistentvolumeclaim/testpvc created

$ kubectl get pvc,pv
NAME                            STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
persistentvolumeclaim/testpvc   Bound    pvc-c4e7dca5-4be6-4168-8eb5-af6ade04261f   1Gi        RWO            fast-rbd       24s

NAME                                                        CAPACITY   ACCESS MODES   RECLAIM POLICY   STATUS   CLAIM             STORAGECLASS   REASON   AGE
persistentvolume/pvc-c4e7dca5-4be6-4168-8eb5-af6ade04261f   1Gi        RWO            Delete           Bound    default/testpvc   fast-rbd                18s
```

#### Mount Volume to a POD

Create a pod with PVC and verify the mount inside the POD

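Similarly, `pod.yaml` is not included in the diff; a hypothetical pod that mounts `testpvc` at the path seen in the `df` output below might look like:

```console
$ cat > pod.yaml <<EOF
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: testpvc
  containers:
    - name: task-pv-container
      image: nginx
      volumeMounts:
        - name: task-pv-storage
          mountPath: /usr/share/nginx/html
EOF
```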
```console
$ kubectl create -f pod.yaml
pod/task-pv-pod created

$ kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
task-pv-pod   1/1     Running   0          2m36s

$ kubectl get pvc
NAME      STATUS   VOLUME                                     CAPACITY   ACCESS MODES   STORAGECLASS   AGE
testpvc   Bound    pvc-c4e7dca5-4be6-4168-8eb5-af6ade04261f   1Gi        RWO            fast-rbd       4m40s

$ kubectl exec -ti task-pv-pod -- df -kh |grep nginx
/dev/rbd0       976M  2.6M  958M   1% /usr/share/nginx/html
```

#### Resize Volume

Resize PVC from 1Gi to 2Gi and verify the new size change in the POD

```console
$ kubectl patch pvc testpvc -p '{"spec":{"resources":{"requests":{"storage":"2Gi"}}}}'
persistentvolumeclaim/testpvc patched

$ kubectl exec -ti task-pv-pod -- df -kh |grep nginx
/dev/rbd0       2.0G  3.0M  2.0G   1% /usr/share/nginx/html
```

#### Unmount Volume

Delete POD and verify pod deleted successfully

```console
$ kubectl get pods
NAME          READY   STATUS    RESTARTS   AGE
task-pv-pod   1/1     Running   0          5m31s

$ kubectl delete pod task-pv-pod
pod "task-pv-pod" deleted

$ kubectl get pods
```

#### Delete volume

Delete PVC and verify PVC and PV objects are deleted

```console
$ kubectl delete pvc testpvc
persistentvolumeclaim "testpvc" deleted

$ kubectl get pvc
No resources found in default namespace.

$ kubectl get pv
No resources found
```

### Additional References

To know more about in-tree to CSI migration:

- [design doc](./design/proposals/intree-migrate.md)
- [Kubernetes 1.17 Feature: Kubernetes In-Tree to CSI Volume Migration Moves to Beta](https://Kubernetes.io/blog/2019/12/09/Kubernetes-1-17-feature-csi-migration-beta/)

@@ -29,6 +29,9 @@ client-side, which is inside the `csi-rbdplugin` node plugin.
 To use the rbd-nbd mounter for RBD-backed PVs, set `mounter` to `rbd-nbd`
 in the StorageClass.
 
+Please note that the minimum recommended kernel version to use rbd-nbd is
+5.4 or higher.
+
 ### Configuring logging path
 
 If you are using the default rbd nodePlugin DaemonSet and StorageClass
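For reference, a hedged sketch of where the `mounter` parameter mentioned above lives; everything except `mounter: rbd-nbd` is an assumption about a typical ceph-csi RBD StorageClass, and required items such as the provisioner/node-stage secrets are omitted for brevity:

```console
$ cat <<EOF | kubectl create -f -
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-rbd-nbd
provisioner: rbd.csi.ceph.com
parameters:
  clusterID: ceph-cluster-1   # assumption: your ceph-csi clusterID
  pool: rbd                   # assumption: an existing RBD pool
  mounter: rbd-nbd            # the setting described above
reclaimPolicy: Delete
allowVolumeExpansion: true
EOF
```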

go.mod (18 lines changed)

@@ -20,21 +20,21 @@ require (
     github.com/onsi/ginkgo v1.16.5
     github.com/onsi/gomega v1.17.0
     github.com/pborman/uuid v1.2.1
-    github.com/prometheus/client_golang v1.11.0
+    github.com/prometheus/client_golang v1.12.0
     github.com/stretchr/testify v1.7.0
     golang.org/x/crypto v0.0.0-20210817164053-32db794688a5
-    golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8
-    google.golang.org/grpc v1.42.0
+    golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
+    google.golang.org/grpc v1.43.0
     google.golang.org/protobuf v1.27.1
     k8s.io/api v0.23.0
     k8s.io/apimachinery v0.23.0
     k8s.io/client-go v12.0.0+incompatible
     k8s.io/cloud-provider v0.23.0
-    k8s.io/klog/v2 v2.30.0
+    k8s.io/klog/v2 v2.40.1
     //
     // when updating k8s.io/kubernetes, make sure to update the replace section too
     //
-    k8s.io/kubernetes v1.23.0
+    k8s.io/kubernetes v1.23.1
     k8s.io/mount-utils v0.23.0
     k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b
     sigs.k8s.io/controller-runtime v0.11.0-beta.0.0.20211208212546-f236f0345ad2

@@ -47,7 +47,7 @@ require (
     github.com/bits-and-blooms/bitset v1.2.0 // indirect
     github.com/blang/semver v3.5.1+incompatible // indirect
     github.com/cenkalti/backoff/v3 v3.0.0 // indirect
-    github.com/cespare/xxhash/v2 v2.1.1 // indirect
+    github.com/cespare/xxhash/v2 v2.1.2 // indirect
     github.com/cyphar/filepath-securejoin v0.2.2 // indirect
     github.com/davecgh/go-spew v1.1.1 // indirect
     github.com/docker/distribution v2.7.1+incompatible // indirect

@@ -110,8 +110,8 @@ require (
     github.com/pkg/errors v0.9.1 // indirect
     github.com/pmezard/go-difflib v1.0.0 // indirect
     github.com/prometheus/client_model v0.2.0 // indirect
-    github.com/prometheus/common v0.28.0 // indirect
-    github.com/prometheus/procfs v0.6.0 // indirect
+    github.com/prometheus/common v0.32.1 // indirect
+    github.com/prometheus/procfs v0.7.3 // indirect
     github.com/ryanuber/go-glob v1.0.0 // indirect
     github.com/spf13/cobra v1.2.1 // indirect
     github.com/spf13/pflag v1.0.5 // indirect

@@ -126,7 +126,7 @@ require (
     go.opentelemetry.io/otel/trace v0.20.0 // indirect
     go.opentelemetry.io/proto/otlp v0.7.0 // indirect
     go.uber.org/atomic v1.9.0 // indirect
-    golang.org/x/net v0.0.0-20210825183410-e898025ed96a // indirect
+    golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
     golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
     golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
     golang.org/x/text v0.3.7 // indirect
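Dependency bumps of this kind are typically produced with the standard Go tooling; a sketch using one of the modules touched above, which also rewrites the corresponding go.sum entries shown next:

```bash
# Bump one dependency and let Go update go.mod/go.sum accordingly.
go get github.com/prometheus/client_golang@v1.12.0
go mod tidy
```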

go.sum (29 lines changed)

@@ -174,8 +174,9 @@ github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6
 github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
 github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
-github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
+github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/chai2010/gettext-go v0.0.0-20160711120539-c6fed771bfd5/go.mod h1:/iP1qXHoty45bqomnu2LM+VVyAEdWN+vtSHGlQgyxbw=
 github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M=
 github.com/chrismalek/oktasdk-go v0.0.0-20181212195951-3430665dfaa0/go.mod h1:5d8DqS60xkj9k3aXfL3+mXBH0DPYO0FQjcKosxl+b/Q=

@@ -929,8 +930,9 @@ github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDf
 github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
 github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU=
 github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
-github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
 github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
+github.com/prometheus/client_golang v1.12.0 h1:C+UIj/QWtmqY13Arb8kwMt5j34/0Z2iKamrJ+ryC0Gg=
+github.com/prometheus/client_golang v1.12.0/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
 github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=

@@ -946,8 +948,9 @@ github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
 github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4=
 github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
 github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
-github.com/prometheus/common v0.28.0 h1:vGVfV9KrDTvWt5boZO0I19g2E3CsWfpPPKZM9dt3mEw=
 github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
+github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
+github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
 github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
 github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=

@@ -957,8 +960,9 @@ github.com/prometheus/procfs v0.0.0-20190522114515-bc1a522cf7b1/go.mod h1:TjEm7z
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A=
 github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
-github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
 github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
+github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
+github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
 github.com/quobyte/api v0.1.8/go.mod h1:jL7lIHrmqQ7yh05OJ+eEEdHr0u/kmT1Ff9iHd+4H6VI=
 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=

@@ -1256,8 +1260,9 @@ golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qx
 golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
-golang.org/x/net v0.0.0-20210825183410-e898025ed96a h1:bRuuGXV8wwSdGTB+CtJf+FjgO1APK1CoO39T4BN/XBw=
 golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
+golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
 golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
 golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -1379,8 +1384,9 @@ golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8 h1:M69LAlWZCshgp0QSzyDcSsSIejIEeuaCVpmwcKwyLMk=
 golang.org/x/sys v0.0.0-20211029165221-6e7872819dc8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
+golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=

@@ -1606,8 +1612,8 @@ google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
 google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.42.0 h1:XT2/MFpuPFsEX2fWh3YQtHkZ+WYZFQRfaUgLZYj/p6A=
-google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
+google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
 google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=

@@ -1709,8 +1715,9 @@ k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
 k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
 k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
-k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
 k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
+k8s.io/klog/v2 v2.40.1 h1:P4RRucWk/lFOlDdkAr3mc7iWFkgKrZY9qZMAgek06S4=
+k8s.io/klog/v2 v2.40.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
 k8s.io/kube-aggregator v0.23.0/go.mod h1:b1vpoaTWKZjCzvbe1KXFw3vPbISrghJsg7/RI8oZUME=
 k8s.io/kube-controller-manager v0.23.0/go.mod h1:iHapRJJBe+fWu6hG3ye43YMFEeZcnIlRxDUS72bwJoE=
 k8s.io/kube-openapi v0.0.0-20180731170545-e3762e86a74c/go.mod h1:BXM9ceUBTj2QnfH2MK1odQs778ajze1RxcmP6S8RVVc=

@@ -1723,8 +1730,8 @@ k8s.io/kubectl v0.23.0 h1:WABWfj+Z4tC3SfKBCtZr5sIVHsFtkU9Azii4DR9IT6Y=
 k8s.io/kubectl v0.23.0/go.mod h1:TfcGEs3u4dkmoC2eku1GYymdGaMtPMcaLLFrX/RB2kI=
 k8s.io/kubelet v0.23.0 h1:hHdHe/Hp3R2HzxnYI8/f173gDUOTRYERd7S7+to9MZw=
 k8s.io/kubelet v0.23.0/go.mod h1:A4DxfIt5Ka+rz54HAFhs1bgiFjJT6lcaAYUcACZl1/k=
-k8s.io/kubernetes v1.23.0 h1:r2DrryCpnmFfBuelpUNSWXHtD6Zy7SdwaCcycV5DsJE=
-k8s.io/kubernetes v1.23.0/go.mod h1:sgD3+Qzb8FHlRKlZnNCN+np3zZuHEAb/0PKLJkYyCUI=
+k8s.io/kubernetes v1.23.1 h1:iJfubd03CDap4m69Ue+u2I6quNUYiYlC8+TakEHATjc=
+k8s.io/kubernetes v1.23.1/go.mod h1:baMGbPpwwP0kT/+eAPtdqoWNRoXyyTJ2Zf+fw/Y8t04=
 k8s.io/legacy-cloud-providers v0.23.0/go.mod h1:tM5owPlhLyEYJC2FLHgcGu1jks5ANvH2JlY03mnUYU4=
 k8s.io/metrics v0.23.0/go.mod h1:NDiZTwppEtAuKJ1Rxt3S4dhyRzdp6yUcJf0vo023dPo=
 k8s.io/mount-utils v0.23.0 h1:8sGMlbbQOA268SidZVoL7wOgEcbByoa6+bvFZCywhbg=

@@ -124,6 +124,11 @@ func CheckVolExists(ctx context.Context,
         return nil, cerrors.ErrClonePending
     }
     if cloneState == cephFSCloneFailed {
+        log.ErrorLog(ctx,
+            "clone failed, deleting subvolume clone. vol=%s, subvol=%s subvolgroup=%s",
+            volOptions.FsName,
+            vid.FsSubvolName,
+            volOptions.SubvolumeGroup)
         err = volOptions.PurgeVolume(ctx, fsutil.VolumeID(vid.FsSubvolName), true)
         if err != nil {
             log.ErrorLog(ctx, "failed to delete volume %s: %v", vid.FsSubvolName, err)

@@ -237,7 +237,7 @@ func (rv *rbdVolume) doSnapClone(ctx context.Context, parentVol *rbdVolume) erro
     }()
 
     if rv.ThickProvision {
-        err = tempClone.DeepCopy(rv)
+        err = tempClone.DeepCopy(&rv.rbdImage)
         if err != nil {
             return fmt.Errorf("failed to deep copy %q into %q: %w", parentVol, rv, err)
         }

@@ -215,6 +215,12 @@ func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSna
         if err != nil {
             return status.Errorf(codes.InvalidArgument, "cannot restore from snapshot %s: %s", rbdSnap, err.Error())
         }
+
+        err = rbdSnap.isCompabitableClone(&rbdVol.rbdImage)
+        if err != nil {
+            return status.Errorf(codes.InvalidArgument, "cannot restore from snapshot %s: %s", rbdSnap, err.Error())
+        }
+
     case parentVol != nil:
         err = parentVol.isCompatibleEncryption(&rbdVol.rbdImage)
         if err != nil {

@@ -225,6 +231,11 @@ func checkValidCreateVolumeRequest(rbdVol, parentVol *rbdVolume, rbdSnap *rbdSna
         if err != nil {
             return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
         }
+
+        err = parentVol.isCompabitableClone(&rbdVol.rbdImage)
+        if err != nil {
+            return status.Errorf(codes.InvalidArgument, "cannot clone from volume %s: %s", parentVol, err.Error())
+        }
     }
 
     return nil

@@ -582,7 +593,7 @@ func (cs *ControllerServer) createVolumeFromSnapshot(
     parentVol.conn = rbdVol.conn.Copy()
 
     if rbdVol.ThickProvision {
-        err = parentVol.DeepCopy(rbdVol)
+        err = parentVol.DeepCopy(&rbdVol.rbdImage)
         if err != nil {
             return status.Errorf(codes.Internal, "failed to deep copy %q into %q: %v", parentVol, rbdVol, err)
         }

@@ -131,6 +131,8 @@ type rbdImage struct {
     // Parent Pool is the pool that contains the parent image.
     ParentPool string
     ImageFeatureSet librbd.FeatureSet
+    // Primary represent if the image is primary or not.
+    Primary bool
 
     // encryption provides access to optional VolumeEncryption functions
     encryption *util.VolumeEncryption

@@ -172,7 +174,6 @@ type rbdVolume struct {
     RequestedVolSize int64
     DisableInUseChecks bool
     readOnly bool
-    Primary bool
     ThickProvision bool
 }
 
@@ -424,11 +425,11 @@ func (ri *rbdImage) openIoctx() error {
 
 // getImageID queries rbd about the given image and stores its id, returns
 // ErrImageNotFound if provided image is not found.
-func (rv *rbdVolume) getImageID() error {
-    if rv.ImageID != "" {
+func (ri *rbdImage) getImageID() error {
+    if ri.ImageID != "" {
         return nil
     }
-    image, err := rv.open()
+    image, err := ri.open()
     if err != nil {
         return err
     }

@@ -438,7 +439,7 @@ func (rv *rbdVolume) getImageID() error {
     if err != nil {
         return err
     }
-    rv.ImageID = id
+    ri.ImageID = id
 
     return nil
 }

@@ -467,17 +468,17 @@ func (ri *rbdImage) open() (*librbd.Image, error) {
 
 // allocate uses the stripe-period of the image to fully allocate (thick
 // provision) the image.
-func (rv *rbdVolume) allocate(offset uint64) error {
+func (ri *rbdImage) allocate(offset uint64) error {
     // We do not want to call discard, we really want to write zeros to get
     // the allocation. This sets the option for the re-used connection, and
     // all subsequent images that are opened. That is not a problem, as
     // this is the only place images get written.
-    err := rv.conn.DisableDiscardOnZeroedWriteSame()
+    err := ri.conn.DisableDiscardOnZeroedWriteSame()
     if err != nil {
         return err
     }
 
-    image, err := rv.open()
+    image, err := ri.open()
     if err != nil {
         return err
     }

@@ -543,8 +544,8 @@ func (rv *rbdVolume) allocate(offset uint64) error {
 
 // isInUse checks if there is a watcher on the image. It returns true if there
 // is a watcher on the image, otherwise returns false.
-func (rv *rbdVolume) isInUse() (bool, error) {
-    image, err := rv.open()
+func (ri *rbdImage) isInUse() (bool, error) {
+    image, err := ri.open()
     if err != nil {
         if errors.Is(err, ErrImageNotFound) || errors.Is(err, util.ErrPoolNotFound) {
             return false, err

@@ -563,11 +564,11 @@ func (rv *rbdVolume) isInUse() (bool, error) {
     if err != nil {
         return false, err
     }
-    rv.Primary = mirrorInfo.Primary
+    ri.Primary = mirrorInfo.Primary
 
     // because we opened the image, there is at least one watcher
     defaultWatchers := 1
-    if rv.Primary {
+    if ri.Primary {
         // if rbd mirror daemon is running, a watcher will be added by the rbd
         // mirror daemon for mirrored images.
         defaultWatchers++

@@ -619,18 +620,18 @@ func isCephMgrSupported(ctx context.Context, clusterID string, err error) bool {
 
 // ensureImageCleanup finds image in trash and if found removes it
 // from trash.
-func (rv *rbdVolume) ensureImageCleanup(ctx context.Context) error {
-    trashInfoList, err := librbd.GetTrashList(rv.ioctx)
+func (ri *rbdImage) ensureImageCleanup(ctx context.Context) error {
+    trashInfoList, err := librbd.GetTrashList(ri.ioctx)
     if err != nil {
         log.ErrorLog(ctx, "failed to list images in trash: %v", err)
 
         return err
     }
     for _, val := range trashInfoList {
-        if val.Name == rv.RbdImageName {
-            rv.ImageID = val.Id
+        if val.Name == ri.RbdImageName {
+            ri.ImageID = val.Id
 
-            return rv.trashRemoveImage(ctx)
+            return ri.trashRemoveImage(ctx)
         }
     }
 

@@ -638,82 +639,82 @@ func (rv *rbdVolume) ensureImageCleanup(ctx context.Context) error {
 }
 
 // deleteImage deletes a ceph image with provision and volume options.
-func (rv *rbdVolume) deleteImage(ctx context.Context) error {
-    image := rv.RbdImageName
+func (ri *rbdImage) deleteImage(ctx context.Context) error {
+    image := ri.RbdImageName
 
-    log.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, rv.Monitors, rv.Pool)
+    log.DebugLog(ctx, "rbd: delete %s using mon %s, pool %s", image, ri.Monitors, ri.Pool)
 
     // Support deleting the older rbd images whose imageID is not stored in omap
-    err := rv.getImageID()
+    err := ri.getImageID()
     if err != nil {
         return err
     }
 
-    if rv.isEncrypted() {
-        log.DebugLog(ctx, "rbd: going to remove DEK for %q", rv)
-        if err = rv.encryption.RemoveDEK(rv.VolID); err != nil {
-            log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", rv.VolID, err)
+    if ri.isEncrypted() {
+        log.DebugLog(ctx, "rbd: going to remove DEK for %q", ri)
+        if err = ri.encryption.RemoveDEK(ri.VolID); err != nil {
+            log.WarningLog(ctx, "failed to clean the passphrase for volume %s: %s", ri.VolID, err)
         }
     }
 
-    err = rv.openIoctx()
+    err = ri.openIoctx()
     if err != nil {
         return err
     }
 
-    rbdImage := librbd.GetImage(rv.ioctx, image)
+    rbdImage := librbd.GetImage(ri.ioctx, image)
     err = rbdImage.Trash(0)
     if err != nil {
-        log.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", rv, err)
|
log.ErrorLog(ctx, "failed to delete rbd image: %s, error: %v", ri, err)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
return rv.trashRemoveImage(ctx)
|
return ri.trashRemoveImage(ctx)
|
||||||
}
|
}
|
||||||
|
|
||||||
// trashRemoveImage adds a task to trash remove an image using ceph manager if supported,
|
// trashRemoveImage adds a task to trash remove an image using ceph manager if supported,
|
||||||
// otherwise removes the image from trash.
|
// otherwise removes the image from trash.
|
||||||
func (rv *rbdVolume) trashRemoveImage(ctx context.Context) error {
|
func (ri *rbdImage) trashRemoveImage(ctx context.Context) error {
|
||||||
// attempt to use Ceph manager based deletion support if available
|
// attempt to use Ceph manager based deletion support if available
|
||||||
log.DebugLog(ctx, "rbd: adding task to remove image %q with id %q from trash", rv, rv.ImageID)
|
log.DebugLog(ctx, "rbd: adding task to remove image %q with id %q from trash", ri, ri.ImageID)
|
||||||
|
|
||||||
ta, err := rv.conn.GetTaskAdmin()
|
ta, err := ri.conn.GetTaskAdmin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = ta.AddTrashRemove(admin.NewImageSpec(rv.Pool, rv.RadosNamespace, rv.ImageID))
|
_, err = ta.AddTrashRemove(admin.NewImageSpec(ri.Pool, ri.RadosNamespace, ri.ImageID))
|
||||||
|
|
||||||
rbdCephMgrSupported := isCephMgrSupported(ctx, rv.ClusterID, err)
|
rbdCephMgrSupported := isCephMgrSupported(ctx, ri.ClusterID, err)
|
||||||
if rbdCephMgrSupported && err != nil {
|
if rbdCephMgrSupported && err != nil {
|
||||||
log.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", rv, err)
|
log.ErrorLog(ctx, "failed to add task to delete rbd image: %s, %v", ri, err)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
if !rbdCephMgrSupported {
|
if !rbdCephMgrSupported {
|
||||||
err = librbd.TrashRemove(rv.ioctx, rv.ImageID, true)
|
err = librbd.TrashRemove(ri.ioctx, ri.ImageID, true)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.ErrorLog(ctx, "failed to delete rbd image: %s, %v", rv, err)
|
log.ErrorLog(ctx, "failed to delete rbd image: %s, %v", ri, err)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
log.DebugLog(ctx, "rbd: successfully added task to move image %q with id %q to trash", rv, rv.ImageID)
|
log.DebugLog(ctx, "rbd: successfully added task to move image %q with id %q to trash", ri, ri.ImageID)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) getCloneDepth(ctx context.Context) (uint, error) {
|
func (ri *rbdImage) getCloneDepth(ctx context.Context) (uint, error) {
|
||||||
var depth uint
|
var depth uint
|
||||||
vol := rbdVolume{}
|
vol := rbdVolume{}
|
||||||
|
|
||||||
vol.Pool = rv.Pool
|
vol.Pool = ri.Pool
|
||||||
vol.Monitors = rv.Monitors
|
vol.Monitors = ri.Monitors
|
||||||
vol.RbdImageName = rv.RbdImageName
|
vol.RbdImageName = ri.RbdImageName
|
||||||
vol.conn = rv.conn.Copy()
|
vol.conn = ri.conn.Copy()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
if vol.RbdImageName == "" {
|
if vol.RbdImageName == "" {
|
||||||
@ -800,7 +801,7 @@ func flattenClonedRbdImages(
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) flattenRbdImage(
|
func (ri *rbdImage) flattenRbdImage(
|
||||||
ctx context.Context,
|
ctx context.Context,
|
||||||
forceFlatten bool,
|
forceFlatten bool,
|
||||||
hardlimit, softlimit uint) error {
|
hardlimit, softlimit uint) error {
|
||||||
@ -809,7 +810,7 @@ func (rv *rbdVolume) flattenRbdImage(
|
|||||||
|
|
||||||
// skip clone depth check if request is for force flatten
|
// skip clone depth check if request is for force flatten
|
||||||
if !forceFlatten {
|
if !forceFlatten {
|
||||||
depth, err = rv.getCloneDepth(ctx)
|
depth, err = ri.getCloneDepth(ctx)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -819,37 +820,37 @@ func (rv *rbdVolume) flattenRbdImage(
|
|||||||
depth,
|
depth,
|
||||||
softlimit,
|
softlimit,
|
||||||
hardlimit,
|
hardlimit,
|
||||||
rv)
|
ri)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !forceFlatten && (depth < hardlimit) && (depth < softlimit) {
|
if !forceFlatten && (depth < hardlimit) && (depth < softlimit) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
log.DebugLog(ctx, "rbd: adding task to flatten image %q", rv)
|
log.DebugLog(ctx, "rbd: adding task to flatten image %q", ri)
|
||||||
|
|
||||||
ta, err := rv.conn.GetTaskAdmin()
|
ta, err := ri.conn.GetTaskAdmin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
_, err = ta.AddFlatten(admin.NewImageSpec(rv.Pool, rv.RadosNamespace, rv.RbdImageName))
|
_, err = ta.AddFlatten(admin.NewImageSpec(ri.Pool, ri.RadosNamespace, ri.RbdImageName))
|
||||||
rbdCephMgrSupported := isCephMgrSupported(ctx, rv.ClusterID, err)
|
rbdCephMgrSupported := isCephMgrSupported(ctx, ri.ClusterID, err)
|
||||||
if rbdCephMgrSupported {
|
if rbdCephMgrSupported {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// discard flattening error if the image does not have any parent
|
// discard flattening error if the image does not have any parent
|
||||||
rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", rv.Pool, rv.RbdImageName)
|
rbdFlattenNoParent := fmt.Sprintf("Image %s/%s does not have a parent", ri.Pool, ri.RbdImageName)
|
||||||
if strings.Contains(err.Error(), rbdFlattenNoParent) {
|
if strings.Contains(err.Error(), rbdFlattenNoParent) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
log.ErrorLog(ctx, "failed to add task flatten for %s : %v", rv, err)
|
log.ErrorLog(ctx, "failed to add task flatten for %s : %v", ri, err)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if forceFlatten || depth >= hardlimit {
|
if forceFlatten || depth >= hardlimit {
|
||||||
return fmt.Errorf("%w: flatten is in progress for image %s", ErrFlattenInProgress, rv.RbdImageName)
|
return fmt.Errorf("%w: flatten is in progress for image %s", ErrFlattenInProgress, ri.RbdImageName)
|
||||||
}
|
}
|
||||||
log.DebugLog(ctx, "successfully added task to flatten image %q", rv)
|
log.DebugLog(ctx, "successfully added task to flatten image %q", ri)
|
||||||
}
|
}
|
||||||
if !rbdCephMgrSupported {
|
if !rbdCephMgrSupported {
|
||||||
log.ErrorLog(
|
log.ErrorLog(
|
||||||
@ -857,9 +858,9 @@ func (rv *rbdVolume) flattenRbdImage(
|
|||||||
"task manager does not support flatten,image will be flattened once hardlimit is reached: %v",
|
"task manager does not support flatten,image will be flattened once hardlimit is reached: %v",
|
||||||
err)
|
err)
|
||||||
if forceFlatten || depth >= hardlimit {
|
if forceFlatten || depth >= hardlimit {
|
||||||
err := rv.flatten()
|
err := ri.flatten()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", rv.Pool, rv.RbdImageName, err)
|
log.ErrorLog(ctx, "rbd failed to flatten image %s %s: %v", ri.Pool, ri.RbdImageName, err)
|
||||||
|
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -869,8 +870,8 @@ func (rv *rbdVolume) flattenRbdImage(
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) getParentName() (string, error) {
|
func (ri *rbdImage) getParentName() (string, error) {
|
||||||
rbdImage, err := rv.open()
|
rbdImage, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -884,8 +885,8 @@ func (rv *rbdVolume) getParentName() (string, error) {
|
|||||||
return parentInfo.Image.ImageName, nil
|
return parentInfo.Image.ImageName, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) flatten() error {
|
func (ri *rbdImage) flatten() error {
|
||||||
rbdImage, err := rv.open()
|
rbdImage, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -894,7 +895,7 @@ func (rv *rbdVolume) flatten() error {
|
|||||||
err = rbdImage.Flatten()
|
err = rbdImage.Flatten()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// rbd image flatten will fail if the rbd image does not have a parent
|
// rbd image flatten will fail if the rbd image does not have a parent
|
||||||
parent, pErr := rv.getParentName()
|
parent, pErr := ri.getParentName()
|
||||||
if pErr != nil {
|
if pErr != nil {
|
||||||
return util.JoinErrors(err, pErr)
|
return util.JoinErrors(err, pErr)
|
||||||
}
|
}
|
||||||
@ -906,33 +907,33 @@ func (rv *rbdVolume) flatten() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) hasFeature(feature uint64) bool {
|
func (ri *rbdImage) hasFeature(feature uint64) bool {
|
||||||
return (uint64(rv.ImageFeatureSet) & feature) == feature
|
return (uint64(ri.ImageFeatureSet) & feature) == feature
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) checkImageChainHasFeature(ctx context.Context, feature uint64) (bool, error) {
|
func (ri *rbdImage) checkImageChainHasFeature(ctx context.Context, feature uint64) (bool, error) {
|
||||||
vol := rbdVolume{}
|
rbdImg := rbdImage{}
|
||||||
|
|
||||||
vol.Pool = rv.Pool
|
rbdImg.Pool = ri.Pool
|
||||||
vol.RadosNamespace = rv.RadosNamespace
|
rbdImg.RadosNamespace = ri.RadosNamespace
|
||||||
vol.Monitors = rv.Monitors
|
rbdImg.Monitors = ri.Monitors
|
||||||
vol.RbdImageName = rv.RbdImageName
|
rbdImg.RbdImageName = ri.RbdImageName
|
||||||
vol.conn = rv.conn.Copy()
|
rbdImg.conn = ri.conn.Copy()
|
||||||
|
|
||||||
for {
|
for {
|
||||||
if vol.RbdImageName == "" {
|
if rbdImg.RbdImageName == "" {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
err := vol.openIoctx()
|
err := rbdImg.openIoctx()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
|
|
||||||
err = vol.getImageInfo()
|
err = rbdImg.getImageInfo()
|
||||||
// FIXME: create and destroy the vol inside the loop.
|
// FIXME: create and destroy the vol inside the loop.
|
||||||
// see https://github.com/ceph/ceph-csi/pull/1838#discussion_r598530807
|
// see https://github.com/ceph/ceph-csi/pull/1838#discussion_r598530807
|
||||||
vol.ioctx.Destroy()
|
rbdImg.ioctx.Destroy()
|
||||||
vol.ioctx = nil
|
rbdImg.ioctx = nil
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// call to getImageInfo returns the parent name even if the parent
|
// call to getImageInfo returns the parent name even if the parent
|
||||||
// is in the trash, when we try to open the parent image to get its
|
// is in the trash, when we try to open the parent image to get its
|
||||||
@ -941,15 +942,15 @@ func (rv *rbdVolume) checkImageChainHasFeature(ctx context.Context, feature uint
|
|||||||
if errors.Is(err, ErrImageNotFound) {
|
if errors.Is(err, ErrImageNotFound) {
|
||||||
return false, nil
|
return false, nil
|
||||||
}
|
}
|
||||||
log.ErrorLog(ctx, "failed to get image info for %s: %s", vol.String(), err)
|
log.ErrorLog(ctx, "failed to get image info for %s: %s", rbdImg.String(), err)
|
||||||
|
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
if f := vol.hasFeature(feature); f {
|
if f := rbdImg.hasFeature(feature); f {
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
vol.RbdImageName = vol.ParentName
|
rbdImg.RbdImageName = rbdImg.ParentName
|
||||||
vol.Pool = vol.ParentPool
|
rbdImg.Pool = rbdImg.ParentPool
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1367,13 +1368,13 @@ func genSnapFromOptions(ctx context.Context, rbdVol *rbdVolume, snapOptions map[
|
|||||||
}
|
}
|
||||||
|
|
||||||
// hasSnapshotFeature checks if Layering is enabled for this image.
|
// hasSnapshotFeature checks if Layering is enabled for this image.
|
||||||
func (rv *rbdVolume) hasSnapshotFeature() bool {
|
func (ri *rbdImage) hasSnapshotFeature() bool {
|
||||||
return (uint64(rv.ImageFeatureSet) & librbd.FeatureLayering) == librbd.FeatureLayering
|
return (uint64(ri.ImageFeatureSet) & librbd.FeatureLayering) == librbd.FeatureLayering
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
|
func (ri *rbdImage) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
|
||||||
log.DebugLog(ctx, "rbd: snap create %s using mon %s", pOpts, pOpts.Monitors)
|
log.DebugLog(ctx, "rbd: snap create %s using mon %s", pOpts, pOpts.Monitors)
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1384,9 +1385,9 @@ func (rv *rbdVolume) createSnapshot(ctx context.Context, pOpts *rbdSnapshot) err
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
|
func (ri *rbdImage) deleteSnapshot(ctx context.Context, pOpts *rbdSnapshot) error {
|
||||||
log.DebugLog(ctx, "rbd: snap rm %s using mon %s", pOpts, pOpts.Monitors)
|
log.DebugLog(ctx, "rbd: snap rm %s using mon %s", pOpts, pOpts.Monitors)
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1497,8 +1498,8 @@ func (rv *rbdVolume) cloneRbdImageFromSnapshot(
|
|||||||
|
|
||||||
// getImageInfo queries rbd about the given image and returns its metadata, and returns
|
// getImageInfo queries rbd about the given image and returns its metadata, and returns
|
||||||
// ErrImageNotFound if provided image is not found.
|
// ErrImageNotFound if provided image is not found.
|
||||||
func (rv *rbdVolume) getImageInfo() error {
|
func (ri *rbdImage) getImageInfo() error {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1509,13 +1510,13 @@ func (rv *rbdVolume) getImageInfo() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
// TODO: can rv.VolSize not be a uint64? Or initialize it to -1?
|
// TODO: can rv.VolSize not be a uint64? Or initialize it to -1?
|
||||||
rv.VolSize = int64(imageInfo.Size)
|
ri.VolSize = int64(imageInfo.Size)
|
||||||
|
|
||||||
features, err := image.GetFeatures()
|
features, err := image.GetFeatures()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rv.ImageFeatureSet = librbd.FeatureSet(features)
|
ri.ImageFeatureSet = librbd.FeatureSet(features)
|
||||||
|
|
||||||
// Get parent information.
|
// Get parent information.
|
||||||
parentInfo, err := image.GetParent()
|
parentInfo, err := image.GetParent()
|
||||||
@ -1523,13 +1524,13 @@ func (rv *rbdVolume) getImageInfo() error {
|
|||||||
// Caller should decide whether not finding
|
// Caller should decide whether not finding
|
||||||
// the parent is an error or not.
|
// the parent is an error or not.
|
||||||
if errors.Is(err, librbd.ErrNotFound) {
|
if errors.Is(err, librbd.ErrNotFound) {
|
||||||
rv.ParentName = ""
|
ri.ParentName = ""
|
||||||
} else {
|
} else {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
rv.ParentName = parentInfo.Image.ImageName
|
ri.ParentName = parentInfo.Image.ImageName
|
||||||
rv.ParentPool = parentInfo.Image.PoolName
|
ri.ParentPool = parentInfo.Image.PoolName
|
||||||
}
|
}
|
||||||
// Get image creation time
|
// Get image creation time
|
||||||
tm, err := image.GetCreateTimestamp()
|
tm, err := image.GetCreateTimestamp()
|
||||||
@ -1541,7 +1542,7 @@ func (rv *rbdVolume) getImageInfo() error {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
rv.CreatedAt = protoTime
|
ri.CreatedAt = protoTime
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -1551,8 +1552,8 @@ checkSnapExists queries rbd about the snapshots of the given image and returns
|
|||||||
ErrImageNotFound if provided image is not found, and ErrSnapNotFound if
|
ErrImageNotFound if provided image is not found, and ErrSnapNotFound if
|
||||||
provided snap is not found in the images snapshot list.
|
provided snap is not found in the images snapshot list.
|
||||||
*/
|
*/
|
||||||
func (rv *rbdVolume) checkSnapExists(rbdSnap *rbdSnapshot) error {
|
func (ri *rbdImage) checkSnapExists(rbdSnap *rbdSnapshot) error {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1703,14 +1704,14 @@ func (rv *rbdVolume) expand() error {
|
|||||||
|
|
||||||
// resize the given volume to new size.
|
// resize the given volume to new size.
|
||||||
// updates Volsize of rbdVolume object to newSize in case of success.
|
// updates Volsize of rbdVolume object to newSize in case of success.
|
||||||
func (rv *rbdVolume) resize(newSize int64) error {
|
func (ri *rbdImage) resize(newSize int64) error {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
defer image.Close()
|
defer image.Close()
|
||||||
|
|
||||||
thick, err := rv.isThickProvisioned()
|
thick, err := ri.isThickProvisioned()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1733,7 +1734,7 @@ func (rv *rbdVolume) resize(newSize int64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if thick {
|
if thick {
|
||||||
err = rv.allocate(offset)
|
err = ri.allocate(offset)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
resizeErr := image.Resize(offset)
|
resizeErr := image.Resize(offset)
|
||||||
if resizeErr != nil {
|
if resizeErr != nil {
|
||||||
@ -1745,7 +1746,7 @@ func (rv *rbdVolume) resize(newSize int64) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// update Volsize of rbdVolume object to newSize.
|
// update Volsize of rbdVolume object to newSize.
|
||||||
rv.VolSize = newSize
|
ri.VolSize = newSize
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -1851,35 +1852,35 @@ func (ri *rbdImage) isThickProvisioned() (bool, error) {
|
|||||||
// RepairThickProvision writes zero bytes to the volume so that it will be
|
// RepairThickProvision writes zero bytes to the volume so that it will be
|
||||||
// completely allocated. In case the volume is already marked as
|
// completely allocated. In case the volume is already marked as
|
||||||
// thick-provisioned, nothing will be done.
|
// thick-provisioned, nothing will be done.
|
||||||
func (rv *rbdVolume) RepairThickProvision() error {
|
func (ri *rbdImage) RepairThickProvision() error {
|
||||||
// if the image has the thick-provisioned metadata, it has been fully
|
// if the image has the thick-provisioned metadata, it has been fully
|
||||||
// allocated
|
// allocated
|
||||||
done, err := rv.isThickProvisioned()
|
done, err := ri.isThickProvisioned()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to repair thick-provisioning of %q: %w", rv, err)
|
return fmt.Errorf("failed to repair thick-provisioning of %q: %w", ri, err)
|
||||||
} else if done {
|
} else if done {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// in case there are watchers, assume allocating is still happening in
|
// in case there are watchers, assume allocating is still happening in
|
||||||
// the background (by an other process?)
|
// the background (by an other process?)
|
||||||
background, err := rv.isInUse()
|
background, err := ri.isInUse()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to get users of %q: %w", rv, err)
|
return fmt.Errorf("failed to get users of %q: %w", ri, err)
|
||||||
} else if background {
|
} else if background {
|
||||||
return fmt.Errorf("not going to restart thick-provisioning of in-use image %q", rv)
|
return fmt.Errorf("not going to restart thick-provisioning of in-use image %q", ri)
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: can this be improved by starting at the offset where
|
// TODO: can this be improved by starting at the offset where
|
||||||
// allocating was aborted/restarted?
|
// allocating was aborted/restarted?
|
||||||
err = rv.allocate(0)
|
err = ri.allocate(0)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to continue thick-provisioning of %q: %w", rv, err)
|
return fmt.Errorf("failed to continue thick-provisioning of %q: %w", ri, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
err = rv.setThickProvisioned()
|
err = ri.setThickProvisioned()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return fmt.Errorf("failed to continue thick-provisioning of %q: %w", rv, err)
|
return fmt.Errorf("failed to continue thick-provisioning of %q: %w", ri, err)
|
||||||
}
|
}
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
@ -1887,7 +1888,7 @@ func (rv *rbdVolume) RepairThickProvision() error {
|
|||||||
|
|
||||||
// DeepCopy creates an independent image (dest) from the source image. This
|
// DeepCopy creates an independent image (dest) from the source image. This
|
||||||
// process may take some time when the image is large.
|
// process may take some time when the image is large.
|
||||||
func (rv *rbdVolume) DeepCopy(dest *rbdVolume) error {
|
func (ri *rbdImage) DeepCopy(dest *rbdImage) error {
|
||||||
opts := librbd.NewRbdImageOptions()
|
opts := librbd.NewRbdImageOptions()
|
||||||
defer opts.Destroy()
|
defer opts.Destroy()
|
||||||
|
|
||||||
@ -1902,7 +1903,7 @@ func (rv *rbdVolume) DeepCopy(dest *rbdVolume) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1918,8 +1919,8 @@ func (rv *rbdVolume) DeepCopy(dest *rbdVolume) error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// DisableDeepFlatten removed the deep-flatten feature from the image.
|
// DisableDeepFlatten removed the deep-flatten feature from the image.
|
||||||
func (rv *rbdVolume) DisableDeepFlatten() error {
|
func (ri *rbdImage) DisableDeepFlatten() error {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
@ -1928,8 +1929,8 @@ func (rv *rbdVolume) DisableDeepFlatten() error {
|
|||||||
return image.UpdateFeatures(librbd.FeatureDeepFlatten, false)
|
return image.UpdateFeatures(librbd.FeatureDeepFlatten, false)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (rv *rbdVolume) listSnapshots() ([]librbd.SnapInfo, error) {
|
func (ri *rbdImage) listSnapshots() ([]librbd.SnapInfo, error) {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -1944,8 +1945,8 @@ func (rv *rbdVolume) listSnapshots() ([]librbd.SnapInfo, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// isTrashSnap returns true if the snapshot belongs to trash namespace.
|
// isTrashSnap returns true if the snapshot belongs to trash namespace.
|
||||||
func (rv *rbdVolume) isTrashSnap(snapID uint64) (bool, error) {
|
func (ri *rbdImage) isTrashSnap(snapID uint64) (bool, error) {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return false, err
|
return false, err
|
||||||
}
|
}
|
||||||
@ -1966,8 +1967,8 @@ func (rv *rbdVolume) isTrashSnap(snapID uint64) (bool, error) {
|
|||||||
|
|
||||||
// getOrigSnapName returns the original snap name for
|
// getOrigSnapName returns the original snap name for
|
||||||
// the snapshots in Trash Namespace.
|
// the snapshots in Trash Namespace.
|
||||||
func (rv *rbdVolume) getOrigSnapName(snapID uint64) (string, error) {
|
func (ri *rbdImage) getOrigSnapName(snapID uint64) (string, error) {
|
||||||
image, err := rv.open()
|
image, err := ri.open()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return "", err
|
return "", err
|
||||||
}
|
}
|
||||||
@ -1993,6 +1994,17 @@ func (ri *rbdImage) isCompatibleEncryption(dst *rbdImage) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (ri *rbdImage) isCompabitableClone(dst *rbdImage) error {
|
||||||
|
if dst.VolSize < ri.VolSize {
|
||||||
|
return fmt.Errorf(
|
||||||
|
"volume size %d is smaller than source volume size %d",
|
||||||
|
dst.VolSize,
|
||||||
|
ri.VolSize)
|
||||||
|
}
|
||||||
|
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
func (ri *rbdImage) isCompatibleThickProvision(dst *rbdVolume) error {
|
func (ri *rbdImage) isCompatibleThickProvision(dst *rbdVolume) error {
|
||||||
thick, err := ri.isThickProvisioned()
|
thick, err := ri.isThickProvisioned()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
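Note: the new isCompabitableClone helper above only rejects clones whose requested size is smaller than the source image. A minimal standalone sketch of that size rule, assuming a simplified imageMeta stand-in rather than the driver's actual rbdImage struct:

```go
package main

import "fmt"

// imageMeta is an illustrative stand-in; only the size matters for this check.
type imageMeta struct {
	Name    string
	VolSize int64 // bytes
}

// checkCloneSize mirrors the rule in the diff: a clone destination may be
// equal to or larger than the source, never smaller.
func checkCloneSize(src, dst imageMeta) error {
	if dst.VolSize < src.VolSize {
		return fmt.Errorf("volume size %d is smaller than source volume size %d",
			dst.VolSize, src.VolSize)
	}
	return nil
}

func main() {
	src := imageMeta{Name: "source", VolSize: 1 << 30}
	dst := imageMeta{Name: "clone", VolSize: 512 << 20}
	if err := checkCloneSize(src, dst); err != nil {
		fmt.Println("rejected:", err)
	}
}
```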
8	vendor/github.com/cespare/xxhash/v2/.travis.yml (generated, vendored)
@@ -1,8 +0,0 @@
-language: go
-go:
-  - "1.x"
-  - master
-env:
-  - TAGS=""
-  - TAGS="-tags purego"
-script: go test $TAGS -v ./...
6	vendor/github.com/cespare/xxhash/v2/README.md (generated, vendored)
@@ -1,7 +1,7 @@
 # xxhash

-[![GoDoc](https://godoc.org/github.com/cespare/xxhash?status.svg)](https://godoc.org/github.com/cespare/xxhash)
-[![Build Status](https://travis-ci.org/cespare/xxhash.svg?branch=master)](https://travis-ci.org/cespare/xxhash)
+[![Go Reference](https://pkg.go.dev/badge/github.com/cespare/xxhash/v2.svg)](https://pkg.go.dev/github.com/cespare/xxhash/v2)
+[![Test](https://github.com/cespare/xxhash/actions/workflows/test.yml/badge.svg)](https://github.com/cespare/xxhash/actions/workflows/test.yml)

 xxhash is a Go implementation of the 64-bit
 [xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'

 - [InfluxDB](https://github.com/influxdata/influxdb)
 - [Prometheus](https://github.com/prometheus/prometheus)
+- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
 - [FreeCache](https://github.com/coocood/freecache)
+- [FastCache](https://github.com/VictoriaMetrics/fastcache)
1	vendor/github.com/cespare/xxhash/v2/xxhash.go (generated, vendored)
@@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
 	b, d.v4 = consumeUint64(b)
 	b, d.total = consumeUint64(b)
 	copy(d.mem[:], b)
-	b = b[len(d.mem):]
 	d.n = int(d.total % uint64(len(d.mem)))
 	return nil
 }
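The removed line only changes how Digest.UnmarshalBinary restores its buffered state; the exported marshal/unmarshal round trip is unchanged. A hedged usage sketch with the xxhash v2 API:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("hello ")

	// Snapshot the digest state, e.g. to persist a partially computed hash.
	state, err := d.MarshalBinary()
	if err != nil {
		panic(err)
	}

	// Restore into a fresh digest and keep hashing.
	d2 := xxhash.New()
	if err := d2.UnmarshalBinary(state); err != nil {
		panic(err)
	}
	d2.WriteString("world")

	fmt.Printf("%#016x\n", d2.Sum64()) // same value as xxhash.Sum64String("hello world")
}
```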
62	vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s (generated, vendored)
@@ -6,7 +6,7 @@

 // Register allocation:
 // AX	h
-// CX	pointer to advance through b
+// SI	pointer to advance through b
 // DX	n
 // BX	loop end
 // R8	v1, k1
@@ -16,39 +16,39 @@
 // R12	tmp
 // R13	prime1v
 // R14	prime2v
-// R15	prime4v
+// DI	prime4v

-// round reads from and advances the buffer pointer in CX.
+// round reads from and advances the buffer pointer in SI.
 // It assumes that R13 has prime1v and R14 has prime2v.
 #define round(r) \
-	MOVQ (CX), R12 \
-	ADDQ $8, CX \
+	MOVQ (SI), R12 \
+	ADDQ $8, SI \
 	IMULQ R14, R12 \
 	ADDQ R12, r \
 	ROLQ $31, r \
 	IMULQ R13, r

 // mergeRound applies a merge round on the two registers acc and val.
-// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
+// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
 #define mergeRound(acc, val) \
 	IMULQ R14, val \
 	ROLQ $31, val \
 	IMULQ R13, val \
 	XORQ val, acc \
 	IMULQ R13, acc \
-	ADDQ R15, acc
+	ADDQ DI, acc

 // func Sum64(b []byte) uint64
 TEXT ·Sum64(SB), NOSPLIT, $0-32
 	// Load fixed primes.
 	MOVQ ·prime1v(SB), R13
 	MOVQ ·prime2v(SB), R14
-	MOVQ ·prime4v(SB), R15
+	MOVQ ·prime4v(SB), DI

 	// Load slice.
-	MOVQ b_base+0(FP), CX
+	MOVQ b_base+0(FP), SI
 	MOVQ b_len+8(FP), DX
-	LEAQ (CX)(DX*1), BX
+	LEAQ (SI)(DX*1), BX

 	// The first loop limit will be len(b)-32.
 	SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
 	XORQ R11, R11
 	SUBQ R13, R11

-	// Loop until CX > BX.
+	// Loop until SI > BX.
 blockLoop:
 	round(R8)
 	round(R9)
 	round(R10)
 	round(R11)

-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE blockLoop

 	MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
 afterBlocks:
 	ADDQ DX, AX

-	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
+	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
 	ADDQ $24, BX

-	CMPQ CX, BX
+	CMPQ SI, BX
 	JG fourByte

wordLoop:
 	// Calculate k1.
-	MOVQ (CX), R8
-	ADDQ $8, CX
+	MOVQ (SI), R8
+	ADDQ $8, SI
 	IMULQ R14, R8
 	ROLQ $31, R8
 	IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
 	XORQ R8, AX
 	ROLQ $27, AX
 	IMULQ R13, AX
-	ADDQ R15, AX
+	ADDQ DI, AX

-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE wordLoop

fourByte:
 	ADDQ $4, BX
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JG singles

-	MOVL (CX), R8
-	ADDQ $4, CX
+	MOVL (SI), R8
+	ADDQ $4, SI
 	IMULQ R13, R8
 	XORQ R8, AX

@@ -138,19 +138,19 @@ fourByte:

singles:
 	ADDQ $4, BX
-	CMPQ CX, BX
+	CMPQ SI, BX
 	JGE finalize

singlesLoop:
-	MOVBQZX (CX), R12
-	ADDQ $1, CX
+	MOVBQZX (SI), R12
+	ADDQ $1, SI
 	IMULQ ·prime5v(SB), R12
 	XORQ R12, AX

 	ROLQ $11, AX
 	IMULQ R13, AX

-	CMPQ CX, BX
+	CMPQ SI, BX
 	JL singlesLoop

finalize:
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
 	MOVQ ·prime2v(SB), R14

 	// Load slice.
-	MOVQ b_base+8(FP), CX
+	MOVQ b_base+8(FP), SI
 	MOVQ b_len+16(FP), DX
-	LEAQ (CX)(DX*1), BX
+	LEAQ (SI)(DX*1), BX
 	SUBQ $32, BX

 	// Load vN from d.
@@ -199,7 +199,7 @@ blockLoop:
 	round(R10)
 	round(R11)

-	CMPQ CX, BX
+	CMPQ SI, BX
 	JLE blockLoop

 	// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
 	MOVQ R10, 16(AX)
 	MOVQ R11, 24(AX)

-	// The number of bytes written is CX minus the old base pointer.
-	SUBQ b_base+8(FP), CX
-	MOVQ CX, ret+32(FP)
+	// The number of bytes written is SI minus the old base pointer.
+	SUBQ b_base+8(FP), SI
+	MOVQ SI, ret+32(FP)

 	RET
55	vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go (generated, vendored)
@@ -6,41 +6,52 @@
 package xxhash

 import (
-	"reflect"
 	"unsafe"
 )

-// Notes:
-//
-// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
-// for some discussion about these unsafe conversions.
-//
 // In the future it's possible that compiler optimizations will make these
-// unsafe operations unnecessary: https://golang.org/issue/2205.
+// XxxString functions unnecessary by realizing that calls such as
+// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
+// If that happens, even if we keep these functions they can be replaced with
+// the trivial safe code.
+
+// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
 //
-// Both of these wrapper functions still incur function call overhead since they
-// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
-// for strings to squeeze out a bit more speed. Mid-stack inlining should
-// eventually fix this.
+// var b []byte
+// bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
+// bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
+// bh.Len = len(s)
+// bh.Cap = len(s)
+//
+// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
+// weight to this sequence of expressions that any function that uses it will
+// not be inlined. Instead, the functions below use a different unsafe
+// conversion designed to minimize the inliner weight and allow both to be
+// inlined. There is also a test (TestInlining) which verifies that these are
+// inlined.
+//
+// See https://github.com/golang/go/issues/42739 for discussion.

 // Sum64String computes the 64-bit xxHash digest of s.
 // It may be faster than Sum64([]byte(s)) by avoiding a copy.
 func Sum64String(s string) uint64 {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
+	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
 	return Sum64(b)
 }

 // WriteString adds more data to d. It always returns len(s), nil.
 // It may be faster than Write([]byte(s)) by avoiding a copy.
 func (d *Digest) WriteString(s string) (n int, err error) {
-	var b []byte
-	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
-	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
-	bh.Len = len(s)
-	bh.Cap = len(s)
-	return d.Write(b)
+	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
+	// d.Write always returns len(s), nil.
+	// Ignoring the return output and returning these fixed values buys a
+	// savings of 6 in the inliner's cost model.
+	return len(s), nil
+}
+
+// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
+// of the first two words is the same as the layout of a string.
+type sliceHeader struct {
+	s   string
+	cap int
 }
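The rewrite above only swaps the unsafe string-to-[]byte conversion so that Sum64String and WriteString can be inlined; callers see identical results. A short usage sketch of those two entry points:

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	// One-shot hashing of a string without copying it into a []byte.
	fmt.Printf("%#016x\n", xxhash.Sum64String("ceph-csi"))

	// Streaming equivalent using WriteString.
	d := xxhash.New()
	d.WriteString("ceph")
	d.WriteString("-csi")
	fmt.Printf("%#016x\n", d.Sum64()) // matches the one-shot value
}
```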
2	vendor/github.com/prometheus/client_golang/prometheus/README.md (generated, vendored)
@@ -1 +1 @@
-See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus).
+See [![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/client_golang/prometheus.svg)](https://pkg.go.dev/github.com/prometheus/client_golang/prometheus).
38	vendor/github.com/prometheus/client_golang/prometheus/build_info_collector.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package prometheus
+
+import "runtime/debug"
+
+// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
+// See there for documentation.
+//
+// Deprecated: Use collectors.NewBuildInfoCollector instead.
+func NewBuildInfoCollector() Collector {
+	path, version, sum := "unknown", "unknown", "unknown"
+	if bi, ok := debug.ReadBuildInfo(); ok {
+		path = bi.Main.Path
+		version = bi.Main.Version
+		sum = bi.Main.Sum
+	}
+	c := &selfCollector{MustNewConstMetric(
+		NewDesc(
+			"go_build_info",
+			"Build information about the main Go module.",
+			nil, Labels{"path": path, "version": version, "checksum": sum},
+		),
+		GaugeValue, 1)}
+	c.init(c.self)
+	return c
+}
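The file above keeps prometheus.NewBuildInfoCollector only as a deprecated shim; its doc comment points at the collectors package. A hedged sketch of the recommended registration path (the listen address is an arbitrary example):

```go
package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/collectors"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	reg := prometheus.NewRegistry()
	// Preferred over the deprecated prometheus.NewBuildInfoCollector shim.
	reg.MustRegister(collectors.NewBuildInfoCollector())

	// go_build_info{path=...,version=...,checksum=...} 1 is exposed on /metrics.
	http.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	log.Fatal(http.ListenAndServe(":2112", nil))
}
```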
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build go1.15
 // +build go1.15

 package collectors
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+//go:build !go1.15
 // +build !go1.15

 package collectors
8	vendor/github.com/prometheus/client_golang/prometheus/counter.go (generated, vendored)
@@ -133,10 +133,14 @@ func (c *counter) Inc() {
 	atomic.AddUint64(&c.valInt, 1)
 }

-func (c *counter) Write(out *dto.Metric) error {
+func (c *counter) get() float64 {
 	fval := math.Float64frombits(atomic.LoadUint64(&c.valBits))
 	ival := atomic.LoadUint64(&c.valInt)
-	val := fval + float64(ival)
+	return fval + float64(ival)
+}
+
+func (c *counter) Write(out *dto.Metric) error {
+	val := c.get()

 	var exemplar *dto.Exemplar
 	if e := c.exemplar.Load(); e != nil {
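The counter change only factors the value read into a get helper that Write now calls; the public Counter API is unchanged. A small reminder of that API, assuming the split between integer and float value parts shown above (Inc and whole-number Add take the integer path, fractional Add the float path):

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "example_operations_total",
		Help: "Example counter.",
	})
	c.Inc()    // integer increment
	c.Add(2)   // still integer
	c.Add(0.5) // float increment

	var m dto.Metric
	if err := c.Write(&m); err != nil { // Write reads the combined value
		panic(err)
	}
	fmt.Println(m.GetCounter().GetValue()) // 3.5
}
```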
164	vendor/github.com/prometheus/client_golang/prometheus/go_collector.go (generated, vendored)
@@ -16,53 +16,11 @@ package prometheus
 import (
 	"runtime"
 	"runtime/debug"
-	"sync"
 	"time"
 )

-type goCollector struct {
-	goroutinesDesc *Desc
-	threadsDesc    *Desc
-	gcDesc         *Desc
-	goInfoDesc     *Desc
-
-	// ms... are memstats related.
-	msLast          *runtime.MemStats // Previously collected memstats.
-	msLastTimestamp time.Time
-	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
-	msMetrics       memStatsMetrics
-	msRead          func(*runtime.MemStats) // For mocking in tests.
-	msMaxWait       time.Duration            // Wait time for fresh memstats.
-	msMaxAge        time.Duration            // Maximum allowed age of old memstats.
-}
-
-// NewGoCollector is the obsolete version of collectors.NewGoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewGoCollector instead.
-func NewGoCollector() Collector {
-	return &goCollector{
-		goroutinesDesc: NewDesc(
-			"go_goroutines",
-			"Number of goroutines that currently exist.",
-			nil, nil),
-		threadsDesc: NewDesc(
-			"go_threads",
-			"Number of OS threads created.",
-			nil, nil),
-		gcDesc: NewDesc(
-			"go_gc_duration_seconds",
-			"A summary of the pause duration of garbage collection cycles.",
-			nil, nil),
-		goInfoDesc: NewDesc(
-			"go_info",
-			"Information about the Go environment.",
-			nil, Labels{"version": runtime.Version()}),
-		msLast:    &runtime.MemStats{},
-		msRead:    runtime.ReadMemStats,
-		msMaxWait: time.Second,
-		msMaxAge:  5 * time.Minute,
-		msMetrics: memStatsMetrics{
+func goRuntimeMemStats() memStatsMetrics {
+	return memStatsMetrics{
 		{
 			desc: NewDesc(
 				memstatNamespace("alloc_bytes"),
@@ -239,14 +197,6 @@ func NewGoCollector() Collector {
 			),
 			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) },
 			valType: GaugeValue,
-		}, {
-			desc: NewDesc(
-				memstatNamespace("last_gc_time_seconds"),
-				"Number of seconds since 1970 of last garbage collection.",
-				nil, nil,
-			),
-			eval:    func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 },
-			valType: GaugeValue,
 		}, {
 			desc: NewDesc(
 				memstatNamespace("gc_cpu_fraction"),
@@ -256,41 +206,53 @@ func NewGoCollector() Collector {
 			eval:    func(ms *runtime.MemStats) float64 { return ms.GCCPUFraction },
 			valType: GaugeValue,
 		},
-		},
 	}
 }

-func memstatNamespace(s string) string {
-	return "go_memstats_" + s
+type baseGoCollector struct {
+	goroutinesDesc *Desc
+	threadsDesc    *Desc
+	gcDesc         *Desc
+	gcLastTimeDesc *Desc
+	goInfoDesc     *Desc
+}
+
+func newBaseGoCollector() baseGoCollector {
+	return baseGoCollector{
+		goroutinesDesc: NewDesc(
+			"go_goroutines",
+			"Number of goroutines that currently exist.",
+			nil, nil),
+		threadsDesc: NewDesc(
+			"go_threads",
+			"Number of OS threads created.",
+			nil, nil),
+		gcDesc: NewDesc(
+			"go_gc_duration_seconds",
+			"A summary of the pause duration of garbage collection cycles.",
+			nil, nil),
+		gcLastTimeDesc: NewDesc(
+			memstatNamespace("last_gc_time_seconds"),
+			"Number of seconds since 1970 of last garbage collection.",
+			nil, nil),
+		goInfoDesc: NewDesc(
+			"go_info",
+			"Information about the Go environment.",
+			nil, Labels{"version": runtime.Version()}),
+	}
 }

 // Describe returns all descriptions of the collector.
-func (c *goCollector) Describe(ch chan<- *Desc) {
+func (c *baseGoCollector) Describe(ch chan<- *Desc) {
 	ch <- c.goroutinesDesc
 	ch <- c.threadsDesc
 	ch <- c.gcDesc
+	ch <- c.gcLastTimeDesc
 	ch <- c.goInfoDesc
-	for _, i := range c.msMetrics {
-		ch <- i.desc
-	}
 }

 // Collect returns the current state of all metrics of the collector.
-func (c *goCollector) Collect(ch chan<- Metric) {
-	var (
-		ms   = &runtime.MemStats{}
-		done = make(chan struct{})
-	)
-	// Start reading memstats first as it might take a while.
-	go func() {
-		c.msRead(ms)
-		c.msMtx.Lock()
-		c.msLast = ms
-		c.msLastTimestamp = time.Now()
-		c.msMtx.Unlock()
-		close(done)
-	}()
-
+func (c *baseGoCollector) Collect(ch chan<- Metric) {
 	ch <- MustNewConstMetric(c.goroutinesDesc, GaugeValue, float64(runtime.NumGoroutine()))
 	n, _ := runtime.ThreadCreateProfile(nil)
 	ch <- MustNewConstMetric(c.threadsDesc, GaugeValue, float64(n))
@@ -305,63 +267,19 @@ func (c *goCollector) Collect(ch chan<- Metric) {
 	}
 	quantiles[0.0] = stats.PauseQuantiles[0].Seconds()
 	ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), stats.PauseTotal.Seconds(), quantiles)
+	ch <- MustNewConstMetric(c.gcLastTimeDesc, GaugeValue, float64(stats.LastGC.UnixNano())/1e9)

 	ch <- MustNewConstMetric(c.goInfoDesc, GaugeValue, 1)
-
-	timer := time.NewTimer(c.msMaxWait)
-	select {
-	case <-done: // Our own ReadMemStats succeeded in time. Use it.
-		timer.Stop() // Important for high collection frequencies to not pile up timers.
-		c.msCollect(ch, ms)
-		return
-	case <-timer.C: // Time out, use last memstats if possible. Continue below.
-	}
-	c.msMtx.Lock()
-	if time.Since(c.msLastTimestamp) < c.msMaxAge {
-		// Last memstats are recent enough. Collect from them under the lock.
-		c.msCollect(ch, c.msLast)
-		c.msMtx.Unlock()
-		return
-	}
-	// If we are here, the last memstats are too old or don't exist. We have
-	// to wait until our own ReadMemStats finally completes. For that to
-	// happen, we have to release the lock.
-	c.msMtx.Unlock()
-	<-done
-	c.msCollect(ch, ms)
 }

-func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
-	for _, i := range c.msMetrics {
-		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
-	}
+func memstatNamespace(s string) string {
+	return "go_memstats_" + s
 }

-// memStatsMetrics provide description, value, and value type for memstat metrics.
+// memStatsMetrics provide description, evaluator, runtime/metrics name, and
+// value type for memstat metrics.
 type memStatsMetrics []struct {
 	desc    *Desc
 	eval    func(*runtime.MemStats) float64
 	valType ValueType
 }
-
-// NewBuildInfoCollector is the obsolete version of collectors.NewBuildInfoCollector.
-// See there for documentation.
-//
-// Deprecated: Use collectors.NewBuildInfoCollector instead.
-func NewBuildInfoCollector() Collector {
-	path, version, sum := "unknown", "unknown", "unknown"
-	if bi, ok := debug.ReadBuildInfo(); ok {
-		path = bi.Main.Path
-		version = bi.Main.Version
-		sum = bi.Main.Sum
-	}
-	c := &selfCollector{MustNewConstMetric(
-		NewDesc(
-			"go_build_info",
-			"Build information about the main Go module.",
-			nil, Labels{"path": path, "version": version, "checksum": sum},
-		),
-		GaugeValue, 1)}
-	c.init(c.self)
-	return c
-}
107 vendor/github.com/prometheus/client_golang/prometheus/go_collector_go116.go generated vendored Normal file
@@ -0,0 +1,107 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build !go1.17
// +build !go1.17

package prometheus

import (
	"runtime"
	"sync"
	"time"
)

type goCollector struct {
	base baseGoCollector

	// ms... are memstats related.
	msLast          *runtime.MemStats // Previously collected memstats.
	msLastTimestamp time.Time
	msMtx           sync.Mutex // Protects msLast and msLastTimestamp.
	msMetrics       memStatsMetrics
	msRead          func(*runtime.MemStats) // For mocking in tests.
	msMaxWait       time.Duration           // Wait time for fresh memstats.
	msMaxAge        time.Duration           // Maximum allowed age of old memstats.
}

// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
	return &goCollector{
		base:      newBaseGoCollector(),
		msLast:    &runtime.MemStats{},
		msRead:    runtime.ReadMemStats,
		msMaxWait: time.Second,
		msMaxAge:  5 * time.Minute,
		msMetrics: goRuntimeMemStats(),
	}
}

// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
	c.base.Describe(ch)
	for _, i := range c.msMetrics {
		ch <- i.desc
	}
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
	var (
		ms   = &runtime.MemStats{}
		done = make(chan struct{})
	)
	// Start reading memstats first as it might take a while.
	go func() {
		c.msRead(ms)
		c.msMtx.Lock()
		c.msLast = ms
		c.msLastTimestamp = time.Now()
		c.msMtx.Unlock()
		close(done)
	}()

	// Collect base non-memory metrics.
	c.base.Collect(ch)

	timer := time.NewTimer(c.msMaxWait)
	select {
	case <-done: // Our own ReadMemStats succeeded in time. Use it.
		timer.Stop() // Important for high collection frequencies to not pile up timers.
		c.msCollect(ch, ms)
		return
	case <-timer.C: // Time out, use last memstats if possible. Continue below.
	}
	c.msMtx.Lock()
	if time.Since(c.msLastTimestamp) < c.msMaxAge {
		// Last memstats are recent enough. Collect from them under the lock.
		c.msCollect(ch, c.msLast)
		c.msMtx.Unlock()
		return
	}
	// If we are here, the last memstats are too old or don't exist. We have
	// to wait until our own ReadMemStats finally completes. For that to
	// happen, we have to release the lock.
	c.msMtx.Unlock()
	<-done
	c.msCollect(ch, ms)
}

func (c *goCollector) msCollect(ch chan<- Metric, ms *runtime.MemStats) {
	for _, i := range c.msMetrics {
		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms))
	}
}
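A standalone sketch of the "fresh memstats with a timeout, otherwise fall back to a cached copy" pattern that Collect above is built around; the names and the one-second wait mirror msMaxWait, but the program itself is illustrative only:

package main

import (
	"fmt"
	"runtime"
	"time"
)

func main() {
	ms := &runtime.MemStats{}
	done := make(chan struct{})

	// Reading memstats may stop the world, so it happens in the background.
	go func() {
		runtime.ReadMemStats(ms)
		close(done)
	}()

	timer := time.NewTimer(time.Second) // corresponds to msMaxWait in the collector
	select {
	case <-done:
		timer.Stop()
		fmt.Println("fresh HeapAlloc:", ms.HeapAlloc)
	case <-timer.C:
		// The real collector would serve a cached MemStats here if it is
		// younger than msMaxAge, and only block on <-done as a last resort.
		fmt.Println("timed out; would fall back to cached memstats")
	}
}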
364 vendor/github.com/prometheus/client_golang/prometheus/go_collector_go117.go generated vendored Normal file
@@ -0,0 +1,364 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.17
// +build go1.17

package prometheus

import (
	"math"
	"runtime"
	"runtime/metrics"
	"sync"

	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
	"github.com/golang/protobuf/proto"
	"github.com/prometheus/client_golang/prometheus/internal"
	dto "github.com/prometheus/client_model/go"
)

type goCollector struct {
	base baseGoCollector

	// rm... fields all pertain to the runtime/metrics package.
	rmSampleBuf []metrics.Sample
	rmSampleMap map[string]*metrics.Sample
	rmMetrics   []Metric

	// With Go 1.17, the runtime/metrics package was introduced.
	// From that point on, metric names produced by the runtime/metrics
	// package could be generated from runtime/metrics names. However,
	// these differ from the old names for the same values.
	//
	// This field exist to export the same values under the old names
	// as well.
	msMetrics memStatsMetrics
}

// NewGoCollector is the obsolete version of collectors.NewGoCollector.
// See there for documentation.
//
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
	descriptions := metrics.All()
	descMap := make(map[string]*metrics.Description)
	for i := range descriptions {
		descMap[descriptions[i].Name] = &descriptions[i]
	}

	// Generate a Desc and ValueType for each runtime/metrics metric.
	metricSet := make([]Metric, 0, len(descriptions))
	sampleBuf := make([]metrics.Sample, 0, len(descriptions))
	sampleMap := make(map[string]*metrics.Sample, len(descriptions))
	for i := range descriptions {
		d := &descriptions[i]
		namespace, subsystem, name, ok := internal.RuntimeMetricsToProm(d)
		if !ok {
			// Just ignore this metric; we can't do anything with it here.
			// If a user decides to use the latest version of Go, we don't want
			// to fail here. This condition is tested elsewhere.
			continue
		}

		// Set up sample buffer for reading, and a map
		// for quick lookup of sample values.
		sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
		sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]

		var m Metric
		if d.Kind == metrics.KindFloat64Histogram {
			_, hasSum := rmExactSumMap[d.Name]
			m = newBatchHistogram(
				NewDesc(
					BuildFQName(namespace, subsystem, name),
					d.Description,
					nil,
					nil,
				),
				hasSum,
			)
		} else if d.Cumulative {
			m = NewCounter(CounterOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      name,
				Help:      d.Description,
			})
		} else {
			m = NewGauge(GaugeOpts{
				Namespace: namespace,
				Subsystem: subsystem,
				Name:      name,
				Help:      d.Description,
			})
		}
		metricSet = append(metricSet, m)
	}
	return &goCollector{
		base:        newBaseGoCollector(),
		rmSampleBuf: sampleBuf,
		rmSampleMap: sampleMap,
		rmMetrics:   metricSet,
		msMetrics:   goRuntimeMemStats(),
	}
}

// Describe returns all descriptions of the collector.
func (c *goCollector) Describe(ch chan<- *Desc) {
	c.base.Describe(ch)
	for _, i := range c.msMetrics {
		ch <- i.desc
	}
	for _, m := range c.rmMetrics {
		ch <- m.Desc()
	}
}

// Collect returns the current state of all metrics of the collector.
func (c *goCollector) Collect(ch chan<- Metric) {
	// Collect base non-memory metrics.
	c.base.Collect(ch)

	// Populate runtime/metrics sample buffer.
	metrics.Read(c.rmSampleBuf)

	for i, sample := range c.rmSampleBuf {
		// N.B. switch on concrete type because it's significantly more efficient
		// than checking for the Counter and Gauge interface implementations. In
		// this case, we control all the types here.
		switch m := c.rmMetrics[i].(type) {
		case *counter:
			// Guard against decreases. This should never happen, but a failure
			// to do so will result in a panic, which is a harsh consequence for
			// a metrics collection bug.
			v0, v1 := m.get(), unwrapScalarRMValue(sample.Value)
			if v1 > v0 {
				m.Add(unwrapScalarRMValue(sample.Value) - m.get())
			}
			m.Collect(ch)
		case *gauge:
			m.Set(unwrapScalarRMValue(sample.Value))
			m.Collect(ch)
		case *batchHistogram:
			m.update(sample.Value.Float64Histogram(), c.exactSumFor(sample.Name))
			m.Collect(ch)
		default:
			panic("unexpected metric type")
		}
	}

	// ms is a dummy MemStats that we populate ourselves so that we can
	// populate the old metrics from it.
	var ms runtime.MemStats
	memStatsFromRM(&ms, c.rmSampleMap)
	for _, i := range c.msMetrics {
		ch <- MustNewConstMetric(i.desc, i.valType, i.eval(&ms))
	}
}

// unwrapScalarRMValue unwraps a runtime/metrics value that is assumed
// to be scalar and returns the equivalent float64 value. Panics if the
// value is not scalar.
func unwrapScalarRMValue(v metrics.Value) float64 {
	switch v.Kind() {
	case metrics.KindUint64:
		return float64(v.Uint64())
	case metrics.KindFloat64:
		return v.Float64()
	case metrics.KindBad:
		// Unsupported metric.
		//
		// This should never happen because we always populate our metric
		// set from the runtime/metrics package.
		panic("unexpected unsupported metric")
	default:
		// Unsupported metric kind.
		//
		// This should never happen because we check for this during initialization
		// and flag and filter metrics whose kinds we don't understand.
		panic("unexpected unsupported metric kind")
	}
}

var rmExactSumMap = map[string]string{
	"/gc/heap/allocs-by-size:bytes": "/gc/heap/allocs:bytes",
	"/gc/heap/frees-by-size:bytes":  "/gc/heap/frees:bytes",
}

// exactSumFor takes a runtime/metrics metric name (that is assumed to
// be of kind KindFloat64Histogram) and returns its exact sum and whether
// its exact sum exists.
//
// The runtime/metrics API for histograms doesn't currently expose exact
// sums, but some of the other metrics are in fact exact sums of histograms.
func (c *goCollector) exactSumFor(rmName string) float64 {
	sumName, ok := rmExactSumMap[rmName]
	if !ok {
		return 0
	}
	s, ok := c.rmSampleMap[sumName]
	if !ok {
		return 0
	}
	return unwrapScalarRMValue(s.Value)
}

func memStatsFromRM(ms *runtime.MemStats, rm map[string]*metrics.Sample) {
	lookupOrZero := func(name string) uint64 {
		if s, ok := rm[name]; ok {
			return s.Value.Uint64()
		}
		return 0
	}

	// Currently, MemStats adds tiny alloc count to both Mallocs AND Frees.
	// The reason for this is because MemStats couldn't be extended at the time
	// but there was a desire to have Mallocs at least be a little more representative,
	// while having Mallocs - Frees still represent a live object count.
	// Unfortunately, MemStats doesn't actually export a large allocation count,
	// so it's impossible to pull this number out directly.
	tinyAllocs := lookupOrZero("/gc/heap/tiny/allocs:objects")
	ms.Mallocs = lookupOrZero("/gc/heap/allocs:objects") + tinyAllocs
	ms.Frees = lookupOrZero("/gc/heap/frees:objects") + tinyAllocs

	ms.TotalAlloc = lookupOrZero("/gc/heap/allocs:bytes")
	ms.Sys = lookupOrZero("/memory/classes/total:bytes")
	ms.Lookups = 0 // Already always zero.
	ms.HeapAlloc = lookupOrZero("/memory/classes/heap/objects:bytes")
	ms.Alloc = ms.HeapAlloc
	ms.HeapInuse = ms.HeapAlloc + lookupOrZero("/memory/classes/heap/unused:bytes")
	ms.HeapReleased = lookupOrZero("/memory/classes/heap/released:bytes")
	ms.HeapIdle = ms.HeapReleased + lookupOrZero("/memory/classes/heap/free:bytes")
	ms.HeapSys = ms.HeapInuse + ms.HeapIdle
	ms.HeapObjects = lookupOrZero("/gc/heap/objects:objects")
	ms.StackInuse = lookupOrZero("/memory/classes/heap/stacks:bytes")
	ms.StackSys = ms.StackInuse + lookupOrZero("/memory/classes/os-stacks:bytes")
	ms.MSpanInuse = lookupOrZero("/memory/classes/metadata/mspan/inuse:bytes")
	ms.MSpanSys = ms.MSpanInuse + lookupOrZero("/memory/classes/metadata/mspan/free:bytes")
	ms.MCacheInuse = lookupOrZero("/memory/classes/metadata/mcache/inuse:bytes")
	ms.MCacheSys = ms.MCacheInuse + lookupOrZero("/memory/classes/metadata/mcache/free:bytes")
	ms.BuckHashSys = lookupOrZero("/memory/classes/profiling/buckets:bytes")
	ms.GCSys = lookupOrZero("/memory/classes/metadata/other:bytes")
	ms.OtherSys = lookupOrZero("/memory/classes/other:bytes")
	ms.NextGC = lookupOrZero("/gc/heap/goal:bytes")

	// N.B. LastGC is omitted because runtime.GCStats already has this.
	// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
	// for more details.
	ms.LastGC = 0

	// N.B. GCCPUFraction is intentionally omitted. This metric is not useful,
	// and often misleading due to the fact that it's an average over the lifetime
	// of the process.
	// See https://github.com/prometheus/client_golang/issues/842#issuecomment-861812034
	// for more details.
	ms.GCCPUFraction = 0
}

// batchHistogram is a mutable histogram that is updated
// in batches.
type batchHistogram struct {
	selfCollector

	// Static fields updated only once.
	desc   *Desc
	hasSum bool

	// Because this histogram operates in batches, it just uses a
	// single mutex for everything. updates are always serialized
	// but Write calls may operate concurrently with updates.
	// Contention between these two sources should be rare.
	mu      sync.Mutex
	buckets []float64 // Inclusive lower bounds.
	counts  []uint64
	sum     float64 // Used if hasSum is true.
}

func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
	h := &batchHistogram{desc: desc, hasSum: hasSum}
	h.init(h)
	return h
}

// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
	counts, buckets := his.Counts, his.Buckets
	// Skip a -Inf bucket altogether. It's not clear how to represent that.
	if math.IsInf(buckets[0], -1) {
		buckets = buckets[1:]
		counts = counts[1:]
	}

	h.mu.Lock()
	defer h.mu.Unlock()

	// Check if we're initialized.
	if h.buckets == nil {
		// Make copies of counts and buckets. It's really important
		// that we don't retain his.Counts or his.Buckets anywhere since
		// it's going to get reused.
		h.buckets = make([]float64, len(buckets))
		copy(h.buckets, buckets)

		h.counts = make([]uint64, len(counts))
	}
	copy(h.counts, counts)
	if h.hasSum {
		h.sum = sum
	}
}

func (h *batchHistogram) Desc() *Desc {
	return h.desc
}

func (h *batchHistogram) Write(out *dto.Metric) error {
	h.mu.Lock()
	defer h.mu.Unlock()

	sum := float64(0)
	if h.hasSum {
		sum = h.sum
	}
	dtoBuckets := make([]*dto.Bucket, 0, len(h.counts))
	totalCount := uint64(0)
	for i, count := range h.counts {
		totalCount += count
		if !h.hasSum {
			// N.B. This computed sum is an underestimate.
			sum += h.buckets[i] * float64(count)
		}

		// Skip the +Inf bucket, but only for the bucket list.
		// It must still count for sum and totalCount.
		if math.IsInf(h.buckets[i+1], 1) {
			break
		}
		// Float64Histogram's upper bound is exclusive, so make it inclusive
		// by obtaining the next float64 value down, in order.
		upperBound := math.Nextafter(h.buckets[i+1], h.buckets[i])
		dtoBuckets = append(dtoBuckets, &dto.Bucket{
			CumulativeCount: proto.Uint64(totalCount),
			UpperBound:      proto.Float64(upperBound),
		})
	}
	out.Histogram = &dto.Histogram{
		Bucket:      dtoBuckets,
		SampleCount: proto.Uint64(totalCount),
		SampleSum:   proto.Float64(sum),
	}
	return nil
}
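For orientation, a small Go 1.17+ sketch of the runtime/metrics read loop that the collector above wraps; it only prints samples and is not part of the vendored library:

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	descs := metrics.All()
	samples := make([]metrics.Sample, len(descs))
	for i := range descs {
		samples[i].Name = descs[i].Name
	}
	metrics.Read(samples)

	for _, s := range samples {
		switch s.Value.Kind() {
		case metrics.KindUint64:
			fmt.Printf("%s = %d\n", s.Name, s.Value.Uint64())
		case metrics.KindFloat64:
			fmt.Printf("%s = %f\n", s.Name, s.Value.Float64())
		case metrics.KindFloat64Histogram:
			h := s.Value.Float64Histogram()
			fmt.Printf("%s: %d buckets\n", s.Name, len(h.Counts))
		default:
			// Ignore kinds this sketch does not understand, as the collector does.
		}
	}
}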
28 vendor/github.com/prometheus/client_golang/prometheus/histogram.go generated vendored
@@ -116,6 +116,34 @@ func ExponentialBuckets(start, factor float64, count int) []float64 {
 	return buckets
 }
 
+// ExponentialBucketsRange creates 'count' buckets, where the lowest bucket is
+// 'min' and the highest bucket is 'max'. The final +Inf bucket is not counted
+// and not included in the returned slice. The returned slice is meant to be
+// used for the Buckets field of HistogramOpts.
+//
+// The function panics if 'count' is 0 or negative, if 'min' is 0 or negative.
+func ExponentialBucketsRange(min, max float64, count int) []float64 {
+	if count < 1 {
+		panic("ExponentialBucketsRange count needs a positive count")
+	}
+	if min <= 0 {
+		panic("ExponentialBucketsRange min needs to be greater than 0")
+	}
+
+	// Formula for exponential buckets.
+	// max = min*growthFactor^(bucketCount-1)
+
+	// We know max/min and highest bucket. Solve for growthFactor.
+	growthFactor := math.Pow(max/min, 1.0/float64(count-1))
+
+	// Now that we know growthFactor, solve for each bucket.
+	buckets := make([]float64, count)
+	for i := 1; i <= count; i++ {
+		buckets[i-1] = min * math.Pow(growthFactor, float64(i-1))
+	}
+	return buckets
+}
+
 // HistogramOpts bundles the options for creating a Histogram metric. It is
 // mandatory to set Name to a non-empty string. All other fields are optional
 // and can safely be left at their zero value, although it is strongly
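A hedged usage sketch for the new ExponentialBucketsRange helper; the metric name and the 0.001s..10s range are made up for illustration:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// 10 buckets spanning 0.001s .. 10s, spaced exponentially.
	buckets := prometheus.ExponentialBucketsRange(0.001, 10, 10)
	fmt.Println(buckets)

	hist := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name:    "example_request_duration_seconds",
		Help:    "Example histogram using ExponentialBucketsRange.",
		Buckets: buckets,
	})
	hist.Observe(0.042)
}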
77 vendor/github.com/prometheus/client_golang/prometheus/internal/go_runtime_metrics.go generated vendored Normal file
@@ -0,0 +1,77 @@
// Copyright 2021 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//go:build go1.17
// +build go1.17

package internal

import (
	"path"
	"runtime/metrics"
	"strings"

	"github.com/prometheus/common/model"
)

// RuntimeMetricsToProm produces a Prometheus metric name from a runtime/metrics
// metric description and validates whether the metric is suitable for integration
// with Prometheus.
//
// Returns false if a name could not be produced, or if Prometheus does not understand
// the runtime/metrics Kind.
//
// Note that the main reason a name couldn't be produced is if the runtime/metrics
// package exports a name with characters outside the valid Prometheus metric name
// character set. This is theoretically possible, but should never happen in practice.
// Still, don't rely on it.
func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool) {
	namespace := "go"

	comp := strings.SplitN(d.Name, ":", 2)
	key := comp[0]
	unit := comp[1]

	// The last path element in the key is the name,
	// the rest is the subsystem.
	subsystem := path.Dir(key[1:] /* remove leading / */)
	name := path.Base(key)

	// subsystem is translated by replacing all / and - with _.
	subsystem = strings.ReplaceAll(subsystem, "/", "_")
	subsystem = strings.ReplaceAll(subsystem, "-", "_")

	// unit is translated assuming that the unit contains no
	// non-ASCII characters.
	unit = strings.ReplaceAll(unit, "-", "_")
	unit = strings.ReplaceAll(unit, "*", "_")
	unit = strings.ReplaceAll(unit, "/", "_per_")

	// name has - replaced with _ and is concatenated with the unit and
	// other data.
	name = strings.ReplaceAll(name, "-", "_")
	name = name + "_" + unit
	if d.Cumulative {
		name = name + "_total"
	}

	valid := model.IsValidMetricName(model.LabelValue(namespace + "_" + subsystem + "_" + name))
	switch d.Kind {
	case metrics.KindUint64:
	case metrics.KindFloat64:
	case metrics.KindFloat64Histogram:
	default:
		valid = false
	}
	return namespace, subsystem, name, valid
}
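A standalone sketch of the name translation RuntimeMetricsToProm performs, applied to one example runtime/metrics name; the internal package itself cannot be imported from outside client_golang, so the steps are repeated inline here:

package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	const rmName = "/gc/heap/allocs:bytes" // cumulative, so it gains a _total suffix

	comp := strings.SplitN(rmName, ":", 2)
	key, unit := comp[0], comp[1]

	// Subsystem comes from the directory part of the key, name from its base.
	subsystem := strings.ReplaceAll(strings.ReplaceAll(path.Dir(key[1:]), "/", "_"), "-", "_")
	name := strings.ReplaceAll(path.Base(key), "-", "_") + "_" + strings.ReplaceAll(unit, "/", "_per_")
	name += "_total" // because the metric is cumulative

	fmt.Println("go_" + subsystem + "_" + name) // go_gc_heap_allocs_bytes_total
}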
1 vendor/github.com/prometheus/client_golang/prometheus/process_collector_other.go generated vendored
@@ -11,6 +11,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+//go:build !windows
 // +build !windows
 
 package prometheus
28 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_client.go generated vendored
@@ -49,7 +49,10 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
 // http.RoundTripper to observe the request result with the provided CounterVec.
 // The CounterVec must have zero, one, or two non-const non-curried labels. For
 // those, the only allowed label names are "code" and "method". The function
-// panics otherwise. Partitioning of the CounterVec happens by HTTP status code
+// panics otherwise. For the "method" label a predefined default label value set
+// is used to filter given values. Values besides predefined values will count
+// as `unknown` method.`WithExtraMethods` can be used to add more
+// methods to the set. Partitioning of the CounterVec happens by HTTP status code
 // and/or HTTP method if the respective instance label names are present in the
 // CounterVec. For unpartitioned counting, use a CounterVec with zero labels.
 //
@@ -57,13 +60,18 @@ func InstrumentRoundTripperInFlight(gauge prometheus.Gauge, next http.RoundTripp
 // is not incremented.
 //
 // See the example for ExampleInstrumentRoundTripperDuration for example usage.
-func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper) RoundTripperFunc {
+func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
+	rtOpts := &option{}
+	for _, o := range opts {
+		o(rtOpts)
+	}
+
 	code, method := checkLabels(counter)
 
 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			counter.With(labels(code, method, r.Method, resp.StatusCode)).Inc()
+			counter.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Inc()
 		}
 		return resp, err
 	})
@@ -73,7 +81,10 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 // http.RoundTripper to observe the request duration with the provided
 // ObserverVec. The ObserverVec must have zero, one, or two non-const
 // non-curried labels. For those, the only allowed label names are "code" and
-// "method". The function panics otherwise. The Observe method of the Observer
+// "method". The function panics otherwise. For the "method" label a predefined
+// default label value set is used to filter given values. Values besides
+// predefined values will count as `unknown` method. `WithExtraMethods`
+// can be used to add more methods to the set. The Observe method of the Observer
 // in the ObserverVec is called with the request duration in
 // seconds. Partitioning happens by HTTP status code and/or HTTP method if the
 // respective instance label names are present in the ObserverVec. For
@@ -85,14 +96,19 @@ func InstrumentRoundTripperCounter(counter *prometheus.CounterVec, next http.Rou
 //
 // Note that this method is only guaranteed to never observe negative durations
 // if used with Go1.9+.
-func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper) RoundTripperFunc {
+func InstrumentRoundTripperDuration(obs prometheus.ObserverVec, next http.RoundTripper, opts ...Option) RoundTripperFunc {
+	rtOpts := &option{}
+	for _, o := range opts {
+		o(rtOpts)
+	}
+
 	code, method := checkLabels(obs)
 
 	return RoundTripperFunc(func(r *http.Request) (*http.Response, error) {
 		start := time.Now()
 		resp, err := next.RoundTrip(r)
 		if err == nil {
-			obs.With(labels(code, method, r.Method, resp.StatusCode)).Observe(time.Since(start).Seconds())
+			obs.With(labels(code, method, r.Method, resp.StatusCode, rtOpts.extraMethods...)).Observe(time.Since(start).Seconds())
 		}
 		return resp, err
 	})
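A hedged sketch of instrumenting an http.Client with the updated round-tripper helper and its new Option parameter; the metric name, target URL, and the extra "PROPFIND" method are illustrative only:

package main

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	counter := prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Name: "client_api_requests_total",
			Help: "Outgoing requests by status code and method.",
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(counter)

	// Methods outside the default set are reported as method="unknown" unless added here.
	rt := promhttp.InstrumentRoundTripperCounter(counter, http.DefaultTransport,
		promhttp.WithExtraMethods("PROPFIND"))

	client := &http.Client{Transport: rt}
	_, _ = client.Get("http://example.com")
}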
107 vendor/github.com/prometheus/client_golang/prometheus/promhttp/instrument_server.go generated vendored
@@ -45,7 +45,10 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
 // http.Handler to observe the request duration with the provided ObserverVec.
 // The ObserverVec must have valid metric and label names and must have zero,
 // one, or two non-const non-curried labels. For those, the only allowed label
-// names are "code" and "method". The function panics otherwise. The Observe
+// names are "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+//`WithExtraMethods` can be used to add more methods to the set. The Observe
 // method of the Observer in the ObserverVec is called with the request duration
 // in seconds. Partitioning happens by HTTP status code and/or HTTP method if
 // the respective instance label names are present in the ObserverVec. For
@@ -58,7 +61,12 @@ func InstrumentHandlerInFlight(g prometheus.Gauge, next http.Handler) http.Handl
 //
 // Note that this method is only guaranteed to never observe negative durations
 // if used with Go1.9+.
-func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
+	mwOpts := &option{}
+	for _, o := range opts {
+		o(mwOpts)
+	}
+
 	code, method := checkLabels(obs)
 
 	if code {
@@ -67,14 +75,14 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 
-			obs.With(labels(code, method, r.Method, d.Status())).Observe(time.Since(now).Seconds())
+			obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
 		})
 	}
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		next.ServeHTTP(w, r)
-		obs.With(labels(code, method, r.Method, 0)).Observe(time.Since(now).Seconds())
+		obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
 	})
 }
@@ -82,7 +90,10 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
 // to observe the request result with the provided CounterVec. The CounterVec
 // must have valid metric and label names and must have zero, one, or two
 // non-const non-curried labels. For those, the only allowed label names are
-// "code" and "method". The function panics otherwise. Partitioning of the
+// "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. Partitioning of the
 // CounterVec happens by HTTP status code and/or HTTP method if the respective
 // instance label names are present in the CounterVec. For unpartitioned
 // counting, use a CounterVec with zero labels.
@@ -92,20 +103,25 @@ func InstrumentHandlerDuration(obs prometheus.ObserverVec, next http.Handler) ht
 // If the wrapped Handler panics, the Counter is not incremented.
 //
 // See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler, opts ...Option) http.HandlerFunc {
+	mwOpts := &option{}
+	for _, o := range opts {
+		o(mwOpts)
+	}
+
 	code, method := checkLabels(counter)
 
 	if code {
 		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
-			counter.With(labels(code, method, r.Method, d.Status())).Inc()
+			counter.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Inc()
 		})
 	}
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
-		counter.With(labels(code, method, r.Method, 0)).Inc()
+		counter.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Inc()
 	})
 }
@@ -114,7 +130,10 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
 // until the response headers are written. The ObserverVec must have valid
 // metric and label names and must have zero, one, or two non-const non-curried
 // labels. For those, the only allowed label names are "code" and "method". The
-// function panics otherwise. The Observe method of the Observer in the
+// function panics otherwise. For the "method" label a predefined default label
+// value set is used to filter given values. Values besides predefined values
+// will count as `unknown` method.`WithExtraMethods` can be used to add more
+// methods to the set. The Observe method of the Observer in the
 // ObserverVec is called with the request duration in seconds. Partitioning
 // happens by HTTP status code and/or HTTP method if the respective instance
 // label names are present in the ObserverVec. For unpartitioned observations,
@@ -128,13 +147,18 @@ func InstrumentHandlerCounter(counter *prometheus.CounterVec, next http.Handler)
 // if used with Go1.9+.
 //
 // See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
+	mwOpts := &option{}
+	for _, o := range opts {
+		o(mwOpts)
+	}
+
 	code, method := checkLabels(obs)
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		now := time.Now()
 		d := newDelegator(w, func(status int) {
-			obs.With(labels(code, method, r.Method, status)).Observe(time.Since(now).Seconds())
+			obs.With(labels(code, method, r.Method, status, mwOpts.extraMethods...)).Observe(time.Since(now).Seconds())
 		})
 		next.ServeHTTP(d, r)
 	})
@@ -144,8 +168,11 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
 // http.Handler to observe the request size with the provided ObserverVec. The
 // ObserverVec must have valid metric and label names and must have zero, one,
 // or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. The Observe method of
-// the Observer in the ObserverVec is called with the request size in
+// are "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
+// method of the Observer in the ObserverVec is called with the request size in
 // bytes. Partitioning happens by HTTP status code and/or HTTP method if the
 // respective instance label names are present in the ObserverVec. For
 // unpartitioned observations, use an ObserverVec with zero labels. Note that
@@ -156,7 +183,12 @@ func InstrumentHandlerTimeToWriteHeader(obs prometheus.ObserverVec, next http.Ha
 // If the wrapped Handler panics, no values are reported.
 //
 // See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler) http.HandlerFunc {
+func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.HandlerFunc {
+	mwOpts := &option{}
+	for _, o := range opts {
+		o(mwOpts)
+	}
+
 	code, method := checkLabels(obs)
 
 	if code {
@@ -164,14 +196,14 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
 			d := newDelegator(w, nil)
 			next.ServeHTTP(d, r)
 			size := computeApproximateRequestSize(r)
-			obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(size))
+			obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(size))
 		})
 	}
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		next.ServeHTTP(w, r)
 		size := computeApproximateRequestSize(r)
-		obs.With(labels(code, method, r.Method, 0)).Observe(float64(size))
+		obs.With(labels(code, method, r.Method, 0, mwOpts.extraMethods...)).Observe(float64(size))
 	})
 }
@@ -179,8 +211,11 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
 // http.Handler to observe the response size with the provided ObserverVec. The
 // ObserverVec must have valid metric and label names and must have zero, one,
 // or two non-const non-curried labels. For those, the only allowed label names
-// are "code" and "method". The function panics otherwise. The Observe method of
-// the Observer in the ObserverVec is called with the response size in
+// are "code" and "method". The function panics otherwise. For the "method"
+// label a predefined default label value set is used to filter given values.
+// Values besides predefined values will count as `unknown` method.
+// `WithExtraMethods` can be used to add more methods to the set. The Observe
+// method of the Observer in the ObserverVec is called with the response size in
 // bytes. Partitioning happens by HTTP status code and/or HTTP method if the
 // respective instance label names are present in the ObserverVec. For
 // unpartitioned observations, use an ObserverVec with zero labels. Note that
@@ -191,12 +226,18 @@ func InstrumentHandlerRequestSize(obs prometheus.ObserverVec, next http.Handler)
 // If the wrapped Handler panics, no values are reported.
 //
 // See the example for InstrumentHandlerDuration for example usage.
-func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler) http.Handler {
+func InstrumentHandlerResponseSize(obs prometheus.ObserverVec, next http.Handler, opts ...Option) http.Handler {
+	mwOpts := &option{}
+	for _, o := range opts {
+		o(mwOpts)
+	}
+
 	code, method := checkLabels(obs)
 
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		d := newDelegator(w, nil)
 		next.ServeHTTP(d, r)
-		obs.With(labels(code, method, r.Method, d.Status())).Observe(float64(d.Written()))
+		obs.With(labels(code, method, r.Method, d.Status(), mwOpts.extraMethods...)).Observe(float64(d.Written()))
 	})
 }
@@ -290,7 +331,7 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
 // unnecessary allocations on each request.
 var emptyLabels = prometheus.Labels{}
 
-func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
+func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
 	if !(code || method) {
 		return emptyLabels
 	}
@@ -300,7 +341,7 @@ func labels(code, method bool, reqMethod string, status int) prometheus.Labels {
 		labels["code"] = sanitizeCode(status)
 	}
 	if method {
-		labels["method"] = sanitizeMethod(reqMethod)
+		labels["method"] = sanitizeMethod(reqMethod, extraMethods...)
 	}
 
 	return labels
@@ -330,7 +371,12 @@ func computeApproximateRequestSize(r *http.Request) int {
 	return s
 }
 
-func sanitizeMethod(m string) string {
+// If the wrapped http.Handler has a known method, it will be sanitized and returned.
+// Otherwise, "unknown" will be returned. The known method list can be extended
+// as needed by using extraMethods parameter.
+func sanitizeMethod(m string, extraMethods ...string) string {
+	// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for
+	// the methods chosen as default.
 	switch m {
 	case "GET", "get":
 		return "get"
@@ -348,15 +394,25 @@ func sanitizeMethod(m string) string {
 		return "options"
 	case "NOTIFY", "notify":
 		return "notify"
+	case "TRACE", "trace":
+		return "trace"
+	case "PATCH", "patch":
+		return "patch"
 	default:
-		return strings.ToLower(m)
+		for _, method := range extraMethods {
+			if strings.EqualFold(m, method) {
+				return strings.ToLower(m)
+			}
+		}
+		return "unknown"
 	}
 }
 
 // If the wrapped http.Handler has not set a status code, i.e. the value is
-// currently 0, santizeCode will return 200, for consistency with behavior in
+// currently 0, sanitizeCode will return 200, for consistency with behavior in
 // the stdlib.
 func sanitizeCode(s int) string {
+	// See for accepted codes https://www.iana.org/assignments/http-status-codes/http-status-codes.xhtml
 	switch s {
 	case 100:
 		return "100"
@@ -453,6 +509,9 @@ func sanitizeCode(s int) string {
 		return "511"
 
 	default:
+		if s >= 100 && s <= 599 {
 			return strconv.Itoa(s)
+		}
+		return "unknown"
 	}
 }
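A hedged sketch of the server-side middleware after this change; the handler path, metric name, and port are made up, and methods outside the default set shown in sanitizeMethod above are labeled method="unknown" unless extended via WithExtraMethods:

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	duration := prometheus.NewHistogramVec(
		prometheus.HistogramOpts{
			Name:    "http_request_duration_seconds",
			Help:    "Request duration by status code and method.",
			Buckets: prometheus.DefBuckets,
		},
		[]string{"code", "method"},
	)
	prometheus.MustRegister(duration)

	hello := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("hello"))
	})

	// HistogramVec satisfies prometheus.ObserverVec, so it can be passed directly.
	http.Handle("/hello", promhttp.InstrumentHandlerDuration(duration, hello))
	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}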
31 vendor/github.com/prometheus/client_golang/prometheus/promhttp/option.go generated vendored Normal file
@@ -0,0 +1,31 @@
// Copyright 2022 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package promhttp

// Option are used to configure a middleware or round tripper..
type Option func(*option)

type option struct {
	extraMethods []string
}

// WithExtraMethods adds additional HTTP methods to the list of allowed methods.
// See https://developer.mozilla.org/en-US/docs/Web/HTTP/Methods for the default list.
//
// See the example for ExampleInstrumentHandlerWithExtraMethods for example usage.
func WithExtraMethods(methods ...string) Option {
	return func(o *option) {
		o.extraMethods = methods
	}
}
6 vendor/github.com/prometheus/client_golang/prometheus/value.go generated vendored
@@ -21,7 +21,7 @@ import (
 
 	//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/golang/protobuf/proto"
-	"github.com/golang/protobuf/ptypes"
+	"google.golang.org/protobuf/types/known/timestamppb"
 
 	dto "github.com/prometheus/client_model/go"
 )
@@ -183,8 +183,8 @@ const ExemplarMaxRunes = 64
 func newExemplar(value float64, ts time.Time, l Labels) (*dto.Exemplar, error) {
 	e := &dto.Exemplar{}
 	e.Value = proto.Float64(value)
-	tsProto, err := ptypes.TimestampProto(ts)
-	if err != nil {
+	tsProto := timestamppb.New(ts)
+	if err := tsProto.CheckValid(); err != nil {
 		return nil, err
 	}
 	e.Timestamp = tsProto
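A small sketch of the timestamppb API that replaces ptypes.TimestampProto in newExemplar above; it is standalone and not part of the vendored change itself:

package main

import (
	"fmt"
	"time"

	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	ts := timestamppb.New(time.Now())
	// CheckValid replaces the error return of the old ptypes.TimestampProto call.
	if err := ts.CheckValid(); err != nil {
		fmt.Println("invalid timestamp:", err)
		return
	}
	fmt.Println(ts.AsTime().UTC())
}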
2 vendor/github.com/prometheus/common/expfmt/encode.go generated vendored
@@ -18,7 +18,7 @@ import (
 	"io"
 	"net/http"
 
-	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/matttproud/golang_protobuf_extensions/pbutil"
 	"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
 
2 vendor/github.com/prometheus/common/expfmt/text_parse.go generated vendored
@@ -24,7 +24,7 @@ import (
 
 	dto "github.com/prometheus/client_model/go"
 
-	"github.com/golang/protobuf/proto"
+	"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 	"github.com/prometheus/common/model"
 )
 
2 vendor/github.com/prometheus/procfs/Makefile generated vendored
@@ -18,6 +18,8 @@ include Makefile.common
 	./ttar -C $(dir $*) -x -f $*.ttar
 	touch $@
 
+fixtures: fixtures/.unpacked
+
 update_fixtures:
 	rm -vf fixtures/.unpacked
 	./ttar -c -f fixtures.ttar fixtures/
15 vendor/github.com/prometheus/procfs/Makefile.common generated vendored
@@ -78,12 +78,12 @@ ifneq ($(shell which gotestsum),)
 endif
 endif
 
-PROMU_VERSION ?= 0.7.0
+PROMU_VERSION ?= 0.12.0
 PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
 
 GOLANGCI_LINT :=
 GOLANGCI_LINT_OPTS ?=
-GOLANGCI_LINT_VERSION ?= v1.18.0
+GOLANGCI_LINT_VERSION ?= v1.39.0
 # golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
 # windows isn't included here because of the path separator being different.
 ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@@ -118,7 +118,7 @@ endif
 %: common-% ;
 
 .PHONY: common-all
-common-all: precheck style check_license lint unused build test
+common-all: precheck style check_license lint yamllint unused build test
 
 .PHONY: common-style
 common-style:
@@ -198,6 +198,15 @@ else
 endif
 endif
 
+.PHONY: common-yamllint
+common-yamllint:
+	@echo ">> running yamllint on all YAML files in the repository"
+ifeq (, $(shell which yamllint))
+	@echo "yamllint not installed so skipping"
+else
+	yamllint .
+endif
+
 # For backward-compatibility.
 .PHONY: common-staticcheck
 common-staticcheck: lint
4 vendor/github.com/prometheus/procfs/README.md generated vendored
@@ -6,8 +6,8 @@ metrics from the pseudo-filesystems /proc and /sys.
 *WARNING*: This package is a work in progress. Its API may still break in
 backwards-incompatible ways without warnings. Use it at your own risk.
 
-[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs)
-[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs)
+[![Go Reference](https://pkg.go.dev/badge/github.com/prometheus/procfs.svg)](https://pkg.go.dev/github.com/prometheus/procfs)
+[![CircleCI](https://circleci.com/gh/prometheus/procfs/tree/master.svg?style=svg)](https://circleci.com/gh/prometheus/procfs/tree/master)
 [![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs)
 
 ## Usage
|
30 changes: vendor/github.com/prometheus/procfs/cmdline.go (generated, vendored, new file)
@@ -0,0 +1,30 @@
+// Copyright 2021 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"strings"
+
+	"github.com/prometheus/procfs/internal/util"
+)
+
+// CmdLine returns the command line of the kernel.
+func (fs FS) CmdLine() ([]string, error) {
+	data, err := util.ReadFileNoStat(fs.proc.Path("cmdline"))
+	if err != nil {
+		return nil, err
+	}
+
+	return strings.Fields(string(data)), nil
+}
2 changes: vendor/github.com/prometheus/procfs/doc.go (generated, vendored)
@@ -31,7 +31,7 @@
 //	log.Fatalf("could not get process: %s", err)
 // }
 //
-//	stat, err := p.NewStat()
+//	stat, err := p.Stat()
 // if err != nil {
 //	log.Fatalf("could not get process stat: %s", err)
 // }
1178 changes: vendor/github.com/prometheus/procfs/fixtures.ttar (generated, vendored)
File diff suppressed because it is too large.
85 changes: vendor/github.com/prometheus/procfs/mdstat.go (generated, vendored)
@@ -22,8 +22,11 @@ import (
 )
 
 var (
-	statusLineRE   = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`)
-	recoveryLineRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+	statusLineRE         = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[([U_]+)\]`)
+	recoveryLineBlocksRE = regexp.MustCompile(`\((\d+)/\d+\)`)
+	recoveryLinePctRE    = regexp.MustCompile(`= (.+)%`)
+	recoveryLineFinishRE = regexp.MustCompile(`finish=(.+)min`)
+	recoveryLineSpeedRE  = regexp.MustCompile(`speed=(.+)[A-Z]`)
 	componentDeviceRE    = regexp.MustCompile(`(.*)\[\d+\]`)
 )
 
@@ -39,12 +42,20 @@ type MDStat struct {
 	DisksTotal int64
 	// Number of failed disks.
 	DisksFailed int64
+	// Number of "down" disks. (the _ indicator in the status line)
+	DisksDown int64
 	// Spare disks in the device.
 	DisksSpare int64
 	// Number of blocks the device holds.
 	BlocksTotal int64
 	// Number of blocks on the device that are in sync.
 	BlocksSynced int64
+	// progress percentage of current sync
+	BlocksSyncedPct float64
+	// estimated finishing time for current sync (in minutes)
+	BlocksSyncedFinishTime float64
+	// current sync speed (in Kilobytes/sec)
+	BlocksSyncedSpeed float64
 	// Name of md component devices
 	Devices []string
 }
@@ -91,7 +102,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 		// Failed disks have the suffix (F) & Spare disks have the suffix (S).
 		fail := int64(strings.Count(line, "(F)"))
 		spare := int64(strings.Count(line, "(S)"))
-		active, total, size, err := evalStatusLine(lines[i], lines[i+1])
+		active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
 
 		if err != nil {
 			return nil, fmt.Errorf("error parsing md device lines: %w", err)
@@ -105,6 +116,9 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 		// If device is syncing at the moment, get the number of currently
 		// synced bytes, otherwise that number equals the size of the device.
 		syncedBlocks := size
+		speed := float64(0)
+		finish := float64(0)
+		pct := float64(0)
 		recovering := strings.Contains(lines[syncLineIdx], "recovery")
 		resyncing := strings.Contains(lines[syncLineIdx], "resync")
 		checking := strings.Contains(lines[syncLineIdx], "check")
@@ -124,7 +138,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 				strings.Contains(lines[syncLineIdx], "DELAYED") {
 				syncedBlocks = 0
 			} else {
-				syncedBlocks, err = evalRecoveryLine(lines[syncLineIdx])
+				syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
 				if err != nil {
 					return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
 				}
@@ -136,10 +150,14 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 			ActivityState: state,
 			DisksActive:   active,
 			DisksFailed:   fail,
+			DisksDown:     down,
 			DisksSpare:    spare,
 			DisksTotal:    total,
 			BlocksTotal:   size,
 			BlocksSynced:  syncedBlocks,
+			BlocksSyncedPct:        pct,
+			BlocksSyncedFinishTime: finish,
+			BlocksSyncedSpeed:      speed,
 			Devices:       evalComponentDevices(deviceFields),
 		})
 	}
@@ -147,54 +165,85 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
 	return mdStats, nil
 }
 
-func evalStatusLine(deviceLine, statusLine string) (active, total, size int64, err error) {
+func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
 
 	sizeStr := strings.Fields(statusLine)[0]
 	size, err = strconv.ParseInt(sizeStr, 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
 	}
 
 	if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
 		// In the device deviceLine, only disks have a number associated with them in [].
 		total = int64(strings.Count(deviceLine, "["))
-		return total, total, size, nil
+		return total, total, 0, size, nil
 	}
 
 	if strings.Contains(deviceLine, "inactive") {
-		return 0, 0, size, nil
+		return 0, 0, 0, size, nil
 	}
 
 	matches := statusLineRE.FindStringSubmatch(statusLine)
-	if len(matches) != 4 {
-		return 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
+	if len(matches) != 5 {
+		return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
	}
 
 	total, err = strconv.ParseInt(matches[2], 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
 	}
 
 	active, err = strconv.ParseInt(matches[3], 10, 64)
 	if err != nil {
-		return 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
 	}
+	down = int64(strings.Count(matches[4], "_"))
 
-	return active, total, size, nil
+	return active, total, down, size, nil
 }
 
-func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, err error) {
-	matches := recoveryLineRE.FindStringSubmatch(recoveryLine)
+func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
+	matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
 	if len(matches) != 2 {
-		return 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
+		return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
 	}
 
 	syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
 	if err != nil {
-		return 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
+		return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
 	}
 
-	return syncedBlocks, nil
+	// Get percentage complete
+	matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
+	}
+	pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
+	if err != nil {
+		return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+	}
+
+	// Get time expected left to complete
+	matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
+	}
+	finish, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+	}
+
+	// Get recovery speed
+	matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
+	if len(matches) != 2 {
+		return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
+	}
+	speed, err = strconv.ParseFloat(matches[1], 64)
+	if err != nil {
+		return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
+	}
+
+	return syncedBlocks, pct, finish, speed, nil
 }
 
 func evalComponentDevices(deviceFields []string) []string {
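The new `DisksDown`, `BlocksSyncedPct`, `BlocksSyncedFinishTime` and `BlocksSyncedSpeed` fields are what callers can now read per md array. A hedged usage sketch (assumes the exported `FS.MDStat` accessor and `procfs.NewDefaultFS`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	mdstats, err := fs.MDStat() // parses /proc/mdstat
	if err != nil {
		log.Fatal(err)
	}
	for _, md := range mdstats {
		// DisksDown, BlocksSyncedPct, BlocksSyncedFinishTime and
		// BlocksSyncedSpeed are the fields added by this vendor bump.
		fmt.Printf("%s: %d/%d disks active, %d down, sync %.1f%% at %.0f KiB/s (~%.1f min left)\n",
			md.Name, md.DisksActive, md.DisksTotal, md.DisksDown,
			md.BlocksSyncedPct, md.BlocksSyncedSpeed, md.BlocksSyncedFinishTime)
	}
}
```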
10 changes: vendor/github.com/prometheus/procfs/net_ip_socket.go (generated, vendored)
@@ -65,6 +65,7 @@ type (
 		TxQueue uint64
 		RxQueue uint64
 		UID     uint64
+		Inode   uint64
 	}
 )
 
@@ -150,9 +151,9 @@ func parseIP(hexIP string) (net.IP, error) {
 // parseNetIPSocketLine parses a single line, represented by a list of fields.
 func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
 	line := &netIPSocketLine{}
-	if len(fields) < 8 {
+	if len(fields) < 10 {
 		return nil, fmt.Errorf(
-			"cannot parse net socket line as it has less then 8 columns %q",
+			"cannot parse net socket line as it has less then 10 columns %q",
 			strings.Join(fields, " "),
 		)
 	}
@@ -216,5 +217,10 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
 		return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
 	}
 
+	// inode
+	if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
+		return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
+	}
+
 	return line, nil
 }
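The extra `Inode` column is surfaced through the exported TCP/UDP accessors built on `parseNetIPSocketLine`. A sketch only, under the assumption that `FS.NetTCP` returns a slice of these socket-line structs:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewDefaultFS()
	if err != nil {
		log.Fatal(err)
	}
	// Assumed accessor: NetTCP reads /proc/net/tcp and yields one entry per socket.
	tcp, err := fs.NetTCP()
	if err != nil {
		log.Fatal(err)
	}
	for _, line := range tcp {
		// UID/Inode/TxQueue/RxQueue are the exported fields shown in the diff above.
		fmt.Printf("uid=%d inode=%d txq=%d rxq=%d\n", line.UID, line.Inode, line.TxQueue, line.RxQueue)
	}
}
```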
68 changes: vendor/github.com/prometheus/procfs/netstat.go (generated, vendored, new file)
@@ -0,0 +1,68 @@
+// Copyright 2020 The Prometheus Authors
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package procfs
+
+import (
+	"bufio"
+	"os"
+	"path/filepath"
+	"strconv"
+	"strings"
+)
+
+// NetStat contains statistics for all the counters from one file
+type NetStat struct {
+	Filename string
+	Stats    map[string][]uint64
+}
+
+// NetStat retrieves stats from /proc/net/stat/
+func (fs FS) NetStat() ([]NetStat, error) {
+	statFiles, err := filepath.Glob(fs.proc.Path("net/stat/*"))
+	if err != nil {
+		return nil, err
+	}
+
+	var netStatsTotal []NetStat
+
+	for _, filePath := range statFiles {
+		file, err := os.Open(filePath)
+		if err != nil {
+			return nil, err
+		}
+
+		netStatFile := NetStat{
+			Filename: filepath.Base(filePath),
+			Stats:    make(map[string][]uint64),
+		}
+		scanner := bufio.NewScanner(file)
+		scanner.Scan()
+		// First string is always a header for stats
+		var headers []string
+		headers = append(headers, strings.Fields(scanner.Text())...)
+
+		// Other strings represent per-CPU counters
+		for scanner.Scan() {
+			for num, counter := range strings.Fields(scanner.Text()) {
+				value, err := strconv.ParseUint(counter, 16, 32)
+				if err != nil {
+					return nil, err
+				}
+				netStatFile.Stats[headers[num]] = append(netStatFile.Stats[headers[num]], value)
+			}
+		}
+		netStatsTotal = append(netStatsTotal, netStatFile)
+	}
+	return netStatsTotal, nil
+}
2 changes: vendor/github.com/prometheus/procfs/proc_cgroup.go (generated, vendored)
@@ -90,7 +90,7 @@ func parseCgroups(data []byte) ([]Cgroup, error) {
 // control hierarchy running on this system. On every system (v1 and v2), all hierarchies contain all processes,
 // so the len of the returned struct is equal to the number of active hierarchies on this system
 func (p Proc) Cgroups() ([]Cgroup, error) {
-	data, err := util.ReadFileNoStat(fmt.Sprintf("/proc/%d/cgroup", p.PID))
+	data, err := util.ReadFileNoStat(p.path("cgroup"))
 	if err != nil {
 		return nil, err
 	}
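The `Cgroups` change only swaps the hard-coded `/proc/%d/cgroup` path for the proc-relative `p.path("cgroup")`, so a non-default procfs mount is now honored. A hedged sketch of the call site (assumes `procfs.Self` and the usual `Cgroup` fields `HierarchyID`, `Controllers`, `Path`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self() // the current process
	if err != nil {
		log.Fatal(err)
	}
	cgroups, err := p.Cgroups()
	if err != nil {
		log.Fatal(err)
	}
	for _, cg := range cgroups {
		// Field names are assumed from the procfs Cgroup struct, not from this diff.
		fmt.Printf("hierarchy=%d controllers=%v path=%s\n", cg.HierarchyID, cg.Controllers, cg.Path)
	}
}
```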
32 changes: vendor/github.com/prometheus/procfs/proc_stat.go (generated, vendored)
@@ -100,6 +100,15 @@ type ProcStat struct {
 	VSize uint
 	// Resident set size in pages.
 	RSS int
+	// Soft limit in bytes on the rss of the process.
+	RSSLimit uint64
+	// Real-time scheduling priority, a number in the range 1 to 99 for processes
+	// scheduled under a real-time policy, or 0, for non-real-time processes.
+	RTPriority uint
+	// Scheduling policy.
+	Policy uint
+	// Aggregated block I/O delays, measured in clock ticks (centiseconds).
+	DelayAcctBlkIOTicks uint64
 
 	proc fs.FS
 }
@@ -119,7 +128,8 @@ func (p Proc) Stat() (ProcStat, error) {
 	}
 
 	var (
-		ignore int
+		ignoreInt64  int64
+		ignoreUint64 uint64
 
 		s = ProcStat{PID: p.PID, proc: p.fs}
 		l = bytes.Index(data, []byte("("))
@@ -151,10 +161,28 @@ func (p Proc) Stat() (ProcStat, error) {
 		&s.Priority,
 		&s.Nice,
 		&s.NumThreads,
-		&ignore,
+		&ignoreInt64,
 		&s.Starttime,
 		&s.VSize,
 		&s.RSS,
+		&s.RSSLimit,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreUint64,
+		&ignoreInt64,
+		&ignoreInt64,
+		&s.RTPriority,
+		&s.Policy,
+		&s.DelayAcctBlkIOTicks,
 	)
 	if err != nil {
 		return ProcStat{}, err
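`Proc.Stat` now scans further into `/proc/<pid>/stat`, which is what makes `RSSLimit`, `RTPriority`, `Policy` and `DelayAcctBlkIOTicks` available. A short sketch (assumes `procfs.Self`):

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	p, err := procfs.Self()
	if err != nil {
		log.Fatal(err)
	}
	stat, err := p.Stat()
	if err != nil {
		log.Fatal(err)
	}
	// The four fields below are the ones added by this vendor bump.
	fmt.Printf("rss limit=%d bytes, rt priority=%d, policy=%d, blkio delay=%d ticks\n",
		stat.RSSLimit, stat.RTPriority, stat.Policy, stat.DelayAcctBlkIOTicks)
}
```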
1 change: vendor/github.com/prometheus/procfs/zoneinfo.go (generated, vendored)
@@ -99,7 +99,6 @@ func parseZoneinfo(zoneinfoData []byte) ([]Zoneinfo, error) {
 			continue
 		}
 		if strings.HasPrefix(strings.TrimSpace(line), "per-node stats") {
-			zoneinfoElement.Zone = ""
 			continue
 		}
 		parts := strings.Fields(strings.TrimSpace(line))
47 changes: vendor/golang.org/x/net/http2/client_conn_pool.go (generated, vendored)
@@ -16,6 +16,12 @@ import (
 
 // ClientConnPool manages a pool of HTTP/2 client connections.
 type ClientConnPool interface {
+	// GetClientConn returns a specific HTTP/2 connection (usually
+	// a TLS-TCP connection) to an HTTP/2 server. On success, the
+	// returned ClientConn accounts for the upcoming RoundTrip
+	// call, so the caller should not omit it. If the caller needs
+	// to, ClientConn.RoundTrip can be called with a bogus
+	// new(http.Request) to release the stream reservation.
 	GetClientConn(req *http.Request, addr string) (*ClientConn, error)
 	MarkDead(*ClientConn)
 }
@@ -42,7 +48,7 @@ type clientConnPool struct {
 	conns        map[string][]*ClientConn // key is host:port
 	dialing      map[string]*dialCall     // currently in-flight dials
 	keys         map[*ClientConn][]string
-	addConnCalls map[string]*addConnCall // in-flight addConnIfNeede calls
+	addConnCalls map[string]*addConnCall // in-flight addConnIfNeeded calls
 }
 
 func (p *clientConnPool) GetClientConn(req *http.Request, addr string) (*ClientConn, error) {
@@ -54,28 +60,8 @@ const (
 	noDialOnMiss = false
 )
 
-// shouldTraceGetConn reports whether getClientConn should call any
-// ClientTrace.GetConn hook associated with the http.Request.
-//
-// This complexity is needed to avoid double calls of the GetConn hook
-// during the back-and-forth between net/http and x/net/http2 (when the
-// net/http.Transport is upgraded to also speak http2), as well as support
-// the case where x/net/http2 is being used directly.
-func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool {
-	// If our Transport wasn't made via ConfigureTransport, always
-	// trace the GetConn hook if provided, because that means the
-	// http2 package is being used directly and it's the one
-	// dialing, as opposed to net/http.
-	if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok {
-		return true
-	}
-	// Otherwise, only use the GetConn hook if this connection has
-	// been used previously for other requests. For fresh
-	// connections, the net/http package does the dialing.
-	return !st.freshConn
-}
-
 func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) {
+	// TODO(dneil): Dial a new connection when t.DisableKeepAlives is set?
 	if isConnectionCloseRequest(req) && dialOnMiss {
 		// It gets its own connection.
 		traceGetConn(req, addr)
@@ -89,10 +75,14 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
 	for {
 		p.mu.Lock()
 		for _, cc := range p.conns[addr] {
-			if st := cc.idleState(); st.canTakeNewRequest {
-				if p.shouldTraceGetConn(st) {
+			if cc.ReserveNewRequest() {
+				// When a connection is presented to us by the net/http package,
+				// the GetConn hook has already been called.
+				// Don't call it a second time here.
+				if !cc.getConnCalled {
 					traceGetConn(req, addr)
 				}
+				cc.getConnCalled = false
 				p.mu.Unlock()
 				return cc, nil
 			}
@@ -108,7 +98,13 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis
 		if shouldRetryDial(call, req) {
 			continue
 		}
-		return call.res, call.err
+		cc, err := call.res, call.err
+		if err != nil {
+			return nil, err
+		}
+		if cc.ReserveNewRequest() {
+			return cc, nil
+		}
 	}
 }
 
@@ -205,6 +201,7 @@ func (c *addConnCall) run(t *Transport, key string, tc *tls.Conn) {
 	if err != nil {
 		c.err = err
 	} else {
+		cc.getConnCalled = true // already called by the net/http package
 		p.addConnLocked(key, cc)
 	}
 	delete(p.addConnCalls, key)
12 changes: vendor/golang.org/x/net/http2/errors.go (generated, vendored)
@@ -53,6 +53,13 @@ func (e ErrCode) String() string {
 	return fmt.Sprintf("unknown error code 0x%x", uint32(e))
 }
 
+func (e ErrCode) stringToken() string {
+	if s, ok := errCodeName[e]; ok {
+		return s
+	}
+	return fmt.Sprintf("ERR_UNKNOWN_%d", uint32(e))
+}
+
 // ConnectionError is an error that results in the termination of the
 // entire connection.
 type ConnectionError ErrCode
@@ -67,6 +74,11 @@ type StreamError struct {
 	Cause    error // optional additional detail
 }
 
+// errFromPeer is a sentinel error value for StreamError.Cause to
+// indicate that the StreamError was sent from the peer over the wire
+// and wasn't locally generated in the Transport.
+var errFromPeer = errors.New("received from peer")
+
 func streamError(id uint32, code ErrCode) StreamError {
 	return StreamError{StreamID: id, Code: code}
 }
62 changes: vendor/golang.org/x/net/http2/frame.go (generated, vendored)
@@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{
 // a frameParser parses a frame given its FrameHeader and payload
 // bytes. The length of payload will always equal fh.Length (which
 // might be 0).
-type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error)
+type frameParser func(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error)
 
 var frameParsers = map[FrameType]frameParser{
 	FrameData: parseDataFrame,
@@ -267,6 +267,11 @@ type Framer struct {
 	lastFrame Frame
 	errDetail error
 
+	// countError is a non-nil func that's called on a frame parse
+	// error with some unique error path token. It's initialized
+	// from Transport.CountError or Server.CountError.
+	countError func(errToken string)
+
 	// lastHeaderStream is non-zero if the last frame was an
 	// unfinished HEADERS/CONTINUATION.
 	lastHeaderStream uint32
@@ -426,6 +431,7 @@ func NewFramer(w io.Writer, r io.Reader) *Framer {
 	fr := &Framer{
 		w:                w,
 		r:                r,
+		countError:       func(string) {},
 		logReads:         logFrameReads,
 		logWrites:        logFrameWrites,
 		debugReadLoggerf: log.Printf,
@@ -500,7 +506,7 @@ func (fr *Framer) ReadFrame() (Frame, error) {
 	if _, err := io.ReadFull(fr.r, payload); err != nil {
 		return nil, err
 	}
-	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload)
+	f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
 	if err != nil {
 		if ce, ok := err.(connError); ok {
 			return nil, fr.connError(ce.Code, ce.Reason)
@@ -588,13 +594,14 @@ func (f *DataFrame) Data() []byte {
 	return f.data
 }
 
-func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parseDataFrame(fc *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 	if fh.StreamID == 0 {
 		// DATA frames MUST be associated with a stream. If a
 		// DATA frame is received whose stream identifier
 		// field is 0x0, the recipient MUST respond with a
 		// connection error (Section 5.4.1) of type
 		// PROTOCOL_ERROR.
+		countError("frame_data_stream_0")
 		return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"}
 	}
 	f := fc.getDataFrame()
@@ -605,6 +612,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
 		var err error
 		payload, padSize, err = readByte(payload)
 		if err != nil {
+			countError("frame_data_pad_byte_short")
 			return nil, err
 		}
 	}
@@ -613,6 +621,7 @@ func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, erro
 		// length of the frame payload, the recipient MUST
 		// treat this as a connection error.
 		// Filed: https://github.com/http2/http2-spec/issues/610
+		countError("frame_data_pad_too_big")
 		return nil, connError{ErrCodeProtocol, "pad size larger than data payload"}
 	}
 	f.data = payload[:len(payload)-int(padSize)]
@@ -695,7 +704,7 @@ type SettingsFrame struct {
 	p []byte
 }
 
-func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseSettingsFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 {
 		// When this (ACK 0x1) bit is set, the payload of the
 		// SETTINGS frame MUST be empty. Receipt of a
@@ -703,6 +712,7 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error)
 		// field value other than 0 MUST be treated as a
 		// connection error (Section 5.4.1) of type
 		// FRAME_SIZE_ERROR.
+		countError("frame_settings_ack_with_length")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	if fh.StreamID != 0 {
@@ -713,14 +723,17 @@ func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error)
 		// field is anything other than 0x0, the endpoint MUST
 		// respond with a connection error (Section 5.4.1) of
 		// type PROTOCOL_ERROR.
+		countError("frame_settings_has_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	if len(p)%6 != 0 {
+		countError("frame_settings_mod_6")
 		// Expecting even number of 6 byte settings.
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	f := &SettingsFrame{FrameHeader: fh, p: p}
 	if v, ok := f.Value(SettingInitialWindowSize); ok && v > (1<<31)-1 {
+		countError("frame_settings_window_size_too_big")
 		// Values above the maximum flow control window size of 2^31 - 1 MUST
 		// be treated as a connection error (Section 5.4.1) of type
 		// FLOW_CONTROL_ERROR.
@@ -832,11 +845,13 @@ type PingFrame struct {
 
 func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) }
 
-func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePingFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 	if len(payload) != 8 {
+		countError("frame_ping_length")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	if fh.StreamID != 0 {
+		countError("frame_ping_has_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	f := &PingFrame{FrameHeader: fh}
@@ -872,11 +887,13 @@ func (f *GoAwayFrame) DebugData() []byte {
 	return f.debugData
 }
 
-func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseGoAwayFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if fh.StreamID != 0 {
+		countError("frame_goaway_has_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	if len(p) < 8 {
+		countError("frame_goaway_short")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	return &GoAwayFrame{
@@ -912,7 +929,7 @@ func (f *UnknownFrame) Payload() []byte {
 	return f.p
 }
 
-func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseUnknownFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	return &UnknownFrame{fh, p}, nil
 }
 
@@ -923,8 +940,9 @@ type WindowUpdateFrame struct {
 	Increment uint32 // never read with high bit set
 }
 
-func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if len(p) != 4 {
+		countError("frame_windowupdate_bad_len")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	inc := binary.BigEndian.Uint32(p[:4]) & 0x7fffffff // mask off high reserved bit
@@ -936,8 +954,10 @@ func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, err
 		// control window MUST be treated as a connection
 		// error (Section 5.4.1).
 		if fh.StreamID == 0 {
+			countError("frame_windowupdate_zero_inc_conn")
 			return nil, ConnectionError(ErrCodeProtocol)
 		}
+		countError("frame_windowupdate_zero_inc_stream")
 		return nil, streamError(fh.StreamID, ErrCodeProtocol)
 	}
 	return &WindowUpdateFrame{
@@ -988,7 +1008,7 @@ func (f *HeadersFrame) HasPriority() bool {
 	return f.FrameHeader.Flags.Has(FlagHeadersPriority)
 }
 
-func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parseHeadersFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 	hf := &HeadersFrame{
 		FrameHeader: fh,
 	}
@@ -997,11 +1017,13 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er
 		// is received whose stream identifier field is 0x0, the recipient MUST
 		// respond with a connection error (Section 5.4.1) of type
 		// PROTOCOL_ERROR.
+		countError("frame_headers_zero_stream")
 		return nil, connError{ErrCodeProtocol, "HEADERS frame with stream ID 0"}
 	}
 	var padLength uint8
 	if fh.Flags.Has(FlagHeadersPadded) {
 		if p, padLength, err = readByte(p); err != nil {
+			countError("frame_headers_pad_short")
 			return
 		}
 	}
@@ -1009,16 +1031,19 @@ func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err er
 		var v uint32
 		p, v, err = readUint32(p)
 		if err != nil {
+			countError("frame_headers_prio_short")
 			return nil, err
 		}
 		hf.Priority.StreamDep = v & 0x7fffffff
 		hf.Priority.Exclusive = (v != hf.Priority.StreamDep) // high bit was set
 		p, hf.Priority.Weight, err = readByte(p)
 		if err != nil {
+			countError("frame_headers_prio_weight_short")
 			return nil, err
 		}
 	}
-	if len(p)-int(padLength) <= 0 {
+	if len(p)-int(padLength) < 0 {
+		countError("frame_headers_pad_too_big")
 		return nil, streamError(fh.StreamID, ErrCodeProtocol)
 	}
 	hf.headerFragBuf = p[:len(p)-int(padLength)]
@@ -1125,11 +1150,13 @@ func (p PriorityParam) IsZero() bool {
 	return p == PriorityParam{}
 }
 
-func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) {
+func parsePriorityFrame(_ *frameCache, fh FrameHeader, countError func(string), payload []byte) (Frame, error) {
 	if fh.StreamID == 0 {
+		countError("frame_priority_zero_stream")
 		return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"}
 	}
 	if len(payload) != 5 {
+		countError("frame_priority_bad_length")
 		return nil, connError{ErrCodeFrameSize, fmt.Sprintf("PRIORITY frame payload size was %d; want 5", len(payload))}
 	}
 	v := binary.BigEndian.Uint32(payload[:4])
@@ -1172,11 +1199,13 @@ type RSTStreamFrame struct {
 	ErrCode ErrCode
 }
 
-func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if len(p) != 4 {
+		countError("frame_rststream_bad_len")
 		return nil, ConnectionError(ErrCodeFrameSize)
 	}
 	if fh.StreamID == 0 {
+		countError("frame_rststream_zero_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	return &RSTStreamFrame{fh, ErrCode(binary.BigEndian.Uint32(p[:4]))}, nil
@@ -1202,8 +1231,9 @@ type ContinuationFrame struct {
 	headerFragBuf []byte
 }
 
-func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) {
+func parseContinuationFrame(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (Frame, error) {
 	if fh.StreamID == 0 {
+		countError("frame_continuation_zero_stream")
 		return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"}
 	}
 	return &ContinuationFrame{fh, p}, nil
@@ -1252,7 +1282,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool {
 	return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders)
 }
 
-func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) {
+func parsePushPromise(_ *frameCache, fh FrameHeader, countError func(string), p []byte) (_ Frame, err error) {
 	pp := &PushPromiseFrame{
 		FrameHeader: fh,
 	}
@@ -1263,6 +1293,7 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err
 		// with. If the stream identifier field specifies the value
 		// 0x0, a recipient MUST respond with a connection error
 		// (Section 5.4.1) of type PROTOCOL_ERROR.
+		countError("frame_pushpromise_zero_stream")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	// The PUSH_PROMISE frame includes optional padding.
@@ -1270,18 +1301,21 @@ func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err err
 	var padLength uint8
 	if fh.Flags.Has(FlagPushPromisePadded) {
 		if p, padLength, err = readByte(p); err != nil {
+			countError("frame_pushpromise_pad_short")
 			return
 		}
 	}
 
 	p, pp.PromiseID, err = readUint32(p)
 	if err != nil {
+		countError("frame_pushpromise_promiseid_short")
 		return
 	}
 	pp.PromiseID = pp.PromiseID & (1<<31 - 1)
 
 	if int(padLength) > len(p) {
 		// like the DATA frame, error out if padding is longer than the body.
+		countError("frame_pushpromise_pad_too_big")
 		return nil, ConnectionError(ErrCodeProtocol)
 	}
 	pp.headerFragBuf = p[:len(p)-int(padLength)]
16 changes: vendor/golang.org/x/net/http2/hpack/huffman.go (generated, vendored)
@@ -140,12 +140,12 @@ func buildRootHuffmanNode() {
 		panic("unexpected size")
 	}
 	lazyRootHuffmanNode = newInternalNode()
-	for i, code := range huffmanCodes {
-		addDecoderNode(byte(i), code, huffmanCodeLen[i])
-	}
-}
+	// allocate a leaf node for each of the 256 symbols
+	leaves := new([256]node)
+
+	for sym, code := range huffmanCodes {
+		codeLen := huffmanCodeLen[sym]
 
-func addDecoderNode(sym byte, code uint32, codeLen uint8) {
 	cur := lazyRootHuffmanNode
 	for codeLen > 8 {
 		codeLen -= 8
@@ -157,8 +157,12 @@ func addDecoderNode(sym byte, code uint32, codeLen uint8) {
 	}
 	shift := 8 - codeLen
 	start, end := int(uint8(code<<shift)), int(1<<shift)
+
+	leaves[sym].sym = byte(sym)
+	leaves[sym].codeLen = codeLen
 	for i := start; i < start+end; i++ {
-		cur.children[i] = &node{sym: sym, codeLen: codeLen}
+		cur.children[i] = &leaves[sym]
+	}
 	}
 }
 
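The decoder-table change above is purely an allocation optimization (one `[256]node` array instead of 256 individually allocated leaf nodes); the public Huffman helpers behave the same. A hedged round-trip check, assuming the exported `AppendHuffmanString`/`HuffmanDecodeToString` helpers:

```go
package main

import (
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	// Encode then decode a header value; the rebuilt decoder table must
	// reproduce the original string.
	enc := hpack.AppendHuffmanString(nil, "www.example.com")
	dec, err := hpack.HuffmanDecodeToString(enc)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%d bytes encoded, decoded back to %q\n", len(enc), dec)
}
```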
11 changes: vendor/golang.org/x/net/http2/pipe.go (generated, vendored)
@@ -30,6 +30,17 @@ type pipeBuffer interface {
 	io.Reader
 }
 
+// setBuffer initializes the pipe buffer.
+// It has no effect if the pipe is already closed.
+func (p *pipe) setBuffer(b pipeBuffer) {
+	p.mu.Lock()
+	defer p.mu.Unlock()
+	if p.err != nil || p.breakErr != nil {
+		return
+	}
+	p.b = b
+}
+
 func (p *pipe) Len() int {
 	p.mu.Lock()
 	defer p.mu.Unlock()
109 changes: vendor/golang.org/x/net/http2/server.go (generated, vendored)
@@ -130,6 +130,12 @@ type Server struct {
 	// If nil, a default scheduler is chosen.
 	NewWriteScheduler func() WriteScheduler
 
+	// CountError, if non-nil, is called on HTTP/2 server errors.
+	// It's intended to increment a metric for monitoring, such
+	// as an expvar or Prometheus metric.
+	// The errType consists of only ASCII word characters.
+	CountError func(errType string)
+
 	// Internal state. This is a pointer (rather than embedded directly)
 	// so that we don't embed a Mutex in this struct, which will make the
 	// struct non-copyable, which might break some callers.
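The new `Server.CountError` hook is the entry point for all of the `sc.countError(...)` call sites further down in this file. A hedged wiring sketch (assumes `http2.ConfigureServer`; the certificate paths and the log-based "metric" sink are placeholders for illustration only):

```go
package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("ok"))
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}
	h2 := &http2.Server{
		// errType is an ASCII token such as "frame_ping_length" or "bad_flow";
		// a real deployment would bump a counter metric keyed by it.
		CountError: func(errType string) { log.Printf("http2 error: %s", errType) },
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	// cert.pem/key.pem are hypothetical paths for this sketch.
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}
```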
@@ -405,6 +411,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) {
 	sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf)
 
 	fr := NewFramer(sc.bw, c)
+	if s.CountError != nil {
+		fr.countError = s.CountError
+	}
 	fr.ReadMetaHeaders = hpack.NewDecoder(initialHeaderTableSize, nil)
 	fr.MaxHeaderListSize = sc.maxHeaderListSize()
 	fr.SetMaxReadFrameSize(s.maxReadFrameSize())
@@ -710,7 +719,15 @@ func (sc *serverConn) canonicalHeader(v string) string {
 		sc.canonHeader = make(map[string]string)
 	}
 	cv = http.CanonicalHeaderKey(v)
+	// maxCachedCanonicalHeaders is an arbitrarily-chosen limit on the number of
+	// entries in the canonHeader cache. This should be larger than the number
+	// of unique, uncommon header keys likely to be sent by the peer, while not
+	// so high as to permit unreaasonable memory usage if the peer sends an unbounded
+	// number of unique header keys.
+	const maxCachedCanonicalHeaders = 32
+	if len(sc.canonHeader) < maxCachedCanonicalHeaders {
 	sc.canonHeader[v] = cv
+	}
 	return cv
 }
 
@@ -1399,7 +1416,7 @@ func (sc *serverConn) processFrame(f Frame) error {
 	// First frame received must be SETTINGS.
 	if !sc.sawFirstSettings {
 		if _, ok := f.(*SettingsFrame); !ok {
-			return ConnectionError(ErrCodeProtocol)
+			return sc.countError("first_settings", ConnectionError(ErrCodeProtocol))
 		}
 		sc.sawFirstSettings = true
 	}
@@ -1424,7 +1441,7 @@ func (sc *serverConn) processFrame(f Frame) error {
 	case *PushPromiseFrame:
 		// A client cannot push. Thus, servers MUST treat the receipt of a PUSH_PROMISE
 		// frame as a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("push_promise", ConnectionError(ErrCodeProtocol))
 	default:
 		sc.vlogf("http2: server ignoring frame: %v", f.Header())
 		return nil
@@ -1444,7 +1461,7 @@ func (sc *serverConn) processPing(f *PingFrame) error {
 		// identifier field value other than 0x0, the recipient MUST
 		// respond with a connection error (Section 5.4.1) of type
 		// PROTOCOL_ERROR."
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("ping_on_stream", ConnectionError(ErrCodeProtocol))
 	}
 	if sc.inGoAway && sc.goAwayCode != ErrCodeNo {
 		return nil
@@ -1463,7 +1480,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 			// or PRIORITY on a stream in this state MUST be
 			// treated as a connection error (Section 5.4.1) of
 			// type PROTOCOL_ERROR."
-			return ConnectionError(ErrCodeProtocol)
+			return sc.countError("stream_idle", ConnectionError(ErrCodeProtocol))
 		}
 		if st == nil {
 			// "WINDOW_UPDATE can be sent by a peer that has sent a
@@ -1474,7 +1491,7 @@ func (sc *serverConn) processWindowUpdate(f *WindowUpdateFrame) error {
 			return nil
 		}
 		if !st.flow.add(int32(f.Increment)) {
-			return streamError(f.StreamID, ErrCodeFlowControl)
+			return sc.countError("bad_flow", streamError(f.StreamID, ErrCodeFlowControl))
 		}
 	default: // connection-level flow control
 		if !sc.flow.add(int32(f.Increment)) {
@@ -1495,7 +1512,7 @@ func (sc *serverConn) processResetStream(f *RSTStreamFrame) error {
 		// identifying an idle stream is received, the
 		// recipient MUST treat this as a connection error
 		// (Section 5.4.1) of type PROTOCOL_ERROR.
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("reset_idle_stream", ConnectionError(ErrCodeProtocol))
 	}
 	if st != nil {
 		st.cancelCtx()
@@ -1547,7 +1564,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 			// Why is the peer ACKing settings we never sent?
 			// The spec doesn't mention this case, but
 			// hang up on them anyway.
-			return ConnectionError(ErrCodeProtocol)
+			return sc.countError("ack_mystery", ConnectionError(ErrCodeProtocol))
 		}
 		return nil
 	}
@@ -1555,7 +1572,7 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error {
 		// This isn't actually in the spec, but hang up on
 		// suspiciously large settings frames or those with
 		// duplicate entries.
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("settings_big_or_dups", ConnectionError(ErrCodeProtocol))
 	}
 	if err := f.ForeachSetting(sc.processSetting); err != nil {
 		return err
@@ -1622,7 +1639,7 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error {
 			// control window to exceed the maximum size as a
 			// connection error (Section 5.4.1) of type
 			// FLOW_CONTROL_ERROR."
-			return ConnectionError(ErrCodeFlowControl)
+			return sc.countError("setting_win_size", ConnectionError(ErrCodeFlowControl))
 		}
 	}
 	return nil
@@ -1655,7 +1672,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 		// or PRIORITY on a stream in this state MUST be
 		// treated as a connection error (Section 5.4.1) of
 		// type PROTOCOL_ERROR."
-		return ConnectionError(ErrCodeProtocol)
+		return sc.countError("data_on_idle", ConnectionError(ErrCodeProtocol))
 	}
 
 	// "If a DATA frame is received whose stream is not in "open"
@@ -1672,7 +1689,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
 	// and return any flow control bytes since we're not going
 	// to consume them.
|
||||||
if sc.inflow.available() < int32(f.Length) {
|
if sc.inflow.available() < int32(f.Length) {
|
||||||
return streamError(id, ErrCodeFlowControl)
|
return sc.countError("data_flow", streamError(id, ErrCodeFlowControl))
|
||||||
}
|
}
|
||||||
// Deduct the flow control from inflow, since we're
|
// Deduct the flow control from inflow, since we're
|
||||||
// going to immediately add it back in
|
// going to immediately add it back in
|
||||||
@ -1685,7 +1702,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
|||||||
// Already have a stream error in flight. Don't send another.
|
// Already have a stream error in flight. Don't send another.
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return streamError(id, ErrCodeStreamClosed)
|
return sc.countError("closed", streamError(id, ErrCodeStreamClosed))
|
||||||
}
|
}
|
||||||
if st.body == nil {
|
if st.body == nil {
|
||||||
panic("internal error: should have a body in this state")
|
panic("internal error: should have a body in this state")
|
||||||
@ -1697,12 +1714,12 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
|||||||
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
|
// RFC 7540, sec 8.1.2.6: A request or response is also malformed if the
|
||||||
// value of a content-length header field does not equal the sum of the
|
// value of a content-length header field does not equal the sum of the
|
||||||
// DATA frame payload lengths that form the body.
|
// DATA frame payload lengths that form the body.
|
||||||
return streamError(id, ErrCodeProtocol)
|
return sc.countError("send_too_much", streamError(id, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
if f.Length > 0 {
|
if f.Length > 0 {
|
||||||
// Check whether the client has flow control quota.
|
// Check whether the client has flow control quota.
|
||||||
if st.inflow.available() < int32(f.Length) {
|
if st.inflow.available() < int32(f.Length) {
|
||||||
return streamError(id, ErrCodeFlowControl)
|
return sc.countError("flow_on_data_length", streamError(id, ErrCodeFlowControl))
|
||||||
}
|
}
|
||||||
st.inflow.take(int32(f.Length))
|
st.inflow.take(int32(f.Length))
|
||||||
|
|
||||||
@ -1710,7 +1727,7 @@ func (sc *serverConn) processData(f *DataFrame) error {
|
|||||||
wrote, err := st.body.Write(data)
|
wrote, err := st.body.Write(data)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
|
sc.sendWindowUpdate(nil, int(f.Length)-wrote)
|
||||||
return streamError(id, ErrCodeStreamClosed)
|
return sc.countError("body_write_err", streamError(id, ErrCodeStreamClosed))
|
||||||
}
|
}
|
||||||
if wrote != len(data) {
|
if wrote != len(data) {
|
||||||
panic("internal error: bad Writer")
|
panic("internal error: bad Writer")
|
||||||
@ -1796,7 +1813,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||||||
// stream identifier MUST respond with a connection error
|
// stream identifier MUST respond with a connection error
|
||||||
// (Section 5.4.1) of type PROTOCOL_ERROR.
|
// (Section 5.4.1) of type PROTOCOL_ERROR.
|
||||||
if id%2 != 1 {
|
if id%2 != 1 {
|
||||||
return ConnectionError(ErrCodeProtocol)
|
return sc.countError("headers_even", ConnectionError(ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
// A HEADERS frame can be used to create a new stream or
|
// A HEADERS frame can be used to create a new stream or
|
||||||
// send a trailer for an open one. If we already have a stream
|
// send a trailer for an open one. If we already have a stream
|
||||||
@ -1813,7 +1830,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||||||
// this state, it MUST respond with a stream error (Section 5.4.2) of
|
// this state, it MUST respond with a stream error (Section 5.4.2) of
|
||||||
// type STREAM_CLOSED.
|
// type STREAM_CLOSED.
|
||||||
if st.state == stateHalfClosedRemote {
|
if st.state == stateHalfClosedRemote {
|
||||||
return streamError(id, ErrCodeStreamClosed)
|
return sc.countError("headers_half_closed", streamError(id, ErrCodeStreamClosed))
|
||||||
}
|
}
|
||||||
return st.processTrailerHeaders(f)
|
return st.processTrailerHeaders(f)
|
||||||
}
|
}
|
||||||
@ -1824,7 +1841,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||||||
// receives an unexpected stream identifier MUST respond with
|
// receives an unexpected stream identifier MUST respond with
|
||||||
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
|
// a connection error (Section 5.4.1) of type PROTOCOL_ERROR.
|
||||||
if id <= sc.maxClientStreamID {
|
if id <= sc.maxClientStreamID {
|
||||||
return ConnectionError(ErrCodeProtocol)
|
return sc.countError("stream_went_down", ConnectionError(ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
sc.maxClientStreamID = id
|
sc.maxClientStreamID = id
|
||||||
|
|
||||||
@ -1841,14 +1858,14 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||||||
if sc.curClientStreams+1 > sc.advMaxStreams {
|
if sc.curClientStreams+1 > sc.advMaxStreams {
|
||||||
if sc.unackedSettings == 0 {
|
if sc.unackedSettings == 0 {
|
||||||
// They should know better.
|
// They should know better.
|
||||||
return streamError(id, ErrCodeProtocol)
|
return sc.countError("over_max_streams", streamError(id, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
// Assume it's a network race, where they just haven't
|
// Assume it's a network race, where they just haven't
|
||||||
// received our last SETTINGS update. But actually
|
// received our last SETTINGS update. But actually
|
||||||
// this can't happen yet, because we don't yet provide
|
// this can't happen yet, because we don't yet provide
|
||||||
// a way for users to adjust server parameters at
|
// a way for users to adjust server parameters at
|
||||||
// runtime.
|
// runtime.
|
||||||
return streamError(id, ErrCodeRefusedStream)
|
return sc.countError("over_max_streams_race", streamError(id, ErrCodeRefusedStream))
|
||||||
}
|
}
|
||||||
|
|
||||||
initialState := stateOpen
|
initialState := stateOpen
|
||||||
@ -1858,7 +1875,7 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error {
|
|||||||
st := sc.newStream(id, 0, initialState)
|
st := sc.newStream(id, 0, initialState)
|
||||||
|
|
||||||
if f.HasPriority() {
|
if f.HasPriority() {
|
||||||
if err := checkPriority(f.StreamID, f.Priority); err != nil {
|
if err := sc.checkPriority(f.StreamID, f.Priority); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sc.writeSched.AdjustStream(st.id, f.Priority)
|
sc.writeSched.AdjustStream(st.id, f.Priority)
|
||||||
@ -1902,15 +1919,15 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
|
|||||||
sc := st.sc
|
sc := st.sc
|
||||||
sc.serveG.check()
|
sc.serveG.check()
|
||||||
if st.gotTrailerHeader {
|
if st.gotTrailerHeader {
|
||||||
return ConnectionError(ErrCodeProtocol)
|
return sc.countError("dup_trailers", ConnectionError(ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
st.gotTrailerHeader = true
|
st.gotTrailerHeader = true
|
||||||
if !f.StreamEnded() {
|
if !f.StreamEnded() {
|
||||||
return streamError(st.id, ErrCodeProtocol)
|
return sc.countError("trailers_not_ended", streamError(st.id, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
|
|
||||||
if len(f.PseudoFields()) > 0 {
|
if len(f.PseudoFields()) > 0 {
|
||||||
return streamError(st.id, ErrCodeProtocol)
|
return sc.countError("trailers_pseudo", streamError(st.id, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
if st.trailer != nil {
|
if st.trailer != nil {
|
||||||
for _, hf := range f.RegularFields() {
|
for _, hf := range f.RegularFields() {
|
||||||
@ -1919,7 +1936,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
|
|||||||
// TODO: send more details to the peer somehow. But http2 has
|
// TODO: send more details to the peer somehow. But http2 has
|
||||||
// no way to send debug data at a stream level. Discuss with
|
// no way to send debug data at a stream level. Discuss with
|
||||||
// HTTP folk.
|
// HTTP folk.
|
||||||
return streamError(st.id, ErrCodeProtocol)
|
return sc.countError("trailers_bogus", streamError(st.id, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
st.trailer[key] = append(st.trailer[key], hf.Value)
|
st.trailer[key] = append(st.trailer[key], hf.Value)
|
||||||
}
|
}
|
||||||
@ -1928,13 +1945,13 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkPriority(streamID uint32, p PriorityParam) error {
|
func (sc *serverConn) checkPriority(streamID uint32, p PriorityParam) error {
|
||||||
if streamID == p.StreamDep {
|
if streamID == p.StreamDep {
|
||||||
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
|
// Section 5.3.1: "A stream cannot depend on itself. An endpoint MUST treat
|
||||||
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
|
// this as a stream error (Section 5.4.2) of type PROTOCOL_ERROR."
|
||||||
// Section 5.3.3 says that a stream can depend on one of its dependencies,
|
// Section 5.3.3 says that a stream can depend on one of its dependencies,
|
||||||
// so it's only self-dependencies that are forbidden.
|
// so it's only self-dependencies that are forbidden.
|
||||||
return streamError(streamID, ErrCodeProtocol)
|
return sc.countError("priority", streamError(streamID, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -1943,7 +1960,7 @@ func (sc *serverConn) processPriority(f *PriorityFrame) error {
|
|||||||
if sc.inGoAway {
|
if sc.inGoAway {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if err := checkPriority(f.StreamID, f.PriorityParam); err != nil {
|
if err := sc.checkPriority(f.StreamID, f.PriorityParam); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
|
sc.writeSched.AdjustStream(f.StreamID, f.PriorityParam)
|
||||||
@ -2000,7 +2017,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
|
|||||||
isConnect := rp.method == "CONNECT"
|
isConnect := rp.method == "CONNECT"
|
||||||
if isConnect {
|
if isConnect {
|
||||||
if rp.path != "" || rp.scheme != "" || rp.authority == "" {
|
if rp.path != "" || rp.scheme != "" || rp.authority == "" {
|
||||||
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
|
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
|
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
|
||||||
// See 8.1.2.6 Malformed Requests and Responses:
|
// See 8.1.2.6 Malformed Requests and Responses:
|
||||||
@ -2013,13 +2030,13 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
|
|||||||
// "All HTTP/2 requests MUST include exactly one valid
|
// "All HTTP/2 requests MUST include exactly one valid
|
||||||
// value for the :method, :scheme, and :path
|
// value for the :method, :scheme, and :path
|
||||||
// pseudo-header fields"
|
// pseudo-header fields"
|
||||||
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
|
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
|
|
||||||
bodyOpen := !f.StreamEnded()
|
bodyOpen := !f.StreamEnded()
|
||||||
if rp.method == "HEAD" && bodyOpen {
|
if rp.method == "HEAD" && bodyOpen {
|
||||||
// HEAD requests can't have bodies
|
// HEAD requests can't have bodies
|
||||||
return nil, nil, streamError(f.StreamID, ErrCodeProtocol)
|
return nil, nil, sc.countError("head_body", streamError(f.StreamID, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
|
|
||||||
rp.header = make(http.Header)
|
rp.header = make(http.Header)
|
||||||
@ -2102,7 +2119,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r
|
|||||||
var err error
|
var err error
|
||||||
url_, err = url.ParseRequestURI(rp.path)
|
url_, err = url.ParseRequestURI(rp.path)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, nil, streamError(st.id, ErrCodeProtocol)
|
return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
|
||||||
}
|
}
|
||||||
requestURI = rp.path
|
requestURI = rp.path
|
||||||
}
|
}
|
||||||
@ -2985,3 +3002,31 @@ func h1ServerKeepAlivesDisabled(hs *http.Server) bool {
|
|||||||
}
|
}
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (sc *serverConn) countError(name string, err error) error {
|
||||||
|
if sc == nil || sc.srv == nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
f := sc.srv.CountError
|
||||||
|
if f == nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
var typ string
|
||||||
|
var code ErrCode
|
||||||
|
switch e := err.(type) {
|
||||||
|
case ConnectionError:
|
||||||
|
typ = "conn"
|
||||||
|
code = ErrCode(e)
|
||||||
|
case StreamError:
|
||||||
|
typ = "stream"
|
||||||
|
code = ErrCode(e.Code)
|
||||||
|
default:
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
codeStr := errCodeName[code]
|
||||||
|
if codeStr == "" {
|
||||||
|
codeStr = strconv.Itoa(int(code))
|
||||||
|
}
|
||||||
|
f(fmt.Sprintf("%s_%s_%s", typ, codeStr, name))
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
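
The countError helper added above only reports anything when the http2 Server has a CountError callback configured; otherwise it just passes the error through. A minimal sketch of wiring that callback up (only the Server.CountError field itself appears in the diff above; the logging sink and TLS file names here are illustrative):

    package main

    import (
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        srv := &http.Server{Addr: ":8443", Handler: http.DefaultServeMux}
        h2 := &http2.Server{
            // countError builds labels like "conn_PROTOCOL_ERROR_first_settings"
            // from the error type, the error code name, and the call-site name.
            CountError: func(errType string) { log.Printf("http2 error: %s", errType) },
        }
        if err := http2.ConfigureServer(srv, h2); err != nil {
            log.Fatal(err)
        }
        log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
    }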
1396 vendor/golang.org/x/net/http2/transport.go generated vendored
(file diff suppressed because it is too large)
4 vendor/golang.org/x/net/http2/writesched.go generated vendored
@@ -32,7 +32,8 @@ type WriteScheduler interface {
 
     // Pop dequeues the next frame to write. Returns false if no frames can
     // be written. Frames with a given wr.StreamID() are Pop'd in the same
-    // order they are Push'd. No frames should be discarded except by CloseStream.
+    // order they are Push'd, except RST_STREAM frames. No frames should be
+    // discarded except by CloseStream.
     Pop() (wr FrameWriteRequest, ok bool)
 }
 
@@ -52,6 +53,7 @@ type FrameWriteRequest struct {
 
     // stream is the stream on which this frame will be written.
     // nil for non-stream frames like PING and SETTINGS.
+    // nil for RST_STREAM streams, which use the StreamError.StreamID field instead.
    stream *stream
 
     // done, if non-nil, must be a buffered channel with space for
6 vendor/golang.org/x/net/http2/writesched_random.go generated vendored
@@ -45,11 +45,11 @@ func (ws *randomWriteScheduler) AdjustStream(streamID uint32, priority PriorityP
 }
 
 func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
-    id := wr.StreamID()
-    if id == 0 {
+    if wr.isControl() {
         ws.zero.push(wr)
         return
     }
+    id := wr.StreamID()
     q, ok := ws.sq[id]
     if !ok {
         q = ws.queuePool.get()
@@ -59,7 +59,7 @@ func (ws *randomWriteScheduler) Push(wr FrameWriteRequest) {
 }
 
 func (ws *randomWriteScheduler) Pop() (FrameWriteRequest, bool) {
-    // Control frames first.
+    // Control and RST_STREAM frames first.
     if !ws.zero.empty() {
         return ws.zero.shift(), true
     }
14 vendor/golang.org/x/net/idna/go118.go generated vendored Normal file
@@ -0,0 +1,14 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.18
+// +build go1.18
+
+package idna
+
+// Transitional processing is disabled by default in Go 1.18.
+// https://golang.org/issue/47510
+const transitionalLookup = false
6 vendor/golang.org/x/net/idna/idna10.0.0.go generated vendored
@@ -59,10 +59,10 @@ type Option func(*options)
 // Transitional sets a Profile to use the Transitional mapping as defined in UTS
 // #46. This will cause, for example, "ß" to be mapped to "ss". Using the
 // transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
 // option is only meaningful if combined with MapForLookup.
 func Transitional(transitional bool) Option {
-    return func(o *options) { o.transitional = true }
+    return func(o *options) { o.transitional = transitional }
 }
 
 // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
@@ -284,7 +284,7 @@ var (
 
     punycode = &Profile{}
     lookup   = &Profile{options{
-        transitional: true,
+        transitional: transitionalLookup,
         useSTD3Rules: true,
         checkHyphens: true,
         checkJoiners: true,
4 vendor/golang.org/x/net/idna/idna9.0.0.go generated vendored
@@ -58,10 +58,10 @@ type Option func(*options)
 // Transitional sets a Profile to use the Transitional mapping as defined in UTS
 // #46. This will cause, for example, "ß" to be mapped to "ss". Using the
 // transitional mapping provides a compromise between IDNA2003 and IDNA2008
-// compatibility. It is used by most browsers when resolving domain names. This
+// compatibility. It is used by some browsers when resolving domain names. This
 // option is only meaningful if combined with MapForLookup.
 func Transitional(transitional bool) Option {
-    return func(o *options) { o.transitional = true }
+    return func(o *options) { o.transitional = transitional }
 }
 
 // VerifyDNSLength sets whether a Profile should fail if any of the IDN parts
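
With the fix above, Transitional(false) really disables transitional mapping instead of silently forcing it on. A small hedged sketch of the public API (the expected outputs in the comments assume the post-fix semantics):

    package main

    import (
        "fmt"

        "golang.org/x/net/idna"
    )

    func main() {
        trans := idna.New(idna.MapForLookup(), idna.Transitional(true))
        strict := idna.New(idna.MapForLookup(), idna.Transitional(false))

        a, _ := trans.ToASCII("faß.de")  // transitional: "ß" -> "ss", giving "fass.de"
        b, _ := strict.ToASCII("faß.de") // non-transitional: punycode form "xn--fa-hia.de"
        fmt.Println(a, b)
    }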
12 vendor/golang.org/x/net/idna/pre_go118.go generated vendored Normal file
@@ -0,0 +1,12 @@
+// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT.
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.18
+// +build !go1.18
+
+package idna
+
+const transitionalLookup = true
36 vendor/golang.org/x/net/idna/punycode.go generated vendored
@@ -49,6 +49,7 @@ func decode(encoded string) (string, error) {
         }
     }
     i, n, bias := int32(0), initialN, initialBias
+    overflow := false
     for pos < len(encoded) {
         oldI, w := i, int32(1)
         for k := base; ; k += base {
@@ -60,29 +61,32 @@ func decode(encoded string) (string, error) {
                 return "", punyError(encoded)
             }
             pos++
-            i += digit * w
-            if i < 0 {
+            i, overflow = madd(i, digit, w)
+            if overflow {
                 return "", punyError(encoded)
             }
             t := k - bias
-            if t < tmin {
+            if k <= bias {
                 t = tmin
-            } else if t > tmax {
+            } else if k >= bias+tmax {
                 t = tmax
             }
             if digit < t {
                 break
             }
-            w *= base - t
-            if w >= math.MaxInt32/base {
+            w, overflow = madd(0, w, base-t)
+            if overflow {
                 return "", punyError(encoded)
             }
         }
+        if len(output) >= 1024 {
+            return "", punyError(encoded)
+        }
         x := int32(len(output) + 1)
         bias = adapt(i-oldI, x, oldI == 0)
         n += i / x
         i %= x
-        if n > utf8.MaxRune || len(output) >= 1024 {
+        if n < 0 || n > utf8.MaxRune {
             return "", punyError(encoded)
         }
         output = append(output, 0)
@@ -115,6 +119,7 @@ func encode(prefix, s string) (string, error) {
     if b > 0 {
         output = append(output, '-')
     }
+    overflow := false
     for remaining != 0 {
         m := int32(0x7fffffff)
         for _, r := range s {
@@ -122,8 +127,8 @@ func encode(prefix, s string) (string, error) {
                 m = r
             }
         }
-        delta += (m - n) * (h + 1)
-        if delta < 0 {
+        delta, overflow = madd(delta, m-n, h+1)
+        if overflow {
             return "", punyError(s)
         }
         n = m
@@ -141,9 +146,9 @@ func encode(prefix, s string) (string, error) {
             q := delta
             for k := base; ; k += base {
                 t := k - bias
-                if t < tmin {
+                if k <= bias {
                     t = tmin
-                } else if t > tmax {
+                } else if k >= bias+tmax {
                     t = tmax
                 }
                 if q < t {
@@ -164,6 +169,15 @@ func encode(prefix, s string) (string, error) {
     return string(output), nil
 }
 
+// madd computes a + (b * c), detecting overflow.
+func madd(a, b, c int32) (next int32, overflow bool) {
+    p := int64(b) * int64(c)
+    if p > math.MaxInt32-int64(a) {
+        return 0, true
+    }
+    return a + int32(p), false
+}
+
 func decodeDigit(x byte) (digit int32, ok bool) {
     switch {
     case '0' <= x && x <= '9':
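
The new madd helper replaces the two ad-hoc overflow checks in decode and encode with a single widening multiply-add. A standalone sketch of the same idea, doing the int32 overflow check in int64:

    package main

    import (
        "fmt"
        "math"
    )

    // madd returns a + b*c and reports overflow instead of wrapping,
    // mirroring the helper added to punycode.go above.
    func madd(a, b, c int32) (next int32, overflow bool) {
        p := int64(b) * int64(c)
        if p > math.MaxInt32-int64(a) {
            return 0, true
        }
        return a + int32(p), false
    }

    func main() {
        fmt.Println(madd(10, 3, 4))            // 22 false
        fmt.Println(madd(math.MaxInt32, 1, 1)) // 0 true
        fmt.Println(madd(0, math.MaxInt32, 2)) // 0 true
    }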
4 vendor/golang.org/x/sys/cpu/cpu_gc_x86.go generated vendored
@@ -15,7 +15,3 @@ func cpuid(eaxArg, ecxArg uint32) (eax, ebx, ecx, edx uint32)
 // xgetbv with ecx = 0 is implemented in cpu_x86.s for gc compiler
 // and in cpu_gccgo.c for gccgo.
 func xgetbv() (eax, edx uint32)
-
-// darwinSupportsAVX512 is implemented in cpu_x86.s for gc compiler
-// and in cpu_gccgo_x86.go for gccgo.
-func darwinSupportsAVX512() bool
7 vendor/golang.org/x/sys/cpu/cpu_x86.go generated vendored
@@ -90,9 +90,10 @@ func archInit() {
     osSupportsAVX = isSet(1, eax) && isSet(2, eax)
 
     if runtime.GOOS == "darwin" {
-        // Check darwin commpage for AVX512 support. Necessary because:
-        // https://github.com/apple/darwin-xnu/blob/0a798f6738bc1db01281fc08ae024145e84df927/osfmk/i386/fpu.c#L175-L201
-        osSupportsAVX512 = osSupportsAVX && darwinSupportsAVX512()
+        // Darwin doesn't save/restore AVX-512 mask registers correctly across signal handlers.
+        // Since users can't rely on mask register contents, let's not advertise AVX-512 support.
+        // See issue 49233.
+        osSupportsAVX512 = false
     } else {
         // Check if OPMASK and ZMM registers have OS support.
         osSupportsAVX512 = osSupportsAVX && isSet(5, eax) && isSet(6, eax) && isSet(7, eax)
24 vendor/golang.org/x/sys/cpu/cpu_x86.s generated vendored
@@ -26,27 +26,3 @@ TEXT ·xgetbv(SB),NOSPLIT,$0-8
     MOVL AX, eax+0(FP)
     MOVL DX, edx+4(FP)
     RET
-
-// func darwinSupportsAVX512() bool
-TEXT ·darwinSupportsAVX512(SB), NOSPLIT, $0-1
-    MOVB $0, ret+0(FP) // default to false
-#ifdef GOOS_darwin   // return if not darwin
-#ifdef GOARCH_amd64  // return if not amd64
-// These values from:
-// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h
-#define commpage64_base_address         0x00007fffffe00000
-#define commpage64_cpu_capabilities64   (commpage64_base_address+0x010)
-#define commpage64_version              (commpage64_base_address+0x01E)
-#define hasAVX512F                      0x0000004000000000
-    MOVQ $commpage64_version, BX
-    CMPW (BX), $13  // cpu_capabilities64 undefined in versions < 13
-    JL   no_avx512
-    MOVQ $commpage64_cpu_capabilities64, BX
-    MOVQ $hasAVX512F, CX
-    TESTQ (BX), CX
-    JZ   no_avx512
-    MOVB $1, ret+0(FP)
-no_avx512:
-#endif
-#endif
-    RET
1 vendor/golang.org/x/sys/plan9/pwd_go15_plan9.go generated vendored
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build go1.5
 // +build go1.5
 
 package plan9
1 vendor/golang.org/x/sys/plan9/pwd_plan9.go generated vendored
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build !go1.5
 // +build !go1.5
 
 package plan9
1 vendor/golang.org/x/sys/plan9/race.go generated vendored
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build plan9 && race
 // +build plan9,race
 
 package plan9
1 vendor/golang.org/x/sys/plan9/race0.go generated vendored
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build plan9 && !race
 // +build plan9,!race
 
 package plan9
1 vendor/golang.org/x/sys/plan9/str.go generated vendored
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build plan9
 // +build plan9
 
 package plan9
1 vendor/golang.org/x/sys/plan9/syscall.go generated vendored
@@ -2,6 +2,7 @@
 // Use of this source code is governed by a BSD-style
 // license that can be found in the LICENSE file.
 
+//go:build plan9
 // +build plan9
 
 // Package plan9 contains an interface to the low-level operating system
2 vendor/golang.org/x/sys/plan9/syscall_plan9.go generated vendored
@@ -132,8 +132,10 @@ func Pipe(p []int) (err error) {
     }
     var pp [2]int32
     err = pipe(&pp)
+    if err == nil {
         p[0] = int(pp[0])
         p[1] = int(pp[1])
+    }
     return
 }
 
1 vendor/golang.org/x/sys/plan9/zsyscall_plan9_386.go generated vendored
@@ -1,6 +1,7 @@
 // go run mksyscall.go -l32 -plan9 -tags plan9,386 syscall_plan9.go
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
+//go:build plan9 && 386
 // +build plan9,386
 
 package plan9
1 vendor/golang.org/x/sys/plan9/zsyscall_plan9_amd64.go generated vendored
@@ -1,6 +1,7 @@
 // go run mksyscall.go -l32 -plan9 -tags plan9,amd64 syscall_plan9.go
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
+//go:build plan9 && amd64
 // +build plan9,amd64
 
 package plan9
1 vendor/golang.org/x/sys/plan9/zsyscall_plan9_arm.go generated vendored
@@ -1,6 +1,7 @@
 // go run mksyscall.go -l32 -plan9 -tags plan9,arm syscall_plan9.go
 // Code generated by the command above; see README.md. DO NOT EDIT.
 
+//go:build plan9 && arm
 // +build plan9,arm
 
 package plan9
2 vendor/golang.org/x/sys/unix/README.md generated vendored
@@ -149,7 +149,7 @@ To add a constant, add the header that includes it to the appropriate variable.
 Then, edit the regex (if necessary) to match the desired constant. Avoid making
 the regex too broad to avoid matching unintended constants.
 
-### mkmerge.go
+### internal/mkmerge
 
 This program is used to extract duplicate const, func, and type declarations
 from the generated architecture-specific files listed below, and merge these
2 vendor/golang.org/x/sys/unix/mkall.sh generated vendored
@@ -50,7 +50,7 @@ if [[ "$GOOS" = "linux" ]]; then
     # Use the Docker-based build system
     # Files generated through docker (use $cmd so you can Ctl-C the build or run)
     $cmd docker build --tag generate:$GOOS $GOOS
-    $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")" && /bin/pwd):/build generate:$GOOS
+    $cmd docker run --interactive --tty --volume $(cd -- "$(dirname -- "$0")/.." && /bin/pwd):/build generate:$GOOS
     exit
 fi
 
5 vendor/golang.org/x/sys/unix/mkerrors.sh generated vendored
@@ -239,6 +239,7 @@ struct ltchars {
 #include <linux/magic.h>
 #include <linux/memfd.h>
 #include <linux/module.h>
+#include <linux/mount.h>
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netlink.h>
 #include <linux/net_namespace.h>
@@ -260,6 +261,7 @@ struct ltchars {
 #include <linux/vm_sockets.h>
 #include <linux/wait.h>
 #include <linux/watchdog.h>
+#include <linux/wireguard.h>
 
 #include <mtd/ubi-user.h>
 #include <mtd/mtd-user.h>
@@ -520,7 +522,7 @@ ccflags="$@"
         $2 ~ /^HW_MACHINE$/ ||
         $2 ~ /^SYSCTL_VERS/ ||
         $2 !~ "MNT_BITS" &&
-        $2 ~ /^(MS|MNT|UMOUNT)_/ ||
+        $2 ~ /^(MS|MNT|MOUNT|UMOUNT)_/ ||
         $2 ~ /^NS_GET_/ ||
         $2 ~ /^TUN(SET|GET|ATTACH|DETACH)/ ||
         $2 ~ /^(O|F|[ES]?FD|NAME|S|PTRACE|PT|TFD)_/ ||
@@ -605,6 +607,7 @@ ccflags="$@"
         $2 ~ /^MTD/ ||
         $2 ~ /^OTP/ ||
         $2 ~ /^MEM/ ||
+        $2 ~ /^WG/ ||
         $2 ~ /^BLK[A-Z]*(GET$|SET$|BUF$|PART$|SIZE)/ {printf("\t%s = C.%s\n", $2, $2)}
         $2 ~ /^__WCOREFLAG$/ {next}
         $2 ~ /^__W[A-Z0-9]+$/ {printf("\t%s = C.%s\n", substr($2,3), $2)}
8 vendor/golang.org/x/sys/unix/sockcmsg_linux.go generated vendored
@@ -67,9 +67,7 @@ func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) {
         sa := new(SockaddrInet4)
         p := (*[2]byte)(unsafe.Pointer(&pp.Port))
         sa.Port = int(p[0])<<8 + int(p[1])
-        for i := 0; i < len(sa.Addr); i++ {
-            sa.Addr[i] = pp.Addr[i]
-        }
+        sa.Addr = pp.Addr
         return sa, nil
 
     case m.Header.Level == SOL_IPV6 && m.Header.Type == IPV6_ORIGDSTADDR:
@@ -78,9 +76,7 @@ func ParseOrigDstAddr(m *SocketControlMessage) (Sockaddr, error) {
         p := (*[2]byte)(unsafe.Pointer(&pp.Port))
         sa.Port = int(p[0])<<8 + int(p[1])
         sa.ZoneId = pp.Scope_id
-        for i := 0; i < len(sa.Addr); i++ {
-            sa.Addr[i] = pp.Addr[i]
-        }
+        sa.Addr = pp.Addr
         return sa, nil
 
     default:
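
The loop-to-assignment rewrites here and in the syscall_*.go files below work because these Addr and Data fields are fixed-size arrays, and assigning one Go array to another copies every element by value. A minimal illustration:

    package main

    import "fmt"

    func main() {
        src := [4]byte{10, 0, 0, 1}
        var dst [4]byte

        dst = src    // copies all four bytes, equivalent to the removed for loop
        src[0] = 192 // later writes to src do not affect dst

        fmt.Println(dst) // [10 0 0 1]
    }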
24 vendor/golang.org/x/sys/unix/syscall_aix.go generated vendored
@@ -70,9 +70,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
     p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
     p[0] = byte(sa.Port >> 8)
     p[1] = byte(sa.Port)
-    for i := 0; i < len(sa.Addr); i++ {
-        sa.raw.Addr[i] = sa.Addr[i]
-    }
+    sa.raw.Addr = sa.Addr
     return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
 }
 
@@ -85,9 +83,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
     p[0] = byte(sa.Port >> 8)
     p[1] = byte(sa.Port)
     sa.raw.Scope_id = sa.ZoneId
-    for i := 0; i < len(sa.Addr); i++ {
-        sa.raw.Addr[i] = sa.Addr[i]
-    }
+    sa.raw.Addr = sa.Addr
     return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
 }
 
@@ -261,9 +257,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
         sa := new(SockaddrInet4)
         p := (*[2]byte)(unsafe.Pointer(&pp.Port))
         sa.Port = int(p[0])<<8 + int(p[1])
-        for i := 0; i < len(sa.Addr); i++ {
-            sa.Addr[i] = pp.Addr[i]
-        }
+        sa.Addr = pp.Addr
         return sa, nil
 
     case AF_INET6:
@@ -272,9 +266,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
         p := (*[2]byte)(unsafe.Pointer(&pp.Port))
         sa.Port = int(p[0])<<8 + int(p[1])
         sa.ZoneId = pp.Scope_id
-        for i := 0; i < len(sa.Addr); i++ {
-            sa.Addr[i] = pp.Addr[i]
-        }
+        sa.Addr = pp.Addr
         return sa, nil
     }
     return nil, EAFNOSUPPORT
@@ -385,6 +377,11 @@ func (w WaitStatus) TrapCause() int { return -1 }
 
 //sys	fcntl(fd int, cmd int, arg int) (val int, err error)
 
+//sys	fsyncRange(fd int, how int, start int64, length int64) (err error) = fsync_range
+
+func Fsync(fd int) error {
+    return fsyncRange(fd, O_SYNC, 0, 0)
+}
 
 /*
  * Direct access
 */
@@ -401,7 +398,6 @@ func (w WaitStatus) TrapCause() int { return -1 }
 //sys	Fchmodat(dirfd int, path string, mode uint32, flags int) (err error)
 //sys	Fchownat(dirfd int, path string, uid int, gid int, flags int) (err error)
 //sys	Fdatasync(fd int) (err error)
-//sys	Fsync(fd int) (err error)
 // readdir_r
 //sysnb	Getpgid(pid int) (pgid int, err error)
 
@@ -523,8 +519,10 @@ func Pipe(p []int) (err error) {
     }
     var pp [2]_C_int
     err = pipe(&pp)
+    if err == nil {
         p[0] = int(pp[0])
         p[1] = int(pp[1])
+    }
     return
 }
 
24 vendor/golang.org/x/sys/unix/syscall_bsd.go generated vendored
@@ -163,9 +163,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
     p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
     p[0] = byte(sa.Port >> 8)
     p[1] = byte(sa.Port)
-    for i := 0; i < len(sa.Addr); i++ {
-        sa.raw.Addr[i] = sa.Addr[i]
-    }
+    sa.raw.Addr = sa.Addr
     return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
 }
 
@@ -179,9 +177,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
     p[0] = byte(sa.Port >> 8)
     p[1] = byte(sa.Port)
     sa.raw.Scope_id = sa.ZoneId
-    for i := 0; i < len(sa.Addr); i++ {
-        sa.raw.Addr[i] = sa.Addr[i]
-    }
+    sa.raw.Addr = sa.Addr
     return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
 }
 
@@ -210,9 +206,7 @@ func (sa *SockaddrDatalink) sockaddr() (unsafe.Pointer, _Socklen, error) {
     sa.raw.Nlen = sa.Nlen
     sa.raw.Alen = sa.Alen
     sa.raw.Slen = sa.Slen
-    for i := 0; i < len(sa.raw.Data); i++ {
-        sa.raw.Data[i] = sa.Data[i]
-    }
+    sa.raw.Data = sa.Data
     return unsafe.Pointer(&sa.raw), SizeofSockaddrDatalink, nil
 }
 
@@ -228,9 +222,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
         sa.Nlen = pp.Nlen
         sa.Alen = pp.Alen
         sa.Slen = pp.Slen
-        for i := 0; i < len(sa.Data); i++ {
-            sa.Data[i] = pp.Data[i]
-        }
+        sa.Data = pp.Data
         return sa, nil
 
     case AF_UNIX:
@@ -262,9 +254,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
         sa := new(SockaddrInet4)
         p := (*[2]byte)(unsafe.Pointer(&pp.Port))
         sa.Port = int(p[0])<<8 + int(p[1])
-        for i := 0; i < len(sa.Addr); i++ {
-            sa.Addr[i] = pp.Addr[i]
-        }
+        sa.Addr = pp.Addr
         return sa, nil
 
     case AF_INET6:
@@ -273,9 +263,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
         p := (*[2]byte)(unsafe.Pointer(&pp.Port))
         sa.Port = int(p[0])<<8 + int(p[1])
         sa.ZoneId = pp.Scope_id
-        for i := 0; i < len(sa.Addr); i++ {
-            sa.Addr[i] = pp.Addr[i]
-        }
+        sa.Addr = pp.Addr
         return sa, nil
     }
     return anyToSockaddrGOOS(fd, rsa)
23 vendor/golang.org/x/sys/unix/syscall_darwin.go generated vendored
@@ -159,8 +159,10 @@ func Pipe(p []int) (err error) {
     }
     var x [2]int32
     err = pipe(&x)
+    if err == nil {
         p[0] = int(x[0])
         p[1] = int(x[1])
+    }
     return
 }
 
@@ -430,8 +432,25 @@ func GetsockoptXucred(fd, level, opt int) (*Xucred, error) {
     return x, err
 }
 
-func SysctlKinfoProcSlice(name string) ([]KinfoProc, error) {
-    mib, err := sysctlmib(name)
+func SysctlKinfoProc(name string, args ...int) (*KinfoProc, error) {
+    mib, err := sysctlmib(name, args...)
+    if err != nil {
+        return nil, err
+    }
+
+    var kinfo KinfoProc
+    n := uintptr(SizeofKinfoProc)
+    if err := sysctl(mib, (*byte)(unsafe.Pointer(&kinfo)), &n, nil, 0); err != nil {
+        return nil, err
+    }
+    if n != SizeofKinfoProc {
+        return nil, EIO
+    }
+    return &kinfo, nil
+}
+
+func SysctlKinfoProcSlice(name string, args ...int) ([]KinfoProc, error) {
+    mib, err := sysctlmib(name, args...)
     if err != nil {
         return nil, err
     }
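
A hedged sketch of calling the new darwin-only SysctlKinfoProc shown above; the "kern.proc.pid" MIB name is the conventional way to look up a single process, and the Proc.P_pid field name is taken from this package's darwin types (treat both as assumptions, not part of the diff):

    //go:build darwin

    package main

    import (
        "fmt"
        "os"

        "golang.org/x/sys/unix"
    )

    func main() {
        kp, err := unix.SysctlKinfoProc("kern.proc.pid", os.Getpid())
        if err != nil {
            fmt.Fprintln(os.Stderr, err)
            return
        }
        fmt.Println("pid from sysctl:", kp.Proc.P_pid)
    }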
10 vendor/golang.org/x/sys/unix/syscall_dragonfly.go generated vendored
@@ -101,7 +101,10 @@ func Pipe(p []int) (err error) {
     if len(p) != 2 {
         return EINVAL
     }
-    p[0], p[1], err = pipe()
+    r, w, err := pipe()
+    if err == nil {
+        p[0], p[1] = r, w
+    }
     return
 }
 
@@ -114,7 +117,10 @@ func Pipe2(p []int, flags int) (err error) {
     var pp [2]_C_int
     // pipe2 on dragonfly takes an fds array as an argument, but still
     // returns the file descriptors.
-    p[0], p[1], err = pipe2(&pp, flags)
+    r, w, err := pipe2(&pp, flags)
+    if err == nil {
+        p[0], p[1] = r, w
+    }
     return err
 }
 
2 vendor/golang.org/x/sys/unix/syscall_freebsd.go generated vendored
@@ -110,8 +110,10 @@ func Pipe2(p []int, flags int) error {
     }
     var pp [2]_C_int
     err := pipe2(&pp, flags)
+    if err == nil {
         p[0] = int(pp[0])
         p[1] = int(pp[1])
+    }
     return err
 }
 
54
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
54
vendor/golang.org/x/sys/unix/syscall_linux.go
generated
vendored
@ -131,8 +131,10 @@ func Pipe2(p []int, flags int) error {
|
|||||||
}
|
}
|
||||||
var pp [2]_C_int
|
var pp [2]_C_int
|
||||||
err := pipe2(&pp, flags)
|
err := pipe2(&pp, flags)
|
||||||
|
if err == nil {
|
||||||
p[0] = int(pp[0])
|
p[0] = int(pp[0])
|
||||||
p[1] = int(pp[1])
|
p[1] = int(pp[1])
|
||||||
|
}
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -372,9 +374,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
|
|||||||
p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
|
p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
|
||||||
p[0] = byte(sa.Port >> 8)
|
p[0] = byte(sa.Port >> 8)
|
||||||
p[1] = byte(sa.Port)
|
p[1] = byte(sa.Port)
|
||||||
for i := 0; i < len(sa.Addr); i++ {
|
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
 }

@@ -387,9 +387,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	p[0] = byte(sa.Port >> 8)
 	p[1] = byte(sa.Port)
 	sa.raw.Scope_id = sa.ZoneId
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
 }

@@ -438,9 +436,7 @@ func (sa *SockaddrLinklayer) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	sa.raw.Hatype = sa.Hatype
 	sa.raw.Pkttype = sa.Pkttype
 	sa.raw.Halen = sa.Halen
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrLinklayer, nil
 }

@@ -855,12 +851,10 @@ func (sa *SockaddrTIPC) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	if sa.Addr == nil {
 		return nil, 0, EINVAL
 	}

 	sa.raw.Family = AF_TIPC
 	sa.raw.Scope = int8(sa.Scope)
 	sa.raw.Addrtype = sa.Addr.tipcAddrtype()
 	sa.raw.Addr = sa.Addr.tipcAddr()

 	return unsafe.Pointer(&sa.raw), SizeofSockaddrTIPC, nil
 }

@@ -874,9 +868,7 @@ type SockaddrL2TPIP struct {
 func (sa *SockaddrL2TPIP) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	sa.raw.Family = AF_INET
 	sa.raw.Conn_id = sa.ConnId
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP, nil
 }

@@ -892,9 +884,7 @@ func (sa *SockaddrL2TPIP6) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	sa.raw.Family = AF_INET6
 	sa.raw.Conn_id = sa.ConnId
 	sa.raw.Scope_id = sa.ZoneId
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrL2TPIP6, nil
 }

@@ -990,9 +980,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		sa.Hatype = pp.Hatype
 		sa.Pkttype = pp.Pkttype
 		sa.Halen = pp.Halen
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil

 	case AF_UNIX:
@@ -1031,18 +1019,14 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		pp := (*RawSockaddrL2TPIP)(unsafe.Pointer(rsa))
 		sa := new(SockaddrL2TPIP)
 		sa.ConnId = pp.Conn_id
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil
 	default:
 		pp := (*RawSockaddrInet4)(unsafe.Pointer(rsa))
 		sa := new(SockaddrInet4)
 		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
 		sa.Port = int(p[0])<<8 + int(p[1])
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil
 	}

@@ -1058,9 +1042,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		sa := new(SockaddrL2TPIP6)
 		sa.ConnId = pp.Conn_id
 		sa.ZoneId = pp.Scope_id
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil
 	default:
 		pp := (*RawSockaddrInet6)(unsafe.Pointer(rsa))
@@ -1068,9 +1050,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
 		sa.Port = int(p[0])<<8 + int(p[1])
 		sa.ZoneId = pp.Scope_id
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil
 	}

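Note on the change pattern above: every hunk in this file replaces a per-element copy loop with a single assignment. This works because the Addr fields involved are fixed-size Go arrays, and assigning one array to another copies all of its elements by value. A minimal standalone sketch of the idea (the rawInet4 type below is illustrative only, not the vendored code):

    package main

    import "fmt"

    // rawInet4 stands in for a raw sockaddr struct with a fixed-size address field.
    type rawInet4 struct {
        Addr [4]byte
    }

    func main() {
        src := [4]byte{192, 168, 0, 1}

        // Old pattern: copy element by element.
        var a rawInet4
        for i := 0; i < len(src); i++ {
            a.Addr[i] = src[i]
        }

        // New pattern: arrays are values in Go, so assignment copies every element.
        var b rawInet4
        b.Addr = src

        fmt.Println(a.Addr == b.Addr) // true: fixed-size arrays compare element-wise
    }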
@@ -1797,6 +1777,16 @@ func Mount(source string, target string, fstype string, flags uintptr, data stri
 	return mount(source, target, fstype, flags, datap)
 }

+//sys	mountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr, size uintptr) (err error) = SYS_MOUNT_SETATTR
+
+// MountSetattr is a wrapper for mount_setattr(2).
+// https://man7.org/linux/man-pages/man2/mount_setattr.2.html
+//
+// Requires kernel >= 5.12.
+func MountSetattr(dirfd int, pathname string, flags uint, attr *MountAttr) error {
+	return mountSetattr(dirfd, pathname, flags, attr, unsafe.Sizeof(*attr))
+}
+
 func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) {
 	if raceenabled {
 		raceReleaseMerge(unsafe.Pointer(&ioSync))
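The new MountSetattr wrapper forwards to the raw mount_setattr(2) syscall, supplying unsafe.Sizeof(*attr) as the structure size. A hedged usage sketch follows; it assumes the MountAttr struct that accompanies this wrapper exposes an Attr_set field mirroring the kernel's struct mount_attr, and the path and flag choices are only examples (the call needs kernel 5.12+ and CAP_SYS_ADMIN):

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        // Example only: remount the subtree at /mnt/data read-only and noexec.
        attr := &unix.MountAttr{
            Attr_set: unix.MOUNT_ATTR_RDONLY | unix.MOUNT_ATTR_NOEXEC,
        }
        if err := unix.MountSetattr(unix.AT_FDCWD, "/mnt/data", unix.AT_RECURSIVE, attr); err != nil {
            log.Fatalf("mount_setattr: %v", err) // e.g. ENOSYS on kernels older than 5.12
        }
    }

The MOUNT_ATTR_* values used here are the constants added to zerrors_linux.go further down in this same diff.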
10 vendor/golang.org/x/sys/unix/syscall_netbsd.go (generated, vendored)

@@ -110,14 +110,8 @@ func direntNamlen(buf []byte) (uint64, bool) {
 	return readInt(buf, unsafe.Offsetof(Dirent{}.Namlen), unsafe.Sizeof(Dirent{}.Namlen))
 }

-//sysnb	pipe() (fd1 int, fd2 int, err error)
-
 func Pipe(p []int) (err error) {
-	if len(p) != 2 {
-		return EINVAL
-	}
-	p[0], p[1], err = pipe()
-	return
+	return Pipe2(p, 0)
 }

 //sysnb	pipe2(p *[2]_C_int, flags int) (err error)
@@ -128,8 +122,10 @@ func Pipe2(p []int, flags int) error {
 	}
 	var pp [2]_C_int
 	err := pipe2(&pp, flags)
+	if err == nil {
 		p[0] = int(pp[0])
 		p[1] = int(pp[1])
+	}
 	return err
 }

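The BSD and Solaris Pipe changes in this commit share one shape: Pipe defers to Pipe2 where possible, and the two descriptors are copied back into p only when the underlying call succeeded, so the caller's slice is left untouched on error. A small usage sketch of the caller side (Linux and the BSDs; the payload is arbitrary):

    package main

    import (
        "fmt"
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        p := make([]int, 2)
        if err := unix.Pipe(p); err != nil {
            log.Fatalf("pipe: %v", err) // p stays zeroed on failure
        }
        defer unix.Close(p[0])
        defer unix.Close(p[1])

        if _, err := unix.Write(p[1], []byte("ping")); err != nil {
            log.Fatal(err)
        }
        buf := make([]byte, 16)
        n, err := unix.Read(p[0], buf)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("read %q\n", buf[:n]) // read "ping"
    }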
2 vendor/golang.org/x/sys/unix/syscall_openbsd.go (generated, vendored)

@@ -87,8 +87,10 @@ func Pipe2(p []int, flags int) error {
 	}
 	var pp [2]_C_int
 	err := pipe2(&pp, flags)
+	if err == nil {
 		p[0] = int(pp[0])
 		p[1] = int(pp[1])
+	}
 	return err
 }

20 vendor/golang.org/x/sys/unix/syscall_solaris.go (generated, vendored)

@@ -66,8 +66,10 @@ func Pipe(p []int) (err error) {
 	if n != 0 {
 		return err
 	}
+	if err == nil {
 		p[0] = int(pp[0])
 		p[1] = int(pp[1])
+	}
 	return nil
 }

@@ -79,8 +81,10 @@ func Pipe2(p []int, flags int) error {
 	}
 	var pp [2]_C_int
 	err := pipe2(&pp, flags)
+	if err == nil {
 		p[0] = int(pp[0])
 		p[1] = int(pp[1])
+	}
 	return err
 }

@@ -92,9 +96,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
 	p[0] = byte(sa.Port >> 8)
 	p[1] = byte(sa.Port)
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrInet4, nil
 }

@@ -107,9 +109,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	p[0] = byte(sa.Port >> 8)
 	p[1] = byte(sa.Port)
 	sa.raw.Scope_id = sa.ZoneId
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), SizeofSockaddrInet6, nil
 }

@@ -417,9 +417,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		sa := new(SockaddrInet4)
 		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
 		sa.Port = int(p[0])<<8 + int(p[1])
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil

 	case AF_INET6:
@@ -428,9 +426,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
 		sa.Port = int(p[0])<<8 + int(p[1])
 		sa.ZoneId = pp.Scope_id
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil
 	}
 	return nil, EAFNOSUPPORT
18 vendor/golang.org/x/sys/unix/syscall_zos_s390x.go (generated, vendored)

@@ -67,9 +67,7 @@ func (sa *SockaddrInet4) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	p := (*[2]byte)(unsafe.Pointer(&sa.raw.Port))
 	p[0] = byte(sa.Port >> 8)
 	p[1] = byte(sa.Port)
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
 }

@@ -83,9 +81,7 @@ func (sa *SockaddrInet6) sockaddr() (unsafe.Pointer, _Socklen, error) {
 	p[0] = byte(sa.Port >> 8)
 	p[1] = byte(sa.Port)
 	sa.raw.Scope_id = sa.ZoneId
-	for i := 0; i < len(sa.Addr); i++ {
-		sa.raw.Addr[i] = sa.Addr[i]
-	}
+	sa.raw.Addr = sa.Addr
 	return unsafe.Pointer(&sa.raw), _Socklen(sa.raw.Len), nil
 }

@@ -144,9 +140,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		sa := new(SockaddrInet4)
 		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
 		sa.Port = int(p[0])<<8 + int(p[1])
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil

 	case AF_INET6:
@@ -155,9 +149,7 @@ func anyToSockaddr(_ int, rsa *RawSockaddrAny) (Sockaddr, error) {
 		p := (*[2]byte)(unsafe.Pointer(&pp.Port))
 		sa.Port = int(p[0])<<8 + int(p[1])
 		sa.ZoneId = pp.Scope_id
-		for i := 0; i < len(sa.Addr); i++ {
-			sa.Addr[i] = pp.Addr[i]
-		}
+		sa.Addr = pp.Addr
 		return sa, nil
 	}
 	return nil, EAFNOSUPPORT
@@ -587,8 +579,10 @@ func Pipe(p []int) (err error) {
 	}
 	var pp [2]_C_int
 	err = pipe(&pp)
+	if err == nil {
 		p[0] = int(pp[0])
 		p[1] = int(pp[1])
+	}
 	return
 }

70 vendor/golang.org/x/sys/unix/zerrors_linux.go (generated, vendored)

@@ -1,4 +1,4 @@
-// Code generated by mkmerge.go; DO NOT EDIT.
+// Code generated by mkmerge; DO NOT EDIT.

 //go:build linux
 // +build linux
@@ -116,6 +116,7 @@ const (
 	ARPHRD_LAPB = 0x204
 	ARPHRD_LOCALTLK = 0x305
 	ARPHRD_LOOPBACK = 0x304
+	ARPHRD_MCTP = 0x122
 	ARPHRD_METRICOM = 0x17
 	ARPHRD_NETLINK = 0x338
 	ARPHRD_NETROM = 0x0
@@ -472,6 +473,7 @@ const (
 	DM_DEV_WAIT = 0xc138fd08
 	DM_DIR = "mapper"
 	DM_GET_TARGET_VERSION = 0xc138fd11
+	DM_IMA_MEASUREMENT_FLAG = 0x80000
 	DM_INACTIVE_PRESENT_FLAG = 0x40
 	DM_INTERNAL_SUSPEND_FLAG = 0x40000
 	DM_IOCTL = 0xfd
@@ -716,6 +718,7 @@ const (
 	ETH_P_LOOPBACK = 0x9000
 	ETH_P_MACSEC = 0x88e5
 	ETH_P_MAP = 0xf9
+	ETH_P_MCTP = 0xfa
 	ETH_P_MOBITEX = 0x15
 	ETH_P_MPLS_MC = 0x8848
 	ETH_P_MPLS_UC = 0x8847
@@ -738,6 +741,7 @@ const (
 	ETH_P_QINQ2 = 0x9200
 	ETH_P_QINQ3 = 0x9300
 	ETH_P_RARP = 0x8035
+	ETH_P_REALTEK = 0x8899
 	ETH_P_SCA = 0x6007
 	ETH_P_SLOW = 0x8809
 	ETH_P_SNAP = 0x5
@@ -751,6 +755,21 @@ const (
 	ETH_P_WCCP = 0x883e
 	ETH_P_X25 = 0x805
 	ETH_P_XDSA = 0xf8
+	EV_ABS = 0x3
+	EV_CNT = 0x20
+	EV_FF = 0x15
+	EV_FF_STATUS = 0x17
+	EV_KEY = 0x1
+	EV_LED = 0x11
+	EV_MAX = 0x1f
+	EV_MSC = 0x4
+	EV_PWR = 0x16
+	EV_REL = 0x2
+	EV_REP = 0x14
+	EV_SND = 0x12
+	EV_SW = 0x5
+	EV_SYN = 0x0
+	EV_VERSION = 0x10001
 	EXABYTE_ENABLE_NEST = 0xf0
 	EXT2_SUPER_MAGIC = 0xef53
 	EXT3_SUPER_MAGIC = 0xef53
@@ -789,11 +808,15 @@ const (
 	FAN_DELETE_SELF = 0x400
 	FAN_DENY = 0x2
 	FAN_ENABLE_AUDIT = 0x40
+	FAN_EPIDFD = -0x2
 	FAN_EVENT_INFO_TYPE_DFID = 0x3
 	FAN_EVENT_INFO_TYPE_DFID_NAME = 0x2
+	FAN_EVENT_INFO_TYPE_ERROR = 0x5
 	FAN_EVENT_INFO_TYPE_FID = 0x1
+	FAN_EVENT_INFO_TYPE_PIDFD = 0x4
 	FAN_EVENT_METADATA_LEN = 0x18
 	FAN_EVENT_ON_CHILD = 0x8000000
+	FAN_FS_ERROR = 0x8000
 	FAN_MARK_ADD = 0x1
 	FAN_MARK_DONT_FOLLOW = 0x4
 	FAN_MARK_FILESYSTEM = 0x100
@@ -811,6 +834,7 @@ const (
 	FAN_MOVE_SELF = 0x800
 	FAN_NOFD = -0x1
 	FAN_NONBLOCK = 0x2
+	FAN_NOPIDFD = -0x1
 	FAN_ONDIR = 0x40000000
 	FAN_OPEN = 0x20
 	FAN_OPEN_EXEC = 0x1000
@@ -821,6 +845,7 @@ const (
 	FAN_REPORT_DIR_FID = 0x400
 	FAN_REPORT_FID = 0x200
 	FAN_REPORT_NAME = 0x800
+	FAN_REPORT_PIDFD = 0x80
 	FAN_REPORT_TID = 0x100
 	FAN_UNLIMITED_MARKS = 0x20
 	FAN_UNLIMITED_QUEUE = 0x10
@@ -1454,6 +1479,18 @@ const (
 	MNT_FORCE = 0x1
 	MODULE_INIT_IGNORE_MODVERSIONS = 0x1
 	MODULE_INIT_IGNORE_VERMAGIC = 0x2
+	MOUNT_ATTR_IDMAP = 0x100000
+	MOUNT_ATTR_NOATIME = 0x10
+	MOUNT_ATTR_NODEV = 0x4
+	MOUNT_ATTR_NODIRATIME = 0x80
+	MOUNT_ATTR_NOEXEC = 0x8
+	MOUNT_ATTR_NOSUID = 0x2
+	MOUNT_ATTR_NOSYMFOLLOW = 0x200000
+	MOUNT_ATTR_RDONLY = 0x1
+	MOUNT_ATTR_RELATIME = 0x0
+	MOUNT_ATTR_SIZE_VER0 = 0x20
+	MOUNT_ATTR_STRICTATIME = 0x20
+	MOUNT_ATTR__ATIME = 0x70
 	MSDOS_SUPER_MAGIC = 0x4d44
 	MSG_BATCH = 0x40000
 	MSG_CMSG_CLOEXEC = 0x40000000
@@ -1793,6 +1830,8 @@ const (
 	PERF_MEM_BLK_DATA = 0x2
 	PERF_MEM_BLK_NA = 0x1
 	PERF_MEM_BLK_SHIFT = 0x28
+	PERF_MEM_HOPS_0 = 0x1
+	PERF_MEM_HOPS_SHIFT = 0x2b
 	PERF_MEM_LOCK_LOCKED = 0x2
 	PERF_MEM_LOCK_NA = 0x1
 	PERF_MEM_LOCK_SHIFT = 0x18
@@ -1952,6 +1991,9 @@ const (
 	PR_SCHED_CORE_CREATE = 0x1
 	PR_SCHED_CORE_GET = 0x0
 	PR_SCHED_CORE_MAX = 0x4
+	PR_SCHED_CORE_SCOPE_PROCESS_GROUP = 0x2
+	PR_SCHED_CORE_SCOPE_THREAD = 0x0
+	PR_SCHED_CORE_SCOPE_THREAD_GROUP = 0x1
 	PR_SCHED_CORE_SHARE_FROM = 0x3
 	PR_SCHED_CORE_SHARE_TO = 0x2
 	PR_SET_CHILD_SUBREAPER = 0x24
@@ -1997,6 +2039,7 @@ const (
 	PR_SPEC_ENABLE = 0x2
 	PR_SPEC_FORCE_DISABLE = 0x8
 	PR_SPEC_INDIRECT_BRANCH = 0x1
+	PR_SPEC_L1D_FLUSH = 0x2
 	PR_SPEC_NOT_AFFECTED = 0x0
 	PR_SPEC_PRCTL = 0x1
 	PR_SPEC_STORE_BYPASS = 0x0
@@ -2132,12 +2175,23 @@ const (
 	RTCF_NAT = 0x800000
 	RTCF_VALVE = 0x200000
 	RTC_AF = 0x20
+	RTC_BSM_DIRECT = 0x1
+	RTC_BSM_DISABLED = 0x0
+	RTC_BSM_LEVEL = 0x2
+	RTC_BSM_STANDBY = 0x3
 	RTC_FEATURE_ALARM = 0x0
+	RTC_FEATURE_ALARM_RES_2S = 0x3
 	RTC_FEATURE_ALARM_RES_MINUTE = 0x1
-	RTC_FEATURE_CNT = 0x3
+	RTC_FEATURE_BACKUP_SWITCH_MODE = 0x6
+	RTC_FEATURE_CNT = 0x7
+	RTC_FEATURE_CORRECTION = 0x5
 	RTC_FEATURE_NEED_WEEK_DAY = 0x2
+	RTC_FEATURE_UPDATE_INTERRUPT = 0x4
 	RTC_IRQF = 0x80
 	RTC_MAX_FREQ = 0x2000
+	RTC_PARAM_BACKUP_SWITCH_MODE = 0x2
+	RTC_PARAM_CORRECTION = 0x1
+	RTC_PARAM_FEATURES = 0x0
 	RTC_PF = 0x40
 	RTC_UF = 0x10
 	RTF_ADDRCLASSMASK = 0xf8000000
@@ -2432,12 +2486,15 @@ const (
 	SMART_WRITE_THRESHOLDS = 0xd7
 	SMB_SUPER_MAGIC = 0x517b
 	SOCKFS_MAGIC = 0x534f434b
+	SOCK_BUF_LOCK_MASK = 0x3
 	SOCK_DCCP = 0x6
 	SOCK_IOC_TYPE = 0x89
 	SOCK_PACKET = 0xa
 	SOCK_RAW = 0x3
+	SOCK_RCVBUF_LOCK = 0x2
 	SOCK_RDM = 0x4
 	SOCK_SEQPACKET = 0x5
+	SOCK_SNDBUF_LOCK = 0x1
 	SOL_AAL = 0x109
 	SOL_ALG = 0x117
 	SOL_ATM = 0x108
@@ -2494,6 +2551,8 @@ const (
 	SO_VM_SOCKETS_BUFFER_MIN_SIZE = 0x1
 	SO_VM_SOCKETS_BUFFER_SIZE = 0x0
 	SO_VM_SOCKETS_CONNECT_TIMEOUT = 0x6
+	SO_VM_SOCKETS_CONNECT_TIMEOUT_NEW = 0x8
+	SO_VM_SOCKETS_CONNECT_TIMEOUT_OLD = 0x6
 	SO_VM_SOCKETS_NONBLOCK_TXRX = 0x7
 	SO_VM_SOCKETS_PEER_HOST_VM_ID = 0x3
 	SO_VM_SOCKETS_TRUSTED = 0x5
@@ -2788,6 +2847,13 @@ const (
 	WDIOS_TEMPPANIC = 0x4
 	WDIOS_UNKNOWN = -0x1
 	WEXITED = 0x4
+	WGALLOWEDIP_A_MAX = 0x3
+	WGDEVICE_A_MAX = 0x8
+	WGPEER_A_MAX = 0xa
+	WG_CMD_MAX = 0x1
+	WG_GENL_NAME = "wireguard"
+	WG_GENL_VERSION = 0x1
+	WG_KEY_LEN = 0x20
 	WIN_ACKMEDIACHANGE = 0xdb
 	WIN_CHECKPOWERMODE1 = 0xe5
 	WIN_CHECKPOWERMODE2 = 0x98
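Several of the fanotify constants added above (FAN_REPORT_PIDFD, FAN_EVENT_INFO_TYPE_PIDFD, FAN_FS_ERROR, ...) are only honored by newer kernels, so a caller usually probes for support and falls back. A hedged sketch of such a probe, assuming the package's existing FanotifyInit wrapper; the retry policy is illustrative:

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        // fanotify needs CAP_SYS_ADMIN.
        fd, err := unix.FanotifyInit(unix.FAN_CLASS_NOTIF|unix.FAN_CLOEXEC|unix.FAN_REPORT_PIDFD,
            unix.O_RDONLY|unix.O_CLOEXEC)
        if err == unix.EINVAL {
            // Kernel does not know FAN_REPORT_PIDFD yet; retry without it.
            fd, err = unix.FanotifyInit(unix.FAN_CLASS_NOTIF|unix.FAN_CLOEXEC, unix.O_RDONLY|unix.O_CLOEXEC)
        }
        if err != nil {
            log.Fatalf("fanotify_init: %v", err)
        }
        defer unix.Close(fd)
        log.Printf("fanotify group fd: %d", fd)
    }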
6 vendor/golang.org/x/sys/unix/zerrors_linux_386.go (generated, vendored)

@@ -5,7 +5,7 @@
 // +build 386,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m32 /build/unix/_const.go

 package unix

@@ -250,6 +250,8 @@ const (
 	RTC_EPOCH_SET = 0x4004700e
 	RTC_IRQP_READ = 0x8004700b
 	RTC_IRQP_SET = 0x4004700c
+	RTC_PARAM_GET = 0x40187013
+	RTC_PARAM_SET = 0x40187014
 	RTC_PIE_OFF = 0x7006
 	RTC_PIE_ON = 0x7005
 	RTC_PLL_GET = 0x801c7011
@@ -293,6 +295,7 @@ const (
 	SO_BPF_EXTENSIONS = 0x30
 	SO_BROADCAST = 0x6
 	SO_BSDCOMPAT = 0xe
+	SO_BUF_LOCK = 0x48
 	SO_BUSY_POLL = 0x2e
 	SO_BUSY_POLL_BUDGET = 0x46
 	SO_CNX_ADVICE = 0x35
@@ -326,6 +329,7 @@ const (
 	SO_RCVTIMEO = 0x14
 	SO_RCVTIMEO_NEW = 0x42
 	SO_RCVTIMEO_OLD = 0x14
+	SO_RESERVE_MEM = 0x49
 	SO_REUSEADDR = 0x2
 	SO_REUSEPORT = 0xf
 	SO_RXQ_OVFL = 0x28
6 vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go (generated, vendored)

@@ -5,7 +5,7 @@
 // +build amd64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -m64 /build/unix/_const.go

 package unix

@@ -251,6 +251,8 @@ const (
 	RTC_EPOCH_SET = 0x4008700e
 	RTC_IRQP_READ = 0x8008700b
 	RTC_IRQP_SET = 0x4008700c
+	RTC_PARAM_GET = 0x40187013
+	RTC_PARAM_SET = 0x40187014
 	RTC_PIE_OFF = 0x7006
 	RTC_PIE_ON = 0x7005
 	RTC_PLL_GET = 0x80207011
@@ -294,6 +296,7 @@ const (
 	SO_BPF_EXTENSIONS = 0x30
 	SO_BROADCAST = 0x6
 	SO_BSDCOMPAT = 0xe
+	SO_BUF_LOCK = 0x48
 	SO_BUSY_POLL = 0x2e
 	SO_BUSY_POLL_BUDGET = 0x46
 	SO_CNX_ADVICE = 0x35
@@ -327,6 +330,7 @@ const (
 	SO_RCVTIMEO = 0x14
 	SO_RCVTIMEO_NEW = 0x42
 	SO_RCVTIMEO_OLD = 0x14
+	SO_RESERVE_MEM = 0x49
 	SO_REUSEADDR = 0x2
 	SO_REUSEPORT = 0xf
 	SO_RXQ_OVFL = 0x28
6 vendor/golang.org/x/sys/unix/zerrors_linux_arm.go (generated, vendored)

@@ -5,7 +5,7 @@
 // +build arm,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go

 package unix

@@ -257,6 +257,8 @@ const (
 	RTC_EPOCH_SET = 0x4004700e
 	RTC_IRQP_READ = 0x8004700b
 	RTC_IRQP_SET = 0x4004700c
+	RTC_PARAM_GET = 0x40187013
+	RTC_PARAM_SET = 0x40187014
 	RTC_PIE_OFF = 0x7006
 	RTC_PIE_ON = 0x7005
 	RTC_PLL_GET = 0x801c7011
@@ -300,6 +302,7 @@ const (
 	SO_BPF_EXTENSIONS = 0x30
 	SO_BROADCAST = 0x6
 	SO_BSDCOMPAT = 0xe
+	SO_BUF_LOCK = 0x48
 	SO_BUSY_POLL = 0x2e
 	SO_BUSY_POLL_BUDGET = 0x46
 	SO_CNX_ADVICE = 0x35
@@ -333,6 +336,7 @@ const (
 	SO_RCVTIMEO = 0x14
 	SO_RCVTIMEO_NEW = 0x42
 	SO_RCVTIMEO_OLD = 0x14
+	SO_RESERVE_MEM = 0x49
 	SO_REUSEADDR = 0x2
 	SO_REUSEPORT = 0xf
 	SO_RXQ_OVFL = 0x28
6 vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go (generated, vendored)

@@ -5,7 +5,7 @@
 // +build arm64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include -fsigned-char /build/unix/_const.go

 package unix

@@ -247,6 +247,8 @@ const (
 	RTC_EPOCH_SET = 0x4008700e
 	RTC_IRQP_READ = 0x8008700b
 	RTC_IRQP_SET = 0x4008700c
+	RTC_PARAM_GET = 0x40187013
+	RTC_PARAM_SET = 0x40187014
 	RTC_PIE_OFF = 0x7006
 	RTC_PIE_ON = 0x7005
 	RTC_PLL_GET = 0x80207011
@@ -290,6 +292,7 @@ const (
 	SO_BPF_EXTENSIONS = 0x30
 	SO_BROADCAST = 0x6
 	SO_BSDCOMPAT = 0xe
+	SO_BUF_LOCK = 0x48
 	SO_BUSY_POLL = 0x2e
 	SO_BUSY_POLL_BUDGET = 0x46
 	SO_CNX_ADVICE = 0x35
@@ -323,6 +326,7 @@ const (
 	SO_RCVTIMEO = 0x14
 	SO_RCVTIMEO_NEW = 0x42
 	SO_RCVTIMEO_OLD = 0x14
+	SO_RESERVE_MEM = 0x49
 	SO_REUSEADDR = 0x2
 	SO_REUSEPORT = 0xf
 	SO_RXQ_OVFL = 0x28
6 vendor/golang.org/x/sys/unix/zerrors_linux_mips.go (generated, vendored)

@@ -5,7 +5,7 @@
 // +build mips,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go

 package unix

@@ -250,6 +250,8 @@ const (
 	RTC_EPOCH_SET = 0x8004700e
 	RTC_IRQP_READ = 0x4004700b
 	RTC_IRQP_SET = 0x8004700c
+	RTC_PARAM_GET = 0x80187013
+	RTC_PARAM_SET = 0x80187014
 	RTC_PIE_OFF = 0x20007006
 	RTC_PIE_ON = 0x20007005
 	RTC_PLL_GET = 0x401c7011
@@ -293,6 +295,7 @@ const (
 	SO_BPF_EXTENSIONS = 0x30
 	SO_BROADCAST = 0x20
 	SO_BSDCOMPAT = 0xe
+	SO_BUF_LOCK = 0x48
 	SO_BUSY_POLL = 0x2e
 	SO_BUSY_POLL_BUDGET = 0x46
 	SO_CNX_ADVICE = 0x35
@@ -326,6 +329,7 @@ const (
 	SO_RCVTIMEO = 0x1006
 	SO_RCVTIMEO_NEW = 0x42
 	SO_RCVTIMEO_OLD = 0x1006
+	SO_RESERVE_MEM = 0x49
 	SO_REUSEADDR = 0x4
 	SO_REUSEPORT = 0x200
 	SO_RXQ_OVFL = 0x28
6 vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go (generated, vendored)

@@ -5,7 +5,7 @@
 // +build mips64,linux

 // Code generated by cmd/cgo -godefs; DO NOT EDIT.
-// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/_const.go
+// cgo -godefs -- -Wall -Werror -static -I/tmp/include /build/unix/_const.go

 package unix

@@ -250,6 +250,8 @@ const (
 	RTC_EPOCH_SET = 0x8008700e
 	RTC_IRQP_READ = 0x4008700b
 	RTC_IRQP_SET = 0x8008700c
+	RTC_PARAM_GET = 0x80187013
+	RTC_PARAM_SET = 0x80187014
 	RTC_PIE_OFF = 0x20007006
 	RTC_PIE_ON = 0x20007005
 	RTC_PLL_GET = 0x40207011
@@ -293,6 +295,7 @@ const (
 	SO_BPF_EXTENSIONS = 0x30
 	SO_BROADCAST = 0x20
 	SO_BSDCOMPAT = 0xe
+	SO_BUF_LOCK = 0x48
 	SO_BUSY_POLL = 0x2e
 	SO_BUSY_POLL_BUDGET = 0x46
 	SO_CNX_ADVICE = 0x35
@@ -326,6 +329,7 @@ const (
 	SO_RCVTIMEO = 0x1006
 	SO_RCVTIMEO_NEW = 0x42
 	SO_RCVTIMEO_OLD = 0x1006
+	SO_RESERVE_MEM = 0x49
 	SO_REUSEADDR = 0x4
 	SO_REUSEPORT = 0x200
 	SO_RXQ_OVFL = 0x28
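The per-architecture zerrors files all gain the same three additions, RTC_PARAM_GET/RTC_PARAM_SET (with architecture-specific ioctl encodings) plus SO_BUF_LOCK and SO_RESERVE_MEM. As a rough illustration of consuming one of them, the sketch below sets SO_RESERVE_MEM on a TCP socket through the generic SetsockoptInt helper; the 1 MiB value is arbitrary and the byte-count semantics are an assumption about the new option, which older kernels will simply reject:

    package main

    import (
        "log"

        "golang.org/x/sys/unix"
    )

    func main() {
        fd, err := unix.Socket(unix.AF_INET, unix.SOCK_STREAM|unix.SOCK_CLOEXEC, 0)
        if err != nil {
            log.Fatalf("socket: %v", err)
        }
        defer unix.Close(fd)

        // Assumed semantics: pre-reserve ~1 MiB of socket memory for this socket.
        // Expect ENOPROTOOPT/EPERM on kernels or configurations without support.
        if err := unix.SetsockoptInt(fd, unix.SOL_SOCKET, unix.SO_RESERVE_MEM, 1<<20); err != nil {
            log.Printf("SO_RESERVE_MEM not applied: %v", err)
        }
    }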
Some files were not shown because too many files have changed in this diff.