implement grpc metrics for ceph-csi

Signed-off-by: Madhu Rajanna <madhupr007@gmail.com>
Madhu Rajanna 2019-08-21 14:58:02 +05:30 committed by mergify[bot]
parent 01a78cace5
commit a81a3bf96b
46 changed files with 1363 additions and 158 deletions

Gopkg.lock generated
View File

@ -255,6 +255,14 @@
revision = "c250d6563d4d4c20252cd865923440e829844f4e"
version = "v1.0.0"
[[projects]]
digest = "1:5872c7f130f62fc34bfda20babad36be6309c00b5c9207717f7cd2a51536fff4"
name = "github.com/grpc-ecosystem/go-grpc-prometheus"
packages = ["."]
pruneopts = "NUT"
revision = "c225b8c3b01faf2899099b768856a9e916e5087b"
version = "v1.2.0"
[[projects]]
digest = "1:ed860d2b2c1d066d36a89c982eefc7d019badd534f60e87ab65d3d94f0797ef0"
name = "github.com/hashicorp/golang-lru"
@ -1346,6 +1354,7 @@
"github.com/golang/protobuf/ptypes",
"github.com/golang/protobuf/ptypes/timestamp",
"github.com/grpc-ecosystem/go-grpc-middleware",
"github.com/grpc-ecosystem/go-grpc-prometheus",
"github.com/kubernetes-csi/csi-lib-utils/connection",
"github.com/kubernetes-csi/csi-lib-utils/protosanitizer",
"github.com/kubernetes-csi/csi-lib-utils/rpc",

View File

@ -59,6 +59,14 @@
name = "k8s.io/kube-aggregator"
version = "kubernetes-1.15.2"
[[constraint]]
name = "github.com/grpc-ecosystem/go-grpc-prometheus"
version = "1.2.0"
[[constraint]]
name = "github.com/prometheus/client_golang"
version = "1.1.0"
[prune]
go-tests = true
non-go = true

View File

@ -65,11 +65,16 @@ func init() {
flag.StringVar(&conf.VolumeMounter, "volumemounter", "", "default volume mounter (possible options are 'kernel', 'fuse')")
flag.StringVar(&conf.MountCacheDir, "mountcachedir", "", "mount info cache save dir")
// livenes related flags
flag.IntVar(&conf.LivenessPort, "livenessport", 8080, "TCP port for liveness requests")
flag.StringVar(&conf.LivenessPath, "livenesspath", "/metrics", "path of prometheus endpoint where metrics will be available")
// liveness/grpc metrics related flags
flag.IntVar(&conf.MetricsPort, "metricsport", 8080, "TCP port for liveness/grpc metrics requests")
flag.StringVar(&conf.MetricsPath, "metricspath", "/metrics", "path of prometheus endpoint where metrics will be available")
flag.DurationVar(&conf.PollTime, "polltime", time.Second*60, "time interval in seconds between each poll")
flag.DurationVar(&conf.PoolTimeout, "timeout", time.Second*3, "probe timeout in seconds")
flag.BoolVar(&conf.EnableGRPCMetrics, "enablegrpcmetrics", false, "enable grpc metrics")
flag.StringVar(&conf.HistogramOption, "histogramoption", "0.5,2,6",
"Histogram option for grpc metrics, should be comma separated value, ex:= 0.5,2,6 where start=0.5 factor=2, count=6")
klog.InitFlags(nil)
if err := flag.Set("logtostderr", "true"); err != nil {
klog.Exitf("failed to set logtostderr flag: %v", err)
@ -119,9 +124,9 @@ func main() {
// the driver may need a higher PID limit for handling all concurrent requests
if conf.PidLimit != 0 {
currentLimit, err := util.GetPIDLimit()
if err != nil {
klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", err)
currentLimit, pidErr := util.GetPIDLimit()
if pidErr != nil {
klog.Errorf("Failed to get the PID limit, can not reconfigure: %v", pidErr)
} else {
klog.Infof("Initial PID limit is set to %d", currentLimit)
err = util.SetPIDLimit(conf.PidLimit)
@ -137,6 +142,20 @@ func main() {
}
}
if conf.EnableGRPCMetrics || conf.Vtype == livenessType {
// validate metrics endpoint
conf.MetricsIP = os.Getenv("POD_IP")
if conf.MetricsIP == "" {
klog.Warning("missing POD_IP env var defaulting to 0.0.0.0")
conf.MetricsIP = "0.0.0.0"
}
err = util.ValidateURL(&conf)
if err != nil {
klog.Fatalln(err)
}
}
klog.Infof("Starting driver type: %v with name: %v", conf.Vtype, dname)
switch conf.Vtype {
case rbdType:

View File

@ -4,7 +4,7 @@ apiVersion: v1
metadata:
name: csi-cephfsplugin-provisioner
labels:
app: csi-liveness
app: csi-metrics
spec:
selector:
app: csi-cephfsplugin-provisioner
@ -13,6 +13,10 @@ spec:
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
---
kind: StatefulSet
@ -74,7 +78,14 @@ spec:
- "--drivername=cephfs.csi.ceph.com"
- "--metadatastorage=k8s_configmap"
- "--pidlimit=-1"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -105,8 +116,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:

View File

@ -59,7 +59,14 @@ spec:
- "--drivername=cephfs.csi.ceph.com"
- "--metadatastorage=k8s_configmap"
- "--mountcachedir=/mount-cache-dir"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -99,8 +106,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
@ -150,18 +157,22 @@ spec:
medium: "Memory"
}
---
# This is a service to expose the liveness side car
# This is a service to expose the liveness and grpc metrics
apiVersion: v1
kind: Service
metadata:
name: csi-liveness-cephfsplugin
name: csi-metrics-cephfsplugin
labels:
app: csi-liveness
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
selector:
app: csi-cephfsplugin

View File

@ -74,7 +74,14 @@ spec:
- "--drivername=$(DRIVER_NAME)"
- "--metadatastorage=k8s_configmap"
- "--mountcachedir=/mount-cache-dir"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -115,8 +122,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -0,0 +1,25 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091

View File

@ -4,7 +4,7 @@ metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-liveness
app: csi-metrics
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
@ -19,3 +19,7 @@ spec:
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091

View File

@ -77,7 +77,14 @@ spec:
- "--drivername=$(DRIVER_NAME)"
- "--metadatastorage=k8s_configmap"
- "--pidlimit=-1"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -102,8 +109,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -4,7 +4,7 @@ apiVersion: v1
metadata:
name: csi-cephfsplugin-provisioner
labels:
app: csi-liveness
app: csi-metrics
spec:
selector:
app: csi-cephfsplugin-provisioner
@ -13,6 +13,10 @@ spec:
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
---
kind: Deployment
@ -77,7 +81,14 @@ spec:
- "--drivername=cephfs.csi.ceph.com"
- "--metadatastorage=k8s_configmap"
- "--pidlimit=-1"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -108,8 +119,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:

View File

@ -59,7 +59,14 @@ spec:
- "--drivername=cephfs.csi.ceph.com"
- "--metadatastorage=k8s_configmap"
- "--mountcachedir=/mount-cache-dir"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -98,8 +105,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
@ -149,18 +156,22 @@ spec:
medium: "Memory"
}
---
# This is a service to expose the liveness side car
# This is a service to expose the liveness and grpc metrics
apiVersion: v1
kind: Service
metadata:
name: csi-liveness-cephfsplugin
name: csi-metrics-cephfsplugin
labels:
app: csi-liveness
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091
selector:
app: csi-cephfsplugin

View File

@ -74,7 +74,14 @@ spec:
- "--drivername=$(DRIVER_NAME)"
- "--metadatastorage=k8s_configmap"
- "--mountcachedir=/mount-cache-dir"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -115,8 +122,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -0,0 +1,25 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-cephfs.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091

View File

@ -79,7 +79,14 @@ spec:
- "--drivername=$(DRIVER_NAME)"
- "--metadatastorage=k8s_configmap"
- "--pidlimit=-1"
- "--metricsport=8091"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -105,8 +112,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -0,0 +1,25 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-cephfs.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-cephfs.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-cephfs.name" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8081
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8091

View File

@ -4,7 +4,7 @@ apiVersion: v1
metadata:
name: csi-rbdplugin-provisioner
labels:
app: csi-liveness
app: csi-metrics
spec:
selector:
app: csi-rbdplugin-provisioner
@ -13,6 +13,10 @@ spec:
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090
---
kind: StatefulSet
@ -89,7 +93,14 @@ spec:
- "--drivername=rbd.csi.ceph.com"
- "--containerized=true"
- "--pidlimit=-1"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -118,8 +129,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8080"
- "--livenesspath=/metrics"
- "--metricsport=8080"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:

View File

@ -61,7 +61,14 @@ spec:
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--containerized=true"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -96,8 +103,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8080"
- "--livenesspath=/metrics"
- "--metricsport=8080"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
@ -148,18 +155,22 @@ spec:
medium: "Memory"
}
---
# This is a service to expose the liveness side car
# This is a service to expose the liveness and grpc metrics
apiVersion: v1
kind: Service
metadata:
name: csi-liveness-rbdplugin
name: csi-metrics-rbdplugin
labels:
app: csi-liveness
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090
selector:
app: csi-rbdplugin

View File

@ -75,7 +75,14 @@ spec:
- "--v=5"
- "--drivername=$(DRIVER_NAME)"
- "--containerized=true"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -114,8 +121,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8080"
- "--livenesspath=/metrics"
- "--metricsport=8080"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -0,0 +1,25 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090

View File

@ -4,7 +4,7 @@ metadata:
name: {{ include "ceph-csi-rbd.provisioner.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-liveness
app: csi-metrics
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.provisioner.name }}
release: {{ .Release.Name }}
@ -19,3 +19,7 @@ spec:
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090

View File

@ -95,7 +95,14 @@ spec:
- "--drivername=$(DRIVER_NAME)"
- "--containerized=true"
- "--pidlimit=-1"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -120,8 +127,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8080"
- "--livenesspath=/metrics"
- "--metricsport=8080"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}
@ -142,8 +149,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -4,7 +4,7 @@ apiVersion: v1
metadata:
name: csi-rbdplugin-provisioner
labels:
app: csi-liveness
app: csi-metrics
spec:
selector:
app: csi-rbdplugin-provisioner
@ -13,7 +13,10 @@ spec:
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090
---
kind: Deployment
apiVersion: apps/v1
@ -93,7 +96,14 @@ spec:
- "--drivername=rbd.csi.ceph.com"
- "--containerized=true"
- "--pidlimit=-1"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -122,8 +132,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8080"
- "--livenesspath=/metrics"
- "--metricsport=8080"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:

View File

@ -61,7 +61,14 @@ spec:
- "--v=5"
- "--drivername=rbd.csi.ceph.com"
- "--containerized=true"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: NODE_ID
valueFrom:
fieldRef:
@ -96,8 +103,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8080"
- "--livenesspath=/metrics"
- "--metricsport=8080"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
env:
@ -148,18 +155,22 @@ spec:
medium: "Memory"
}
---
# This is a service to expose the liveness side car
# This is a service to expose the liveness and grpc metrics
apiVersion: v1
kind: Service
metadata:
name: csi-liveness-rbdplugin
name: csi-metrics-rbdplugin
labels:
app: csi-liveness
app: csi-metrics
spec:
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090
selector:
app: csi-rbdplugin

View File

@ -75,7 +75,14 @@ spec:
- "--v=5"
- "--drivername=$(DRIVER_NAME)"
- "--containerized=true"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID
@ -114,8 +121,8 @@ spec:
args:
- "--type=liveness"
- "--endpoint=$(CSI_ENDPOINT)"
- "--livenessport=8081"
- "--livenesspath=/metrics"
- "--metricsport=8081"
- "--metricspath=/metrics"
- "--polltime=60s"
- "--timeout=3s"
imagePullPolicy: {{ .Values.nodeplugin.plugin.image.pullPolicy }}

View File

@ -0,0 +1,25 @@
kind: Service
apiVersion: v1
metadata:
name: {{ include "ceph-csi-rbd.nodeplugin.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app: csi-metrics
chart: {{ include "ceph-csi-rbd.chart" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
heritage: {{ .Release.Service }}
spec:
selector:
app: {{ include "ceph-csi-rbd.name" . }}
component: {{ .Values.nodeplugin.name }}
release: {{ .Release.Name }}
ports:
- name: http-metrics
port: 8080
protocol: TCP
targetPort: 8080
- name: grpc-metrics
port: 8090
protocol: TCP
targetPort: 8090

View File

@ -98,7 +98,14 @@ spec:
- "--drivername=$(DRIVER_NAME)"
- "--containerized=true"
- "--pidlimit=-1"
- "--metricsport=8090"
- "--metricspath=/metrics"
- "--enablegrpcmetrics=false"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: DRIVER_NAME
value: {{ .Values.driverName }}
- name: NODE_ID

View File

@ -44,7 +44,7 @@ that should be resolved in v14.2.3.
**Available command line arguments:**
| Option | Default value | Description |
| ------------------- | --------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --------------------- | --------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
| `--drivername` | `cephfs.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
| `--nodeid` | _empty_ | This node's ID |
@ -55,10 +55,12 @@ that should be resolved in v14.2.3.
| `--pluginpath` | "/var/lib/kubelet/plugins/" | The location of cephcsi plugin on host |
| `--metadatastorage` | _empty_ | Points to where older (1.0.0 or older plugin versions) metadata about provisioned volumes are kept, as file or in as k8s configmap (`node` or `k8s_configmap` respectively) |
| `--pidlimit` | _0_ | Configure the PID limit in cgroups. The container runtime can restrict the number of processes/tasks which can cause problems while provisioning (or deleting) a large number of volumes. A value of `-1` configures the limit to the maximum, `0` does not configure limits at all. |
| `--livenessport` | `8080` | TCP port for liveness requests |
| `--livenesspath` | `/metrics` | Path of prometheus endpoint where metrics will be available |
| `--metricsport` | `8080` | TCP port for liveness/grpc metrics requests |
| `--metricspath` | `/metrics` | Path of prometheus endpoint where metrics will be available |
| `--enablegrpcmetrics` | `false` | Enable grpc metrics collection and start prometheus server |
| `--polltime` | `60s` | Time interval in between each poll |
| `--timeout` | `3s` | Probe timeout in seconds |
| `--histogramoption` | `0.5,2,6` | Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
**Available environmental variables:**

View File

@ -28,7 +28,7 @@ make image-cephcsi
**Available command line arguments:**
| Option | Default value | Description |
| ------------------- | --------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --------------------- | --------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ |
| `--endpoint` | `unix://tmp/csi.sock` | CSI endpoint, must be a UNIX socket |
| `--drivername` | `rbd.csi.ceph.com` | Name of the driver (Kubernetes: `provisioner` field in StorageClass must correspond to this value) |
| `--nodeid` | _empty_ | This node's ID |
@ -37,15 +37,17 @@ make image-cephcsi
| `--instanceid` | "default" | Unique ID distinguishing this instance of Ceph CSI among other instances, when sharing Ceph clusters across CSI instances for provisioning |
| `--metadatastorage` | _empty_ | Points to where legacy (1.0.0 or older plugin versions) metadata about provisioned volumes are kept, as file or in as k8s configmap (`node` or `k8s_configmap` respectively) |
| `--pidlimit` | _0_ | Configure the PID limit in cgroups. The container runtime can restrict the number of processes/tasks which can cause problems while provisioning (or deleting) a large number of volumes. A value of `-1` configures the limit to the maximum, `0` does not configure limits at all. |
| `--livenessport` | `8080` | TCP port for liveness requests |
| `--livenesspath` | `"/metrics"` | Path of prometheus endpoint where metrics will be available |
| `--metricsport` | `8080` | TCP port for liveness/grpc metrics requests |
| `--metricspath` | `"/metrics"` | Path of prometheus endpoint where metrics will be available |
| `--enablegrpcmetrics` | `false` | Enable grpc metrics collection and start prometheus server |
| `--polltime` | `"60s"` | Time interval in between each poll |
| `--timeout` | `"3s"` | Probe timeout in seconds |
| `--histogramoption` | `0.5,2,6` | Histogram option for grpc metrics, should be comma separated value (ex:= "0.5,2,6" where start=0.5 factor=2, count=6) |
**Available volume parameters:**
| Parameter | Required | Description |
| ----------------------------------------------------------------------------------------------------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| --------------------------------------------------------------------------------------------------- | -------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `clusterID` | yes | String representing a Ceph cluster, must be unique across all Ceph clusters in use for provisioning, cannot be greater than 36 bytes in length, and should remain immutable for the lifetime of the Ceph cluster in use |
| `pool` | yes | Ceph pool into which the RBD image shall be created |
| `imageFormat` | no | RBD image format. Defaults to `2`. See [man pages](http://docs.ceph.com/docs/mimic/man/8/rbd/#cmdoption-rbd-image-format) |

View File

@ -1,9 +1,13 @@
# Metrics
CSI deploys a sidecar container that is responsible for collecting metrics.
- [Metrics](#metrics)
- [Liveness](#liveness)
- [GRPC metrics](#grpc-metrics)
## Liveness
CSI deploys a sidecar container that is responsible for collecting metrics.
Liveness metrics are intended to be collected by Prometheus but can be accessed
through a GET request to a specific pod IP.
@ -13,7 +17,7 @@ for example
the expected output should be
```bash
[root@worker2 /]# curl -X GET http://10.109.65.142:8080/metrics 2>/dev/null | grep csi
curl -X GET http://10.109.65.142:8080/metrics 2>/dev/null | grep csi
# HELP csi_liveness Liveness Probe
# TYPE csi_liveness gauge
csi_liveness 1
@ -28,5 +32,18 @@ pods run on port 8080 and cephfs 8081.
These can be changed if desired, or if multiple Ceph clusters are deployed, more
ports will be used for additional CSI pods.
You may need to open the ports used in your firewall depending on how your
Note: You may need to open the ports used in your firewall depending on how your
cluster is set up.
## GRPC metrics
GRPC metrics are intended to be collected by Prometheus but can be accessed
through a GET request to a specific pod IP.
Each CSI pod has a service to expose the endpoint to Prometheus. By default rbd
pods run on port 8090 and cephfs pods on port 8091.
These can be changed if desired, or if multiple Ceph clusters are deployed, more
ports will be used for additional CSI pods.
Note: You may need to open the ports used in your firewall depending on how your
cluster is set up.
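
To spot-check the grpc metrics endpoint, a small Go client works as well as curl. This is a minimal sketch; the pod IP and port below are only examples matching the defaults described above.

```go
package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// Example only: replace with the IP of a CSI pod and its configured grpc metrics port.
	resp, err := http.Get("http://10.109.65.142:8090/metrics")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	for _, line := range strings.Split(string(body), "\n") {
		// grpc_server_* series come from the go-grpc-prometheus interceptor.
		if strings.HasPrefix(line, "grpc_server_") {
			fmt.Println(line)
		}
	}
}
```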

View File

@ -5,7 +5,7 @@
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: csi-liveness
name: csi-metrics
namespace: rook-ceph
labels:
team: rook
@ -15,8 +15,11 @@ spec:
- default
selector:
matchLabels:
app: csi-liveness
app: csi-metrics
endpoints:
- port: http-metrics
path: /metrics
interval: 5s
- port: grpc-metrics
path: /metrics
interval: 5s

View File

@ -173,6 +173,9 @@ func (fs *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
}
server := csicommon.NewNonBlockingGRPCServer()
server.Start(conf.Endpoint, fs.is, fs.cs, fs.ns)
server.Start(conf.Endpoint, conf.HistogramOption, fs.is, fs.cs, fs.ns, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
go util.StartMetricsServer(conf)
}
server.Wait()
}

View File

@ -19,10 +19,14 @@ package csicommon
import (
"net"
"os"
"strconv"
"strings"
"sync"
"github.com/container-storage-interface/spec/lib/go/csi"
grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
"github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
"k8s.io/klog"
)
@ -30,7 +34,7 @@ import (
// NonBlockingGRPCServer defines Non blocking GRPC server interfaces
type NonBlockingGRPCServer interface {
// Start services at the endpoint
Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer)
Start(endpoint, hstOptions string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, metrics bool)
// Waits for the service to stop
Wait()
// Stops the service gracefully
@ -51,10 +55,10 @@ type nonBlockingGRPCServer struct {
}
// Start start service on endpoint
func (s *nonBlockingGRPCServer) Start(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {
func (s *nonBlockingGRPCServer) Start(endpoint, hstOptions string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, metrics bool) {
s.wg.Add(1)
go s.serve(endpoint, ids, cs, ns)
go s.serve(endpoint, hstOptions, ids, cs, ns, metrics)
}
// Wait blocks until the WaitGroup counter
@ -72,7 +76,7 @@ func (s *nonBlockingGRPCServer) ForceStop() {
s.server.Stop()
}
func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer) {
func (s *nonBlockingGRPCServer) serve(endpoint, hstOptions string, ids csi.IdentityServer, cs csi.ControllerServer, ns csi.NodeServer, metrics bool) {
proto, addr, err := parseEndpoint(endpoint)
if err != nil {
@ -91,13 +95,14 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c
klog.Fatalf("Failed to listen: %v", err)
}
opts := []grpc.ServerOption{
grpc_middleware.WithUnaryServerChain(
contextIDInjector,
logGRPC,
panicHandler,
),
middleWare := []grpc.UnaryServerInterceptor{contextIDInjector, logGRPC, panicHandler}
if metrics {
middleWare = append(middleWare, grpc_prometheus.UnaryServerInterceptor)
}
opts := []grpc.ServerOption{
grpc_middleware.WithUnaryServerChain(middleWare...),
}
server := grpc.NewServer(opts...)
s.server = server
@ -110,9 +115,29 @@ func (s *nonBlockingGRPCServer) serve(endpoint string, ids csi.IdentityServer, c
if ns != nil {
csi.RegisterNodeServer(server, ns)
}
klog.Infof("Listening for connections on address: %#v", listener.Addr())
if metrics {
ho := strings.Split(hstOptions, ",")
if len(ho) != 3 {
klog.Fatalf("invalid histogram options provided: %v", hstOptions)
}
start, e := strconv.ParseFloat(ho[0], 32)
if e != nil {
klog.Fatalf("failed to parse histogram start value: %v", e)
}
factor, e := strconv.ParseFloat(ho[1], 32)
if e != nil {
klog.Fatalf("failed to parse histogram factor value: %v", e)
}
count, e := strconv.Atoi(ho[2])
if e != nil {
klog.Fatalf("failed to parse histogram count value: %v", e)
}
buckets := prometheus.ExponentialBuckets(start, factor, count)
bktOptions := grpc_prometheus.WithHistogramBuckets(buckets)
grpc_prometheus.EnableHandlingTimeHistogram(bktOptions)
grpc_prometheus.Register(server)
}
err = server.Serve(listener)
if err != nil {
klog.Fatalf("Failed to serve: %v", err)
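
To make the histogram wiring above concrete, here is a minimal standalone sketch of what serve() does when metrics are enabled, using the documented default option "0.5,2,6". The sketch registers no CSI services; a real driver registers them before calling Serve.

```go
package main

import (
	"fmt"

	grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

func main() {
	// "--histogramoption=0.5,2,6" -> start=0.5, factor=2, count=6
	buckets := prometheus.ExponentialBuckets(0.5, 2, 6)
	fmt.Println(buckets) // prints [0.5 1 2 4 8 16]

	// Same wiring as serve() above, reduced to the prometheus interceptor only.
	server := grpc.NewServer(grpc_middleware.WithUnaryServerChain(
		grpc_prometheus.UnaryServerInterceptor,
	))
	grpc_prometheus.EnableHandlingTimeHistogram(grpc_prometheus.WithHistogramBuckets(buckets))
	grpc_prometheus.Register(server)
	_ = server // a real driver registers the CSI services here and then calls server.Serve(listener)
}
```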

View File

@ -81,29 +81,29 @@ func NewControllerServiceCapability(ctrlCap csi.ControllerServiceCapability_RPC_
}
// RunNodePublishServer starts node server
func RunNodePublishServer(endpoint string, d *CSIDriver, ns csi.NodeServer) {
func RunNodePublishServer(endpoint, hstOption string, d *CSIDriver, ns csi.NodeServer, m bool) {
ids := NewDefaultIdentityServer(d)
s := NewNonBlockingGRPCServer()
s.Start(endpoint, ids, nil, ns)
s.Start(endpoint, hstOption, ids, nil, ns, m)
s.Wait()
}
// RunControllerPublishServer starts controller server
func RunControllerPublishServer(endpoint string, d *CSIDriver, cs csi.ControllerServer) {
func RunControllerPublishServer(endpoint, hstOption string, d *CSIDriver, cs csi.ControllerServer, m bool) {
ids := NewDefaultIdentityServer(d)
s := NewNonBlockingGRPCServer()
s.Start(endpoint, ids, cs, nil)
s.Start(endpoint, hstOption, ids, cs, nil, m)
s.Wait()
}
// RunControllerandNodePublishServer starts both controller and node server
func RunControllerandNodePublishServer(endpoint string, d *CSIDriver, cs csi.ControllerServer, ns csi.NodeServer) {
func RunControllerandNodePublishServer(endpoint, hstOption string, d *CSIDriver, cs csi.ControllerServer, ns csi.NodeServer, m bool) {
ids := NewDefaultIdentityServer(d)
s := NewNonBlockingGRPCServer()
s.Start(endpoint, ids, cs, ns)
s.Start(endpoint, hstOption, ids, cs, ns, m)
s.Wait()
}

View File

@ -18,10 +18,6 @@ package liveness
import (
"context"
"net"
"net/http"
"os"
"strconv"
"time"
"github.com/ceph/ceph-csi/pkg/util"
@ -29,7 +25,6 @@ import (
connlib "github.com/kubernetes-csi/csi-lib-utils/connection"
"github.com/kubernetes-csi/csi-lib-utils/rpc"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/klog"
)
@ -88,21 +83,9 @@ func recordLiveness(endpoint string, pollTime, timeout time.Duration) {
func Run(conf *util.Config) {
klog.Infof("Liveness Running")
ip := os.Getenv("POD_IP")
if ip == "" {
klog.Warning("missing POD_IP env var defaulting to 0.0.0.0")
ip = "0.0.0.0"
}
// start liveness collection
go recordLiveness(conf.Endpoint, conf.PollTime, conf.PoolTimeout)
// start up prometheus endpoint
addr := net.JoinHostPort(ip, strconv.Itoa(conf.LivenessPort))
http.Handle(conf.LivenessPath, promhttp.Handler())
err := http.ListenAndServe(addr, nil)
if err != nil {
klog.Fatalln(err)
}
util.StartMetricsServer(conf)
}

View File

@ -158,6 +158,9 @@ func (r *Driver) Run(conf *util.Config, cachePersister util.CachePersister) {
}
s := csicommon.NewNonBlockingGRPCServer()
s.Start(conf.Endpoint, r.ids, r.cs, r.ns)
s.Start(conf.Endpoint, conf.HistogramOption, r.ids, r.cs, r.ns, conf.EnableGRPCMetrics)
if conf.EnableGRPCMetrics {
go util.StartMetricsServer(conf)
}
s.Wait()
}

pkg/util/httpserver.go Normal file
View File

@ -0,0 +1,27 @@
package util
import (
"net"
"net/http"
"net/url"
"strconv"
"github.com/prometheus/client_golang/prometheus/promhttp"
"k8s.io/klog"
)
// ValidateURL validates the url
func ValidateURL(c *Config) error {
_, err := url.Parse(c.MetricsPath)
return err
}
// StartMetricsServer starts http server
func StartMetricsServer(c *Config) {
addr := net.JoinHostPort(c.MetricsIP, strconv.Itoa(c.MetricsPort))
http.Handle(c.MetricsPath, promhttp.Handler())
err := http.ListenAndServe(addr, nil)
if err != nil {
klog.Fatalln(err)
}
}
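
In isolation, StartMetricsServer boils down to the standard promhttp handler. A hedged standalone equivalent, with the deployment defaults shown above hard-coded (IP 0.0.0.0, port 8090, path /metrics), looks like this:

```go
package main

import (
	"net"
	"net/http"
	"strconv"

	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Equivalent to util.StartMetricsServer with MetricsIP="0.0.0.0",
	// MetricsPort=8090 and MetricsPath="/metrics".
	addr := net.JoinHostPort("0.0.0.0", strconv.Itoa(8090))
	http.Handle("/metrics", promhttp.Handler())
	if err := http.ListenAndServe(addr, nil); err != nil {
		panic(err)
	}
}
```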

View File

@ -54,7 +54,6 @@ var (
// Config holds the parameters list which can be configured
type Config struct {
// common flags
Vtype string // driver type [rbd|cephfs|liveness]
Endpoint string // CSI endpoint
DriverName string // name of the driver
@ -62,22 +61,25 @@ type Config struct {
InstanceID string // unique ID distinguishing this instance of Ceph CSI
MetadataStorage string // metadata persistence method [node|k8s_configmap]
PluginPath string // location of cephcsi plugin
PidLimit int // PID limit to configure through cgroups")
IsControllerServer bool // if set to true start provisoner server
IsNodeServer bool // if set to true start node server
// rbd related flags
Containerized bool // whether run as containerized
// cephfs related flags
VolumeMounter string // default volume mounter (possible options are 'kernel', 'fuse')
MountCacheDir string // mount info cache save dir
// livenes related flags
LivenessPort int // TCP port for liveness requests"
LivenessPath string // path of prometheus endpoint where metrics will be available
// metrics related flags
MetricsPath string // path of prometheus endpoint where metrics will be available
HistogramOption string // Histogram option for grpc metrics, should be comma separated value, ex:= "0.5,2,6" where start=0.5 factor=2, count=6
MetricsIP string // IP address on which liveness/grpc metrics are exposed
PidLimit int // PID limit to configure through cgroups
MetricsPort int // TCP port for liveness/grpc metrics requests
PollTime time.Duration // time interval in seconds between each poll
PoolTimeout time.Duration // probe timeout in seconds
EnableGRPCMetrics bool // option to enable grpc metrics
IsControllerServer bool // if set to true start provisioner server
IsNodeServer bool // if set to true start node server
// rbd related flag
Containerized bool // whether run as containerized
}

View File

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,39 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
// gRPC Prometheus monitoring interceptors for client-side gRPC.
package grpc_prometheus
import (
prom "github.com/prometheus/client_golang/prometheus"
)
var (
// DefaultClientMetrics is the default instance of ClientMetrics. It is
// intended to be used in conjunction the default Prometheus metrics
// registry.
DefaultClientMetrics = NewClientMetrics()
// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
UnaryClientInterceptor = DefaultClientMetrics.UnaryClientInterceptor()
// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
StreamClientInterceptor = DefaultClientMetrics.StreamClientInterceptor()
)
func init() {
prom.MustRegister(DefaultClientMetrics.clientStartedCounter)
prom.MustRegister(DefaultClientMetrics.clientHandledCounter)
prom.MustRegister(DefaultClientMetrics.clientStreamMsgReceived)
prom.MustRegister(DefaultClientMetrics.clientStreamMsgSent)
}
// EnableClientHandlingTimeHistogram turns on recording of handling time of
// RPCs. Histogram metrics can be very expensive for Prometheus to retain and
// query. This function acts on the DefaultClientMetrics variable and the
// default Prometheus metrics registry.
func EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
DefaultClientMetrics.EnableClientHandlingTimeHistogram(opts...)
prom.Register(DefaultClientMetrics.clientHandledHistogram)
}

View File

@ -0,0 +1,170 @@
package grpc_prometheus
import (
"io"
prom "github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// ClientMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC client.
type ClientMetrics struct {
clientStartedCounter *prom.CounterVec
clientHandledCounter *prom.CounterVec
clientStreamMsgReceived *prom.CounterVec
clientStreamMsgSent *prom.CounterVec
clientHandledHistogramEnabled bool
clientHandledHistogramOpts prom.HistogramOpts
clientHandledHistogram *prom.HistogramVec
}
// NewClientMetrics returns a ClientMetrics object. Use a new instance of
// ClientMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewClientMetrics(counterOpts ...CounterOption) *ClientMetrics {
opts := counterOptions(counterOpts)
return &ClientMetrics{
clientStartedCounter: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_client_started_total",
Help: "Total number of RPCs started on the client.",
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
clientHandledCounter: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_client_handled_total",
Help: "Total number of RPCs completed by the client, regardless of success or failure.",
}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
clientStreamMsgReceived: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_client_msg_received_total",
Help: "Total number of RPC stream messages received by the client.",
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
clientStreamMsgSent: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_client_msg_sent_total",
Help: "Total number of gRPC stream messages sent by the client.",
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
clientHandledHistogramEnabled: false,
clientHandledHistogramOpts: prom.HistogramOpts{
Name: "grpc_client_handling_seconds",
Help: "Histogram of response latency (seconds) of the gRPC until it is finished by the application.",
Buckets: prom.DefBuckets,
},
clientHandledHistogram: nil,
}
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ClientMetrics) Describe(ch chan<- *prom.Desc) {
m.clientStartedCounter.Describe(ch)
m.clientHandledCounter.Describe(ch)
m.clientStreamMsgReceived.Describe(ch)
m.clientStreamMsgSent.Describe(ch)
if m.clientHandledHistogramEnabled {
m.clientHandledHistogram.Describe(ch)
}
}
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ClientMetrics) Collect(ch chan<- prom.Metric) {
m.clientStartedCounter.Collect(ch)
m.clientHandledCounter.Collect(ch)
m.clientStreamMsgReceived.Collect(ch)
m.clientStreamMsgSent.Collect(ch)
if m.clientHandledHistogramEnabled {
m.clientHandledHistogram.Collect(ch)
}
}
// EnableClientHandlingTimeHistogram turns on recording of handling time of RPCs.
// Histogram metrics can be very expensive for Prometheus to retain and query.
func (m *ClientMetrics) EnableClientHandlingTimeHistogram(opts ...HistogramOption) {
for _, o := range opts {
o(&m.clientHandledHistogramOpts)
}
if !m.clientHandledHistogramEnabled {
m.clientHandledHistogram = prom.NewHistogramVec(
m.clientHandledHistogramOpts,
[]string{"grpc_type", "grpc_service", "grpc_method"},
)
}
m.clientHandledHistogramEnabled = true
}
// UnaryClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ClientMetrics) UnaryClientInterceptor() func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
monitor := newClientReporter(m, Unary, method)
monitor.SentMessage()
err := invoker(ctx, method, req, reply, cc, opts...)
if err != nil {
monitor.ReceivedMessage()
}
st, _ := status.FromError(err)
monitor.Handled(st.Code())
return err
}
}
// StreamClientInterceptor is a gRPC client-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ClientMetrics) StreamClientInterceptor() func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
monitor := newClientReporter(m, clientStreamType(desc), method)
clientStream, err := streamer(ctx, desc, cc, method, opts...)
if err != nil {
st, _ := status.FromError(err)
monitor.Handled(st.Code())
return nil, err
}
return &monitoredClientStream{clientStream, monitor}, nil
}
}
func clientStreamType(desc *grpc.StreamDesc) grpcType {
if desc.ClientStreams && !desc.ServerStreams {
return ClientStream
} else if !desc.ClientStreams && desc.ServerStreams {
return ServerStream
}
return BidiStream
}
// monitoredClientStream wraps grpc.ClientStream allowing each Sent/Recv of message to increment counters.
type monitoredClientStream struct {
grpc.ClientStream
monitor *clientReporter
}
func (s *monitoredClientStream) SendMsg(m interface{}) error {
err := s.ClientStream.SendMsg(m)
if err == nil {
s.monitor.SentMessage()
}
return err
}
func (s *monitoredClientStream) RecvMsg(m interface{}) error {
err := s.ClientStream.RecvMsg(m)
if err == nil {
s.monitor.ReceivedMessage()
} else if err == io.EOF {
s.monitor.Handled(codes.OK)
} else {
st, _ := status.FromError(err)
s.monitor.Handled(st.Code())
}
return err
}

View File

@ -0,0 +1,46 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
package grpc_prometheus
import (
"time"
"google.golang.org/grpc/codes"
)
type clientReporter struct {
metrics *ClientMetrics
rpcType grpcType
serviceName string
methodName string
startTime time.Time
}
func newClientReporter(m *ClientMetrics, rpcType grpcType, fullMethod string) *clientReporter {
r := &clientReporter{
metrics: m,
rpcType: rpcType,
}
if r.metrics.clientHandledHistogramEnabled {
r.startTime = time.Now()
}
r.serviceName, r.methodName = splitMethodName(fullMethod)
r.metrics.clientStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
return r
}
func (r *clientReporter) ReceivedMessage() {
r.metrics.clientStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *clientReporter) SentMessage() {
r.metrics.clientStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *clientReporter) Handled(code codes.Code) {
r.metrics.clientHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
if r.metrics.clientHandledHistogramEnabled {
r.metrics.clientHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
}
}

View File

@ -0,0 +1,41 @@
package grpc_prometheus
import (
prom "github.com/prometheus/client_golang/prometheus"
)
// A CounterOption lets you add options to Counter metrics using With* funcs.
type CounterOption func(*prom.CounterOpts)
type counterOptions []CounterOption
func (co counterOptions) apply(o prom.CounterOpts) prom.CounterOpts {
for _, f := range co {
f(&o)
}
return o
}
// WithConstLabels allows you to add ConstLabels to Counter metrics.
func WithConstLabels(labels prom.Labels) CounterOption {
return func(o *prom.CounterOpts) {
o.ConstLabels = labels
}
}
// A HistogramOption lets you add options to Histogram metrics using With*
// funcs.
type HistogramOption func(*prom.HistogramOpts)
// WithHistogramBuckets allows you to specify custom bucket ranges for histograms if EnableHandlingTimeHistogram is on.
func WithHistogramBuckets(buckets []float64) HistogramOption {
return func(o *prom.HistogramOpts) { o.Buckets = buckets }
}
// WithHistogramConstLabels allows you to add custom ConstLabels to
// histogram metrics.
func WithHistogramConstLabels(labels prom.Labels) HistogramOption {
return func(o *prom.HistogramOpts) {
o.ConstLabels = labels
}
}
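
A short sketch of how these option helpers compose, assuming the caller builds its own ServerMetrics instance; the "driver" label and the bucket values (start 0.5s, factor 2, count 6) are illustrative only:

package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
)

// newInstrumentedServerMetrics is a hypothetical helper showing both option
// types above in use: const labels on the counters and custom exponential
// buckets on the handling-time histogram.
func newInstrumentedServerMetrics() *grpc_prometheus.ServerMetrics {
	metrics := grpc_prometheus.NewServerMetrics(
		grpc_prometheus.WithConstLabels(prometheus.Labels{"driver": "example-csi"}),
	)
	metrics.EnableHandlingTimeHistogram(
		grpc_prometheus.WithHistogramBuckets(prometheus.ExponentialBuckets(0.5, 2, 6)),
	)
	return metrics
}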

View File

@ -0,0 +1,48 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
// gRPC Prometheus monitoring interceptors for server-side gRPC.
package grpc_prometheus
import (
prom "github.com/prometheus/client_golang/prometheus"
"google.golang.org/grpc"
)
var (
// DefaultServerMetrics is the default instance of ServerMetrics. It is
// intended to be used in conjunction with the default Prometheus metrics
// registry.
DefaultServerMetrics = NewServerMetrics()
// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
UnaryServerInterceptor = DefaultServerMetrics.UnaryServerInterceptor()
// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
StreamServerInterceptor = DefaultServerMetrics.StreamServerInterceptor()
)
func init() {
prom.MustRegister(DefaultServerMetrics.serverStartedCounter)
prom.MustRegister(DefaultServerMetrics.serverHandledCounter)
prom.MustRegister(DefaultServerMetrics.serverStreamMsgReceived)
prom.MustRegister(DefaultServerMetrics.serverStreamMsgSent)
}
// Register takes a gRPC server and pre-initializes all counters to 0. This
// allows for easier monitoring in Prometheus (no missing metrics), and should
// be called *after* all services have been registered with the server. This
// function acts on the DefaultServerMetrics variable.
func Register(server *grpc.Server) {
DefaultServerMetrics.InitializeMetrics(server)
}
// EnableHandlingTimeHistogram turns on recording of handling time
// of RPCs. Histogram metrics can be very expensive for Prometheus
// to retain and query. This function acts on the DefaultServerMetrics
// variable and the default Prometheus metrics registry.
func EnableHandlingTimeHistogram(opts ...HistogramOption) {
DefaultServerMetrics.EnableHandlingTimeHistogram(opts...)
prom.Register(DefaultServerMetrics.serverHandledHistogram)
}
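
To show how the default instance and the two helpers above fit together, a minimal server-side sketch; the service registration, handler types, and listen address are assumptions for illustration and not taken from this commit:

package main

import (
	"net/http"

	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	"google.golang.org/grpc"
)

// newMonitoredServer is a hypothetical sketch of the default-registry flow:
// install the interceptors, register gRPC services (omitted here), then call
// Register so every method's counters exist at zero before the first RPC.
func newMonitoredServer() *grpc.Server {
	server := grpc.NewServer(
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
	)
	// ... pb.RegisterSomeServiceServer(server, &impl{}) would go here ...
	grpc_prometheus.EnableHandlingTimeHistogram()
	grpc_prometheus.Register(server)

	// Expose the default registry over HTTP; the address is an assumption.
	http.Handle("/metrics", promhttp.Handler())
	go func() { _ = http.ListenAndServe(":8080", nil) }()
	return server
}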

View File

@ -0,0 +1,185 @@
package grpc_prometheus
import (
prom "github.com/prometheus/client_golang/prometheus"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
// ServerMetrics represents a collection of metrics to be registered on a
// Prometheus metrics registry for a gRPC server.
type ServerMetrics struct {
serverStartedCounter *prom.CounterVec
serverHandledCounter *prom.CounterVec
serverStreamMsgReceived *prom.CounterVec
serverStreamMsgSent *prom.CounterVec
serverHandledHistogramEnabled bool
serverHandledHistogramOpts prom.HistogramOpts
serverHandledHistogram *prom.HistogramVec
}
// NewServerMetrics returns a ServerMetrics object. Use a new instance of
// ServerMetrics when not using the default Prometheus metrics registry, for
// example when wanting to control which metrics are added to a registry as
// opposed to automatically adding metrics via init functions.
func NewServerMetrics(counterOpts ...CounterOption) *ServerMetrics {
opts := counterOptions(counterOpts)
return &ServerMetrics{
serverStartedCounter: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_server_started_total",
Help: "Total number of RPCs started on the server.",
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
serverHandledCounter: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_server_handled_total",
Help: "Total number of RPCs completed on the server, regardless of success or failure.",
}), []string{"grpc_type", "grpc_service", "grpc_method", "grpc_code"}),
serverStreamMsgReceived: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_server_msg_received_total",
Help: "Total number of RPC stream messages received on the server.",
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
serverStreamMsgSent: prom.NewCounterVec(
opts.apply(prom.CounterOpts{
Name: "grpc_server_msg_sent_total",
Help: "Total number of gRPC stream messages sent by the server.",
}), []string{"grpc_type", "grpc_service", "grpc_method"}),
serverHandledHistogramEnabled: false,
serverHandledHistogramOpts: prom.HistogramOpts{
Name: "grpc_server_handling_seconds",
Help: "Histogram of response latency (seconds) of gRPC that had been application-level handled by the server.",
Buckets: prom.DefBuckets,
},
serverHandledHistogram: nil,
}
}
// EnableHandlingTimeHistogram enables the handling-time histogram to be
// registered when the ServerMetrics is registered on a Prometheus registry.
// Histograms can be expensive on Prometheus servers. It takes options to
// configure the histogram, such as the buckets used.
func (m *ServerMetrics) EnableHandlingTimeHistogram(opts ...HistogramOption) {
for _, o := range opts {
o(&m.serverHandledHistogramOpts)
}
if !m.serverHandledHistogramEnabled {
m.serverHandledHistogram = prom.NewHistogramVec(
m.serverHandledHistogramOpts,
[]string{"grpc_type", "grpc_service", "grpc_method"},
)
}
m.serverHandledHistogramEnabled = true
}
// Describe sends the super-set of all possible descriptors of metrics
// collected by this Collector to the provided channel and returns once
// the last descriptor has been sent.
func (m *ServerMetrics) Describe(ch chan<- *prom.Desc) {
m.serverStartedCounter.Describe(ch)
m.serverHandledCounter.Describe(ch)
m.serverStreamMsgReceived.Describe(ch)
m.serverStreamMsgSent.Describe(ch)
if m.serverHandledHistogramEnabled {
m.serverHandledHistogram.Describe(ch)
}
}
// Collect is called by the Prometheus registry when collecting
// metrics. The implementation sends each collected metric via the
// provided channel and returns once the last metric has been sent.
func (m *ServerMetrics) Collect(ch chan<- prom.Metric) {
m.serverStartedCounter.Collect(ch)
m.serverHandledCounter.Collect(ch)
m.serverStreamMsgReceived.Collect(ch)
m.serverStreamMsgSent.Collect(ch)
if m.serverHandledHistogramEnabled {
m.serverHandledHistogram.Collect(ch)
}
}
// UnaryServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Unary RPCs.
func (m *ServerMetrics) UnaryServerInterceptor() func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) {
monitor := newServerReporter(m, Unary, info.FullMethod)
monitor.ReceivedMessage()
resp, err := handler(ctx, req)
st, _ := status.FromError(err)
monitor.Handled(st.Code())
if err == nil {
monitor.SentMessage()
}
return resp, err
}
}
// StreamServerInterceptor is a gRPC server-side interceptor that provides Prometheus monitoring for Streaming RPCs.
func (m *ServerMetrics) StreamServerInterceptor() func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
monitor := newServerReporter(m, streamRPCType(info), info.FullMethod)
err := handler(srv, &monitoredServerStream{ss, monitor})
st, _ := status.FromError(err)
monitor.Handled(st.Code())
return err
}
}
// InitializeMetrics initializes all metrics, with their appropriate null
// value, for all gRPC methods registered on a gRPC server. This is useful to
// ensure that all metrics exist when collecting and querying.
func (m *ServerMetrics) InitializeMetrics(server *grpc.Server) {
serviceInfo := server.GetServiceInfo()
for serviceName, info := range serviceInfo {
for _, mInfo := range info.Methods {
preRegisterMethod(m, serviceName, &mInfo)
}
}
}
func streamRPCType(info *grpc.StreamServerInfo) grpcType {
if info.IsClientStream && !info.IsServerStream {
return ClientStream
} else if !info.IsClientStream && info.IsServerStream {
return ServerStream
}
return BidiStream
}
// monitoredServerStream wraps grpc.ServerStream allowing each Sent/Recv of message to increment counters.
type monitoredServerStream struct {
grpc.ServerStream
monitor *serverReporter
}
func (s *monitoredServerStream) SendMsg(m interface{}) error {
err := s.ServerStream.SendMsg(m)
if err == nil {
s.monitor.SentMessage()
}
return err
}
func (s *monitoredServerStream) RecvMsg(m interface{}) error {
err := s.ServerStream.RecvMsg(m)
if err == nil {
s.monitor.ReceivedMessage()
}
return err
}
// preRegisterMethod is invoked on Register of a Server, allowing all gRPC services labels to be pre-populated.
func preRegisterMethod(metrics *ServerMetrics, serviceName string, mInfo *grpc.MethodInfo) {
methodName := mInfo.Name
methodType := string(typeFromMethodInfo(mInfo))
// These are just references (no increments), as just referencing will create the labels but not set values.
metrics.serverStartedCounter.GetMetricWithLabelValues(methodType, serviceName, methodName)
metrics.serverStreamMsgReceived.GetMetricWithLabelValues(methodType, serviceName, methodName)
metrics.serverStreamMsgSent.GetMetricWithLabelValues(methodType, serviceName, methodName)
if metrics.serverHandledHistogramEnabled {
metrics.serverHandledHistogram.GetMetricWithLabelValues(methodType, serviceName, methodName)
}
for _, code := range allCodes {
metrics.serverHandledCounter.GetMetricWithLabelValues(methodType, serviceName, methodName, code.String())
}
}
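
For completeness, a sketch of using ServerMetrics with a dedicated registry rather than the package defaults; the helper name and the omitted service registration are assumptions:

package main

import (
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/prometheus/client_golang/prometheus"
	"google.golang.org/grpc"
)

// buildServerWithCustomRegistry is a hypothetical sketch of the
// non-default-registry path: ServerMetrics satisfies prometheus.Collector via
// the Describe/Collect methods above, so it can be registered explicitly.
func buildServerWithCustomRegistry() (*grpc.Server, *prometheus.Registry) {
	metrics := grpc_prometheus.NewServerMetrics()
	registry := prometheus.NewRegistry()
	registry.MustRegister(metrics)

	server := grpc.NewServer(
		grpc.UnaryInterceptor(metrics.UnaryServerInterceptor()),
		grpc.StreamInterceptor(metrics.StreamServerInterceptor()),
	)
	// ... register gRPC services here ...
	metrics.InitializeMetrics(server) // pre-populate every method's label set at zero
	// promhttp.HandlerFor(registry, promhttp.HandlerOpts{}) would expose it over HTTP.
	return server, registry
}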

View File

@ -0,0 +1,46 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
package grpc_prometheus
import (
"time"
"google.golang.org/grpc/codes"
)
type serverReporter struct {
metrics *ServerMetrics
rpcType grpcType
serviceName string
methodName string
startTime time.Time
}
func newServerReporter(m *ServerMetrics, rpcType grpcType, fullMethod string) *serverReporter {
r := &serverReporter{
metrics: m,
rpcType: rpcType,
}
if r.metrics.serverHandledHistogramEnabled {
r.startTime = time.Now()
}
r.serviceName, r.methodName = splitMethodName(fullMethod)
r.metrics.serverStartedCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
return r
}
func (r *serverReporter) ReceivedMessage() {
r.metrics.serverStreamMsgReceived.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *serverReporter) SentMessage() {
r.metrics.serverStreamMsgSent.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Inc()
}
func (r *serverReporter) Handled(code codes.Code) {
r.metrics.serverHandledCounter.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName, code.String()).Inc()
if r.metrics.serverHandledHistogramEnabled {
r.metrics.serverHandledHistogram.WithLabelValues(string(r.rpcType), r.serviceName, r.methodName).Observe(time.Since(r.startTime).Seconds())
}
}
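
One subtle point worth illustrating: Handled always receives a concrete code. A minimal sketch of the mapping the interceptors perform before calling it; the helper name is an assumption, not part of the library:

package grpc_prometheus

import "google.golang.org/grpc/status"

// codeLabel is a hypothetical helper mirroring what the interceptors do
// before calling Handled: a nil error yields "OK", a plain non-status error
// yields "Unknown", and a gRPC status error yields its own code, so the
// grpc_code label is always populated.
func codeLabel(err error) string {
	st, _ := status.FromError(err)
	return st.Code().String()
}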

View File

@ -0,0 +1,50 @@
// Copyright 2016 Michal Witkowski. All Rights Reserved.
// See LICENSE for licensing terms.
package grpc_prometheus
import (
"strings"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
type grpcType string
const (
Unary grpcType = "unary"
ClientStream grpcType = "client_stream"
ServerStream grpcType = "server_stream"
BidiStream grpcType = "bidi_stream"
)
var (
allCodes = []codes.Code{
codes.OK, codes.Canceled, codes.Unknown, codes.InvalidArgument, codes.DeadlineExceeded, codes.NotFound,
codes.AlreadyExists, codes.PermissionDenied, codes.Unauthenticated, codes.ResourceExhausted,
codes.FailedPrecondition, codes.Aborted, codes.OutOfRange, codes.Unimplemented, codes.Internal,
codes.Unavailable, codes.DataLoss,
}
)
func splitMethodName(fullMethodName string) (string, string) {
fullMethodName = strings.TrimPrefix(fullMethodName, "/") // remove leading slash
if i := strings.Index(fullMethodName, "/"); i >= 0 {
return fullMethodName[:i], fullMethodName[i+1:]
}
return "unknown", "unknown"
}
func typeFromMethodInfo(mInfo *grpc.MethodInfo) grpcType {
if !mInfo.IsClientStream && !mInfo.IsServerStream {
return Unary
}
if mInfo.IsClientStream && !mInfo.IsServerStream {
return ClientStream
}
if !mInfo.IsClientStream && mInfo.IsServerStream {
return ServerStream
}
return BidiStream
}
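
As a concrete illustration of how the grpc_service and grpc_method labels are derived from a full method name, a small test-style sketch; the CSI method string is an assumed example input, not part of this commit:

package grpc_prometheus

import "testing"

// TestSplitMethodName shows that "/csi.v1.Controller/CreateVolume" splits
// into the service "csi.v1.Controller" and the method "CreateVolume".
func TestSplitMethodName(t *testing.T) {
	service, method := splitMethodName("/csi.v1.Controller/CreateVolume")
	if service != "csi.v1.Controller" || method != "CreateVolume" {
		t.Fatalf("unexpected split: %q %q", service, method)
	}
}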