# common_metrics.yml
dashboard: 'Environment metrics'
priority: 1
panel_groups:
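# Query strings below use GitLab-interpolated variables such as
# {{ci_environment_slug}}, {{kube_namespace}}, {{function_name}}, and
# {{environment_filter}}; GitLab substitutes them before sending each
# query to Prometheus.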
- group: System metrics (Kubernetes)
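  # The container_memory_usage_bytes and container_cpu_usage_seconds_total
  # series used below are typically exposed by kubelet/cAdvisor.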
  panels:
  - title: "Memory Usage (Total)"
    type: "area-chart"
    y_label: "Total Memory Used (GB)"
    metrics:
    - id: system_metrics_kubernetes_container_memory_total
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
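      # container_memory_usage_bytes is reported in bytes; /1024/1024/1024 converts to GB.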
      query_range: 'avg(sum(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job)  /1024/1024/1024     OR      avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job)  /1024/1024/1024'
      label: Total (GB)
      unit: GB
  - title: "Core Usage (Total)"
    type: "area-chart"
    y_label: "Total Cores"
    metrics:
    - id: system_metrics_kubernetes_container_cores_total
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
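      # rate(...[15m]) yields CPU-seconds consumed per second, i.e. the number of cores in use.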
      query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job)     OR      avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job)'
      label: Total (cores)
      unit: "cores"
  - title: "Memory Usage (Pod average)"
    type: "line-chart"
    y_label: "Memory Used per Pod (MB)"
    metrics:
    - id: system_metrics_kubernetes_container_memory_average
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
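      # The long alternation in the pod regex excludes canary pods; Prometheus
      # regexes (RE2) have no negative lookahead, so "canary" is ruled out
      # character by character.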
      query_range: 'avg(sum(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024     OR      avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024'
      label: Pod average (MB)
      unit: MB
  - title: "Canary: Memory Usage (Pod Average)"
    type: "line-chart"
    y_label: "Memory Used per Pod (MB)"
    metrics:
    - id: system_metrics_kubernetes_container_memory_average_canary
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024     OR      avg(sum(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) by (job)) without (job) / count(avg(container_memory_usage_bytes{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}) without (job)) /1024/1024'
      label: Pod average (MB)
      unit: MB
      track: canary
  - title: "Core Usage (Pod Average)"
    type: "line-chart"
    y_label: "Cores per Pod"
    metrics:
    - id: system_metrics_kubernetes_container_core_usage
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
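      # Uses the same canary-excluding pod regex as the memory panel above.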
      query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod))     OR      avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-([^c].*|c([^a]|a([^n]|n([^a]|a([^r]|r[^y])))).*|)-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod_name))'
      label: Pod average (cores)
      unit: "cores"
  - title: "Canary: Core Usage (Pod Average)"
    type: "line-chart"
    y_label: "Cores per Pod"
    metrics:
    - id: system_metrics_kubernetes_container_core_usage_canary
      # Remove the second metric (after OR) when we drop support for K8s 1.13
      # https://gitlab.com/gitlab-org/gitlab/-/issues/229279
      query_range: 'avg(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container!="POD",pod=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod))     OR      avg(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (job)) without (job) / count(sum(rate(container_cpu_usage_seconds_total{container_name!="POD",pod_name=~"^{{ci_environment_slug}}-canary-(.*)",namespace="{{kube_namespace}}"}[15m])) by (pod_name))'
      label: Pod average (cores)
      unit: "cores"
      track: canary
  - title: "Knative function invocations"
    type: "area-chart"
    y_label: "Invocations"
    metrics:
    - id: system_metrics_knative_function_invocation_count
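      # rate(...[1m]) gives requests per second from Istio; *60 converts to invocations per minute.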
      query_range: 'sum(ceil(rate(istio_requests_total{destination_service_namespace="{{kube_namespace}}", destination_service=~"{{function_name}}.*"}[1m])*60))'
      label: invocations / minute
      unit: requests
# NGINX Ingress metrics for pre-0.16.0 versions
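# (these panels read VTS-module series such as nginx_upstream_responses_total)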
- group: Response metrics (NGINX Ingress VTS)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_nginx_ingress_throughput_status_code
      query_range: 'sum(rate(nginx_upstream_responses_total{upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"}[2m])) by (status_code)'
      unit: req / sec
      label: Status Code
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    y_axis:
      format: milliseconds
    metrics:
    - id: response_metrics_nginx_ingress_latency_pod_average
      query_range: 'avg(nginx_upstream_response_msecs_avg{upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"})'
      label: Pod average (ms)
      unit: ms
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "HTTP Errors (%)"
    y_axis:
      format: percentHundred
    metrics:
    - id: response_metrics_nginx_ingress_http_error_rate
      query_range: 'sum(rate(nginx_upstream_responses_total{status_code="5xx", upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"}[2m])) / sum(rate(nginx_upstream_responses_total{upstream=~"{{kube_namespace}}-{{ci_environment_slug}}-.*"}[2m])) * 100'
      label: 5xx Errors (%)
      unit: "%"
# NGINX Ingress metrics for post-0.16.0 versions
- group: Response metrics (NGINX Ingress)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_nginx_ingress_16_throughput_status_code
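      # label_replace() rewrites each "status" label (e.g. 204, 503) into a
      # "status_code" bucket (2xx, 5xx) before summing.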
      query_range: 'sum(label_replace(rate(nginx_ingress_controller_requests{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m]), "status_code", "${1}xx", "status", "(.)..")) by (status_code)'
      unit: req / sec
      label: Status Code
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    metrics:
    - id: response_metrics_nginx_ingress_16_latency_pod_average
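      # mean upstream latency = latency-seconds sum / request count; *1000 converts to ms.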
      query_range: 'sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_sum{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) / sum(rate(nginx_ingress_controller_ingress_upstream_latency_seconds_count{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) * 1000'
      label: Pod average (ms)
      unit: ms
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "HTTP Errors (%)"
    metrics:
    - id: response_metrics_nginx_ingress_16_http_error_rate
      query_range: 'sum(rate(nginx_ingress_controller_requests{status=~"5.*",namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) / sum(rate(nginx_ingress_controller_requests{namespace="{{kube_namespace}}",ingress=~".*{{ci_environment_slug}}.*"}[2m])) * 100'
      label: 5xx Errors (%)
      unit: "%"
- group: Response metrics (HA Proxy)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_ha_proxy_throughput_status_code
      query_range: 'sum(rate(haproxy_frontend_http_requests_total{ {{environment_filter}} }[2m])) by (code)'
      unit: req / sec
      label: Status Code
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "Error Rate (%)"
    metrics:
    - id: response_metrics_ha_proxy_http_error_rate
      query_range: 'sum(rate(haproxy_frontend_http_responses_total{code="5xx",{{environment_filter}} }[2m])) / sum(rate(haproxy_frontend_http_responses_total{ {{environment_filter}} }[2m]))'
      label: HTTP Errors (%)
      unit: "%"
- group: Response metrics (AWS ELB)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_aws_elb_throughput_requests
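      # Assumes aws_elb_request_count_sum covers a 60-second CloudWatch period,
      # so /60 yields requests per second.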
      query_range: 'sum(aws_elb_request_count_sum{ {{environment_filter}} }) / 60'
      label: Total (req/sec)
      unit: req / sec
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    metrics:
    - id: response_metrics_aws_elb_latency_average
      query_range: 'avg(aws_elb_latency_average{ {{environment_filter}} }) * 1000'
      label: Average (ms)
      unit: ms
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "Error Rate (%)"
    metrics:
    - id: response_metrics_aws_elb_http_error_rate
      query_range: 'sum(aws_elb_httpcode_backend_5_xx_sum{ {{environment_filter}} }) / sum(aws_elb_request_count_sum{ {{environment_filter}} })'
      label: HTTP Errors (%)
      unit: "%"
- group: Response metrics (NGINX)
  panels:
  - title: "Throughput"
    type: "area-chart"
    y_label: "Requests / Sec"
    metrics:
    - id: response_metrics_nginx_throughput_status_code
      query_range: 'sum(rate(nginx_server_requests{server_zone!="*", server_zone!="_", {{environment_filter}} }[2m])) by (code)'
      unit: req / sec
      label: Status Code
  - title: "Latency"
    type: "area-chart"
    y_label: "Latency (ms)"
    metrics:
    - id: response_metrics_nginx_latency
      query_range: 'avg(nginx_server_requestMsec{ {{environment_filter}} })'
      label: Upstream (ms)
      unit: ms
  - title: "HTTP Error Rate (Errors / Sec)"
    type: "area-chart"
    y_label: "HTTP 500 Errors / Sec"
    y_axis:
      precision: 0
    metrics:
    - id: response_metrics_nginx_http_error_rate
      query_range: 'sum(rate(nginx_server_requests{code="5xx", {{environment_filter}} }[2m]))'
      label: HTTP Errors
      unit: "errors / sec"
  - title: "HTTP Error Rate"
    type: "area-chart"
    y_label: "HTTP Errors (%)"
    metrics:
    - id: response_metrics_nginx_http_error_percentage
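      # code="total" is assumed to be the exporter's aggregate all-requests series,
      # used here as the denominator.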
      query_range: 'sum(rate(nginx_server_requests{code=~"5.*", host="*", {{environment_filter}} }[2m])) / sum(rate(nginx_server_requests{code="total", host="*", {{environment_filter}} }[2m])) * 100'
      label: 5xx Errors (%)
      unit: "%"