# The "region" parameter specifies the region in which to execute the job.
# If omitted, this inherits the default region name of "global".
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This must be provided.
datacenters = "${datacenters}"
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
# For more information, please see the online documentation at:
# https://www.nomadproject.io/docs/jobspec/schedulers
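# As a minimal illustration only (not taken verbatim from this job), a
# long-running service job would set:
#
#   type = "service"   # other scheduler types include "batch" and "system"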
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case, this specifies to update a single task
# group at a time.
health_check = "checks"
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
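# As an illustrative sketch only (the values below are examples, not the
# templated values this job actually uses), a blue/green update stanza has
# the shape:
#
#   update {
#     max_parallel = 1
#     canary       = 2   # equal to the group "count" for blue/green
#     auto_revert  = true
#   }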
# Specifies if the job should auto-promote to the canary version when all
# canaries become healthy during a deployment. Defaults to false, which means
# canaries must be manually promoted with the "nomad deployment promote"
# command.
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
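# A minimal sketch of the two related flags (illustrative only; the actual
# values in this job are supplied through template variables):
#
#   auto_promote = false   # promote canaries manually, e.g.:
#                          #   nomad deployment promote <deployment-id>
#   auto_revert  = true    # roll back to the last stable job on failure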
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
# For more information and examples on the "group" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/group
group "prod-group1-${service_name}" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = ${group_count}
# The volume stanza allows the group to specify that it requires a given
# volume from the cluster.
# For more information and examples on the "volume" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/volume
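# As a sketch of how a host volume requested by the group pairs with a
# volume_mount in a task (illustrative names and paths; this job drives the
# real values through the "use_host_volume", "host_volume" and "data_dir"
# template variables):
#
#   volume "data" {
#     type      = "host"
#     read_only = false
#     source    = "prod-volume-data"
#   }
#
#   task "example" {
#     volume_mount {
#       volume      = "data"
#       destination = "/data/"
#     }
#   }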
%{ if use_host_volume }
volume "prod-volume1-${service_name}" {
source = "${host_volume}"
# The constraint allows restricting the set of eligible nodes. Constraints
# may filter on attributes or client metadata.
# For more information and examples on the "constraint" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/constraint
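# As an illustrative sketch (the operator and value shown here are examples,
# not necessarily the ones used by this job), a complete constraint stanza
# looks like:
#
#   constraint {
#     attribute = "$${attr.cpu.arch}"
#     operator  = "!="
#     value     = "arm64"
#   }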
attribute = "$${attr.cpu.arch}"
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
# For more information and examples on the "task" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/task
task "prod-task1-${service_name}" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
%{ if use_host_volume }
volume = "prod-volume1-${service_name}"
destination = "${data_dir}"
%{ if use_vault_provider }
policies = "${vault_kv_policy_name}"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
command = "local/prometheus-${version}.linux-amd64/prometheus"
"--config.file=secrets/prometheus.yml",
"--storage.tsdb.path=${data_dir}prometheus/",
"--storage.tsdb.retention.time=15d"
# The artifact stanza instructs Nomad to fetch and unpack a remote resource,
# such as a file, tarball, or binary. Nomad downloads artifacts using the
# popular go-getter library, which permits downloading artifacts from a
# variety of locations using a URL as the input source.
# For more information and examples on the "artifact" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/artifact
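# The "command" above points at the Prometheus binary unpacked under local/,
# so the artifact stanza for this task fetches a release tarball. An
# illustrative sketch only (the actual source URL used by this job is
# templated and may point elsewhere):
#
#   artifact {
#     source = "https://github.com/prometheus/prometheus/releases/download/v2.24.0/prometheus-2.24.0.linux-amd64.tar.gz"
#   }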
# The "template" stanza instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
# For more information and examples on the "template" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/template
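# The template below ships the Prometheus alerting rules. Note that
# "change_signal" only takes effect together with change_mode = "signal";
# as a sketch (fields other than those visible below are assumptions), the
# stanza has the shape:
#
#   template {
#     change_mode   = "signal"
#     change_signal = "SIGINT"
#     destination   = "secrets/alerts.yml"
#     data          = <<EOH
#     ...
#     EOH
#   }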
change_signal = "SIGINT"
destination = "secrets/alerts.yml"
left_delimiter = "{{{"
right_delimiter = "}}}"
- alert: ConsulServiceHealthcheckFailed
expr: consul_catalog_service_node_healthy == 0
summary: "Consul service healthcheck failed (instance {{ $labels.instance }})."
description: "Service: `{{ $labels.service_name }}` Healthcheck: `{{ $labels.service_id }}`."
- alert: ConsulMissingMasterNode
expr: consul_raft_peers < 3
summary: "Consul missing master node (instance {{ $labels.instance }})."
description: "The number of Consul raft peers should be 3 in order to preserve quorum."
- alert: ConsulAgentUnhealthy
expr: consul_health_node_status{status="critical"} == 1
summary: "Consul agent unhealthy (instance {{ $labels.instance }})."
description: "A Consul agent is down."
summary: "Prometheus target missing (instance {{ $labels.instance }})."
description: "A Prometheus target has disappeared. An exporter might have crashed."
- alert: HostHighCpuLoad
expr: 100 - (avg by(instance) (rate(node_cpu_seconds_total{mode="idle"}[2m])) * 100) > 80
summary: "Host high CPU load (instance {{ $labels.instance }})."
description: "CPU load is > 80%."
- alert: HostOutOfMemory
expr: node_memory_MemAvailable_bytes / node_memory_MemTotal_bytes * 100 < 10
summary: "Host out of memory (instance {{ $labels.instance }})."
description: "Node memory is filling up (< 10% left)."
- alert: HostOomKillDetected
expr: increase(node_vmstat_oom_kill[1m]) > 0
summary: "Host OOM kill detected (instance {{ $labels.instance }})."
description: "OOM kill detected."
- alert: HostMemoryUnderMemoryPressure
expr: rate(node_vmstat_pgmajfault[1m]) > 1000
summary: "Host memory under memory pressure (instance {{ $labels.instance }})."
description: "The node is under heavy memory pressure. High rate of major page faults."
- alert: HostOutOfDiskSpace
expr: (node_filesystem_avail_bytes * 100) / node_filesystem_size_bytes < 10 and ON (instance, device, mountpoint) node_filesystem_readonly == 0
summary: "Host out of disk space (instance {{ $labels.instance }})."
description: "Disk is almost full (< 10% left)."
- alert: HostRaidDiskFailure
expr: node_md_disks{state="failed"} > 0
summary: "Host RAID disk failure (instance {{ $labels.instance }})."
description: "At least one device in RAID array on {{ $labels.instance }} failed. Array {{ $labels.md_device }} needs attention and possibly a disk swap."
- alert: HostConntrackLimit
expr: node_nf_conntrack_entries / node_nf_conntrack_entries_limit > 0.8
summary: "Host conntrack limit (instance {{ $labels.instance }})."
description: "The number of conntrack entries is approaching the limit."
- alert: HostNetworkInterfaceSaturated
expr: (rate(node_network_receive_bytes_total{device!~"^tap.*"}[1m]) + rate(node_network_transmit_bytes_total{device!~"^tap.*"}[1m])) / node_network_speed_bytes{device!~"^tap.*"} > 0.8
summary: "Host Network Interface Saturated (instance {{ $labels.instance }})."
description: "The network interface {{ $labels.interface }} on {{ $labels.instance }} is getting overloaded."
- alert: HostSystemdServiceCrashed
expr: node_systemd_unit_state{state="failed"} == 1
summary: "Host SystemD service crashed (instance {{ $labels.instance }})."
description: "SystemD service crashed."
- alert: HostEdacCorrectableErrorsDetected
expr: increase(node_edac_correctable_errors_total[1m]) > 0
summary: "Host EDAC Correctable Errors detected (instance {{ $labels.instance }})."
description: '{{ $labels.instance }} has had {{ printf "%.0f" $value }} correctable memory errors reported by EDAC in the last minute.'
- alert: HostEdacUncorrectableErrorsDetected
expr: node_edac_uncorrectable_errors_total > 0
summary: "Host EDAC Uncorrectable Errors detected (instance {{ $labels.instance }})."
description: '{{ $labels.instance }} has had {{ printf "%.0f" $value }} uncorrectable memory errors reported by EDAC.'
- alert: MinioDiskOffline
expr: minio_offline_disks > 0
summary: "Minio disk offline (instance {{ $labels.instance }})."
description: "Minio disk is offline."
- alert: MinioStorageSpaceExhausted
expr: minio_disk_storage_free_bytes / 1024 / 1024 / 1024 < 10
summary: "Minio storage space exhausted (instance {{ $labels.instance }})."
description: "Minio storage space is low (< 10 GB)."
- alert: PrometheusConfigurationReloadFailure
expr: prometheus_config_last_reload_successful != 1
summary: "Prometheus configuration reload failure (instance {{ $labels.instance }})."
description: "Prometheus configuration reload error."
- alert: PrometheusTooManyRestarts
expr: changes(process_start_time_seconds{job=~"prometheus|pushgateway|alertmanager"}[15m]) > 2
summary: "Prometheus too many restarts (instance {{ $labels.instance }})."
description: "Prometheus has restarted more than twice in the last 15 minutes. It might be crashlooping."
- alert: PrometheusAlertmanagerConfigurationReloadFailure
expr: alertmanager_config_last_reload_successful != 1
summary: "Prometheus AlertManager configuration reload failure (instance {{ $labels.instance }})."
description: "AlertManager configuration reload error."
- alert: PrometheusRuleEvaluationFailures
expr: increase(prometheus_rule_evaluation_failures_total[3m]) > 0
summary: "Prometheus rule evaluation failures (instance {{ $labels.instance }})."
description: "Prometheus encountered {{ $value }} rule evaluation failures, leading to potentially ignored alerts."
- alert: PrometheusTargetScrapingSlow
expr: prometheus_target_interval_length_seconds{quantile="0.9"} > 60
summary: "Prometheus target scraping slow (instance {{ $labels.instance }})."
description: "Prometheus is scraping exporters slowly."
- alert: PrometheusTsdbCompactionsFailed
expr: increase(prometheus_tsdb_compactions_failed_total[1m]) > 0
summary: "Prometheus TSDB compactions failed (instance {{ $labels.instance }})."
description: "Prometheus encountered {{ $value }} TSDB compaction failures."
- alert: PrometheusTsdbHeadTruncationsFailed
expr: increase(prometheus_tsdb_head_truncations_failed_total[1m]) > 0
summary: "Prometheus TSDB head truncations failed (instance {{ $labels.instance }})."
description: "Prometheus encountered {{ $value }} TSDB head truncation failures."
- alert: PrometheusTsdbWalCorruptions
expr: increase(prometheus_tsdb_wal_corruptions_total[1m]) > 0
summary: "Prometheus TSDB WAL corruptions (instance {{ $labels.instance }})."
description: "Prometheus encountered {{ $value }} TSDB WAL corruptions."
- alert: PrometheusTsdbWalTruncationsFailed
expr: increase(prometheus_tsdb_wal_truncations_failed_total[1m]) > 0
summary: "Prometheus TSDB WAL truncations failed (instance {{ $labels.instance }})."
description: "Prometheus encountered {{ $value }} TSDB WAL truncation failures."
change_signal = "SIGINT"
destination = "secrets/prometheus.yml"
evaluation_interval: 5s
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'alertmanager' ]
- job_name: 'Nomad Cluster'
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'nomad-client', 'nomad' ]
- source_labels: [__meta_consul_tags]
regex: '(.*)http(.*)'
metrics_path: /v1/metrics
format: [ 'prometheus' ]
- job_name: 'Consul Cluster'
- targets: [ '10.30.51.30:8500', '10.30.51.32:8500', '10.30.51.33:8500' ]
metrics_path: /v1/agent/metrics
format: [ 'prometheus' ]
- job_name: 'Alertmanager'
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'alertmanager' ]
- job_name: 'Blackbox Exporter (icmp)'
- targets: [ 'gerrit.fd.io' ]
- targets: [ 'jenkins.fd.io' ]
- targets: [ '10.30.51.32' ]
module: [ 'icmp_v4' ]
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: localhost:9115
- job_name: 'Blackbox Exporter (http)'
- targets: [ 'gerrit.fd.io' ]
- targets: [ 'jenkins.fd.io' ]
module: [ 'http_2xx' ]
- source_labels: [__address__]
target_label: __param_target
- source_labels: [__param_target]
target_label: instance
- target_label: __address__
replacement: localhost:9115
- job_name: 'cAdvisor Exporter'
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'cadvisorexporter' ]
- job_name: 'Grafana'
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'grafana' ]
- job_name: 'Node Exporter'
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'nodeexporter' ]
- job_name: 'Prometheus'
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'prometheus' ]
bearer_token: eyJhbGciOiJIUzUxMiIsInR5cCI6IkpXVCJ9.eyJleHAiOjQ3NjQ1ODEzMzcsImlzcyI6InByb21ldGhldXMiLCJzdWIiOiJtaW5pbyJ9.oeTw3EIaiFmlDikrHXWiWXMH2vxLfDLkfjEC7G2N3M_keH_xyA_l2ofLLNYtopa_3GCEZnxLQdPuFZrmgpkDWg
- server: '{{ env "NOMAD_IP_prometheus" }}:8500'
services: [ 'storage' ]
metrics_path: /minio/prometheus/metrics
# The service stanza instructs Nomad to register a service with Consul.
# For more information and examples on the "service" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/service
name = "${service_name}"
port = "${service_name}"
tags = [ "${service_name}$${NOMAD_ALLOC_INDEX}" ]
name = "Prometheus Check Live"
# The "resources" stanza describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
# For more information and examples on the "resources" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/resources
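# The actual cpu/memory figures for this job come from template variables; an
# illustrative sketch of the stanza's shape only:
#
#   resources {
#     cpu    = 2000   # MHz
#     memory = 8192   # MB
#   }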
# The network stanza specifies the networking requirements for the task
# group, including the network mode and port allocations. When scheduling
# jobs in Nomad they are provisioned across your fleet of machines along
# with other jobs and services. Because you don't know in advance what host
# your job will be provisioned on, Nomad will provide your tasks with
# network configuration when they start up.
# For more information and examples on the "network" stanza, please see
# the online documentation at:
# https://www.nomadproject.io/docs/job-specification/network
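# As a sketch only (the static port number below is an example; this job
# takes its port from a template variable), a static port allocation looks
# like:
#
#   network {
#     port "prometheus" {
#       static = 9090
#     }
#   }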
port "${service_name}" {