# The "region" parameter specifies the region in which to execute the job.
# If omitted, this inherits the default region name of "global".
# The "datacenters" parameter specifies the list of datacenters which should
# be considered when placing this task. This must be provided.
datacenters = "${datacenters}"
# The "type" parameter controls the type of job, which impacts the scheduler's
# decision on placement. This configuration is optional and defaults to
# "service". For a full list of job types and their differences, please see
# the online documentation.
# https://www.nomadproject.io/docs/jobspec/schedulers
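# As an illustration only (not a setting taken from this template), a
# short-lived, run-to-completion workload would instead declare:
#
#   type = "batch"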
# The "max_parallel" parameter specifies the maximum number of updates to
# perform in parallel. In this case the value is supplied by the
# "max_parallel" template variable.
max_parallel = ${max_parallel}
health_check = "checks"
# The "min_healthy_time" parameter specifies the minimum time the allocation
# must be in the healthy state before it is marked as healthy and unblocks
# further allocations from being updated.
min_healthy_time = "10s"
# The "healthy_deadline" parameter specifies the deadline in which the
# allocation must be marked as healthy after which the allocation is
# automatically transitioned to unhealthy. Transitioning to unhealthy will
# fail the deployment and potentially roll back the job if "auto_revert" is
# set to true.
healthy_deadline = "3m"
# The "progress_deadline" parameter specifies the deadline in which an
# allocation must be marked as healthy. The deadline begins when the first
# allocation for the deployment is created and is reset whenever an allocation
# as part of the deployment transitions to a healthy state. If no allocation
# transitions to the healthy state before the progress deadline, the
# deployment is marked as failed.
progress_deadline = "10m"
# The "canary" parameter specifies that changes to the job that would result
# in destructive updates should create the specified number of canaries
# without stopping any previous allocations. Once the operator determines the
# canaries are healthy, they can be promoted which unblocks a rolling update
# of the remaining allocations at a rate of "max_parallel".
# Further, setting "canary" equal to the count of the task group allows
# blue/green deployments. When the job is updated, a full set of the new
# version is deployed and upon promotion the old version is stopped.
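# As a sketch of the blue/green pattern described above (illustrative values,
# not the settings used by this template): with a task group of count = 3, an
# update stanza such as
#
#   update {
#     max_parallel = 1
#     canary       = 3
#   }
#
# deploys a full set of three new allocations alongside the old ones and only
# stops the old set once the canaries are promoted.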
# Specifies if the job should auto-promote to the canary version when all
# canaries become healthy during a deployment. Defaults to false which means
# canaries must be manually promoted with the "nomad deployment promote"
# command.
auto_promote = ${auto_promote}
# The "auto_revert" parameter specifies if the job should auto-revert to the
# last stable job on deployment failure. A job is marked as stable if all the
# allocations as part of its deployment were marked healthy.
auto_revert = ${auto_revert}
# All groups in this job should be scheduled on different hosts.
operator = "distinct_hosts"
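# Written out in full, a distinct_hosts constraint takes roughly this form
# (a sketch, not copied verbatim from this template):
#
#   constraint {
#     operator = "distinct_hosts"
#     value    = "true"
#   }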
# The "group" stanza defines a series of tasks that should be co-located on
# the same Nomad client. Any task within a group will be placed on the same
# client.
# https://www.nomadproject.io/docs/job-specification/group
group "${job_name}-group-1" {
# The "count" parameter specifies the number of the task groups that should
# be running under this group. This value must be non-negative and defaults
# to 1.
count = ${group_count}
# The volume stanza allows the group to specify that it requires a given
# volume from the cluster. The key of the stanza is the name of the volume
# as it will be exposed to task configuration.
# https://www.nomadproject.io/docs/job-specification/volume
%{ if use_host_volume }
volume "${job_name}-volume-1" {
source = "${volume_source}"
# The restart stanza configures a task's behavior on task failure. Restarts
# happen on the client that is running the task.
# https://www.nomadproject.io/docs/job-specification/restart
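# A typical restart policy for a long-running service looks roughly like the
# sketch below; the values are illustrative and are not necessarily the ones
# used by this template:
#
#   restart {
#     interval = "30m"
#     attempts = 40
#     delay    = "15s"
#     mode     = "delay"
#   }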
# The constraint allows restricting the set of eligible nodes. Constraints
# may filter on attributes or client metadata.
# https://www.nomadproject.io/docs/job-specification/constraint
attribute = "$${attr.cpu.arch}"
attribute = "$${node.class}"
# The network stanza specifies the networking requirements for the task
# group, including the network mode and port allocations. When scheduling
# jobs in Nomad they are provisioned across your fleet of machines along
# with other jobs and services. Because you don't know in advance what host
# your job will be provisioned on, Nomad will provide your tasks with
# network configuration when they start up.
# https://www.nomadproject.io/docs/job-specification/network
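# For reference, a network block that pins a port to Alertmanager's default
# listen port would look like the sketch below; the port label "http" and the
# static value are illustrative, not the settings used here:
#
#   network {
#     port "http" {
#       static = 9093
#     }
#   }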
port "${service_name}" {
# The "task" stanza creates an individual unit of work, such as a Docker
# container, web application, or batch processing.
# https://www.nomadproject.io/docs/job-specification/task
task "${job_name}-task-1" {
# The "driver" parameter specifies the task driver that should be used to
# run the task.
%{ if use_host_volume }
volume = "${job_name}-volume-1"
destination = "${volume_destination}"
%{ if use_vault_provider }
policies = "${vault_kv_policy_name}"
# The "config" stanza specifies the driver configuration, which is passed
# directly to the driver to start the task. The details of configurations
# are specific to each driver, so please see specific driver
# documentation for more information.
command = "local/alertmanager-${version}.linux-amd64/alertmanager"
"--config.file=secrets/alertmanager.yml"
# The artifact stanza instructs Nomad to fetch and unpack a remote resource,
# such as a file, tarball, or binary. Nomad downloads artifacts using the
# popular go-getter library, which permits downloading artifacts from a
# variety of locations using a URL as the input source.
# https://www.nomadproject.io/docs/job-specification/artifact
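# Given that the task's command points at
# "local/alertmanager-${version}.linux-amd64/alertmanager", the artifact
# presumably fetches the upstream release tarball. A sketch of such a stanza
# (the source URL pattern is assumed, not quoted from this template):
#
#   artifact {
#     source = "https://github.com/prometheus/alertmanager/releases/download/v${version}/alertmanager-${version}.linux-amd64.tar.gz"
#   }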
# The "template" stanza instructs Nomad to manage a template, such as
# a configuration file or script. This template can optionally pull data
# from Consul or Vault to populate runtime configuration data.
# https://www.nomadproject.io/docs/job-specification/template
change_signal = "SIGINT"
destination = "secrets/alertmanager.yml"
left_delimiter = "{{{"
right_delimiter = "}}}"
# The directory from which notification templates are read.
- '/etc/alertmanager/template/*.tmpl'
# # CA certificate to validate the server certificate with.
# ca_file: <filepath>
# # Certificate and key files for client cert authentication to the server.
# cert_file: <filepath>
# key_file: <filepath>
# # ServerName extension to indicate the name of the server.
# # http://tools.ietf.org/html/rfc4366#section-3.1
# server_name: <string>
# # Disable validation of the server certificate.
# insecure_skip_verify: true
# The root route on which each incoming alert enters.
receiver: '${slack_default_receiver}'
# The labels by which incoming alerts are grouped together. For example,
# multiple alerts coming in for cluster=A and alertname=LatencyHigh would
# be batched into a single group.
# To aggregate by all possible labels use '...' as the sole label name.
# This effectively disables aggregation entirely, passing through all
# alerts as-is. This is unlikely to be what you want, unless you have
# a very low alert volume or your upstream notification system performs
# its own grouping. Example: group_by: [...]
group_by: ['alertname']
# When a new group of alerts is created by an incoming alert, wait at
# least 'group_wait' to send the initial notification.
# This ensures that multiple alerts for the same group that start firing
# shortly after one another are batched together in the first notification.
# Once the first notification has been sent, wait 'group_interval' to send a
# batch of new alerts that started firing for that group.
# If an alert has successfully been sent, wait 'repeat_interval' to resend it.
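# Typical values for these three timing parameters look like the following
# (illustrative only, not necessarily the values configured here):
# group_wait: 30s
# group_interval: 5m
# repeat_interval: 3h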
# All the above attributes are inherited by all child routes and can be
# overwritten on each.
# The child route trees.
alertname: JenkinsJob.*
receiver: ${slack_jenkins_receiver}
receiver: '${slack_jenkins_receiver}'
receiver: ${slack_default_receiver}
receiver: '${slack_default_receiver}'
# Inhibition rules allow muting a set of alerts given that another alert is
# firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
equal: ['alertname', 'instance']
- name: '${slack_jenkins_receiver}'
- api_url: 'https://hooks.slack.com/services/${slack_jenkins_api_key}'
channel: '#${slack_jenkins_channel}'
icon_url: https://avatars3.githubusercontent.com/u/3380462
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
*Alert:* {{ .Annotations.summary }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
- name: '${slack_default_receiver}'
- api_url: 'https://hooks.slack.com/services/${slack_default_api_key}'
channel: '#${slack_default_channel}'
icon_url: https://avatars3.githubusercontent.com/u/3380462
[{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
{{- if gt (len .CommonLabels) (len .GroupLabels) -}}
{{- with .CommonLabels.Remove .GroupLabels.Names }}
{{- range $index, $label := .SortedPairs -}}
{{ if $index }}, {{ end }}
{{- $label.Name }}="{{ $label.Value -}}"
*Alert:* {{ .Annotations.summary }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
*Description:* {{ .Annotations.description }}
{{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
# The service stanza instructs Nomad to register a service with Consul.
# https://www.nomadproject.io/docs/job-specification/service
name = "${service_name}"
port = "${service_name}"
tags = [ "${service_name}$${NOMAD_ALLOC_INDEX}" ]
name = "Alertmanager Check Live"
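# An HTTP liveness check against Alertmanager's built-in health endpoint
# typically looks like the sketch below; the interval and timeout values are
# assumed, not quoted from this template:
#
#   check {
#     name     = "Alertmanager Check Live"
#     type     = "http"
#     path     = "/-/healthy"
#     interval = "10s"
#     timeout  = "2s"
#   }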
# The "resources" stanza describes the requirements a task needs to
# execute. Resource requirements include memory, network, cpu, and more.
# This ensures the task will execute on a machine that contains enough
# resource capacity.
# https://www.nomadproject.io/docs/job-specification/resources
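# A minimal resources block takes the following shape; the figures below are
# illustrative placeholders rather than the sizing used by this template:
#
#   resources {
#     cpu    = 1000 # MHz
#     memory = 1024 # MB
#   }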