2 # The "region" parameter specifies the region in which to execute the job.
3 # If omitted, this inherits the default region name of "global".
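
  # For illustration only (this template's actual region line is not shown
  # here), an explicit region assignment would look like:
  #
  #   region = "global"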
6 # The "datacenters" parameter specifies the list of datacenters which should
7 # be considered when placing this task. This must be provided.
8 datacenters = "${datacenters}"
10 # The "type" parameter controls the type of job, which impacts the scheduler's
11 # decision on placement. This configuration is optional and defaults to
12 # "service". For a full list of job types and their differences, please see
13 # the online documentation.
15 # https://www.nomadproject.io/docs/jobspec/schedulers
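
  # For illustration only: Loki runs as a long-lived service, so a template
  # like this would typically declare:
  #
  #   type = "service"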
20 # The "max_parallel" parameter specifies the maximum number of updates to
21 # perform in parallel. In this case, this specifies to update a single task
23 max_parallel = ${max_parallel}
25 health_check = "checks"
27 # The "min_healthy_time" parameter specifies the minimum time the allocation
28 # must be in the healthy state before it is marked as healthy and unblocks
29 # further allocations from being updated.
30 min_healthy_time = "10s"
32 # The "healthy_deadline" parameter specifies the deadline in which the
33 # allocation must be marked as healthy after which the allocation is
34 # automatically transitioned to unhealthy. Transitioning to unhealthy will
35 # fail the deployment and potentially roll back the job if "auto_revert" is
37 healthy_deadline = "3m"
39 # The "progress_deadline" parameter specifies the deadline in which an
40 # allocation must be marked as healthy. The deadline begins when the first
41 # allocation for the deployment is created and is reset whenever an allocation
42 # as part of the deployment transitions to a healthy state. If no allocation
43 # transitions to the healthy state before the progress deadline, the
44 # deployment is marked as failed.
45 progress_deadline = "10m"
48 # The "canary" parameter specifies that changes to the job that would result
49 # in destructive updates should create the specified number of canaries
50 # without stopping any previous allocations. Once the operator determines the
51 # canaries are healthy, they can be promoted which unblocks a rolling update
52 # of the remaining allocations at a rate of "max_parallel".
54 # Further, setting "canary" equal to the count of the task group allows
55 # blue/green deployments. When the job is updated, a full set of the new
56 # version is deployed and upon promotion the old version is stopped.
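
    # For illustration only (the canary count used by this template is not
    # shown here), a single-canary rolling update would be requested with:
    #
    #   canary = 1
    #
    # while setting it equal to the group's "count" enables the blue/green
    # pattern described above.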

    # Specifies if the job should auto-promote to the canary version when all
    # canaries become healthy during a deployment. Defaults to false which means
    # canaries must be manually promoted with the "nomad deployment promote"
    # command.
    auto_promote = ${auto_promote}

    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
    auto_revert = ${auto_revert}
72 # The "group" stanza defines a series of tasks that should be co-located on
73 # the same Nomad client. Any task within a group will be placed on the same
76 # https://www.nomadproject.io/docs/job-specification/group
78 group "${job_name}-group-1" {
79 # The "count" parameter specifies the number of the task groups that should
80 # be running under this group. This value must be non-negative and defaults
82 count = ${group_count}

    # The volume stanza allows the group to specify that it requires a given
    # volume from the cluster. The key of the stanza is the name of the volume
    # as it will be exposed to task configuration.
    #
    # https://www.nomadproject.io/docs/job-specification/volume
    %{ if use_host_volume }
    volume "${job_name}-volume-1" {
      source = "${volume_source}"

    # The restart stanza configures a task's behavior on task failure. Restarts
    # happen on the client that is running the task.
    #
    # https://www.nomadproject.io/docs/job-specification/restart
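
    # For illustration only (the restart parameters used by this template are
    # not shown here), a restart policy might look like:
    #
    #   restart {
    #     interval = "30m"
    #     attempts = 40
    #     delay    = "15s"
    #     mode     = "delay"
    #   }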

    # The constraint allows restricting the set of eligible nodes. Constraints
    # may filter on attributes or client metadata.
    #
    # https://www.nomadproject.io/docs/job-specification/constraint
      attribute = "$${attr.cpu.arch}"

      attribute = "$${node.class}"
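
    # For illustration only (the operators and values used by this template are
    # not shown here), a complete constraint stanza pairs an attribute with an
    # operator and a value, e.g.:
    #
    #   constraint {
    #     attribute = "$${attr.cpu.arch}"
    #     operator  = "!="
    #     value     = "arm64"
    #   }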

    # The network stanza specifies the networking requirements for the task
    # group, including the network mode and port allocations. When scheduling
    # jobs in Nomad they are provisioned across your fleet of machines along
    # with other jobs and services. Because you don't know in advance what host
    # your job will be provisioned on, Nomad will provide your tasks with
    # network configuration when they start up.
    #
    # https://www.nomadproject.io/docs/job-specification/network
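
    # For illustration only (the port allocation used by this template is not
    # shown here), a static allocation of Loki's default HTTP port, matching
    # the "http_listen_port: 3100" setting further below, would look like:
    #
    #   network {
    #     port "${service_name}" {
    #       static = 3100
    #     }
    #   }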
135 port "${service_name}" {
141 # The "task" stanza creates an individual unit of work, such as a Docker
142 # container, web application, or batch processing.
144 # https://www.nomadproject.io/docs/job-specification/task
146 task "${job_name}-task-1" {
147 # The "driver" parameter specifies the task driver that should be used to

      %{ if use_host_volume }
      volume      = "${job_name}-volume-1"
      destination = "${volume_destination}"

      %{ if use_vault_provider }
      policies = "${vault_kv_policy_name}"
165 # The "config" stanza specifies the driver configuration, which is passed
166 # directly to the driver to start the task. The details of configurations
167 # are specific to each driver, so please see specific driver
168 # documentation for more information.
170 command = "local/loki-linux-amd64"

      # The artifact stanza instructs Nomad to fetch and unpack a remote resource,
      # such as a file, tarball, or binary. Nomad downloads artifacts using the
      # popular go-getter library, which permits downloading artifacts from a
      # variety of locations using a URL as the input source.
      #
      # https://www.nomadproject.io/docs/job-specification/artifact
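
      # For illustration only (the actual download URL used by this template is
      # not shown here), an artifact stanza that fetches and unpacks the Loki
      # binary referenced by "command" above might look like:
      #
      #   artifact {
      #     source = "https://example.com/loki-linux-amd64.zip"
      #   }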
183 "-config.file secrets/config.yml"

        change_signal = "SIGINT"
        destination   = "secrets/loki.yml"

http_listen_port: 3100
http_listen_address: 127.0.0.1
object_store: filesystem
directory: /tmp/loki/index
directory: /tmp/loki/chunks
endpoint: http://storage.service.consul:9000
access_key_id: storage
secret_access_key: Storage1234
sse_encryption: false
idle_conn_timeout: 90s
response_header_timeout: 0s
insecure_skip_verify: false
s3forcepathstyle: true

      # The service stanza instructs Nomad to register a service with Consul.
      #
      # https://www.nomadproject.io/docs/job-specification/service
        name = "${service_name}"
        port = "${service_name}"
        tags = ["${service_name}$${NOMAD_ALLOC_INDEX}"]

          name = "Loki Check Live"
248 # The "resources" stanza describes the requirements a task needs to
249 # execute. Resource requirements include memory, network, cpu, and more.
250 # This ensures the task will execute on a machine that contains enough
253 # https://www.nomadproject.io/docs/job-specification/resources
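
      # For illustration only (the actual values used by this template are not
      # shown here), a resources stanza might look like:
      #
      #   resources {
      #     cpu    = 1000
      #     memory = 2048
      #   }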