  # The "region" parameter specifies the region in which to execute the job.
  # If omitted, this inherits the default region name of "global".

  # The "datacenters" parameter specifies the list of datacenters which should
  # be considered when placing this task. This must be provided.
  datacenters = "${datacenters}"
  # The "type" parameter controls the type of job, which impacts the scheduler's
  # decision on placement. This configuration is optional and defaults to
  # "service". For a full list of job types and their differences, please see
  # the online documentation.
  #
  # For more information, please see the online documentation at:
  #
  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
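  # Shown here explicitly for clarity (an illustrative assumption; "service"
  # is the documented default and the template's original assignment was
  # elided):
  type = "service"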
    # The "max_parallel" parameter specifies the maximum number of updates to
    # perform in parallel. In this case, this specifies to update a single task
    # group at a time.
    max_parallel = 1

    # The "health_check" parameter specifies the mechanism in which allocation
    # health is determined. The default value of "checks" considers an
    # allocation healthy when all its tasks are running and their Consul
    # checks are passing.
    health_check = "checks"
    # The "min_healthy_time" parameter specifies the minimum time the allocation
    # must be in the healthy state before it is marked as healthy and unblocks
    # further allocations from being updated.
    min_healthy_time = "10s"
    # The "healthy_deadline" parameter specifies the deadline in which the
    # allocation must be marked as healthy after which the allocation is
    # automatically transitioned to unhealthy. Transitioning to unhealthy will
    # fail the deployment and potentially roll back the job if "auto_revert" is
    # set to true.
    healthy_deadline = "3m"
    # The "progress_deadline" parameter specifies the deadline in which an
    # allocation must be marked as healthy. The deadline begins when the first
    # allocation for the deployment is created and is reset whenever an allocation
    # as part of the deployment transitions to a healthy state. If no allocation
    # transitions to the healthy state before the progress deadline, the
    # deployment is marked as failed.
    progress_deadline = "10m"
    # The "canary" parameter specifies that changes to the job that would result
    # in destructive updates should create the specified number of canaries
    # without stopping any previous allocations. Once the operator determines the
    # canaries are healthy, they can be promoted which unblocks a rolling update
    # of the remaining allocations at a rate of "max_parallel".
    #
    # Further, setting "canary" equal to the count of the task group allows
    # blue/green deployments. When the job is updated, a full set of the new
    # version is deployed and upon promotion the old version is stopped.
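    # For example, for a task group with count = 4, a blue/green update could
    # be configured as follows (an illustrative sketch; these values are
    # assumptions, not taken from this template):
    #
    #   update {
    #     max_parallel = 1
    #     canary       = 4
    #     auto_promote = false
    #     auto_revert  = true
    #   }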
    # The "auto_promote" parameter specifies if the job should auto-promote to
    # the canary version when all canaries become healthy during a deployment.
    # Defaults to false, which means canaries must be manually promoted with
    # the "nomad deployment promote" command.
    # The "auto_revert" parameter specifies if the job should auto-revert to the
    # last stable job on deployment failure. A job is marked as stable if all the
    # allocations as part of its deployment were marked healthy.
  # All groups in this job should be scheduled on different hosts.
  constraint {
    operator = "distinct_hosts"
    value    = "true"
  }
  # The "group" stanza defines a series of tasks that should be co-located on
  # the same Nomad client. Any task within a group will be placed on the same
  # client.
  #
  # For more information and examples on the "group" stanza, please see
  # the online documentation at:
  #
  #     https://www.nomadproject.io/docs/job-specification/group.html
  group "prod-group1-minio" {
    # The "count" parameter specifies the number of the task groups that should
    # be running under this group. This value must be non-negative and defaults
    # to 1.
    count = ${group_count}
    # The volume stanza allows the group to specify that it requires a given
    # volume from the cluster. For more information, please see the online
    # documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/volume
    %{ if use_host_volume }
    volume "prod-volume1-minio" {
      type   = "host"
      source = "${host_volume}"
    }
    %{ endif }
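    # The "source" above must name a host volume registered in the Nomad
    # client's agent configuration, for example (an illustrative sketch; the
    # volume name and path are placeholders):
    #
    #   client {
    #     host_volume "prod-volume-data1-1" {
    #       path      = "/data/minio"
    #       read_only = false
    #     }
    #   }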
    # The "task" stanza creates an individual unit of work, such as a Docker
    # container, web application, or batch processing.
    #
    # For more information and examples on the "task" stanza, please see
    # the online documentation at:
    #
    #     https://www.nomadproject.io/docs/job-specification/task.html
    task "prod-task1-minio" {
      # The "driver" parameter specifies the task driver that should be used to
      # run the task. The Docker driver is assumed here, since the "config"
      # stanza below uses Docker-specific options.
      driver = "docker"

      %{ if use_host_volume }
      volume_mount {
        volume      = "prod-volume1-minio"
        destination = "${data_dir}"
      }
      %{ endif }
      %{ if use_vault_provider }
      vault {
        policies = "${vault_kv_policy_name}"
      }
      %{ endif }
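      # The policy named above must grant read access to the KV path used by
      # the env stanza below, e.g. (an illustrative Vault policy sketch; the
      # "secret/data/minio" path is an assumption for a KV v2 mount):
      #
      #   path "secret/data/minio" {
      #     capabilities = ["read"]
      #   }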
      # The "config" stanza specifies the driver configuration, which is passed
      # directly to the driver to start the task. The details of configurations
      # are specific to each driver, so please see specific driver
      # documentation for more information.
      config {
        dns_servers  = [ "$${attr.unique.network.ip-address}" ]
        network_mode = "host"
        args         = [ "${host}:${port}${data_dir}" ]
      }
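      # With, for example, host = "http://10.0.0.1", port = 9000 and
      # data_dir = "/data/" (hypothetical values), the "args" above render to
      # a single MinIO server endpoint:
      #
      #   args = [ "http://10.0.0.1:9000/data/" ]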
      # The env stanza configures a list of environment variables to populate
      # the task's environment before starting.
      env {
        %{ if use_vault_provider }
        {{ with secret "${vault_kv_path}" }}
        MINIO_ACCESS_KEY = "{{ .Data.data.${vault_kv_field_access_key} }}"
        MINIO_SECRET_KEY = "{{ .Data.data.${vault_kv_field_secret_key} }}"
        {{ end }}
        %{ else }
        MINIO_ACCESS_KEY = "${access_key}"
        MINIO_SECRET_KEY = "${secret_key}"
        %{ endif }
      }
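      # After rendering, both branches produce the same pair of variables,
      # e.g. (hypothetical values):
      #
      #   MINIO_ACCESS_KEY = "minio"
      #   MINIO_SECRET_KEY = "minio123"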
      # The service stanza instructs Nomad to register a service with Consul.
      #
      # For more information and examples on the "service" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/service.html
      service {
        name = "${service_name}"
        tags = [ "storage$${NOMAD_ALLOC_INDEX}" ]
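        # At runtime, "$${NOMAD_ALLOC_INDEX}" is interpolated per allocation,
        # so the first allocation is tagged "storage0", the second "storage1",
        # and so on.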
        check {
          name = "Min.io Server HTTP Check Live"
          path = "/minio/health/live"
        }

        check {
          name = "Min.io Server HTTP Check Ready"
          path = "/minio/health/ready"
        }
      }
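      # A complete service check also needs a type, port, interval and
      # timeout; a plausible full form of the liveness check above (the port
      # label and timing values are assumptions):
      #
      #   check {
      #     name     = "Min.io Server HTTP Check Live"
      #     type     = "http"
      #     port     = "http"
      #     path     = "/minio/health/live"
      #     interval = "10s"
      #     timeout  = "2s"
      #   }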
      # The "resources" stanza describes the requirements a task needs to
      # execute. Resource requirements include memory, network, cpu, and more.
      # This ensures the task will execute on a machine that contains enough
      # resource capacity.
      #
      # For more information and examples on the "resources" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/resources.html
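      # A minimal sketch (the cpu and memory figures below are placeholders,
      # not values from this template):
      #
      #   resources {
      #     cpu    = 2000
      #     memory = 4096
      #   }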
      # The network stanza specifies the networking requirements for the task
      # group, including the network mode and port allocations. When scheduling
      # jobs in Nomad they are provisioned across your fleet of machines along
      # with other jobs and services. Because you don't know in advance what host
      # your job will be provisioned on, Nomad will provide your tasks with
      # network configuration when they start up.
      #
      # For more information and examples on the "network" stanza, please see
      # the online documentation at:
      #
      #     https://www.nomadproject.io/docs/job-specification/network.html
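      # A minimal sketch exposing the MinIO port (the static port value is an
      # assumption; 9000 is MinIO's conventional default):
      #
      #   network {
      #     port "http" {
      #       static = 9000
      #     }
      #   }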