2 # The "region" parameter specifies the region in which to execute the job.
3 # If omitted, this inherits the default region name of "global".
6 # The "datacenters" parameter specifies the list of datacenters which should
7 # be considered when placing this task. This must be provided.
8 datacenters = [ "yul1" ]
10 # The "type" parameter controls the type of job, which impacts the scheduler's
11 # decision on placement. This configuration is optional and defaults to
12 # "service". For a full list of job types and their differences, please see
13 # the online documentation.
15 # For more information, please see the online documentation at:
17 # https://www.nomadproject.io/docs/jobspec/schedulers.html
22 # The "max_parallel" parameter specifies the maximum number of updates to
23 # perform in parallel. In this case, this specifies to update a single task
27 # The "min_healthy_time" parameter specifies the minimum time the allocation
28 # must be in the healthy state before it is marked as healthy and unblocks
29 # further allocations from being updated.
30 min_healthy_time = "10s"
32 # The "healthy_deadline" parameter specifies the deadline in which the
33 # allocation must be marked as healthy after which the allocation is
34 # automatically transitioned to unhealthy. Transitioning to unhealthy will
35 # fail the deployment and potentially roll back the job if "auto_revert" is
37 healthy_deadline = "3m"
39 # The "progress_deadline" parameter specifies the deadline in which an
40 # allocation must be marked as healthy. The deadline begins when the first
41 # allocation for the deployment is created and is reset whenever an allocation
42 # as part of the deployment transitions to a healthy state. If no allocation
43 # transitions to the healthy state before the progress deadline, the
44 # deployment is marked as failed.
45 progress_deadline = "10m"
47 # The "auto_revert" parameter specifies if the job should auto-revert to the
48 # last stable job on deployment failure. A job is marked as stable if all the
49 # allocations as part of its deployment were marked healthy.
52 # The "canary" parameter specifies that changes to the job that would result
53 # in destructive updates should create the specified number of canaries
54 # without stopping any previous allocations. Once the operator determines the
55 # canaries are healthy, they can be promoted which unblocks a rolling update
56 # of the remaining allocations at a rate of "max_parallel".
58 # Further, setting "canary" equal to the count of the task group allows
59 # blue/green deployments. When the job is updated, a full set of the new
60 # version is deployed and upon promotion the old version is stopped.
64 # The reschedule stanza specifies the group's rescheduling strategy. If
65 # specified at the job level, the configuration will apply to all groups
66 # within the job. If the reschedule stanza is present on both the job and the
67 # group, they are merged with the group stanza taking the highest precedence
71 delay_function = "constant"
76 # The "group" stanza defines a series of tasks that should be co-located on
77 # the same Nomad client. Any task within a group will be placed on the same
80 # For more information and examples on the "group" stanza, please see
81 # the online documentation at:
83 # https://www.nomadproject.io/docs/job-specification/group.html
85 group "prod-group1-nginx" {
86 # The "count" parameter specifies the number of the task groups that should
87 # be running under this group. This value must be non-negative and defaults
91 # The restart stanza configures a task's behavior on task failure. Restarts
92 # happen on the client that is running the task.
100 # All groups in this job should be scheduled on different hosts.
102 operator = "distinct_hosts"
106 # Prioritize one node.
108 attribute = "${attr.unique.hostname}"
113 # The volume stanza allows the group to specify that it requires a given
114 # volume from the cluster.
116 # For more information and examples on the "volume" stanza, please see
117 # the online documentation at:
119 # https://www.nomadproject.io/docs/job-specification/volume
120 volume "prod-volume1-storage" {
123 source = "prod-volume-data1-1"
126 # The "task" stanza creates an individual unit of work, such as a Docker
127 # container, web application, or batch processing.
129 # For more information and examples on the "task" stanza, please see
130 # the online documentation at:
132 # https://www.nomadproject.io/docs/job-specification/task.html
134 task "prod-task1-nginx" {
135 # The "driver" parameter specifies the task driver that should be used to
140 volume = "prod-volume1-storage"
141 destination = "/data/"
145 # The "config" stanza specifies the driver configuration, which is passed
146 # directly to the driver to start the task. The details of configurations
147 # are specific to each driver, so please see specific driver
148 # documentation for more information.
150 image = "nginx:stable"
151 dns_servers = [ "${attr.unique.network.ip-address}" ]
157 "/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem",
158 "/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem",
159 "custom/logs.conf:/etc/nginx/conf.d/logs.conf",
160 "custom/docs.conf:/etc/nginx/conf.d/docs.conf"
164 # The "template" stanza instructs Nomad to manage a template, such as
165 # a configuration file or script. This template can optionally pull data
166 # from Consul or Vault to populate runtime configuration data.
168 # For more information and examples on the "template" stanza, please see
169 # the online documentation at:
171 # https://www.nomadproject.io/docs/job-specification/template.html
176 listen 443 ssl default_server;
177 server_name logs.nginx.service.consul;
178 keepalive_timeout 70;
179 ssl_session_cache shared:SSL:10m;
180 ssl_session_timeout 10m;
181 ssl_protocols TLSv1.2;
182 ssl_prefer_server_ciphers on;
183 ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384";
184 ssl_certificate /etc/ssl/certs/nginx-cert.pem;
185 ssl_certificate_key /etc/ssl/private/nginx-key.pem;
187 root /data/logs.fd.io;
190 autoindex_exact_size on;
191 autoindex_format html;
192 autoindex_localtime off;
194 location ~ \.(html.gz)$ {
195 root /data/logs.fd.io;
196 add_header Content-Encoding gzip;
197 add_header Content-Type text/html;
199 location ~ \.(txt.gz|log.gz)$ {
200 root /data/logs.fd.io;
201 add_header Content-Encoding gzip;
202 add_header Content-Type text/plain;
204 location ~ \.(xml.gz)$ {
205 root /data/logs.fd.io;
206 add_header Content-Encoding gzip;
207 add_header Content-Type application/xml;
211 destination = "custom/logs.conf"
217 server_name docs.nginx.service.consul;
218 keepalive_timeout 70;
219 ssl_session_cache shared:SSL:10m;
220 ssl_session_timeout 10m;
221 ssl_protocols TLSv1.2;
222 ssl_prefer_server_ciphers on;
223 ssl_ciphers "ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384";
224 ssl_certificate /etc/ssl/certs/nginx-cert.pem;
225 ssl_certificate_key /etc/ssl/private/nginx-key.pem;
227 root /data/docs.fd.io;
228 index index.html index.htm;
232 destination = "custom/docs.conf"
235 # The service stanza instructs Nomad to register a service with Consul.
237 # For more information and examples on the "service" stanza, please see
238 # the online documentation at:
240 # https://www.nomadproject.io/docs/job-specification/service.html
245 tags = [ "docs", "logs" ]
248 # The "resources" stanza describes the requirements a task needs to
249 # execute. Resource requirements include memory, network, cpu, and more.
250 # This ensures the task will execute on a machine that contains enough
253 # For more information and examples on the "resources" stanza, please see
254 # the online documentation at:
256 # https://www.nomadproject.io/docs/job-specification/resources.html