From b2ff2bb435a765c588b5ae9f8461bbf9b5064651 Mon Sep 17 00:00:00 2001
From: pmikus
Date: Tue, 1 Dec 2020 07:38:28 +0000
Subject: [PATCH] Terraform: Nomad resource definitions

+ storage - final until more SSDs arrive.
+ nginx - final.
+ vpp_device - not yet tested (restored from EdK's setups); to be rewritten.

Signed-off-by: pmikus
Change-Id: Ib9499fc8cfb0d9f5c5d5bbd1ccd856ecc951ec2a
---
 resources/tools/terraform/1n_nmd/.gitignore        |   4 +
 resources/tools/terraform/1n_nmd/main.tf           |  40 ++++
 .../terraform/1n_nmd/prod_storage/prod-nginx.nomad | 243 +++++++++++++++++++
 .../1n_nmd/prod_storage/prod-storage.nomad         | 256 +++++++++++++++++++++
 .../terraform/1n_nmd/prod_storage/resources.tf     |   9 +
 .../prod_vpp_device/prod_csit_shim_amd.nomad       | 100 ++++++++
 .../prod_vpp_device/prod_csit_shim_arm.nomad       | 100 ++++++++
 .../terraform/1n_nmd/prod_vpp_device/resources.tf  |   9 +
 resources/tools/terraform/1n_nmd/variables.tf      |   5 +
 .../lf_inventory/host_vars/10.30.51.28.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.29.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.30.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.32.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.33.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.34.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.35.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.50.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.51.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.65.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.66.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.67.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.68.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.70.yaml        |   5 +-
 .../lf_inventory/host_vars/10.30.51.71.yaml        |   5 +-
 .../lf_inventory/host_vars/10.32.8.14.yaml         |   6 +-
 .../lf_inventory/host_vars/10.32.8.15.yaml         |   6 +-
 .../lf_inventory/host_vars/10.32.8.16.yaml         |   6 +-
 27 files changed, 787 insertions(+), 72 deletions(-)
 create mode 100644 resources/tools/terraform/1n_nmd/.gitignore
 create mode 100644 resources/tools/terraform/1n_nmd/main.tf
 create mode 100644 resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
 create mode 100644 resources/tools/terraform/1n_nmd/prod_storage/prod-storage.nomad
 create mode 100644 resources/tools/terraform/1n_nmd/prod_storage/resources.tf
 create mode 100644 resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_amd.nomad
 create mode 100644 resources/tools/terraform/1n_nmd/prod_vpp_device/prod_csit_shim_arm.nomad
 create mode 100644 resources/tools/terraform/1n_nmd/prod_vpp_device/resources.tf
 create mode 100644 resources/tools/terraform/1n_nmd/variables.tf

diff --git a/resources/tools/terraform/1n_nmd/.gitignore b/resources/tools/terraform/1n_nmd/.gitignore
new file mode 100644
index 0000000000..fc64f0039f
--- /dev/null
+++ b/resources/tools/terraform/1n_nmd/.gitignore
@@ -0,0 +1,4 @@
+.terraform/
+.terraform.tfstate.lock.info
+terraform.tfstate
+terraform.tfstate.backup
diff --git a/resources/tools/terraform/1n_nmd/main.tf b/resources/tools/terraform/1n_nmd/main.tf
new file mode 100644
index 0000000000..330f647476
--- /dev/null
+++ b/resources/tools/terraform/1n_nmd/main.tf
@@ -0,0 +1,40 @@
+terraform {
+  # This module is now only being tested with Terraform 0.13.5+.
+  required_version = ">= 0.13.5"
+}
+
+provider "nomad" {
+  address = var.nomad_provider_address
+  alias   = "yul1"
+}
+
+# For convenience in simple configurations, a child module automatically
+# inherits default (un-aliased) provider configurations from its parent.
+# This means that explicit provider blocks appear only in the root module,
+# and downstream modules can simply declare resources for that provider
+# and have them automatically associated with the root provider
+# configurations.
+
+# prod_storage
+# + prod-group1-nginx
+# + prod-group1-storage
+# + services
+#   + docs.nginx.service.consul
+#   + logs.nginx.service.consul
+#   + storage.nginx.service.consul
+module "prod_storage" {
+  source = "./prod_storage"
+  providers = {
+    nomad = nomad.yul1
+  }
+}
+
+# prod_vpp_device
+# + prod-csit-shim-amd
+# + prod-csit-shim-arm
+module "prod_vpp_device" {
+  source = "./prod_vpp_device"
+  providers = {
+    nomad = nomad.yul1
+  }
+}
\ No newline at end of file
diff --git a/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad b/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
new file mode 100644
index 0000000000..6c153ffd03
--- /dev/null
+++ b/resources/tools/terraform/1n_nmd/prod_storage/prod-nginx.nomad
@@ -0,0 +1,243 @@
+job "prod-nginx" {
+  # The "region" parameter specifies the region in which to execute the job.
+  # If omitted, this inherits the default region name of "global".
+  # region = "global"
+  #
+  # The "datacenters" parameter specifies the list of datacenters which should
+  # be considered when placing this task. This must be provided.
+  datacenters = [ "yul1" ]
+
+  # The "type" parameter controls the type of job, which impacts the scheduler's
+  # decision on placement. This configuration is optional and defaults to
+  # "service". For a full list of job types and their differences, please see
+  # the online documentation at:
+  #
+  #     https://www.nomadproject.io/docs/jobspec/schedulers.html
+  #
+  type = "service"
+
+  update {
+    # The "max_parallel" parameter specifies the maximum number of updates to
+    # perform in parallel. Setting this to 0 disables rolling updates
+    # (deployments) for this job entirely.
+    max_parallel = 0
+
+    # The "min_healthy_time" parameter specifies the minimum time the allocation
+    # must be in the healthy state before it is marked as healthy and unblocks
+    # further allocations from being updated.
+    min_healthy_time = "10s"
+
+    # The "healthy_deadline" parameter specifies the deadline in which the
+    # allocation must be marked as healthy after which the allocation is
+    # automatically transitioned to unhealthy. Transitioning to unhealthy will
+    # fail the deployment and potentially roll back the job if "auto_revert" is
+    # set to true.
+    healthy_deadline = "3m"
+
+    # The "progress_deadline" parameter specifies the deadline in which an
+    # allocation must be marked as healthy. The deadline begins when the first
+    # allocation for the deployment is created and is reset whenever an
+    # allocation as part of the deployment transitions to a healthy state. If
+    # no allocation transitions to the healthy state before the progress
+    # deadline, the deployment is marked as failed.
+    progress_deadline = "10m"
+
+    # The "auto_revert" parameter specifies if the job should auto-revert to the
+    # last stable job on deployment failure. A job is marked as stable if all
+    # the allocations as part of its deployment were marked healthy.
+    auto_revert = false
+
+    # The "canary" parameter specifies that changes to the job that would result
+    # in destructive updates should create the specified number of canaries
+    # without stopping any previous allocations. Once the operator determines
+    # the canaries are healthy, they can be promoted, which unblocks a rolling
+    # update of the remaining allocations at a rate of "max_parallel".
+    #
+    # Further, setting "canary" equal to the count of the task group allows
+    # blue/green deployments. When the job is updated, a full set of the new
+    # version is deployed and upon promotion the old version is stopped.
+    canary = 0
+  }
+
+  # The "group" stanza defines a series of tasks that should be co-located on
+  # the same Nomad client. Any task within a group will be placed on the same
+  # client.
+  #
+  # For more information and examples on the "group" stanza, please see
+  # the online documentation at:
+  #
+  #     https://www.nomadproject.io/docs/job-specification/group.html
+  #
+  group "prod-group1-nginx" {
+    # The "count" parameter specifies the number of the task groups that should
+    # be running under this group. This value must be non-negative and defaults
+    # to 1.
+    count = 1
+
+    # Groups in this job are not required to run on distinct hosts
+    # ("distinct_hosts" is explicitly disabled); the affinity below pins the
+    # group to a preferred node instead.
+    constraint {
+      operator = "distinct_hosts"
+      value    = "false"
+    }
+
+    # Prioritize one node.
+    affinity {
+      attribute = "${attr.unique.hostname}"
+      value     = "s46-nomad"
+      weight    = 100
+    }
+
+    # https://www.nomadproject.io/docs/job-specification/volume
+    volume "prod-volume1-storage" {
+      type      = "host"
+      read_only = false
+      source    = "prod-volume-data1-1"
+    }
+
+    # The "task" stanza creates an individual unit of work, such as a Docker
+    # container, web application, or batch processing.
+    #
+    # For more information and examples on the "task" stanza, please see
+    # the online documentation at:
+    #
+    #     https://www.nomadproject.io/docs/job-specification/task.html
+    #
+    task "prod-task1-nginx" {
+      # The "driver" parameter specifies the task driver that should be used to
+      # run the task.
+      driver = "docker"
+
+      volume_mount {
+        volume      = "prod-volume1-storage"
+        destination = "/data/"
+        read_only   = true
+      }
+
+      # The "config" stanza specifies the driver configuration, which is passed
+      # directly to the driver to start the task. The details of configurations
+      # are specific to each driver, so please see specific driver
+      # documentation for more information.
+      config {
+        image       = "nginx:stable"
+        dns_servers = [ "${attr.unique.network.ip-address}" ]
+        port_map {
+          https = 443
+        }
+        privileged = false
+        volumes    = [
+          "/etc/consul.d/ssl/consul.pem:/etc/ssl/certs/nginx-cert.pem",
+          "/etc/consul.d/ssl/consul-key.pem:/etc/ssl/private/nginx-key.pem",
+          "custom/logs.conf:/etc/nginx/conf.d/logs.conf",
+          "custom/docs.conf:/etc/nginx/conf.d/docs.conf"
+        ]
+      }
+
+      # The "template" stanza instructs Nomad to manage a template, such as
+      # a configuration file or script. This template can optionally pull data
+      # from Consul or Vault to populate runtime configuration data.
+      #
+      # For more information and examples on the "template" stanza, please see
+      # the online documentation at:
+      #
+      #     https://www.nomadproject.io/docs/job-specification/template.html
+      #
+      template {
+        data = <
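
The diff is cut off above, inside the "template" stanza of prod-nginx.nomad, at
the start of the heredoc assigned to "data"; the remaining hunks (the rest of
prod-nginx.nomad, prod-storage.nomad, the resources.tf files, variables.tf and
the host_vars updates) are not shown. Purely as an illustration (not the actual
file content), a "template" stanza rendering one of the nginx server blocks
wired up through the "volumes" list above could look like the sketch below; the
server block body and document root are assumptions:

    template {
      # Render an nginx vhost for the logs service; the rendered file is
      # mounted into the container via the "custom/logs.conf" bind above.
      data = <<-EOH
        server {
          listen 443 ssl;
          server_name logs.nginx.service.consul;
          ssl_certificate /etc/ssl/certs/nginx-cert.pem;
          ssl_certificate_key /etc/ssl/private/nginx-key.pem;
          # Hypothetical document root under the read-only volume mount.
          root /data/logs;
        }
      EOH
      destination = "custom/logs.conf"
    }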
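The root module's main.tf consumes var.nomad_provider_address, which the
truncated variables.tf (5 lines in the diffstat) must declare. A minimal sketch
of such a declaration, with an assumed description and default rather than the
file's actual content:

    variable "nomad_provider_address" {
      description = "FD.io Nomad cluster address."
      type        = string
      default     = "http://nomad.service.consul:4646"
    }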
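Because each child module inherits the aliased provider through the "providers"
map in main.tf, its resources.tf (9 lines per module in the diffstat) only
needs to register the jobspecs sitting next to it. A sketch of what
prod_storage/resources.tf could contain, using the Terraform Nomad provider's
standard "nomad_job" resource (the resource names here are hypothetical):

    resource "nomad_job" "prod_nginx" {
      # Register the co-located jobspec with the inherited Nomad provider.
      jobspec = file("${path.module}/prod-nginx.nomad")
    }

    resource "nomad_job" "prod_storage" {
      jobspec = file("${path.module}/prod-storage.nomad")
    }

With Terraform >= 0.13.5, running terraform init and terraform apply from
resources/tools/terraform/1n_nmd/ would then plan and register both modules'
jobs against the yul1 Nomad cluster.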