# terraform-ci-infra/1n_nmd/main.tf
# For convenience in simple configurations, a child module automatically
# inherits default (un-aliased) provider configurations from its parent.
# This means that explicit provider blocks appear only in the root module,
# and downstream modules can simply declare resources for that provider
# and have them automatically associated with the root provider
# configurations.
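
# The modules below override that default: each is handed the aliased
# provider "nomad.yul1" explicitly through its "providers" map. A minimal
# sketch of the root-level configuration that alias refers to (the real
# address and version constraint live elsewhere in this repository; the
# values shown here are assumptions):
#
# terraform {
#   required_providers {
#     nomad = {
#       source = "hashicorp/nomad"
#     }
#   }
# }
#
# provider "nomad" {
#   alias   = "yul1"
#   address = "http://nomad.service.consul:4646"   # assumed endpoint
# }
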
9 module "alertmanager" {
10   source                         = "./alertmanager"
11   providers                      = {
12     nomad = nomad.yul1
13   }
14
15   # nomad
16   nomad_datacenters              = [ "yul1" ]
17
18   # alertmanager
19   alertmanager_job_name          = "prod-alertmanager"
20   alertmanager_use_canary        = true
21   alertmanager_group_count       = 1
22   alertmanager_vault_secret      = {
23     use_vault_provider           = false,
24     vault_kv_policy_name         = "kv-secret",
25     vault_kv_path                = "secret/data/prometheus",
26     vault_kv_field_access_key    = "access_key",
27     vault_kv_field_secret_key    = "secret_key"
28   }
29   alertmanager_version           = "0.21.0"
30   alertmanager_cpu               = 1000
31   alertmanager_mem               = 1024
32   alertmanager_port              = 9093
33   alertmanager_slack_api_key     = "TE07RD1V1/B01L7PQK9S8/xncEcMAvF0GtJpTbC30E0AyL"
34   alertmanager_slack_channel     = "fdio-infra-monitoring"
35 }
36
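# The *_vault_secret maps used here (and again for grafana, minio and
# prometheus below) presumably correspond to an object variable declared in
# each child module. A minimal sketch of the assumed declaration, e.g. in
# ./alertmanager/variables.tf:
#
# variable "alertmanager_vault_secret" {
#   description = "Properties needed to fetch the keys from Vault."
#   type = object({
#     use_vault_provider        = bool
#     vault_kv_policy_name      = string
#     vault_kv_path             = string
#     vault_kv_field_access_key = string
#     vault_kv_field_secret_key = string
#   })
# }
#
# With use_vault_provider = false, credentials such as the Slack API key
# above remain hard-coded in this file; enabling it presumably moves them
# to the configured Vault KV path instead.
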
module "exporter" {
  source                         = "./exporter"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # exporter
  exporter_job_name              = "prod-exporter"
  exporter_use_canary            = false

  # node
  node_version                   = "1.0.1"
  node_port                      = 9100

  # blackbox
  blackbox_version               = "0.18.0"
  blackbox_port                  = 9115

  # cadvisor
  cadvisor_image                 = "gcr.io/cadvisor/cadvisor:latest"
  cadvisor_port                  = 8080
}

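# Each module in this file presumably renders a Nomad job specification from
# a template and submits it via the provider passed in above. A sketch of the
# assumed wiring inside ./exporter/main.tf (file and resource names are
# illustrative):
#
# resource "nomad_job" "exporter" {
#   jobspec = templatefile("${path.module}/conf/nomad/exporter.hcl", {
#     job_name       = var.exporter_job_name
#     datacenters    = join(",", var.nomad_datacenters)
#     use_canary     = var.exporter_use_canary
#     node_version   = var.node_version
#     node_port      = var.node_port
#     cadvisor_image = var.cadvisor_image
#     cadvisor_port  = var.cadvisor_port
#   })
# }
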
module "grafana" {
  source                         = "./grafana"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # grafana
  grafana_job_name               = "prod-grafana"
  grafana_use_canary             = true
  grafana_group_count            = 1
  grafana_vault_secret           = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/grafana",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  grafana_container_image        = "grafana/grafana:7.3.7"
  grafana_cpu                    = 1000
  grafana_mem                    = 2048
  grafana_port                   = 3000
}

module "minio" {
  source                         = "./minio"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]
  nomad_host_volume              = "prod-volume-data1-1"

  # minio
  minio_job_name                 = "prod-minio"
  minio_group_count              = 4
  minio_service_name             = "storage"
  minio_host                     = "http://10.32.8.1{4...7}"
  minio_port                     = 9000
  minio_container_image          = "minio/minio:RELEASE.2020-12-03T05-49-24Z"
  minio_vault_secret             = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/minio",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  minio_data_dir                 = "/data/"
  minio_use_host_volume          = true
  minio_use_canary               = true
  minio_envs                     = [ "MINIO_BROWSER=\"off\"" ]

  # minio client
  mc_job_name                    = "prod-mc"
  mc_container_image             = "minio/mc:RELEASE.2020-12-10T01-26-17Z"
  mc_extra_commands              = [
    "mc policy set public LOCALMINIO/logs.fd.io",
    "mc policy set public LOCALMINIO/docs.fd.io",
    "mc ilm add --expiry-days '180' LOCALMINIO/logs.fd.io",
    "mc admin user add LOCALMINIO storage Storage1234",
    "mc admin policy set LOCALMINIO writeonly user=storage"
  ]
  minio_buckets                  = [ "logs.fd.io", "docs.fd.io" ]
}

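# Note that "{4...7}" in minio_host is MinIO's own server-pool expansion
# syntax, not Terraform interpolation: it expands to the four endpoints
# http://10.32.8.14 through http://10.32.8.17, one per member of the
# four-instance group (minio_group_count = 4), forming a distributed MinIO
# cluster. The command rendered inside each task would look roughly like:
#
# minio server http://10.32.8.1{4...7}:9000/data/
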
module "nginx" {
  source                         = "./nginx"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # nginx
  nginx_job_name                 = "prod-nginx"
}

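# The nginx module is invoked with only a job name, which works only if all
# of its other inputs in ./nginx/variables.tf carry defaults. A minimal
# sketch of the assumed pattern (values are illustrative):
#
# variable "nginx_job_name" {
#   description = "Nginx job name."
#   type        = string
#   default     = "nginx"
# }
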
module "prometheus" {
  source                         = "./prometheus"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]
  nomad_host_volume              = "prod-volume-data1-1"

  # prometheus
  prometheus_job_name            = "prod-prometheus"
  prometheus_use_canary          = true
  prometheus_group_count         = 4
  prometheus_vault_secret        = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/prometheus",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  prometheus_data_dir            = "/data/"
  prometheus_use_host_volume     = true
  prometheus_version             = "2.24.0"
  prometheus_cpu                 = 2000
  prometheus_mem                 = 8192
  prometheus_port                = 9090
}

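# nomad_host_volume together with prometheus_use_host_volume presumably maps
# to a host volume mount in the rendered job, so the time-series data under
# prometheus_data_dir survives task restarts. A sketch of the assumed stanzas
# in the jobspec template (group/task names are illustrative):
#
# group "prod-group1-prometheus" {
#   volume "prod-volume1-prometheus" {
#     type      = "host"
#     read_only = false
#     source    = "prod-volume-data1-1"
#   }
#   task "prod-task1-prometheus" {
#     volume_mount {
#       volume      = "prod-volume1-prometheus"
#       destination = "/data/"
#       read_only   = false
#     }
#   }
# }
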
module "vpp_device" {
  source                         = "./vpp_device"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # csit_shim
  csit_shim_job_name             = "prod-device-csit-shim"
  csit_shim_group_count          = "1"
  csit_shim_cpu                  = "1000"
  csit_shim_mem                  = "5000"
}
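
# Unlike the numeric cpu/mem values used elsewhere in this file, the
# csit_shim resources are passed as quoted strings, which implies the
# ./vpp_device module declares them as type = string (or leaves them
# untyped). A sketch of the assumed declaration:
#
# variable "csit_shim_cpu" {
#   description = "CPU allocation for csit-shim (Nomad cpu, in MHz)."
#   type        = string
#   default     = "1000"
# }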