Fix: Slack key workaround
[csit.git] / terraform-ci-infra / 1n_nmd / main.tf
# For convenience in simple configurations, a child module automatically
# inherits default (un-aliased) provider configurations from its parent.
# This means that explicit provider blocks appear only in the root module,
# and downstream modules can simply declare resources for that provider
# and have them automatically associated with the root provider
# configurations. Aliased configurations such as nomad.yul1, however, are
# never inherited and must be passed explicitly through each module's
# "providers" argument, as done below.
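#
# A minimal sketch of the aliased provider configuration these modules
# reference; it is assumed to be defined elsewhere in the root module
# (e.g. in a providers.tf next to this file), and the address shown here
# is hypothetical:
#
# provider "nomad" {
#   alias   = "yul1"
#   address = "http://nomad.example.org:4646"  # hypothetical address
# }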
module "alertmanager" {
  source                         = "./alertmanager"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # alertmanager
  alertmanager_job_name          = "prod-alertmanager"
  alertmanager_use_canary        = true
  alertmanager_group_count       = 1
  alertmanager_vault_secret      = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/prometheus",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  alertmanager_version           = "0.21.0"
  alertmanager_cpu               = 1000
  alertmanager_mem               = 1024
  alertmanager_port              = 9093
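  # The key below is the Slack incoming-webhook token path, kept in
  # plaintext here as a workaround (see commit subject) while
  # use_vault_provider is false above.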
  alertmanager_slack_api_key     = "TE07RD1V1/B01L7PQK9S8/xncEcMAvF0GtJpTbC30E0AyL"
  alertmanager_slack_channel     = "fdio-infra-monitoring"
}

module "exporter" {
  source                         = "./exporter"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # exporter
  exporter_job_name              = "prod-exporter"
  exporter_use_canary            = false

  # node
  node_version                   = "1.0.1"
  node_port                      = 9100

  # blackbox
  blackbox_version               = "0.18.0"
  blackbox_port                  = 9115

  # cadvisor
  cadvisor_image                 = "gcr.io/cadvisor/cadvisor:latest"
  cadvisor_port                  = 8080
}

module "grafana" {
  source                         = "./grafana"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # grafana
  grafana_job_name               = "prod-grafana"
  grafana_use_canary             = true
  grafana_group_count            = 1
  grafana_vault_secret           = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/grafana",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  grafana_container_image        = "grafana/grafana:7.3.7"
  grafana_cpu                    = 1000
  grafana_mem                    = 2048
  grafana_port                   = 3000
}

module "minio" {
  source                         = "./minio"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]
  nomad_host_volume              = "prod-volume-data1-1"

  # minio
  minio_job_name                 = "prod-minio"
  minio_group_count              = 4
  minio_service_name             = "storage"
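  # The MinIO ellipsis notation below expands to four server endpoints,
  # 10.32.8.14 through 10.32.8.17, matching minio_group_count above.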
  minio_host                     = "http://10.32.8.1{4...7}"
  minio_port                     = 9000
  minio_container_image          = "minio/minio:RELEASE.2020-12-03T05-49-24Z"
  minio_vault_secret             = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/minio",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  minio_data_dir                 = "/data/"
  minio_use_host_volume          = true
  minio_use_canary               = true
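  # MINIO_BROWSER="off" disables the embedded MinIO web console.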
  minio_envs                     = [ "MINIO_BROWSER=\"off\"" ]

  # minio client
  mc_job_name                    = "prod-mc"
  mc_container_image             = "minio/mc:RELEASE.2020-12-10T01-26-17Z"
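  # LOCALMINIO is assumed to be the mc host alias that the module sets up
  # for the MinIO service deployed above before running these commands.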
  mc_extra_commands              = [
    "mc policy set public LOCALMINIO/logs.fd.io",
    "mc policy set public LOCALMINIO/docs.fd.io",
    "mc ilm add --expiry-days '180' LOCALMINIO/logs.fd.io",
    "mc admin user add LOCALMINIO storage Storage1234",
    "mc admin policy set LOCALMINIO writeonly user=storage"
  ]
  minio_buckets                  = [ "logs.fd.io", "docs.fd.io" ]
}

module "nginx" {
  source                         = "./nginx"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # nginx
  nginx_job_name                 = "prod-nginx"
}

module "prometheus" {
  source                         = "./prometheus"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]
  nomad_host_volume              = "prod-volume-data1-1"

  # prometheus
  prometheus_job_name            = "prod-prometheus"
  prometheus_use_canary          = true
  prometheus_group_count         = 4
  prometheus_vault_secret        = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/prometheus",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  prometheus_data_dir            = "/data/"
  prometheus_use_host_volume     = true
  prometheus_version             = "2.24.0"
  prometheus_cpu                 = 2000
  prometheus_mem                 = 8192
  prometheus_port                = 9090
}

module "vpp_device" {
  source                         = "./vpp_device"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # csit_shim
  csit_shim_job_name             = "prod-device-csit-shim"
  csit_shim_group_count          = 1
  csit_shim_cpu                  = 1000
  csit_shim_mem                  = 5000
}