Infra: Move probes under ansible instead of terraform
csit.git: terraform-ci-infra/1n_nmd/main.tf
# For convenience in simple configurations, a child module automatically
# inherits default (un-aliased) provider configurations from its parent.
# This means that explicit provider blocks appear only in the root module,
# and downstream modules can simply declare resources for that provider
# and have them automatically associated with the root provider
# configurations. Aliased (non-default) providers are the exception: each
# module below must be handed the aliased "nomad.yul1" provider explicitly
# through its "providers" argument.
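
# The "nomad.yul1" references below presuppose an aliased provider in the
# root module, declared roughly as sketched here (the address is a
# hypothetical placeholder, not the real endpoint):
#
# provider "nomad" {
#   alias   = "yul1"
#   address = "http://nomad.example.org:4646"  # placeholder endpoint
# }
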
module "alertmanager" {
  source                         = "./alertmanager"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # alertmanager
  alertmanager_job_name          = "prod-alertmanager"
  alertmanager_use_canary        = true
  alertmanager_group_count       = 1
  alertmanager_vault_secret      = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/prometheus",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  alertmanager_version           = "0.21.0"
  alertmanager_cpu               = 1000
  alertmanager_mem               = 1024
  alertmanager_port              = 9093
  alertmanager_slack_api_key     = "TE07RD1V1/B01L7PQK9S8/pbADGhhhj60JSxHRi3K0NoW6"
  alertmanager_slack_channel     = "fdio-infra-monitoring"
}
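
# All four *_vault_secret inputs in this file share one shape. A sketch of
# how each child module presumably types it (the actual declaration lives
# in that module's own variables.tf; the description is an assumption):
#
# variable "alertmanager_vault_secret" {
#   description = "Set of properties to retrieve a secret from Vault."
#   type = object({
#     use_vault_provider        = bool
#     vault_kv_policy_name      = string
#     vault_kv_path             = string
#     vault_kv_field_access_key = string
#     vault_kv_field_secret_key = string
#   })
# }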

module "grafana" {
  source                         = "./grafana"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # grafana
  grafana_job_name               = "prod-grafana"
  grafana_use_canary             = true
  grafana_group_count            = 1
  grafana_vault_secret           = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/grafana",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  grafana_container_image        = "grafana/grafana:7.3.7"
  grafana_cpu                    = 1000
  grafana_mem                    = 2048
  grafana_port                   = 3000
}
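
# Inside ./grafana the image, resource, and port inputs above are
# presumably rendered into a Nomad docker task along these lines (a
# sketch of the expected jobspec; the task name is illustrative):
#
# task "prod-task1-grafana" {
#   driver = "docker"
#   config {
#     image = "grafana/grafana:7.3.7"   # grafana_container_image
#     port_map {
#       grafana = 3000                  # grafana_port
#     }
#   }
#   resources {
#     cpu    = 1000                     # grafana_cpu, MHz
#     memory = 2048                     # grafana_mem, MB
#   }
# }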

module "minio" {
  source                         = "./minio"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]
  nomad_host_volume              = "prod-volume-data1-1"

  # minio
  minio_job_name                 = "prod-minio"
  minio_group_count              = 4
  minio_service_name             = "storage"
  minio_host                     = "http://10.32.8.1{4...7}"
  minio_port                     = 9000
  minio_container_image          = "minio/minio:RELEASE.2020-12-03T05-49-24Z"
  minio_vault_secret             = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/minio",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  minio_data_dir                 = "/data/"
  minio_use_host_volume          = true
  minio_use_canary               = true
  minio_envs                     = [ "MINIO_BROWSER=\"off\"" ]

  # minio client
  mc_job_name                    = "prod-mc"
  mc_container_image             = "minio/mc:RELEASE.2020-12-10T01-26-17Z"
  mc_extra_commands              = [
    "mc policy set public LOCALMINIO/logs.fd.io",
    "mc policy set public LOCALMINIO/docs.fd.io",
    "mc ilm add --expiry-days '180' LOCALMINIO/logs.fd.io",
    "mc admin user add LOCALMINIO storage Storage1234",
    "mc admin policy set LOCALMINIO writeonly user=storage"
  ]
  minio_buckets                  = [ "logs.fd.io", "docs.fd.io" ]
}
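
# The {4...7} in minio_host is MinIO's ellipsis expansion syntax, not
# Terraform interpolation. Combined with minio_data_dir it presumably
# yields a distributed server command like the sketch below, which MinIO
# expands to the four peers 10.32.8.14-10.32.8.17, one per group
# (minio_group_count = 4):
#
#   minio server http://10.32.8.1{4...7}/data/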

module "nginx" {
  source                         = "./nginx"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # nginx
  nginx_job_name                 = "prod-nginx"
}
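
# Only the job name is set here, so the remaining nginx inputs presumably
# carry defaults in ./nginx/variables.tf. A sketch of the one input that
# is passed (the description and default are assumptions):
#
# variable "nginx_job_name" {
#   description = "Nginx job name."
#   type        = string
#   default     = "nginx"
# }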

module "prometheus" {
  source                         = "./prometheus"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]
  nomad_host_volume              = "prod-volume-data1-1"

  # prometheus
  prometheus_job_name            = "prod-prometheus"
  prometheus_use_canary          = true
  prometheus_group_count         = 4
  prometheus_vault_secret        = {
    use_vault_provider           = false,
    vault_kv_policy_name         = "kv-secret",
    vault_kv_path                = "secret/data/prometheus",
    vault_kv_field_access_key    = "access_key",
    vault_kv_field_secret_key    = "secret_key"
  }
  prometheus_data_dir            = "/data/"
  prometheus_use_host_volume     = true
  prometheus_version             = "2.24.0"
  prometheus_cpu                 = 2000
  prometheus_mem                 = 8192
  prometheus_port                = 9090
}
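
# With prometheus_use_host_volume = true, the child module presumably pins
# the job's data to the "prod-volume-data1-1" host volume and mounts it at
# prometheus_data_dir. A sketch of the matching Nomad jobspec stanzas
# (group and task names are illustrative):
#
# group "prod-group1-prometheus" {
#   volume "prod-volume-data1-1" {
#     type      = "host"
#     read_only = false
#     source    = "prod-volume-data1-1"
#   }
#   task "prod-task1-prometheus" {
#     volume_mount {
#       volume      = "prod-volume-data1-1"
#       destination = "/data/"            # prometheus_data_dir
#       read_only   = false
#     }
#   }
# }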

module "vpp_device" {
  source                         = "./vpp_device"
  providers                      = {
    nomad = nomad.yul1
  }

  # nomad
  nomad_datacenters              = [ "yul1" ]

  # csit_shim
  csit_shim_job_name             = "prod-device-csit-shim"
  csit_shim_group_count          = "1"
  csit_shim_cpu                  = "1000"
  csit_shim_mem                  = "5000"
}
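
# Unlike the numeric count/cpu/mem values in the modules above, the
# csit_shim inputs are quoted strings, so ./vpp_device presumably declares
# them as strings. A sketch of one such declaration (description and
# default are assumptions):
#
# variable "csit_shim_cpu" {
#   description = "CPU allocation for csit-shim (MHz)."
#   type        = string
#   default     = "1000"
# }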