--- /dev/null
+locals {
+  # Expand the datacenter list into a quoted, comma-separated string so that
+  # the templates' `datacenters = ["${datacenters}"]` renders a proper HCL
+  # list (e.g. ["yul1","yul2"]). A plain join(",") would collapse multiple
+  # datacenters into a single "yul1,yul2" element, which Nomad would treat
+  # as one (nonexistent) datacenter name.
+  datacenters = join("\",\"", var.datacenters)
+}
+
+# One Nomad job per namespace (e.g. "prod", "sandbox"). Each namespace uses
+# its own template file named "nomad-<job_name>-<namespace>.hcl.tftpl"
+# located next to this module.
+resource "nomad_job" "gha-dispatcher" {
+  for_each = toset(var.namespace)
+  # Use path.module (not path.cwd) so the template resolves regardless of
+  # the working directory terraform is invoked from.
+  jobspec = templatefile(
+    "${path.module}/nomad-${var.job_name}-${each.key}.hcl.tftpl",
+    {
+      cpu         = var.cpu,
+      datacenters = local.datacenters,
+      image       = "${var.image}-${each.key}:latest",
+      job_name    = "${var.job_name}-${each.key}",
+      memory      = var.memory,
+      namespace   = each.key,
+      node_pool   = var.node_pool,
+      region      = var.region,
+      type        = var.type
+    })
+  # Block until the deployment is complete instead of returning immediately.
+  detach = false
+}
\ No newline at end of file
--- /dev/null
+# Nomad jobspec rendered by templatefile(): ${...} placeholders are supplied
+# from main.tf, while $${...} sequences are escapes that reach Nomad/
+# consul-template unexpanded.
+job "${job_name}" {
+ datacenters = ["${datacenters}"]
+ type = "${type}"
+ node_pool = "${node_pool}"
+ region = "${region}"
+ namespace = "${namespace}"
+
+ group "${job_name}" {
+ count = 1
+ # Place only on amd64 nodes of class "builder".
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ value = "amd64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ # Scratch disk is neither migrated nor sticky across reschedules.
+ ephemeral_disk {
+ migrate = false
+ size = 3000
+ sticky = false
+ }
+ task "${job_name}" {
+ driver = "docker"
+ config {
+ image = "${image}"
+ }
+ # Render every key/value stored under the Nomad variable path
+ # "nomad/jobs" into a dotenv file; env = true loads that file into the
+ # task's environment.
+ template {
+ destination = "$${NOMAD_SECRETS_DIR}/.env"
+ env = true
+ data = <<EOT
+{{- with nomadVar "nomad/jobs" -}}
+{{- range $k, $v := . }}
+{{ $k }}={{ $v }}
+{{- end }}
+{{- end }}
+EOT
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+# Nomad jobspec rendered by templatefile(): ${...} placeholders are supplied
+# from main.tf, while $${...} sequences are escapes that reach Nomad/
+# consul-template unexpanded.
+# NOTE(review): byte-identical to the other jobspec template in this change;
+# consider sharing a single template file instead of one per namespace.
+job "${job_name}" {
+ datacenters = ["${datacenters}"]
+ type = "${type}"
+ node_pool = "${node_pool}"
+ region = "${region}"
+ namespace = "${namespace}"
+
+ group "${job_name}" {
+ count = 1
+ # Place only on amd64 nodes of class "builder".
+ constraint {
+ attribute = "$${attr.cpu.arch}"
+ value = "amd64"
+ }
+ constraint {
+ attribute = "$${node.class}"
+ value = "builder"
+ }
+ # Scratch disk is neither migrated nor sticky across reschedules.
+ ephemeral_disk {
+ migrate = false
+ size = 3000
+ sticky = false
+ }
+ task "${job_name}" {
+ driver = "docker"
+ config {
+ image = "${image}"
+ }
+ # Render every key/value stored under the Nomad variable path
+ # "nomad/jobs" into a dotenv file; env = true loads that file into the
+ # task's environment.
+ template {
+ destination = "$${NOMAD_SECRETS_DIR}/.env"
+ env = true
+ data = <<EOT
+{{- with nomadVar "nomad/jobs" -}}
+{{- range $k, $v := . }}
+{{ $k }}={{ $v }}
+{{- end }}
+{{- end }}
+EOT
+ }
+ resources {
+ cpu = ${cpu}
+ memory = ${memory}
+ }
+ }
+ }
+}
\ No newline at end of file
--- /dev/null
+# Nomad provider for the "yul1" cluster.
+# NOTE(review): hard-coded plaintext HTTP endpoint, and the mTLS settings are
+# present but commented out — confirm this is intentional for this network.
+provider "nomad" {
+ address = "http://10.30.51.24:4646"
+ alias = "yul1"
+ # ca_file = var.nomad_provider_ca_file
+ # cert_file = var.nomad_provider_cert_file
+ # key_file = var.nomad_provider_key_file
+}
\ No newline at end of file
--- /dev/null
+# Nomad job parameters.
+variable "datacenters" {
+  description = "List of datacenters to be considered when placing this task."
+  type        = list(string)
+  default     = ["yul1"]
+}
+
+variable "cpu" {
+  description = "CPU required to run this task, in MHz."
+  type        = number
+  default     = 12000
+}
+
+variable "image" {
+  description = "Docker image prefix to run (namespace suffix and tag are appended)."
+  type        = string
+  default     = "pmikus/docker-gha-dispatcher"
+}
+
+variable "job_name" {
+  description = "Base name for the job (namespace suffix is appended per instance)."
+  type        = string
+  default     = "gha-dispatcher"
+}
+
+variable "memory" {
+  description = "Memory required to run this task, in MB."
+  type        = number
+  default     = 8000
+}
+
+variable "namespace" {
+  description = "Namespaces in which to execute the job (one job per entry)."
+  type        = set(string)
+  default     = ["prod", "sandbox"]
+}
+
+variable "node_pool" {
+  description = "Node pool to place the job in."
+  type        = string
+  default     = "default"
+}
+
+variable "region" {
+  description = "Region in which to execute the job."
+  type        = string
+  default     = "global"
+}
+
+variable "type" {
+  description = "Nomad scheduler (job type) to use."
+  type        = string
+  default     = "service"
+}
\ No newline at end of file
--- /dev/null
+terraform {
+  # Minimum Terraform CLI version this configuration was written against.
+  required_version = ">= 1.12.1"
+
+  # Remote state is kept in Consul.
+  backend "consul" {
+    address = "10.30.51.23:8500"
+    scheme  = "http"
+    path    = "terraform/gha-dispatcher"
+  }
+
+  required_providers {
+    nomad = {
+      source  = "hashicorp/nomad"
+      version = ">= 2.5.0"
+    }
+  }
+}