AWS terraform automation scripts 82/25082/5
authorMaros Mullner <mamullne@cisco.com>
Thu, 13 Feb 2020 09:57:02 +0000 (10:57 +0100)
committerPeter Mikus <pmikus@cisco.com>
Mon, 17 Feb 2020 08:06:13 +0000 (08:06 +0000)
Change-Id: I050fe466dd006d330d1bb29cc6ccba8ae4681f01
Signed-off-by: Maros Mullner <maros.mullner@pantheon.tech>
20 files changed:
resources/tools/terraform/aws/.gitignore [new file with mode: 0644]
resources/tools/terraform/aws/main.tf [new file with mode: 0644]
resources/tools/terraform/aws/nic.tf [new file with mode: 0644]
resources/tools/terraform/azure/main.tf
resources/tools/testbed-setup/ansible/cloud_topology.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/aws/defaults/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/aws/handlers/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/aws/tasks/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/iperf/tasks/main.yaml
resources/tools/testbed-setup/ansible/roles/topology/tasks/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/user_add/defaults/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/user_add/handlers/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/user_add/tasks/main.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/site_aws.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/sut_aws.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/sut_azure.yaml
resources/tools/testbed-setup/ansible/templates/topology_aws.j2 [new file with mode: 0644]
resources/tools/testbed-setup/ansible/templates/topology_azure.j2
resources/tools/testbed-setup/ansible/tg_aws.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/tg_azure.yaml

diff --git a/resources/tools/terraform/aws/.gitignore b/resources/tools/terraform/aws/.gitignore
new file mode 100644 (file)
index 0000000..fc64f00
--- /dev/null
@@ -0,0 +1,4 @@
+.terraform/
+.terraform.tfstate.lock.info
+terraform.tfstate
+terraform.tfstate.backup
diff --git a/resources/tools/terraform/aws/main.tf b/resources/tools/terraform/aws/main.tf
new file mode 100644 (file)
index 0000000..74baa12
--- /dev/null
@@ -0,0 +1,368 @@
+provider "aws" {
+  region = "eu-central-1"
+}
+
+variable "avail_zone" {
+  type = string
+  default = "eu-central-1a"
+}
+# Base VPC CIDRs
+variable "vpc_cidr_mgmt" {
+  type = string
+  default = "192.168.0.0/24"
+}
+variable "vpc_cidr_b" {
+  type = string
+  default = "192.168.10.0/24"
+}
+variable "vpc_cidr_c" {
+  type = string
+  default = "200.0.0.0/24"
+}
+variable "vpc_cidr_d" {
+  type = string
+  default = "192.168.20.0/24"
+}
+
+# Trex Dummy CIDRs
+variable "trex_dummy_cidr_port_0" {
+  type = string
+  default = "10.0.0.0/24"
+}
+variable "trex_dummy_cidr_port_1" {
+  type = string
+  default = "20.0.0.0/24"
+}
+
+# IPs
+variable "a_gateway" {
+  type = string
+  default = "192.168.0.1"
+}
+variable "b_gateway" {
+  type = string
+  default = "192.168.10.1"
+}
+variable "c_gateway" {
+  type = string
+  default = "200.0.0.1"
+}
+variable "d_gateway" {
+  type = string
+  default = "192.168.20.1"
+}
+variable "tg_if1_ip" {
+  type = string
+  default = "192.168.10.254"
+}
+variable "tg_if2_ip" {
+  type = string
+  default = "192.168.20.254"
+}
+variable "dut1_if1_ip" {
+  type = string
+  default = "192.168.10.11"
+}
+variable "dut1_if2_ip" {
+  type = string
+  default = "200.0.0.101"
+}
+variable "dut2_if1_ip" {
+  type = string
+  default = "200.0.0.102"
+}
+variable "dut2_if2_ip" {
+  type = string
+  default = "192.168.20.11"
+}
+variable "tg_mgmt_ip" {
+  type = string
+  default = "192.168.0.10"
+}
+variable "dut1_mgmt_ip" {
+  type = string
+  default = "192.168.0.11"
+}
+variable "dut2_mgmt_ip" {
+  type = string
+  default = "192.168.0.12"
+}
+
+# Instance Type
+variable "instance_type" {
+  type = string
+  default = "c5n.9xlarge"
+}
+
+resource "aws_vpc" "CSIT" {
+  cidr_block = var.vpc_cidr_mgmt
+}
+
+resource "aws_security_group" "CSIT" {
+  name        = "CSIT"
+  description = "Allow inbound traffic"
+  vpc_id = aws_vpc.CSIT.id
+
+  ingress {
+    from_port = 22
+    to_port = 22
+    protocol = "tcp"
+    cidr_blocks = ["0.0.0.0/0"]
+  }
+
+  ingress {
+    from_port = 0
+    to_port = 0
+    protocol = "-1"
+    self = true
+  }
+
+  egress {
+    from_port       = 0
+    to_port         = 0
+    protocol        = "-1"
+    cidr_blocks     = ["0.0.0.0/0"]
+  }
+
+  depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_vpc_ipv4_cidr_block_association" "b" {
+  vpc_id     = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_b
+  depends_on = [aws_vpc.CSIT]
+}
+resource "aws_vpc_ipv4_cidr_block_association" "c" {
+  vpc_id     = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_c
+  depends_on = [aws_vpc.CSIT]
+}
+resource "aws_vpc_ipv4_cidr_block_association" "d" {
+  vpc_id     = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_d
+  depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_subnet" "mgmt" {
+  vpc_id = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_mgmt
+  availability_zone = var.avail_zone
+  depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_subnet" "b" {
+  vpc_id = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_b
+  availability_zone = var.avail_zone
+  depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.b]
+}
+
+resource "aws_subnet" "c" {
+  vpc_id = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_c
+  availability_zone = var.avail_zone
+  depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.c]
+}
+
+resource "aws_subnet" "d" {
+  vpc_id = aws_vpc.CSIT.id
+  cidr_block = var.vpc_cidr_d
+  availability_zone = var.avail_zone
+  depends_on = [aws_vpc.CSIT, aws_vpc_ipv4_cidr_block_association.d]
+}
+
+resource "aws_internet_gateway" "CSIT" {
+  vpc_id = aws_vpc.CSIT.id
+  depends_on = [aws_vpc.CSIT]
+}
+
+resource "aws_key_pair" "CSIT" {
+  key_name = "CSIT"
+  public_key = file("~/.ssh/id_rsa.pub")
+}
+
+data "aws_ami" "ubuntu" {
+  most_recent = true
+
+  filter {
+    name   = "name"
+    values = ["*hvm-ssd/ubuntu-bionic-18.04-amd64*"]
+  }
+
+  filter {
+    name   = "virtualization-type"
+    values = ["hvm"]
+  }
+
+  owners = ["099720109477"] # Canonical
+}
+
+resource "aws_placement_group" "CSIT" {
+  name     = "CSIT"
+  strategy = "cluster"
+}
+
+resource "aws_instance" "tg" {
+  ami           = data.aws_ami.ubuntu.id
+  instance_type = var.instance_type
+#  cpu_threads_per_core = 1
+#  cpu_core_count = 18
+  key_name = aws_key_pair.CSIT.key_name
+  associate_public_ip_address = true
+  subnet_id = aws_subnet.mgmt.id
+  private_ip = var.tg_mgmt_ip
+  vpc_security_group_ids = [aws_security_group.CSIT.id]
+  depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
+  placement_group = aws_placement_group.CSIT.id
+  source_dest_check = false
+}
+
+resource "aws_instance" "dut1" {
+  ami           = data.aws_ami.ubuntu.id
+#  cpu_threads_per_core = 1
+#  cpu_core_count = 18
+  instance_type = var.instance_type
+  key_name = aws_key_pair.CSIT.key_name
+  associate_public_ip_address = true
+  subnet_id = aws_subnet.mgmt.id
+  private_ip = var.dut1_mgmt_ip
+  vpc_security_group_ids = [aws_security_group.CSIT.id]
+  depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
+  placement_group = aws_placement_group.CSIT.id
+  source_dest_check = false
+}
+
+resource "aws_instance" "dut2" {
+  ami           = data.aws_ami.ubuntu.id
+#  cpu_threads_per_core = 1
+#  cpu_core_count = 18
+  instance_type = var.instance_type
+  key_name = aws_key_pair.CSIT.key_name
+  associate_public_ip_address = true
+  subnet_id = aws_subnet.mgmt.id
+  private_ip = var.dut2_mgmt_ip
+  vpc_security_group_ids = [aws_security_group.CSIT.id]
+  depends_on = [aws_vpc.CSIT, aws_placement_group.CSIT]
+  placement_group = aws_placement_group.CSIT.id
+  source_dest_check = false
+}
+
+resource "aws_route" "CSIT-igw" {
+  route_table_id = aws_vpc.CSIT.main_route_table_id
+  gateway_id = aws_internet_gateway.CSIT.id
+  destination_cidr_block = "0.0.0.0/0"
+  depends_on = [aws_vpc.CSIT, aws_internet_gateway.CSIT]
+}
+resource "aws_route" "dummy-trex-port-0" {
+  route_table_id = aws_vpc.CSIT.main_route_table_id
+  network_interface_id = aws_instance.tg.primary_network_interface_id
+  destination_cidr_block = var.trex_dummy_cidr_port_0
+  depends_on = [aws_vpc.CSIT, aws_instance.dut1]
+}
+resource "aws_route" "dummy-trex-port-1" {
+  route_table_id = aws_vpc.CSIT.main_route_table_id
+  network_interface_id = aws_instance.tg.primary_network_interface_id
+  destination_cidr_block = var.trex_dummy_cidr_port_1
+  depends_on = [aws_vpc.CSIT, aws_instance.dut2]
+}
+
+resource "null_resource" "deploy_tg" {
+  depends_on = [ aws_instance.tg ]
+  connection {
+    user = "ubuntu"
+    host = aws_instance.tg.public_ip
+    private_key = file("~/.ssh/id_rsa")
+  }
+  provisioner "ansible" {
+    plays {
+      playbook {
+        file_path = "../../testbed-setup/ansible/site_aws.yaml"
+        force_handlers = true
+      }
+      hosts = ["tg"]
+      extra_vars = {
+        ansible_python_interpreter = "/usr/bin/python3"
+        aws = true
+      }
+    }
+  }
+}
+resource "null_resource" "deploy_dut1" {
+  depends_on = [ aws_instance.dut1 ]
+  connection {
+    user = "ubuntu"
+    host = aws_instance.dut1.public_ip
+    private_key = file("~/.ssh/id_rsa")
+  }
+  provisioner "ansible" {
+    plays {
+      playbook {
+        file_path = "../../testbed-setup/ansible/site_aws.yaml"
+        force_handlers = true
+      }
+      hosts = ["sut"]
+      extra_vars = {
+        ansible_python_interpreter = "/usr/bin/python3"
+        aws = true
+      }
+    }
+  }
+}
+resource "null_resource" "deploy_dut2" {
+  depends_on = [ aws_instance.dut2 ]
+  connection {
+    user = "ubuntu"
+    host = aws_instance.dut2.public_ip
+    private_key = file("~/.ssh/id_rsa")
+  }
+  provisioner "ansible" {
+    plays {
+      playbook {
+        file_path = "../../testbed-setup/ansible/site_aws.yaml"
+        force_handlers = true
+      }
+      hosts = ["sut"]
+      extra_vars = {
+        ansible_python_interpreter = "/usr/bin/python3"
+        aws = true
+      }
+    }
+  }
+}
+
+resource "null_resource" "deploy_topology" {
+  depends_on = [ aws_instance.tg, aws_instance.dut1, aws_instance.dut2 ]
+  provisioner "ansible" {
+    plays {
+      playbook {
+        file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
+      }
+      hosts = ["local"]
+      extra_vars = {
+        ansible_python_interpreter = "/usr/bin/python3"
+        cloud_topology = "aws"
+        tg_if1_mac = data.aws_network_interface.tg_if1.mac_address
+        tg_if2_mac = data.aws_network_interface.tg_if2.mac_address
+        dut1_if1_mac = data.aws_network_interface.dut1_if1.mac_address
+        dut1_if2_mac = data.aws_network_interface.dut1_if2.mac_address
+        dut2_if1_mac = data.aws_network_interface.dut2_if1.mac_address
+        dut2_if2_mac = data.aws_network_interface.dut2_if2.mac_address
+        tg_public_ip = aws_instance.tg.public_ip
+        dut1_public_ip = aws_instance.dut1.public_ip
+        dut2_public_ip = aws_instance.dut2.public_ip
+      }
+    }
+  }
+}
+
+output "dbg_tg" {
+  value = "TG IP: ${aws_instance.tg.public_ip}"
+}
+
+output "dbg_dut1" {
+  value = "DUT1 IP: ${aws_instance.dut1.public_ip}"
+}
+
+output "dbg_dut2" {
+  value = "DUT2 IP: ${aws_instance.dut2.public_ip}"
+}
diff --git a/resources/tools/terraform/aws/nic.tf b/resources/tools/terraform/aws/nic.tf
new file mode 100644 (file)
index 0000000..31926e4
--- /dev/null
@@ -0,0 +1,101 @@
+resource "aws_network_interface" "dut1_if2" {
+  subnet_id = aws_subnet.c.id
+  source_dest_check = false
+  private_ip = var.dut1_if2_ip
+  private_ips = [var.dut1_if2_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance     = aws_instance.dut1.id
+    device_index = 1
+  }
+  depends_on = [aws_vpc.CSIT]
+}
+
+data "aws_network_interface" "dut1_if2" {
+  id = aws_network_interface.dut1_if2.id
+}
+
+resource "aws_network_interface" "dut1_if1" {
+  subnet_id = aws_subnet.b.id
+  source_dest_check = false
+  private_ip = var.dut1_if1_ip
+  private_ips = [var.dut1_if1_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance     = aws_instance.dut1.id
+    device_index = 2
+  }
+  depends_on = [aws_vpc.CSIT, aws_subnet.b]
+}
+
+data "aws_network_interface" "dut1_if1" {
+  id = aws_network_interface.dut1_if1.id
+}
+
+resource "aws_network_interface" "dut2_if1" {
+  subnet_id = aws_subnet.c.id
+  source_dest_check = false
+  private_ip = var.dut2_if1_ip
+  private_ips = [var.dut2_if1_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance     = aws_instance.dut2.id
+    device_index = 1
+  }
+  depends_on = [aws_vpc.CSIT, aws_subnet.c]
+}
+
+data "aws_network_interface" "dut2_if1" {
+  id = aws_network_interface.dut2_if1.id
+}
+
+resource "aws_network_interface" "dut2_if2" {
+  subnet_id = aws_subnet.d.id
+  source_dest_check = false
+  private_ip = var.dut2_if2_ip
+  private_ips = [var.dut2_if2_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance     = aws_instance.dut2.id
+    device_index = 2
+  }
+  depends_on = [aws_vpc.CSIT, aws_subnet.d]
+}
+
+data "aws_network_interface" "dut2_if2" {
+  id = aws_network_interface.dut2_if2.id
+}
+
+resource "aws_network_interface" "tg_if1" {
+  subnet_id = aws_subnet.b.id
+  source_dest_check = false
+  private_ip = var.tg_if1_ip
+  private_ips = [var.tg_if1_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance     = aws_instance.tg.id
+    device_index = 1
+  }
+  depends_on = [aws_vpc.CSIT, aws_subnet.b]
+}
+
+data "aws_network_interface" "tg_if1" {
+  id = aws_network_interface.tg_if1.id
+}
+
+resource "aws_network_interface" "tg_if2" {
+  subnet_id = aws_subnet.d.id
+  source_dest_check = false
+  private_ip = var.tg_if2_ip
+  private_ips = [var.tg_if2_ip]
+  security_groups = [aws_security_group.CSIT.id]
+  attachment {
+    instance     = aws_instance.tg.id
+    device_index = 2
+  }
+  depends_on = [aws_vpc.CSIT, aws_subnet.d]
+}
+
+data "aws_network_interface" "tg_if2" {
+  id = aws_network_interface.tg_if2.id
+}
index 79fa456..cae6591 100644 (file)
@@ -357,13 +357,12 @@ resource "azurerm_virtual_machine" "tg" {
     }
     os_profile {
         computer_name  = "tg"
-        admin_username = "testuser"
-        admin_password = "Csit1234"
+        admin_username = "ubuntu"
     }
     os_profile_linux_config {
         disable_password_authentication = false
         ssh_keys {
-            path     = "/home/testuser/.ssh/authorized_keys"
+            path     = "/home/ubuntu/.ssh/authorized_keys"
             key_data = file("~/.ssh/id_rsa.pub")
         }
     }
@@ -396,13 +395,12 @@ resource "azurerm_virtual_machine" "dut1" {
     }
     os_profile {
         computer_name  = "dut1"
-        admin_username = "testuser"
-        admin_password = "Csit1234"
+        admin_username = "ubuntu"
     }
     os_profile_linux_config {
         disable_password_authentication = false
         ssh_keys {
-            path     = "/home/testuser/.ssh/authorized_keys"
+            path     = "/home/ubuntu/.ssh/authorized_keys"
             key_data = file("~/.ssh/id_rsa.pub")
         }
     }
@@ -435,13 +433,12 @@ resource "azurerm_virtual_machine" "dut2" {
     }
     os_profile {
         computer_name  = "dut2"
-        admin_username = "testuser"
-        admin_password = "Csit1234"
+        admin_username = "ubuntu"
     }
     os_profile_linux_config {
         disable_password_authentication = false
         ssh_keys {
-            path     = "/home/testuser/.ssh/authorized_keys"
+            path     = "/home/ubuntu/.ssh/authorized_keys"
             key_data = file("~/.ssh/id_rsa.pub")
         }
     }
@@ -474,8 +471,9 @@ resource "null_resource" "deploy_tg" {
                  azurerm_network_interface.tg_if1,
                  azurerm_network_interface.tg_if2 ]
   connection {
-    user = "testuser"
+    user = "ubuntu"
     host = data.azurerm_public_ip.tg_public_ip.ip_address
+    private_key = file("~/.ssh/id_rsa")
   }
   provisioner "ansible" {
     plays {
@@ -485,17 +483,8 @@ resource "null_resource" "deploy_tg" {
       }
       hosts = ["tg"]
       extra_vars = {
-        ansible_python_interpreter = "python3"
+        ansible_python_interpreter = "/usr/bin/python3"
         azure = true
-        remote_net = var.vpc_cidr_d
-        tg_if1_mac = azurerm_network_interface.tg_if1.mac_address
-        tg_if2_mac = azurerm_network_interface.tg_if2.mac_address
-        dut1_if1_mac = azurerm_network_interface.dut1_if1.mac_address
-        dut1_if2_mac = azurerm_network_interface.dut1_if2.mac_address
-        dut2_if1_mac = azurerm_network_interface.dut2_if1.mac_address
-        dut2_if2_mac = azurerm_network_interface.dut2_if2.mac_address
-        dut1_if1_ip = azurerm_network_interface.dut1_if1.private_ip_address
-        dut2_if2_ip = azurerm_network_interface.dut2_if2.private_ip_address
       }
     }
   }
@@ -506,8 +495,9 @@ resource "null_resource" "deploy_dut1" {
                  azurerm_network_interface.dut1_if1,
                  azurerm_network_interface.dut1_if2 ]
   connection {
-    user = "testuser"
+    user = "ubuntu"
     host = data.azurerm_public_ip.dut1_public_ip.ip_address
+    private_key = file("~/.ssh/id_rsa")
   }
   provisioner "ansible" {
     plays {
@@ -517,16 +507,8 @@ resource "null_resource" "deploy_dut1" {
       }
       hosts = ["sut"]
       extra_vars = {
-        ansible_python_interpreter = "python3"
+        ansible_python_interpreter = "/usr/bin/python3"
         azure = true
-        dut1_if1_ip = azurerm_network_interface.dut1_if1.private_ip_address
-        dut1_if1_mac = azurerm_network_interface.dut1_if1.mac_address
-        dut1_if2_ip = azurerm_network_interface.dut1_if2.private_ip_address
-        dut1_if2_mac = azurerm_network_interface.dut1_if2.mac_address
-        dut2_if2_ip = azurerm_network_interface.dut2_if1.private_ip_address
-        dut2_if1_gateway = azurerm_network_interface.dut2_if1.private_ip_address
-        traffic_if1 = var.trex_dummy_cidr_port_0
-        traffic_if2 = var.trex_dummy_cidr_port_1
       }
     }
   }
@@ -537,8 +519,9 @@ resource "null_resource" "deploy_dut2" {
                  azurerm_network_interface.dut2_if1,
                  azurerm_network_interface.dut2_if2 ]
   connection {
-    user = "testuser"
+    user = "ubuntu"
     host = data.azurerm_public_ip.dut2_public_ip.ip_address
+    private_key = file("~/.ssh/id_rsa")
   }
   provisioner "ansible" {
     plays {
@@ -548,16 +531,41 @@ resource "null_resource" "deploy_dut2" {
       }
       hosts = ["sut"]
       extra_vars = {
-        ansible_python_interpreter = "python3"
+        ansible_python_interpreter = "/usr/bin/python3"
         azure = true
-        dut2_if1_ip = azurerm_network_interface.dut2_if1.private_ip_address
+      }
+    }
+  }
+}
+
+resource "null_resource" "deploy_topology" {
+  depends_on = [ azurerm_virtual_machine.tg,
+                 azurerm_network_interface.tg_if1,
+                 azurerm_network_interface.tg_if2,
+                 azurerm_virtual_machine.dut1,
+                 azurerm_network_interface.dut1_if1,
+                 azurerm_network_interface.dut1_if2,
+                 azurerm_virtual_machine.dut2,
+                 azurerm_network_interface.dut2_if1,
+                 azurerm_network_interface.dut2_if2 ]
+  provisioner "ansible" {
+    plays {
+      playbook {
+        file_path = "../../testbed-setup/ansible/cloud_topology.yaml"
+      }
+      hosts = ["local"]
+      extra_vars = {
+        ansible_python_interpreter = "/usr/bin/python3"
+        cloud_topology = "azure"
+        tg_if1_mac = azurerm_network_interface.tg_if1.mac_address
+        tg_if2_mac = azurerm_network_interface.tg_if2.mac_address
+        dut1_if1_mac = azurerm_network_interface.dut1_if1.mac_address
+        dut1_if2_mac = azurerm_network_interface.dut1_if2.mac_address
         dut2_if1_mac = azurerm_network_interface.dut2_if1.mac_address
-        dut2_if2_ip = azurerm_network_interface.dut2_if2.private_ip_address
         dut2_if2_mac = azurerm_network_interface.dut2_if2.mac_address
-        dut1_if2_ip = azurerm_network_interface.dut1_if2.private_ip_address
-        dut1_if2_gateway = azurerm_network_interface.dut1_if2.private_ip_address
-        traffic_if1 = var.trex_dummy_cidr_port_0
-        traffic_if2 = var.trex_dummy_cidr_port_1
+        tg_public_ip = data.azurerm_public_ip.tg_public_ip.ip_address
+        dut1_public_ip = data.azurerm_public_ip.dut1_public_ip.ip_address
+        dut2_public_ip = data.azurerm_public_ip.dut2_public_ip.ip_address
       }
     }
   }
diff --git a/resources/tools/testbed-setup/ansible/cloud_topology.yaml b/resources/tools/testbed-setup/ansible/cloud_topology.yaml
new file mode 100644 (file)
index 0000000..083a3bd
--- /dev/null
@@ -0,0 +1,9 @@
+---
+# file: cloud_topology.yaml
+
+- hosts: localhost
+  gather_facts: false
+  roles:
+    - role: topology
+      tags: topology
+
diff --git a/resources/tools/testbed-setup/ansible/roles/aws/defaults/main.yaml b/resources/tools/testbed-setup/ansible/roles/aws/defaults/main.yaml
new file mode 100644 (file)
index 0000000..706e552
--- /dev/null
@@ -0,0 +1,3 @@
+---
+# file: roles/aws/defaults/main.yaml
+
diff --git a/resources/tools/testbed-setup/ansible/roles/aws/handlers/main.yaml b/resources/tools/testbed-setup/ansible/roles/aws/handlers/main.yaml
new file mode 100644 (file)
index 0000000..b155b4b
--- /dev/null
@@ -0,0 +1,8 @@
+---
+# file: roles/aws/handlers/main.yaml
+
+- name: Reboot server
+  reboot:
+    reboot_timeout: 3600
+  tags:
+    - reboot-server
diff --git a/resources/tools/testbed-setup/ansible/roles/aws/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/aws/tasks/main.yaml
new file mode 100644 (file)
index 0000000..94dcff4
--- /dev/null
@@ -0,0 +1,24 @@
+---
+# file: roles/aws/tasks/main.yaml
+
+- name: AWS - Load Kernel Modules By Default
+  lineinfile:
+    path: "/etc/modules"
+    state: "present"
+    line: "{{ item }}"
+  with_items:
+    - "vfio-pci"
+  tags:
+    - load-kernel-modules
+
+- name: Performance Tuning - Adjust nr_hugepages
+  # change the minimum size of the hugepage pool.
+  # 2G VPP, 4GB per VNF/CNF, 2G reserve
+  sysctl:
+    name: "vm.nr_hugepages"
+    value: "8192"
+    state: "present"
+    sysctl_file: "/etc/sysctl.d/90-csit.conf"
+    reload: "yes"
+  tags:
+    - set-sysctl
index e43206b..e77fdbc 100644 (file)
@@ -29,7 +29,7 @@
     remote_src: true
     src: "{{ iperf_target_dir }}/iperf-{{ iperf_version }}.tar.gz"
     dest: "{{ iperf_target_dir }}/"
-    creates: "{{ iperf_target_dir }}/iperf-{{ iperf_version }}/"
+    creates: "{{ iperf_target_dir }}/iperf-{{ iperf_version }}/src"
   tags:
     - install-iperf
 
diff --git a/resources/tools/testbed-setup/ansible/roles/topology/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/topology/tasks/main.yaml
new file mode 100644 (file)
index 0000000..a2e67f4
--- /dev/null
@@ -0,0 +1,9 @@
+---
+# file: roles/topology/tasks/main.yaml
+
+- name: Create topology file
+  template:
+    src: 'templates/topology_{{ cloud_topology }}.j2'
+    dest: '../../../../topologies/available/{{ cloud_topology }}_3n_skx_testbed.yaml'
+  tags:
+    - create-topology-file
diff --git a/resources/tools/testbed-setup/ansible/roles/user_add/defaults/main.yaml b/resources/tools/testbed-setup/ansible/roles/user_add/defaults/main.yaml
new file mode 100644 (file)
index 0000000..9e5aaf2
--- /dev/null
@@ -0,0 +1,5 @@
+---
+# file: roles/user_add/defaults/main.yaml
+
+user_pass: "$6$zpBUdQ4q$P2zKclumvCndWujgP/qQ8eMk3YZk7ESAom04Fqp26hJH2jWkMXEX..jqxzMdDLJKiDaDHIaSkQMVjHzd3cRLs1"
+
diff --git a/resources/tools/testbed-setup/ansible/roles/user_add/handlers/main.yaml b/resources/tools/testbed-setup/ansible/roles/user_add/handlers/main.yaml
new file mode 100644 (file)
index 0000000..e6b7d82
--- /dev/null
@@ -0,0 +1,10 @@
+---
+# file: roles/user_add/handlers/main.yaml
+
+- name: Restart sshd
+  service:
+    name: sshd
+    state: restarted
+  tags:
+    - restart-sshd
+
diff --git a/resources/tools/testbed-setup/ansible/roles/user_add/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/user_add/tasks/main.yaml
new file mode 100644 (file)
index 0000000..2672996
--- /dev/null
@@ -0,0 +1,31 @@
+---
+# file: roles/user_add/tasks/main.yaml
+
+- name: Add testuser account
+  user:
+    name: "testuser"
+    state: present
+    shell: "/bin/bash"
+    password: "{{ user_pass }}"
+  tags:
+    - add-user
+
+- name: Allow password login
+  lineinfile:
+    dest: "/etc/ssh/sshd_config"
+    regexp: "^PasswordAuthentication no"
+    line: "PasswordAuthentication yes"
+  notify:
+    - "Restart sshd"
+  tags:
+    - allow-password-login
+
+- name: Add visudo entry
+  lineinfile:
+    dest: "/etc/sudoers"
+    state: present
+    line: "testuser ALL=(ALL) NOPASSWD: ALL"
+    validate: "visudo -cf %s"
+  tags:
+    - allow-sudo
+
diff --git a/resources/tools/testbed-setup/ansible/site_aws.yaml b/resources/tools/testbed-setup/ansible/site_aws.yaml
new file mode 100644 (file)
index 0000000..4e23a97
--- /dev/null
@@ -0,0 +1,8 @@
+---
+# file: site_aws.yaml
+
+- import_playbook: tg_aws.yaml
+  tags: tg
+
+- import_playbook: sut_aws.yaml
+  tags: sut
diff --git a/resources/tools/testbed-setup/ansible/sut_aws.yaml b/resources/tools/testbed-setup/ansible/sut_aws.yaml
new file mode 100644 (file)
index 0000000..4c71e29
--- /dev/null
@@ -0,0 +1,23 @@
+---
+# file: sut_aws.yaml
+
+- hosts: sut
+  become: yes
+  become_user: root
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: common
+      tags: common
+    - role: aws
+      tags: aws
+    - role: iperf
+      tags: iperf
+    - role: docker
+      tags: docker
+    - role: dpdk
+      tags: dpdk
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
index be45810..4fd897d 100644 (file)
@@ -2,10 +2,11 @@
 # file: sut_azure.yaml
 
 - hosts: sut
-  remote_user: testuser
   become: yes
   become_user: root
   roles:
+    - role: user_add
+      tags: user_add
     - role: common
       tags: common
     - role: azure
diff --git a/resources/tools/testbed-setup/ansible/templates/topology_aws.j2 b/resources/tools/testbed-setup/ansible/templates/topology_aws.j2
new file mode 100644 (file)
index 0000000..631b0be
--- /dev/null
@@ -0,0 +1,83 @@
+---
+metadata:
+  version: 0.1
+  schema:
+    - resources/topology_schemas/3_node_topology.sch.yaml
+    - resources/topology_schemas/topology.sch.yaml
+  tags: [hw, 3-node]
+
+nodes:
+  TG:
+    type: TG
+    subtype: TREX
+    host: "{{ tg_public_ip }}"
+    arch: x86_64
+    port: 22
+    username: testuser
+    password: Csit1234
+    interfaces:
+      port1:
+        # tg_instance/p1 - 50GE port1 on ENA NIC.
+        mac_address: {{ tg_if1_mac }}
+        pci_address: "0000:00:06.0"
+        link: link1
+        model: Amazon-Nitro-50G
+      port2:
+        # tg_instance/p2 - 50GE port2 on ENA NIC.
+        mac_address: {{ tg_if2_mac }}
+        pci_address: "0000:00:07.0"
+        link: link2
+        model: Amazon-Nitro-50G
+  DUT1:
+    type: DUT
+    host: "{{ dut1_public_ip }}"
+    arch: x86_64
+    port: 22
+    username: testuser
+    password: Csit1234
+    uio_driver: vfio-pci
+    honeycomb:
+      user: admin
+      passwd: admin
+      port: 8183
+      netconf_port: 2831
+    interfaces:
+      port1:
+        # dut1_instance/p1 - 50GE port1 on ENA NIC.
+        mac_address: {{ dut1_if1_mac }}
+        pci_address: "0000:00:06.0"
+        link: link1
+        model: Amazon-Nitro-50G
+      port2:
+        # dut1_instance/p2 - 50GE port2 on ENA NIC.
+        mac_address: {{ dut1_if2_mac }}
+        pci_address: "0000:00:07.0"
+        link: link21
+        model: Amazon-Nitro-50G
+  DUT2:
+    type: DUT
+    host: "{{ dut2_public_ip }}"
+    arch: x86_64
+    port: 22
+    username: testuser
+    password: Csit1234
+    uio_driver: vfio-pci
+    honeycomb:
+      user: admin
+      passwd: admin
+      port: 8183
+      netconf_port: 2831
+    interfaces:
+      port1:
+        # dut2_instance/p1 - 50GE port1 on ENA NIC.
+        mac_address: {{ dut2_if1_mac }}
+        pci_address: "0000:00:06.0"
+        link: link21
+        model: Amazon-Nitro-50G
+      port2:
+        # dut2_instance/p2 - 50GE port1 on ENA NIC.
+        mac_address: {{ dut2_if2_mac }}
+        pci_address: "0000:00:07.0"
+        link: link2
+        model: Amazon-Nitro-50G
+
index ef8b22e..ae0ae52 100644 (file)
@@ -10,7 +10,7 @@ nodes:
   TG:
     type: TG
     subtype: TREX
-    host: "{{ ansible_default_ipv4.address }}"
+    host: "{{ tg_public_ip }}"
     arch: x86_64
     port: 22
     username: testuser
@@ -30,7 +30,7 @@ nodes:
         model: azure-mlx-40g
   DUT1:
     type: DUT
-    host: "{{ ansible_default_ipv4.address }}"
+    host: "{{ dut1_public_ip }}"
     arch: x86_64
     port: 22
     username: testuser
@@ -56,7 +56,7 @@ nodes:
         model: azure-mlx-40g
   DUT2:
     type: DUT
-    host: "{{ ansible_default_ipv4.address }}"
+    host: "{{ dut2_public_ip }}"
     arch: x86_64
     port: 22
     username: testuser
diff --git a/resources/tools/testbed-setup/ansible/tg_aws.yaml b/resources/tools/testbed-setup/ansible/tg_aws.yaml
new file mode 100644 (file)
index 0000000..4b5299f
--- /dev/null
@@ -0,0 +1,27 @@
+---
+# file: tg_aws.yaml
+
+- hosts: tg
+  become: yes
+  become_user: root
+  roles:
+    - role: user_add
+      tags: user_add
+    - role: common
+      tags: common
+    - role: aws
+      tags: aws
+    - role: tg
+      tags: tg
+    - role: iperf
+      tags: iperf
+    - role: trex
+      tags: trex
+    - role: wrk
+      tags: wrk
+    - role: docker
+      tags: docker
+    - role: cleanup
+      tags: cleanup
+    - role: calibration
+      tags: calibration
index 4ab2295..e28d0d3 100644 (file)
@@ -2,10 +2,11 @@
 # file: tg_azure.yaml
 
 - hosts: tg
-  remote_user: testuser
   become: yes
   become_user: root
   roles:
+    - role: user_add
+      tags: user_add
     - role: common
       tags: common
     - role: azure