feat(infra): Add new SPR servers 85/39485/4
author pmikus <peter.mikus@protonmail.ch>
Tue, 5 Sep 2023 08:26:26 +0000 (08:26 +0000)
committer Peter Mikus <peter.mikus@protonmail.ch>
Wed, 6 Sep 2023 08:37:48 +0000 (08:37 +0000)
Signed-off-by: pmikus <peter.mikus@protonmail.ch>
Change-Id: Id90ca8323e6f49b51f19526e1089bf711e4e8182

13 files changed:
docs/content/infrastructure/fdio_csit_testbed_versioning.md
docs/content/infrastructure/fdio_dc_testbed_specifications.md
docs/content/infrastructure/fdio_dc_vexxhost_inventory.md
fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml [new file with mode: 0644]
fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml [new file with mode: 0644]
fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml [new file with mode: 0644]
fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.31.yaml [new file with mode: 0644]
fdio.infra.ansible/inventories/lf_inventory/hosts
fdio.infra.ansible/roles/vpp_device/defaults/main.yaml
fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-spr.sh [new file with mode: 0644]
fdio.infra.ansible/roles/vpp_device/handlers/main.yaml
fdio.infra.ansible/roles/vpp_device/tasks/main.yaml
fdio.infra/pxe/docker-dnsmasq/etc/dnsmasq.conf

index feb611b..8e923f9 100644 (file)
@@ -36,6 +36,14 @@ environment versioning include:
 
 Following is the list of CSIT versions to date:
 
+- Ver. 13 associated with CSIT rls2310 branch (
+  [HW](https://git.fd.io/csit/tree/docs/content/infrastructure/testbed_configuration?h=rls2310),
+  [CSIT](https://git.fd.io/csit/tree/?h=rls2310)
+  ).
+  - Intel NIC 700/800 series firmware upgrade based on DPDK compatibility
+    matrix.
+  - Mellanox 556A/CX6-DX/MCX713106AS-VEAT series firmware upgrade based on DPDK
+    compatibility matrix.
 - Ver. 12 associated with CSIT rls2306 branch (
   [HW](https://git.fd.io/csit/tree/docs/content/infrastructure/testbed_configuration?h=rls2306),
   [CSIT](https://git.fd.io/csit/tree/?h=rls2306)
index 0e9f8ba..3df698a 100644 (file)
@@ -19,7 +19,6 @@ Each server has a LOM (Lights-Out-Management e.g. SM IPMI) and a
 Management port, which are connected to two different VLANs.
 
 #### LOM (IPMI) VLAN
-
    - Subnet: 10.30.50.0/24
    - Gateway: 10.30.50.1
    - Broadcast: 10.30.50.255
@@ -54,8 +53,9 @@ To access these hosts, VPN connection is required.
 11. 2-Node-Zen2            perf   zn2   zn2   1    1    1    0    0    0    0    0    0    0    2    0    0    0
 12. 3-Node-Icelake         perf   icx   icx   2    4    2    0    0    0    0    0    0    0    0    6    0    0
 13. 3-Node-SnowRidge       perf   snr   icx   1    2    .5   0    0    0    0    0    0    0    0    .5   2    0
-13. 2-Node-SapphireRapids  perf   spr   spr   4    4    4    0    0    0    0    0    0    0    0    0    0    8
-                                     Totals: 28   35  18.5   7    1    2    1    2    4    7    2   15.5  2    8
+14. 2-Node-SapphireRapids  perf   spr   spr   4    4    4    0    0    0    0    0    0    0    0    0    0    8
+15. 1-Node-SapphireRapids  nomad  spr   na    4    4    0    0    0    0    0    0    0    0    0    0    0    4
+                                     Totals: 32   39  18.5   7    1    2    1    2    4    7    2   15.5  2   12
 ```
 
 ### 1-Node-Skylake Xeon Intel (1n-skx)
@@ -70,6 +70,12 @@ Each 1-Node-ThunderX2 testbed includes one SUT (Server-Type-E11) with NIC
 ports connected back-to-back ([Server Types](#server-types)).
 Used for FD.io VPP_Device functional driver tests.
 
+### 1-Node-SapphireRapids Xeon Intel (1n-spr)
+
+Each 1-Node-SapphireRapids testbed includes one SUT (Server-Type-H7) with NIC
+ports connected back-to-back ([Server Types](#server-types)).
+Used for FD.io VPP_Device functional driver tests.
+
 ### 1-Node-Cascadelake Xeon Intel (1n-clx)
 
 Each 1-Node-Cascadelake testbed includes one SUT (Server-Type-C1) with
@@ -215,8 +221,8 @@ FD.io CSIT lab contains following server types:
             - PCIe Slot4 3b:00.xx: x710-4p10GE Intel.
             - PCIe Slot9 5e:00.xx: empty.
         - Numa1: (x16, x16, x16 PCIe3.0 lanes)
-            - PCIe Slot6 86:00.xx: empty.
-            - PCIe Slot8 af:00.xx: empty.
+            - PCIe Slot6 86:00.xx: e810-2CQDA2-2p100GE Intel.
+            - PCIe Slot8 af:00.xx: e810-2CQDA2-2p100GE Intel.
             - PCIe Slot10 d8:00.xx: empty.
 
 3. **Server-Type-B7**: Purpose - Ixia PerfectStorm One Appliance TG for FD.io TCP/IP performance tests.
@@ -740,6 +746,27 @@ FD.io CSIT lab contains following server types:
             - PCIe Slot9 af:00.xx: e810-2CQDA2-2p100GE Intel.
             - PCIe Slot11 d8:00.xx: empty.
 
+30. **Server-Type-H7**: Purpose - SapphireRapids SUT for FD.io VPP_Device functional tests.
+    - Quantity: 2.
+    - Physical connectivity:
+        - IPMI and host management ports.
+        - NIC ports connected into 1-node topologies.
+    - Main HW configuration:
+        - Chassis: SuperMicro SYS-741GE-TNRT.
+        - Motherboard: Super X13DEG-QT-P.
+        - Processors: 2* Intel Platinum 8462Y+ 32 core 2.8 GHz 300W TDP.
+        - RAM Memory: 16* 32GB DDR5-4800.
+        - Disks: 2* 960GB SATA SSD.
+    - NICs configuration:
+        - Numa0: (x16, x16, x16 PCIe5.0 lanes)
+            - PCIe Slot2 18:00.xx: e810-2CQDA2-2p100GE Intel.
+            - PCIe Slot4 3b:00.xx: e810-2CQDA2-2p100GE Intel.
+            - PCIe Slot10 5e:00.xx: empty.
+        - Numa1: (x16, x16, x16 PCIe5.0 lanes)
+            - PCIe Slot7 86:00.xx: empty.
+            - PCIe Slot9 af:00.xx: empty.
+            - PCIe Slot11 d8:00.xx: empty.
+
 ## Testbeds Configuration
 
 ### 1-Node-Skylake (1n-skx)
@@ -759,6 +786,10 @@ FD.io CSIT lab contains following server types:
         - s1-t11-sut1-c4/p2 - 10GE-port2 x710-4p10GE.
         - s1-t11-sut1-c4/p3 - 10GE-port3 x710-4p10GE.
         - s1-t11-sut1-c4/p4 - 10GE-port4 x710-4p10GE.
+        - s1-t11-sut1-c5/p1 - 100GE-port1 e810-2p100GE.
+        - s1-t11-sut1-c5/p2 - 100GE-port2 e810-2p100GE.
+        - s1-t11-sut1-c6/p1 - 100GE-port1 e810-2p100GE.
+        - s1-t11-sut1-c6/p2 - 100GE-port2 e810-2p100GE.
 - SUT [Server-Type-B6]:
     - testbedname: testbed12.
     - hostname: s2-t12-sut1.
@@ -773,6 +804,10 @@ FD.io CSIT lab contains following server types:
         - s2-t12-sut1-c4/p2 - 10GE-port2 x710-4p10GE.
         - s2-t12-sut1-c4/p3 - 10GE-port3 x710-4p10GE.
         - s2-t12-sut1-c4/p4 - 10GE-port4 x710-4p10GE.
+        - s2-t12-sut1-c5/p1 - 100GE-port1 e810-2p100GE.
+        - s2-t12-sut1-c5/p2 - 100GE-port2 e810-2p100GE.
+        - s2-t12-sut1-c6/p1 - 100GE-port1 e810-2p100GE.
+        - s2-t12-sut1-c6/p2 - 100GE-port2 e810-2p100GE.
 ```
 
 ### 1-Node-ThunderX2 (1n-tx2)
@@ -808,6 +843,31 @@ FD.io CSIT lab contains following server types:
         - s56-t14-sut1-c26/p2 - 40GE-port2 ConnectX5-2p10/25GE Mellanox.
 ```
 
+### 1-Node-SapphireRapids (1n-spr)
+
+```
+- SUT [Server-Type-H7]:
+    - testbedname: testbed15.
+    - hostname: s30-t15-sut1.
+    - IPMI IP: 10.30.50.30
+    - Host IP: 10.30.51.30
+    - portnames:
+        - s30-t15-sut1-c1/p1 - 100GE-port1 e810-2p100GE.
+        - s30-t15-sut1-c1/p2 - 100GE-port2 e810-2p100GE.
+        - s30-t15-sut1-c2/p1 - 100GE-port1 e810-2p100GE.
+        - s30-t15-sut1-c2/p2 - 100GE-port2 e810-2p100GE.
+- SUT [Server-Type-H7]:
+    - testbedname: testbed16.
+    - hostname: s31-t16-sut1.
+    - IPMI IP: 10.30.50.31
+    - Host IP: 10.30.51.31
+    - portnames:
+        - s31-t16-sut1-c1/p1 - 100GE-port1 e810-2p100GE.
+        - s31-t16-sut1-c1/p2 - 100GE-port2 e810-2p100GE.
+        - s31-t16-sut1-c2/p1 - 100GE-port1 e810-2p100GE.
+        - s31-t16-sut1-c2/p2 - 100GE-port2 e810-2p100GE.
+```
+
 ### 1-Node-Cascadelake (1n-clx)
 
 ```
@@ -1622,6 +1682,21 @@ Note: There is no IPMI. Serial console is accessible via VIRL2 and VIRL3 USB.
         - s56-t14-sut1-c8/p2 - s56-t14-sut1-c26/p2.
 ```
 
+### 1-Node-SapphireRapids (1n-spr)
+
+```
+- testbed15:
+    - ring1 100GE-ports e810-2p100GE:
+        - s30-t15-sut1-c1/p1 to s30-t15-sut1-c2/p1.
+    - ring2 100GE-ports e810-2p100GE:
+        - s30-t15-sut1-c1/p2 to s30-t15-sut1-c2/p2.
+- testbed16:
+    - ring1 100GE-ports e810-2p100GE:
+        - s31-t16-sut1-c1/p1 to s31-t16-sut1-c2/p1.
+    - ring2 100GE-ports e810-2p100GE:
+        - s31-t16-sut1-c1/p2 to s31-t16-sut1-c2/p2.
+```
+
 ### 2-Node-IxiaPS1L47 (2n-ps1)
 
 ```
index 140c74f..a3e6d75 100644 (file)
@@ -82,20 +82,24 @@ Captured inventory data:
 
 ### Rack YUL1-11 (3016.11)
 
- **name**              | **oper-status** | **testbed-id** | **role**   | **model**      | **s/n**         | **mgmt-ip4** | **ipmi-ip4** | **rackid** | **rackunit**
------------------------|-----------------|----------------|------------|----------------|-----------------|--------------|--------------|------------|--------------
- yul1-11-lb6m          | up              | switch         | arm-uplink | ?              | ?               | ?            | ?            | 3016.11    | u48
- yul1-11-lf-tor-switch | up              | switch         | uplink     | ?              | ?               | ?            | ?            | 3016.11    | u47
- mtl1-6-7050QX-32      | up              | switch         | uplink     | ?              | ?               | ?            | ?            | 3016.11    | u46
- fdio-marvell-dev      | up              | N/A            | dev        | ThunderX-88XX  | N/A             | 10.30.51.38  | 10.30.50.38  | 3016.11    | u45
- s56-t14-sut1          | up              | t14            | 1n-tx2     | ThunderX2-9980 | N/A             | 10.30.51.71  | 10.30.50.71  | 3016.11    | u41-u42
- s78-t38-sut1          | up              | t38            | 3n-icx     | SYS-740GP-TNRT | C7470KL03P50450 | 10.30.51.78  | 10.30.50.78  | 3016.11    | u31-u34
- s79-t38-sut2          | up              | t38            | 3n-icx     | SYS-740GP-TNRT | C7470KL07P50297 | 10.30.51.79  | 10.30.50.79  | 3016.11    | u27-u30
- s80-t38-tg1           | up              | t38            | 3n-icx     | SYS-740GP-TNRT | C7470KL03P50454 | 10.30.51.80  | 10.30.50.80  | 3016.11    | u23-u26
- s55-t13-sut1          | up              | t13            | 1n-tx2     | ThunderX2-9980 | N/A             | 10.30.51.70  | 10.30.50.70  | 3016.11    | u11-u12
- s62-t34-sut1          | up              | t34            | 3n-alt     | WIWYNN         | 04000059N0SC    | 10.30.51.72  | 10.30.50.72  | 3016.11    | u9-u10
- s63-t34-sut2          | up              | t34            | 3n-alt     | WIWYNN         | 0390003EN0SC    | 10.30.51.73  | 10.30.50.73  | 3016.11    | u7-u8
- s64-t34-tg1           | up              | t34            | 3n-alt     | SYS-740GP-TNRT | C7470KK40P50249 | 10.30.51.74  | 10.30.50.74  | 3016.11    | u3-u6
+ **name**              | **oper-status** | **testbed-id** | **role**     | **model**      | **s/n**         | **mgmt-ip4** | **ipmi-ip4** | **rackid** | **rackunit**
+-----------------------|-----------------|----------------|--------------|----------------|-----------------|--------------|--------------|------------|--------------
+ yul1-11-lb6m          | up              | switch         | arm-uplink   | ?              | ?               | ?            | ?            | 3016.11    | u48
+ yul1-11-lf-tor-switch | up              | switch         | uplink       | ?              | ?               | ?            | ?            | 3016.11    | u47
+ mtl1-6-7050QX-32      | up              | switch         | uplink       | ?              | ?               | ?            | ?            | 3016.11    | u46
+ fdio-marvell-dev      | up              | N/A            | dev          | ThunderX-88XX  | N/A             | 10.30.51.38  | 10.30.50.38  | 3016.11    | u45
+ s21-nomad             | up              | nomad          | nomad-client | SYS-741GE-TNRT | ??              | 10.30.51.21  | 10.30.50.21  | 3016.11    | u41-u44
+ s22-nomad             | up              | nomad          | nomad-client | SYS-741GE-TNRT | ??              | 10.30.51.22  | 10.30.50.22  | 3016.11    | u37-u40
+ s78-t38-sut1          | up              | t38            | 3n-icx       | SYS-740GP-TNRT | C7470KL03P50450 | 10.30.51.78  | 10.30.50.78  | 3016.11    | u31-u34
+ s79-t38-sut2          | up              | t38            | 3n-icx       | SYS-740GP-TNRT | C7470KL07P50297 | 10.30.51.79  | 10.30.50.79  | 3016.11    | u27-u30
+ s80-t38-tg1           | up              | t38            | 3n-icx       | SYS-740GP-TNRT | C7470KL03P50454 | 10.30.51.80  | 10.30.50.80  | 3016.11    | u23-u26
+ s30-t15-sut1          | up              | nomad          | nomad-client | SYS-741GE-TNRT | ??              | 10.30.51.30  | 10.30.50.30  | 3016.11    | u19-u22
+ s31-t16-sut1          | up              | nomad          | nomad-client | SYS-741GE-TNRT | ??              | 10.30.51.31  | 10.30.50.31  | 3016.11    | u15-u18
+ s56-t14-sut1          | up              | t14            | 1n-tx2       | ThunderX2-9980 | N/A             | 10.30.51.71  | 10.30.50.71  | 3016.11    | u13-u14
+ s55-t13-sut1          | up              | t13            | 1n-tx2       | ThunderX2-9980 | N/A             | 10.30.51.70  | 10.30.50.70  | 3016.11    | u11-u12
+ s62-t34-sut1          | up              | t34            | 3n-alt       | WIWYNN         | 04000059N0SC    | 10.30.51.72  | 10.30.50.72  | 3016.11    | u9-u10
+ s63-t34-sut2          | up              | t34            | 3n-alt       | WIWYNN         | 0390003EN0SC    | 10.30.51.73  | 10.30.50.73  | 3016.11    | u7-u8
+ s64-t34-tg1           | up              | t34            | 3n-alt       | SYS-740GP-TNRT | C7470KK40P50249 | 10.30.51.74  | 10.30.50.74  | 3016.11    | u3-u6
 
 ### Rack YUL1-12 (3016.12)
 
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml
new file mode 100644 (file)
index 0000000..fbc2071
--- /dev/null
@@ -0,0 +1,87 @@
+---
+# file: host_vars/10.30.51.21.yaml
+
+hostname: "s21-nomad"
+inventory_ipmi_hostname: "10.30.50.21"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKM1D1kkGX1l7fSma1MOgw2yLI7zJHwTCcfVROQ4hh7r peter.mikus@protonmail.ch"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCG7Shfr7ASXXwpazYDGFzChGl7i4HgY9n81GTwc17B/O19IiJdrxFcBQH19HMuTFtWU4d9bQ6xgzz2pojBN13L3pEsStCHovDlEYbvfRxI2l2hcAAop1j1E4izHAS9IvCoy1AgWqBg6tsfP5mzOwGbSbcdI9ADRKIHgDTVbHg9SqKed27bNOLU0u3/5ra2Oar/tVIW37geEqFV/nHIBZ03Y/mszvXP/t/hP5bgJIGJKkzTjLd1aqEcuGmubW+wTQnVnrhFB87dw91gPj6BVyV0+7Vt1wrvPKqP2sGJhojAMSrBQBySnlrYgEg00bwDgNGFevatfG9+nTId+nhoKBkXya3MjSp4HwrGqGcij3/h7ovlau3/iRhkqlSeqenaNm4zKTAXRTnb60j2WKa6im0zdqJX98anp4mhjE8xHhmmfZV3vRT8mtY4hF/lg79miXFHpWH97bZV6r/D9qj1HWI/laJfOC5MOJdRcLETwtsUNMHeHtVnY3yu0XFYNcl2Xwajtnz3waF7vn1QHvAM9p878+JrO/IEUqEc0yZWVvlhZ7Krx1pS+APoMy8097MYDGFzFTkYSstKLGbgm/S7dEiWuSVxmMUxW7JYO3gHrQ3f1EvAYh2UFdWy76Dzr5II9UpVwOwF+HL/Oy8Sk77bPaK+tn7Kh4Tx7WWE0+EOAgElQ== ayourtch@ayourtch-lnx"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0GXoSGDZ95TE/TT3kf4ZS3Tiso0UO3MVhqAqZ/F6LOvLyqnMPfhxPz1XpHsDikxvKgwhZvdBm1dWbKkPsD7jtw0PGphQO8QuEwBd2ZMvxZ4Qg6lNDUl5x3zRO2nkbKpcqnOugGLTtXP+yfw/wfQ2HNFLDP9gE90xegsPT83PmRUntQlhbS3ByHcCSUScC+Y1heZXuoKNyrmUY46lxkKsNfhx8sQKo0YhB21atV/mcAQbAaO2LggmaQYGtWizqPNGWIRsi9W8ZYnKva67c3Pbv/TTfaqmrNCwOXJ8G9oL+/3MlKbl3b5mYlTs2a/e9yVgMNwUZVX7aiHpgPgaVjL6j swdev@BradyBunch-MacMini.local"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmo2YP4t/f58AAYH72rOe5VjYjk3wb/GY3aJEd5s9WspLhnsY0xBL67C+4kMq6VmQQvg0cUB8RJSFX1tUXMHCorVWnXNHkYomx0MCPcPUpVHuRyEqczYJ2pzgZsPzoEfw9E5hTrAiGzYFNAS/NOSavapVMDZxa1zsX7+sWQvJfmVtJWpcTQb0TkoWXRsy0YM3PYfUbYvK7lR3lGwyhwCcJn0WwWGreFB7bIok0poqqX5BgJ/okZuvC8II+UfuGoBHNhg49oqST1JlNi9gRqDNmLWkHRaneWZiF+Y2hdN3PRCdkt1x3eU0R+cdi5kPKslb6P0lsjOEA7fDLlq1+T2z1"
+sshd_disable_password_login: true
+
+# Nomad settings.
+nomad_version: "1.6.1"
+nomad_certificates:
+  - src: "{{ file_nomad_ca_pem }}"
+    dest: "{{ nomad_tls_ca_file }}"
+  - src: "{{ file_nomad_server_pem }}"
+    dest: "{{ nomad_tls_cert_file }}"
+  - src: "{{ file_nomad_server_key_pem }}"
+    dest: "{{ nomad_tls_key_file }}"
+nomad_datacenter: "yul1"
+nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+nomad_node_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: true
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+  fingerprint.network.disallow_link_local: true
+nomad_service_mgr: "systemd"
+nomad_consul_use_ssl: false
+nomad_use_tls: false
+nomad_tls_http: false
+nomad_tls_rpc: false
+nomad_use_vault: false
+nomad_retry_servers:
+  - "10.30.51.26"
+  - "10.30.51.24"
+  - "10.30.51.25"
+nomad_servers:
+  - "10.30.51.26:4647"
+  - "10.30.51.24:4647"
+  - "10.30.51.25:4647"
+
+# Consul settings.
+nomad_use_consul: true
+consul_certificates:
+  - src: "{{ file_consul_ca_pem }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ file_consul_server_0_pem }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ file_consul_server_0_key_pem }}"
+    dest: "{{ consul_key_file }}"
+consul_verify_incoming: false
+consul_verify_outgoing: false
+consul_vefify_server_hostname: false
+consul_allow_tls: false
+consul_datacenter: "yul1"
+consul_node_role: "client"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ ansible_hostname }}"
+consul_retry_join: true
+consul_retry_servers:
+  - "10.30.51.26"
+  - "10.30.51.24"
+  - "10.30.51.25"
+consul_service_mgr: "systemd"
+
+# Vault settings.
+vault_version: "1.13.1"
+
+# Docker daemon settings.
+docker_daemon:
+  dns: ["172.17.0.1"]
+  dns-opts: []
+  dns-search: ["{{ansible_hostname}}"]
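
The builder host_vars above register s21-nomad (and, in the next file, s22-nomad) as Nomad and Consul clients of the existing yul1 servers. A quick post-provisioning sanity check is to ask one of the servers whether the new client joined -- a minimal sketch using the standard Nomad and Consul CLIs; the HTTP ports (4646/8500) and the chosen server address are assumptions based on the defaults and the server list above:

```
# Ask a Nomad server whether the new builder registered as a client.
nomad node status -address=http://10.30.51.24:4646 | grep s21-nomad

# Ask a Consul server whether the new agent joined the yul1 datacenter.
consul members -http-addr=http://10.30.51.24:8500 | grep s21-nomad
```
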
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml
new file mode 100644 (file)
index 0000000..8d8f807
--- /dev/null
@@ -0,0 +1,87 @@
+---
+# file: host_vars/10.30.51.22.yaml
+
+hostname: "s22-nomad"
+inventory_ipmi_hostname: "10.30.50.22"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKM1D1kkGX1l7fSma1MOgw2yLI7zJHwTCcfVROQ4hh7r peter.mikus@protonmail.ch"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCG7Shfr7ASXXwpazYDGFzChGl7i4HgY9n81GTwc17B/O19IiJdrxFcBQH19HMuTFtWU4d9bQ6xgzz2pojBN13L3pEsStCHovDlEYbvfRxI2l2hcAAop1j1E4izHAS9IvCoy1AgWqBg6tsfP5mzOwGbSbcdI9ADRKIHgDTVbHg9SqKed27bNOLU0u3/5ra2Oar/tVIW37geEqFV/nHIBZ03Y/mszvXP/t/hP5bgJIGJKkzTjLd1aqEcuGmubW+wTQnVnrhFB87dw91gPj6BVyV0+7Vt1wrvPKqP2sGJhojAMSrBQBySnlrYgEg00bwDgNGFevatfG9+nTId+nhoKBkXya3MjSp4HwrGqGcij3/h7ovlau3/iRhkqlSeqenaNm4zKTAXRTnb60j2WKa6im0zdqJX98anp4mhjE8xHhmmfZV3vRT8mtY4hF/lg79miXFHpWH97bZV6r/D9qj1HWI/laJfOC5MOJdRcLETwtsUNMHeHtVnY3yu0XFYNcl2Xwajtnz3waF7vn1QHvAM9p878+JrO/IEUqEc0yZWVvlhZ7Krx1pS+APoMy8097MYDGFzFTkYSstKLGbgm/S7dEiWuSVxmMUxW7JYO3gHrQ3f1EvAYh2UFdWy76Dzr5II9UpVwOwF+HL/Oy8Sk77bPaK+tn7Kh4Tx7WWE0+EOAgElQ== ayourtch@ayourtch-lnx"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0GXoSGDZ95TE/TT3kf4ZS3Tiso0UO3MVhqAqZ/F6LOvLyqnMPfhxPz1XpHsDikxvKgwhZvdBm1dWbKkPsD7jtw0PGphQO8QuEwBd2ZMvxZ4Qg6lNDUl5x3zRO2nkbKpcqnOugGLTtXP+yfw/wfQ2HNFLDP9gE90xegsPT83PmRUntQlhbS3ByHcCSUScC+Y1heZXuoKNyrmUY46lxkKsNfhx8sQKo0YhB21atV/mcAQbAaO2LggmaQYGtWizqPNGWIRsi9W8ZYnKva67c3Pbv/TTfaqmrNCwOXJ8G9oL+/3MlKbl3b5mYlTs2a/e9yVgMNwUZVX7aiHpgPgaVjL6j swdev@BradyBunch-MacMini.local"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmo2YP4t/f58AAYH72rOe5VjYjk3wb/GY3aJEd5s9WspLhnsY0xBL67C+4kMq6VmQQvg0cUB8RJSFX1tUXMHCorVWnXNHkYomx0MCPcPUpVHuRyEqczYJ2pzgZsPzoEfw9E5hTrAiGzYFNAS/NOSavapVMDZxa1zsX7+sWQvJfmVtJWpcTQb0TkoWXRsy0YM3PYfUbYvK7lR3lGwyhwCcJn0WwWGreFB7bIok0poqqX5BgJ/okZuvC8II+UfuGoBHNhg49oqST1JlNi9gRqDNmLWkHRaneWZiF+Y2hdN3PRCdkt1x3eU0R+cdi5kPKslb6P0lsjOEA7fDLlq1+T2z1"
+sshd_disable_password_login: true
+
+# Nomad settings.
+nomad_version: "1.6.1"
+nomad_certificates:
+  - src: "{{ file_nomad_ca_pem }}"
+    dest: "{{ nomad_tls_ca_file }}"
+  - src: "{{ file_nomad_server_pem }}"
+    dest: "{{ nomad_tls_cert_file }}"
+  - src: "{{ file_nomad_server_key_pem }}"
+    dest: "{{ nomad_tls_key_file }}"
+nomad_datacenter: "yul1"
+nomad_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+nomad_node_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "builder"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: true
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+  fingerprint.network.disallow_link_local: true
+nomad_service_mgr: "systemd"
+nomad_consul_use_ssl: false
+nomad_use_tls: false
+nomad_tls_http: false
+nomad_tls_rpc: false
+nomad_use_vault: false
+nomad_retry_servers:
+  - "10.30.51.23"
+  - "10.30.51.26"
+  - "10.30.51.25"
+nomad_servers:
+  - "10.30.51.23:4647"
+  - "10.30.51.26:4647"
+  - "10.30.51.25:4647"
+
+# Consul settings.
+nomad_use_consul: true
+consul_certificates:
+  - src: "{{ file_consul_ca_pem }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ file_consul_server_1_pem }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ file_consul_server_1_key_pem }}"
+    dest: "{{ consul_key_file }}"
+consul_verify_incoming: false
+consul_verify_outgoing: false
+consul_vefify_server_hostname: false
+consul_allow_tls: false
+consul_datacenter: "yul1"
+consul_node_role: "client"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ ansible_hostname }}"
+consul_retry_join: true
+consul_retry_servers:
+  - "10.30.51.23"
+  - "10.30.51.26"
+  - "10.30.51.25"
+consul_service_mgr: "systemd"
+
+# Vault settings.
+vault_version: "1.13.1"
+
+# Docker daemon settings.
+docker_daemon:
+  dns: ["172.17.0.1"]
+  dns-opts: []
+  dns-search: ["{{ansible_hostname}}"]
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml
new file mode 100644 (file)
index 0000000..8c2e764
--- /dev/null
@@ -0,0 +1,96 @@
+---
+# file: host_vars/10.30.51.30.yaml
+
+hostname: "s30-t15-sut1"
+grub:
+  hugepagesz: "2M"
+  hugepages: 32768
+  iommu: "on"
+  vfio.enable_unsafe_noiommu_mode: 1
+inventory_ipmi_hostname: "10.30.50.30"
+vfs_data_file: "csit-initialize-vfs-spr.sh"
+cpu_microarchitecture: "sapphirerapids"
+
+intel_800_matrix: "dpdk23.07"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKM1D1kkGX1l7fSma1MOgw2yLI7zJHwTCcfVROQ4hh7r peter.mikus@protonmail.ch"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCG7Shfr7ASXXwpazYDGFzChGl7i4HgY9n81GTwc17B/O19IiJdrxFcBQH19HMuTFtWU4d9bQ6xgzz2pojBN13L3pEsStCHovDlEYbvfRxI2l2hcAAop1j1E4izHAS9IvCoy1AgWqBg6tsfP5mzOwGbSbcdI9ADRKIHgDTVbHg9SqKed27bNOLU0u3/5ra2Oar/tVIW37geEqFV/nHIBZ03Y/mszvXP/t/hP5bgJIGJKkzTjLd1aqEcuGmubW+wTQnVnrhFB87dw91gPj6BVyV0+7Vt1wrvPKqP2sGJhojAMSrBQBySnlrYgEg00bwDgNGFevatfG9+nTId+nhoKBkXya3MjSp4HwrGqGcij3/h7ovlau3/iRhkqlSeqenaNm4zKTAXRTnb60j2WKa6im0zdqJX98anp4mhjE8xHhmmfZV3vRT8mtY4hF/lg79miXFHpWH97bZV6r/D9qj1HWI/laJfOC5MOJdRcLETwtsUNMHeHtVnY3yu0XFYNcl2Xwajtnz3waF7vn1QHvAM9p878+JrO/IEUqEc0yZWVvlhZ7Krx1pS+APoMy8097MYDGFzFTkYSstKLGbgm/S7dEiWuSVxmMUxW7JYO3gHrQ3f1EvAYh2UFdWy76Dzr5II9UpVwOwF+HL/Oy8Sk77bPaK+tn7Kh4Tx7WWE0+EOAgElQ== ayourtch@ayourtch-lnx"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0GXoSGDZ95TE/TT3kf4ZS3Tiso0UO3MVhqAqZ/F6LOvLyqnMPfhxPz1XpHsDikxvKgwhZvdBm1dWbKkPsD7jtw0PGphQO8QuEwBd2ZMvxZ4Qg6lNDUl5x3zRO2nkbKpcqnOugGLTtXP+yfw/wfQ2HNFLDP9gE90xegsPT83PmRUntQlhbS3ByHcCSUScC+Y1heZXuoKNyrmUY46lxkKsNfhx8sQKo0YhB21atV/mcAQbAaO2LggmaQYGtWizqPNGWIRsi9W8ZYnKva67c3Pbv/TTfaqmrNCwOXJ8G9oL+/3MlKbl3b5mYlTs2a/e9yVgMNwUZVX7aiHpgPgaVjL6j swdev@BradyBunch-MacMini.local"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmo2YP4t/f58AAYH72rOe5VjYjk3wb/GY3aJEd5s9WspLhnsY0xBL67C+4kMq6VmQQvg0cUB8RJSFX1tUXMHCorVWnXNHkYomx0MCPcPUpVHuRyEqczYJ2pzgZsPzoEfw9E5hTrAiGzYFNAS/NOSavapVMDZxa1zsX7+sWQvJfmVtJWpcTQb0TkoWXRsy0YM3PYfUbYvK7lR3lGwyhwCcJn0WwWGreFB7bIok0poqqX5BgJ/okZuvC8II+UfuGoBHNhg49oqST1JlNi9gRqDNmLWkHRaneWZiF+Y2hdN3PRCdkt1x3eU0R+cdi5kPKslb6P0lsjOEA7fDLlq1+T2z1"
+sshd_disable_password_login: true
+
+# Nomad settings.
+nomad_version: "1.6.1"
+nomad_certificates:
+  - src: "{{ file_nomad_ca_pem }}"
+    dest: "{{ nomad_tls_ca_file }}"
+  - src: "{{ file_nomad_client_pem }}"
+    dest: "{{ nomad_tls_cert_file }}"
+  - src: "{{ file_nomad_client_key_pem }}"
+    dest: "{{ nomad_tls_key_file }}"
+nomad_datacenter: "yul1"
+nomad_node_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "csit"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_service_mgr: "systemd"
+nomad_consul_use_ssl: false
+nomad_use_tls: false
+nomad_tls_http: false
+nomad_tls_rpc: false
+nomad_use_vault: false
+nomad_retry_servers:
+  - "10.30.51.23"
+  - "10.30.51.24"
+  - "10.30.51.25"
+nomad_servers:
+  - "10.30.51.23:4647"
+  - "10.30.51.24:4647"
+  - "10.30.51.25:4647"
+
+# Consul settings.
+nomad_use_consul: true
+consul_certificates:
+  - src: "{{ file_consul_agent_ca_pem }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ file_consul_server_0_pem }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ file_consul_server_0_key_pem }}"
+    dest: "{{ consul_key_file }}"
+consul_verify_incoming: false
+consul_verify_outgoing: false
+consul_vefify_server_hostname: false
+consul_allow_tls: false
+consul_datacenter: "yul1"
+consul_node_role: "client"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ ansible_hostname }}"
+consul_retry_join: true
+consul_retry_servers:
+  - "10.30.51.23"
+  - "10.30.51.24"
+  - "10.30.51.25"
+consul_service_mgr: "systemd"
+
+# Vault settings.
+vault_version: "1.13.1"
+
+# Docker settings.
+docker_daemon:
+  default-shm-size: "1073741824"
+  dns: ["172.17.0.1"]
+  dns-opts: []
+  dns-search: ["{{ ansible_hostname }}"]
+  host: ["172.17.0.1:/var/run/docker.sock"]
diff --git a/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.31.yaml b/fdio.infra.ansible/inventories/lf_inventory/host_vars/10.30.51.31.yaml
new file mode 100644 (file)
index 0000000..51d7236
--- /dev/null
@@ -0,0 +1,96 @@
+---
+# file: host_vars/10.30.51.31.yaml
+
+hostname: "s31-t16-sut1"
+grub:
+  hugepagesz: "2M"
+  hugepages: 32768
+  iommu: "on"
+  vfio.enable_unsafe_noiommu_mode: 1
+inventory_ipmi_hostname: "10.30.50.31"
+vfs_data_file: "csit-initialize-vfs-spr.sh"
+cpu_microarchitecture: "sapphirerapids"
+
+intel_800_matrix: "dpdk23.07"
+
+# User management.
+users:
+  - username: localadmin
+    groups: [adm, sudo]
+    password: "$6$FIsbVDQR$5D0wgufOd2FtnmOiRNsGlgg6Loh.0x3dWSj72DSQnqisSyE9DROfgSgA6s0yxDwz4Jd5SRTXiTKuRYuSQ5POI1"
+    ssh_key:
+      - "ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIKM1D1kkGX1l7fSma1MOgw2yLI7zJHwTCcfVROQ4hh7r peter.mikus@protonmail.ch"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDCG7Shfr7ASXXwpazYDGFzChGl7i4HgY9n81GTwc17B/O19IiJdrxFcBQH19HMuTFtWU4d9bQ6xgzz2pojBN13L3pEsStCHovDlEYbvfRxI2l2hcAAop1j1E4izHAS9IvCoy1AgWqBg6tsfP5mzOwGbSbcdI9ADRKIHgDTVbHg9SqKed27bNOLU0u3/5ra2Oar/tVIW37geEqFV/nHIBZ03Y/mszvXP/t/hP5bgJIGJKkzTjLd1aqEcuGmubW+wTQnVnrhFB87dw91gPj6BVyV0+7Vt1wrvPKqP2sGJhojAMSrBQBySnlrYgEg00bwDgNGFevatfG9+nTId+nhoKBkXya3MjSp4HwrGqGcij3/h7ovlau3/iRhkqlSeqenaNm4zKTAXRTnb60j2WKa6im0zdqJX98anp4mhjE8xHhmmfZV3vRT8mtY4hF/lg79miXFHpWH97bZV6r/D9qj1HWI/laJfOC5MOJdRcLETwtsUNMHeHtVnY3yu0XFYNcl2Xwajtnz3waF7vn1QHvAM9p878+JrO/IEUqEc0yZWVvlhZ7Krx1pS+APoMy8097MYDGFzFTkYSstKLGbgm/S7dEiWuSVxmMUxW7JYO3gHrQ3f1EvAYh2UFdWy76Dzr5II9UpVwOwF+HL/Oy8Sk77bPaK+tn7Kh4Tx7WWE0+EOAgElQ== ayourtch@ayourtch-lnx"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCXUbbhesOpvPF+KI8nq4TXvEO/Un1aU/ehZ9clCyw9C40xjDkX2BlcX8WeHxFHe7fjFaCd07Vg73rn/3M9uNDnDxvjH1GQ0twvi3iBTO4PkHBBfGF9qnE8MYzno6FvlsVKLuUuPbfm8kbOQ+ZDfdXq6gdtXh0hSYYkqC1heNPCNsqaakkB99Edyle+Ot0V7cpW+Yo2wo98KuX/cgUEhVoA8QnNVE7zaWcjSXBZEteoA4gLpAbV6p67/d6H/2ykHTidBViYTEsHco56tJoA4nTPuAupDOLBcWXgF5TAN6z1aCn2JA1DDfniLakgrZ5oVj2qHhUmbxQAtnKQfHADjqzV jlinkes@jlinkes"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD0GXoSGDZ95TE/TT3kf4ZS3Tiso0UO3MVhqAqZ/F6LOvLyqnMPfhxPz1XpHsDikxvKgwhZvdBm1dWbKkPsD7jtw0PGphQO8QuEwBd2ZMvxZ4Qg6lNDUl5x3zRO2nkbKpcqnOugGLTtXP+yfw/wfQ2HNFLDP9gE90xegsPT83PmRUntQlhbS3ByHcCSUScC+Y1heZXuoKNyrmUY46lxkKsNfhx8sQKo0YhB21atV/mcAQbAaO2LggmaQYGtWizqPNGWIRsi9W8ZYnKva67c3Pbv/TTfaqmrNCwOXJ8G9oL+/3MlKbl3b5mYlTs2a/e9yVgMNwUZVX7aiHpgPgaVjL6j swdev@BradyBunch-MacMini.local"
+      - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCmo2YP4t/f58AAYH72rOe5VjYjk3wb/GY3aJEd5s9WspLhnsY0xBL67C+4kMq6VmQQvg0cUB8RJSFX1tUXMHCorVWnXNHkYomx0MCPcPUpVHuRyEqczYJ2pzgZsPzoEfw9E5hTrAiGzYFNAS/NOSavapVMDZxa1zsX7+sWQvJfmVtJWpcTQb0TkoWXRsy0YM3PYfUbYvK7lR3lGwyhwCcJn0WwWGreFB7bIok0poqqX5BgJ/okZuvC8II+UfuGoBHNhg49oqST1JlNi9gRqDNmLWkHRaneWZiF+Y2hdN3PRCdkt1x3eU0R+cdi5kPKslb6P0lsjOEA7fDLlq1+T2z1"
+sshd_disable_password_login: true
+
+# Nomad settings.
+nomad_version: "1.6.1"
+nomad_certificates:
+  - src: "{{ file_nomad_ca_pem }}"
+    dest: "{{ nomad_tls_ca_file }}"
+  - src: "{{ file_nomad_client_pem }}"
+    dest: "{{ nomad_tls_cert_file }}"
+  - src: "{{ file_nomad_client_key_pem }}"
+    dest: "{{ nomad_tls_key_file }}"
+nomad_datacenter: "yul1"
+nomad_node_name: "{{ hostname }}-{{ ansible_architecture }}"
+nomad_node_role: "client"
+nomad_node_class: "csit"
+nomad_options:
+  driver.raw_exec.enable: 1
+  docker.cleanup.image: false
+  docker.privileged.enabled: true
+  docker.volumes.enabled: true
+  driver.whitelist: "docker,raw_exec,exec"
+nomad_service_mgr: "systemd"
+nomad_consul_use_ssl: false
+nomad_use_tls: false
+nomad_tls_http: false
+nomad_tls_rpc: false
+nomad_use_vault: false
+nomad_retry_servers:
+  - "10.30.51.23"
+  - "10.30.51.24"
+  - "10.30.51.25"
+nomad_servers:
+  - "10.30.51.23:4647"
+  - "10.30.51.24:4647"
+  - "10.30.51.25:4647"
+
+# Consul settings.
+nomad_use_consul: true
+consul_certificates:
+  - src: "{{ file_consul_agent_ca_pem }}"
+    dest: "{{ consul_ca_file }}"
+  - src: "{{ file_consul_server_0_pem }}"
+    dest: "{{ consul_cert_file }}"
+  - src: "{{ file_consul_server_0_key_pem }}"
+    dest: "{{ consul_key_file }}"
+consul_verify_incoming: false
+consul_verify_outgoing: false
+consul_vefify_server_hostname: false
+consul_allow_tls: false
+consul_datacenter: "yul1"
+consul_node_role: "client"
+consul_encrypt: "Y4T+5JGx1C3l2NFBBvkTWQ=="
+consul_node_name: "{{ ansible_hostname }}"
+consul_retry_join: true
+consul_retry_servers:
+  - "10.30.51.23"
+  - "10.30.51.24"
+  - "10.30.51.25"
+consul_service_mgr: "systemd"
+
+# Vault settings.
+vault_version: "1.13.1"
+
+# Docker settings.
+docker_daemon:
+  default-shm-size: "1073741824"
+  dns: ["172.17.0.1"]
+  dns-opts: []
+  dns-search: ["{{ ansible_hostname }}"]
+  host: ["172.17.0.1:/var/run/docker.sock"]
index b43fdad..f7141f9 100644 (file)
@@ -46,12 +46,16 @@ all:
         10.30.51.69: #s27-t211-sut1 - thunderx2 9975
     vpp_device:
       hosts:
+        10.30.51.30: #s30-t15-sut1 - sapphirerapids
+        10.30.51.31: #s31-t16-sut1 - sapphirerapids
         10.30.51.50: #s1-t11-sut1 - skylake
         10.30.51.51: #s2-t12-sut1 - skylake
         10.30.51.70: #s55-t13-sut1 - thunderx2 9980
         10.30.51.71: #s56-t14-sut1 - thunderx2 9980
     nomad:
       hosts:
+        10.30.51.21: #s21-nomad - sapphirerapids
+        10.30.51.22: #s22-nomad - sapphirerapids
         10.30.51.23: #s23-nomad - skylake
         10.30.51.24: #s24-nomad - skylake
         10.30.51.25: #s25-nomad - skylake
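
With the new addresses present in the lf_inventory hosts file, the fresh machines can be provisioned on their own by limiting a play to just those hosts -- a hedged example; the playbook name (site.yaml) is illustrative, not necessarily the repository's actual entry point:

```
# Target only the newly added SPR hosts (playbook name is illustrative).
ansible-playbook -i fdio.infra.ansible/inventories/lf_inventory/hosts \
  site.yaml --limit "10.30.51.21,10.30.51.22,10.30.51.30,10.30.51.31"
```
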
index 9816d70..377b910 100644 (file)
@@ -1,5 +1,5 @@
 ---
-# file: roles/vpp_device/defaults/main.yaml
+# file: defaults/main.yaml
 
 iavf_target_dir: "/opt"
 iavf_version: "4.1.1"
diff --git a/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-spr.sh b/fdio.infra.ansible/roles/vpp_device/files/csit-initialize-vfs-spr.sh
new file mode 100644 (file)
index 0000000..25a9f5d
--- /dev/null
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+
+# Copyright (c) 2023 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Add Intel Corporation Ethernet Controller 10G X550T to blacklist.
+PCI_BLACKLIST=($(lspci -Dmmd ':1563:0200' | cut -f1 -d' '))
+# Add Intel Corporation Ethernet Controller E810-C for 100GbE QSFP to whitelist.
+PCI_WHITELIST+=($(lspci -Dmmd ':1592:0200' | cut -f1 -d' '))
+
+# See http://pci-ids.ucw.cz/v2.2/pci.ids for more info.
+
+declare -A PF_INDICES
+# Intel NICs
+PF_INDICES["0000:86:00.0"]=0
+PF_INDICES["0000:af:00.0"]=0
index 29342ae..3ac80cc 100644 (file)
@@ -1,21 +1,21 @@
 ---
-# file: roles/vpp_device/handlers/main.yaml
+# file: handlers/main.yaml
 
-- name: Start csit-initialize-vfs.service
-  systemd:
+- name: "Start csit-initialize-vfs.service"
+  ansible.builtin.systemd:
     enabled: true
-    state: started
-    name: csit-initialize-vfs.service
+    state: "started"
+    name: "csit-initialize-vfs.service"
   tags:
     - start-vf-service
 
-- name: Update GRUB
-  command: update-grub
+- name: "Update GRUB"
+  ansible.builtin.command: "update-grub"
   tags:
     - update-grub
 
-- name: Reboot server
-  reboot:
+- name: "Reboot server"
+  ansible.builtin.reboot:
     reboot_timeout: 3600
   tags:
     - reboot-server
index ec20d2a..88d4ddb 100644 (file)
@@ -1,15 +1,15 @@
 ---
-# file: roles/vpp_device/tasks/main.yaml
+# file: tasks/main.yaml
 
-- name: ThunderX2 Kernel Modules Config
-  import_tasks: thunderx2.yaml
+- name: "ThunderX2 Kernel Modules Config"
+  import_tasks: "thunderx2.yaml"
   when:
     - cpu_microarchitecture == "thunderx2"
   tags:
     - conf-kernel-modules
 
-- name: Disable IPv6 Router Advertisement
-  sysctl:
+- name: "Disable IPv6 Router Advertisement"
+  ansible.builtin.sysctl:
     name: "net.ipv6.conf.default.accept_ra"
     value: "0"
     state: "present"
@@ -18,8 +18,8 @@
   tags:
     - set-sysctl
 
-- name: Disable IPv6 Router Advertisement
-  sysctl:
+- name: "Disable IPv6 Router Advertisement"
+  ansible.builtin.sysctl:
     name: "net.ipv6.conf.all.accept_ra"
     value: "0"
     state: "present"
@@ -28,8 +28,8 @@
   tags:
     - set-sysctl
 
-- name: Disable IPv6 MLDv1 interval
-  sysctl:
+- name: "Disable IPv6 MLDv1 interval"
+  ansible.builtin.sysctl:
     name: "net.ipv6.conf.all.mldv1_unsolicited_report_interval"
     value: "0"
     state: "present"
@@ -38,8 +38,8 @@
   tags:
     - set-sysctl
 
-- name: Disable IPv6 MLDv2 interval
-  sysctl:
+- name: "Disable IPv6 MLDv2 interval"
+  ansible.builtin.sysctl:
     name: "net.ipv6.conf.all.mldv2_unsolicited_report_interval"
     value: "0"
     state: "present"
@@ -48,8 +48,8 @@
   tags:
     - set-sysctl
 
-- name: Disable IPv6 Autoconf
-  sysctl:
+- name: "Disable IPv6 Autoconf"
+  ansible.builtin.sysctl:
     name: "net.ipv6.conf.all.autoconf"
     value: "0"
     state: "present"
@@ -58,8 +58,8 @@
   tags:
     - set-sysctl
 
-- name: Disable IPv6 MC Forwarding
-  sysctl:
+- name: "Disable IPv6 MC Forwarding"
+  ansible.builtin.sysctl:
     name: "net.ipv6.conf.all.mc_forwarding"
     value: "0"
     state: "present"
@@ -68,8 +68,8 @@
   tags:
     - set-sysctl
 
-- name: Copy csit-initialize-vfs.sh
-  copy:
+- name: "Copy csit-initialize-vfs.sh"
+  ansible.builtin.copy:
     src: "files/csit-initialize-vfs.sh"
     dest: "/usr/local/bin/"
     owner: "root"
@@ -78,8 +78,8 @@
   tags:
     - copy-vf-script
 
-- name: Copy csit-initialize-vfs-data.sh
-  copy:
+- name: "Copy csit-initialize-vfs-data.sh"
+  ansible.builtin.copy:
     src: "files/{{ vfs_data_file }}"
     dest: "/usr/local/bin/csit-initialize-vfs-data.sh"
     owner: "root"
@@ -89,8 +89,8 @@
   when:
     - vfs_data_file is defined
 
-- name: Copy Default csit-initialize-vfs-data.sh
-  copy:
+- name: "Copy Default csit-initialize-vfs-data.sh"
+  ansible.builtin.copy:
     src: "files/csit-initialize-vfs-default.sh"
     dest: "/usr/local/bin/csit-initialize-vfs-data.sh"
     owner: "root"
   when:
     - vfs_data_file is not defined
 
-- name: Start csit-initialize-vfs.service
-  copy:
+- name: "Start csit-initialize-vfs.service"
+  ansible.builtin.copy:
     src: "files/csit-initialize-vfs.service"
     dest: "/etc/systemd/system/"
     owner: "root"
   tags:
     - start-vf-service
 
-- meta: flush_handlers
+- ansible.builtin.meta: "flush_handlers"
 
-- name: Set Hugepages In GRUB
-  lineinfile:
+- name: "Set Hugepages In GRUB"
+  ansible.builtin.lineinfile:
     path: "/etc/default/grub"
     state: "present"
     regexp: "^GRUB_CMDLINE_LINUX="
index a6d6cd0..562a59a 100755 (executable)
@@ -37,12 +37,16 @@ dhcp-option=yul1_2,option:dns-server,1.1.1.1
 dhcp-option=yul1_2,option:dns-server,8.8.8.8
 
 # Hosts DHCP settings yul1_1 subnet.
+dhcp-host=?,10.30.51.21,s21-nomad,infinite # eno1
+dhcp-host=?,10.30.51.22,s22-nomad,infinite # eno1
 dhcp-host=ac:1f:6b:5a:6a:d4,10.30.51.23,s23-nomad,infinite # eno1
 dhcp-host=ac:1f:6b:5a:6a:b4,10.30.51.24,s24-nomad,infinite # eno1
 dhcp-host=ac:1f:6b:5a:6b:08,10.30.51.25,s25-nomad,infinite # eno1
 dhcp-host=ac:1f:6b:ac:d9:2e,10.30.51.26,s26-nomad,infinite # eno1
 dhcp-host=ac:1f:6b:8b:55:d8,10.30.51.27,s27-nomad,infinite # eno1
 dhcp-host=ac:1f:6b:8a:8d:0a,10.30.51.28,s28-nomad,infinite # eno1
+dhcp-host=?,10.30.51.30,s30-t15-sut1,infinite # eno1
+dhcp-host=?,10.30.51.31,s31-t16-sut1,infinite # eno1
 dhcp-host=a0:a3:3b:c1:14:28,10.30.51.36,s17-t33-sut1,infinite # enahisic2i0
 dhcp-host=a0:a3:3b:c1:12:fa,10.30.51.37,s18-t33-sut2,infinite # enahisic2i0
 dhcp-host=40:8d:5c:e7:b2:6e,10.30.51.38,fdio-marvell-dev,infinite # enP2p1s0v0