CSIT-1437 Create portable host-setup document 21/17721/5
authorPeter Mikus <pmikus@cisco.com>
Wed, 20 Feb 2019 12:19:07 +0000 (12:19 +0000)
committerPeter Mikus <pmikus@cisco.com>
Fri, 22 Feb 2019 16:05:38 +0000 (16:05 +0000)
- Ansible simplified and optimized
- Porting global variables into single file
- Rework the README.md
- Tested outside LF environment

Change-Id: I124cffa5c4510edf365e8b558da0c8c498f55d50
Signed-off-by: Peter Mikus <pmikus@cisco.com>
77 files changed:
README.md
resources/libraries/bash/function/setup.sh
resources/tools/testbed-setup/README.md [deleted file]
resources/tools/testbed-setup/README.rst [new file with mode: 0644]
resources/tools/testbed-setup/ansible/group_vars/all.yaml [deleted file]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.16.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.16.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.17.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.17.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.18.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.18.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.20.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.20.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.21.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.21.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.22.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.22.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.24.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.24.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.25.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.25.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.26.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.26.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.28.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.28.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.29.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.29.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.30.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.30.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.36.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.36.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.37.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.37.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.44.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.44.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.45.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.45.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.46.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.46.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.47.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.47.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.48.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.48.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.49.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.49.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.50.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.50.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.51.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.51.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.52.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.52.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.53.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.53.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.54.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.54.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.55.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.55.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.56.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.56.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.57.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.57.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.58.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.58.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.59.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.59.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/host_vars/10.30.51.60.yaml [moved from resources/tools/testbed-setup/ansible/host_vars/10.30.51.60.yaml with 100% similarity]
resources/tools/testbed-setup/ansible/inventories/lf_inventory/hosts [moved from resources/tools/testbed-setup/ansible/production with 87% similarity]
resources/tools/testbed-setup/ansible/inventories/sample_inventory/group_vars/all.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/inventories/sample_inventory/hosts [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/common/tasks/main.yaml
resources/tools/testbed-setup/ansible/roles/common/tasks/ubuntu.yaml
resources/tools/testbed-setup/ansible/roles/sut/tasks/ubuntu.yaml
resources/tools/testbed-setup/ansible/roles/tg/tasks/main.yaml
resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu.yaml [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml [deleted file]
resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml [deleted file]
resources/tools/testbed-setup/ansible/roles/tg_sut/files/netplan_config
resources/tools/testbed-setup/ansible/roles/tg_sut/files/requirements.txt
resources/tools/testbed-setup/ansible/roles/tg_sut/tasks/ubuntu.yaml
resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2 [deleted file]
resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.proxy.http [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.proxy.https [new file with mode: 0644]
resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64 [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0 [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml [deleted file]
resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml [deleted file]
resources/tools/testbed-setup/ansible/site.yaml
resources/tools/testbed-setup/ansible/virl.yaml [deleted file]

index f878980..27ae7cb 100644 (file)
--- a/README.md
+++ b/README.md
@@ -179,7 +179,7 @@ describing local VPP Device functional testing.
 
 ### Physical Testbed
 
-[Physical testbed preparation](resources/tools/testbed-setup/README.md)
+[Physical testbed preparation](resources/tools/testbed-setup/README.rst)
 documentation is describing PXE and Ansible setup process. All the software
 requirements for running Performance Teste are part of Ansible playbooks.
 
index 0b863aa..e481c53 100644 (file)
@@ -27,7 +27,7 @@ function ansible_host () {
     pushd "${TOOLS_DIR}"/testbed-setup/ansible || die "Pushd failed!"
     ssh-copy-id -o StrictHostKeyChecking=no testuser@"${HOST}"
     ansible-playbook --vault-id vault_pass --extra-vars '@vault.yml' \
-        --inventory production site.yaml --limit ${HOST} || {
+        --inventory lf_inventory site.yaml --limit ${HOST} || {
         die "Failed to ansible host!"
     }
     popd || die "Popd failed!"
diff --git a/resources/tools/testbed-setup/README.md b/resources/tools/testbed-setup/README.md
deleted file mode 100644 (file)
index b4f376b..0000000
+++ /dev/null
@@ -1,160 +0,0 @@
-# Testbed Setup
-
-## Introduction
-
-This directoctory contains the *high-level* process to set up a hardware
-machine as a CSIT testbed, either for use as a physical testbed host or
-as a VIRL server.
-
-Code in this directory is NOT executed as part of a regular CSIT test case
-but is stored here merely for archiving and documentation purposes.
-
-
-## Setting up a hardware host
-
-Documentation below is just bullet points and assumes and understanding
-of PXE boot and ansible.
-
-This process is specific for LF lab, and both examples given here as
-well as associated code, are based on the assumption that they are run
-in LF environment. If run elsewhere, changes will be required to IP addresses
-and other parameters.
-
-The process below assumes that there is a host used for boostrapping (referred
-to as "PXE boostrap server" below), and that the directory containig this README
-is available on the PXE bootstrap server in ~testuser/host-setup.
-
-### Prepare the PXE bootstrap server when there is no http server AMD64
-
-  - `sudo apt-get install isc-dhcp-server tftpd-hpa nginx-light ansible`
-  - edit dhcpd.conf and place it to /etc/dhcp/
-  - `sudo cp dhcpd.cfg /etc/dhcp/`
-  - `sudo service isc-dhcp-server restart`
-  - `cd ~testuser/host-setup`
-  - `sudo mkdir /mnt/cdrom`
-  - Ubuntu Bionic
-    - `wget 'http://cdimage.ubuntu.com/ubuntu/releases/18.04/release/ubuntu-18.04-server-amd64.iso'`
-    - `sudo mount -o loop ubuntu-18.04-server-amd64.iso /mnt/cdrom/`
-  - `sudo cp -r /mnt/cdrom/install/netboot/* /var/lib/tftpboot/`
-  - figure out where nginx will look for files on the filesystem when
-    responding to HTTP requests. The configuration is in one of the
-    files in /etc/nginx/conf.d/, /etc/nginx/sites-enabled/ or in
-    /etc/nginx/nginx.conf under section server/root. Save the path to WWW_ROOT
-  - `sudo mkdir -p ${WWW_ROOT}/download/ubuntu`
-  - `sudo cp -r /mnt/cdrom/* ${WWW_ROOT}/download/ubuntu/`
-  - `sudo cp /mnt/cdrom/ubuntu/isolinux/ldlinux.c32 /var/lib/tftpboot`
-  - `sudo cp /mnt/cdrom/ubuntu/isolinux/libcom32.c32 /var/lib/tftpboot`
-  - `sudo cp /mnt/cdrom/ubuntu/isolinux/libutil.c32 /var/lib/tftpboot`
-  - `sudo cp /mnt/cdrom/ubuntu/isolinux/chain.c32 /var/lib/tftpboot`
-  - `sudo umount /mnt/cdrom`
-  - edit ks.cfg and replace IP address with that of your PXE bootstrap server and subdir in /var/www (in this case /download)
-  - `sudo cp ks.cfg ${WWW_ROOT}/download/ks.cfg`
-  - edit boot-screens_txt.cfg and replace IP address with that of your PXE bootstrap server and subdir in /var/www (in this case /download)
-  - `sudo cp boot-screens_txt.cfg /var/lib/tftpboot/ubuntu-installer/amd64/boot-screens/txt.cfg`
-  - `sudo cp syslinux.cfg /var/lib/tftpboot/ubuntu-installer/amd64/boot-screens/syslinux.cfg`
-
-### New testbed host - manual preparation
-
-- set CIMC address
-- set CIMC username, password and hostname
-- set IPMI address
-- set IPMI username, password and hostname
-
-### Bootstrap the host
-
-Convenient way to re-stage host via script:
-
-  `sudo ./bootstrap_setup_testbed.sh <linux_ip> <mgmt_ip> <username> <pass>`
-
-Optional: CIMC - From PXE boostrap server
-
-  - Initialize args.ip: Power-Off, reset BIOS defaults, Enable console redir, get LOM MAC addr
-  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -i`
-  - Adjust BIOS settings
-  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -s '<biosVfIntelHyperThreadingTech rn="Intel-HyperThreading-Tech" vpIntelHyperThreadingTech="disabled" />' -s '<biosVfEnhancedIntelSpeedStepTech rn="Enhanced-Intel-SpeedStep-Tech" vpEnhancedIntelSpeedStepTech="disabled" />' -s '<biosVfIntelTurboBoostTech rn="Intel-Turbo-Boost-Tech" vpIntelTurboBoostTech="disabled" />'`
-  - Add MAC address to DHCP (/etc/dhcp/dhcpd.conf)
-  - If RAID is not created in CIMC. Create RAID array. Reboot.
-      - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d --wipe`
-      - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -r -rl 1 -rs <disk size> -rd '[1,2]'`
-        Alternatively, create the RAID array manually.
-  - Reboot server with boot from PXE (restart immediately)
-  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -pxe`
-  - Set the next boot from HDD (without restart) Execute while Ubuntu install is running.
-  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -hdd`
-
-Optional: IPMI - From PXE boostrap server
-
-    - Get MAC address of LAN0
-    - `ipmitool -U ADMIN -H $HOST_ADDRESS raw 0x30 0x21 | tail -c 18`
-    - Add MAC address to DHCP (/etc/dhcp/dhcpd.conf)
-    - Reboot into PXE for next boot only
-    - `ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN chassis bootdev pxe`
-    - `ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN power reset`
-    - For live watching SOL (Serial-over-LAN console)
-    - `ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN sol activate`
-    - `ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN sol deactivate`
-
-When installation is finished:
-
-  - Copy ssh keys for no pass access: `ssh-copy-id 10.30.51.x`
-  - Clone CSIT actual repo: `git clone https://gerrit.fd.io/r/csit`
-  - Go to ansible directory: `cd csit/resources/tools/testbed-setup/ansible`
-  - Edit production file and uncomment servers that are supposed to be
-    installed.
-  - Run ansible on selected hosts:
-    `ansible-playbook --vault-id vault_pass --extra-vars '@vault.yml' --inventory production site.yaml`
-
-For non-VIRL hosts, stop here.
-
-### VIRL installation
-
-After the host has rebooted:
-
-  - `ansible-playbook 02-virl-bootstrap.yaml`
-  - ssh to host
-      - `sudo -s`
-      - `cd virl-bootstrap`
-      - `./virl-bootstrap-wrapper`
-
-        This command will error out when run the first time, as the VIRL host is not yet licensed.
-
-        Make sure we contact all three VIRL SALT masters:
-
-      - `for a in 1 2 4 ; do sudo salt-call --master us-${a}.virl.info test.ping ; done`
-
-      - Contact the VIRL team, provide the hostname and domain (linuxfoundation.org), and ask them
-        to accept the key
-
-      - After the key has been accepted, verify that connectivity with the SALT master is now OK:
-
-        `for a in 1 2 4 ; do sudo salt-call --master us-${a}.virl.info test.ping ; done`
-
-      - `./virl-bootstrap-wrapper`
-      - `reboot`
-
-After reboot, ssh to host again
-  - as VIRL user, NOT AS ROOT:
-     - `vinstall all`
-     - `sudo reboot`
-
-After reboot, ssh to host again
-  - as VIRL user:
-      - `sudo salt-call state.sls virl.routervms.all`
-      - `sudo salt-call state.sls virl.vmm.vmmall`
-
-Back on the PXE bootstrap server:
-
-  - obtain the current server disk image and place it into
-    `files/virl-server-image/` as `server.qcow2`
-
-    TO-DO: Need to find a place to store this image
-
-  - `ansible-playbook 03-virl-post-install.yaml`
-
-  - Run the following command ONLY ONCE. Otherwise it will create
-    duplicates of the VIRL disk image:
-
-    `ansible-playbook 04-disk-image.yaml`
-
-The VIRL host should now be operational. Test, and when ready, create a
-~jenkins-in/status file with the appropriate status.
diff --git a/resources/tools/testbed-setup/README.rst b/resources/tools/testbed-setup/README.rst
new file mode 100644 (file)
index 0000000..738dffa
--- /dev/null
@@ -0,0 +1,246 @@
+Testbed Setup
+=============
+
+Introduction
+------------
+
+This directory contains the *high-level* process to set up a hardware machine
+as a CSIT testbed, either for use as a physical performance testbed host or as
+a vpp_device host.
+
+Code in this directory is NOT executed as part of a regular CSIT test case
+but is stored here for ad-hoc installation of HW, archiving and documentation
+purposes.
+
+Setting up a hardware host
+--------------------------
+
+Documentation below is step by step tutorial and assumes an understanding of PXE
+boot and Ansible and managing physical hardware via CIMC or IPMI.
+
+This process is not specific for LF lab, but associated files and code, is based
+on the assumption that it runs in LF environment. If run elsewhere, changes
+will be required in following files:
+
+#. Inventory directory: `ansible/inventories/sample_inventory/`
+#. Inventory files: `ansible/inventories/sample_inventory/hosts`
+#. Kickseed file: `pxe/ks.cfg`
+#. DHCPD file: `pxe/dhcpd.conf`
+#. Bootscreen file: `boot-screens_txt.cfg`
+
+The process below assumes that there is a host used for bootstrapping (referred
+to as "PXE bootstrap server" below).
+
+Prepare the PXE bootstrap server when there is no http server AMD64
+```````````````````````````````````````````````````````````````````
+
+#. Clone the csit repo:
+
+   .. code-block:: bash
+
+      git clone https://gerrit.fd.io/r/csit
+      cd csit/resources/tools/testbed-setup/pxe
+
+#. Setup prerequisites (isc-dhcp-server tftpd-hpa nginx-light ansible):
+
+   .. code-block:: bash
+
+      sudo apt-get install isc-dhcp-server tftpd-hpa nginx-light ansible
+
+#. Edit dhcpd.cfg:
+
+   .. code-block:: bash
+
+      sudo cp dhcpd.cfg /etc/dhcp/
+      sudo service isc-dhcp-server restart
+      sudo mkdir /mnt/cdrom
+
+#. Download Ubuntu 18.04 LTS - X86_64:
+
+   .. code-block:: bash
+
+      wget http://cdimage.ubuntu.com/ubuntu/releases/18.04/release/ubuntu-18.04-server-amd64.iso
+      sudo mount -o loop ubuntu-18.04-server-amd64.iso /mnt/cdrom/
+      sudo cp -r /mnt/cdrom/install/netboot/* /var/lib/tftpboot/
+
+      # Figure out root folder for NGINX webserver. The configuration is in one
+      # of the files in /etc/nginx/conf.d/, /etc/nginx/sites-enabled/ or in
+      # /etc/nginx/nginx.conf under section server/root. Save the path to
+      # variable WWW_ROOT.
+      sudo mkdir -p ${WWW_ROOT}/download/ubuntu
+      sudo cp -r /mnt/cdrom/* ${WWW_ROOT}/download/ubuntu/
+      sudo cp /mnt/cdrom/ubuntu/isolinux/ldlinux.c32 /var/lib/tftpboot
+      sudo cp /mnt/cdrom/ubuntu/isolinux/libcom32.c32 /var/lib/tftpboot
+      sudo cp /mnt/cdrom/ubuntu/isolinux/libutil.c32 /var/lib/tftpboot
+      sudo cp /mnt/cdrom/ubuntu/isolinux/chain.c32 /var/lib/tftpboot
+      sudo umount /mnt/cdrom
+
+#. Edit ks.cfg and replace IP address of PXE bootstrap server and subdir in
+   `/var/www` (in this case `/var/www/download`):
+
+   .. code-block:: bash
+
+      sudo cp ks.cfg ${WWW_ROOT}/download/ks.cfg
+
+#. Edit boot-screens_txt.cfg and replace IP address of PXE bootstrap server and
+   subdir in `/var/www` (in this case `/var/www/download`):
+
+   .. code-block:: bash
+
+      sudo cp boot-screens_txt.cfg /var/lib/tftpboot/ubuntu-installer/amd64/boot-screens/txt.cfg
+      sudo cp syslinux.cfg /var/lib/tftpboot/ubuntu-installer/amd64/boot-screens/syslinux.cfg
+
+New testbed host - manual preparation
+`````````````````````````````````````
+
+Set CIMC/IPMI address, username, password, hostname and BIOS settings.
+
+Bootstrap the host
+``````````````````
+
+Convenient way to re-stage host via script:
+
+.. code-block:: bash
+
+   sudo ./bootstrap_setup_testbed.sh <linux_ip> <mgmt_ip> <username> <pass>
+
+Optional: CIMC - From PXE bootstrap server
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+#. Initialize args.ip: Power-Off, reset BIOS defaults, Enable console redir, get
+   LOM MAC addr:
+
+   .. code-block:: bash
+
+     ./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -i
+
+#. Adjust BIOS settings:
+
+   .. code-block:: bash
+
+      ./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -s '<biosVfIntelHyperThreadingTech rn="Intel-HyperThreading-Tech" vpIntelHyperThreadingTech="disabled" />' -s '<biosVfEnhancedIntelSpeedStepTech rn="Enhanced-Intel-SpeedStep-Tech" vpEnhancedIntelSpeedStepTech="disabled" />' -s '<biosVfIntelTurboBoostTech rn="Intel-Turbo-Boost-Tech" vpIntelTurboBoostTech="disabled" />'
+
+#. If RAID is not created in CIMC. Create RAID array. Reboot:
+
+   .. code-block:: bash
+
+      ./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d --wipe
+      ./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -r -rl 1 -rs <disk size> -rd '[1,2]'
+
+#. Reboot server with boot from PXE (restart immediately):
+
+   .. code-block:: bash
+
+      ./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -pxe
+
+#. Set the next boot from HDD (without restart). Execute while Ubuntu install
+   is running:
+
+   .. code-block:: bash
+
+      ./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -hdd
+
+Optional: IPMI - From PXE bootstrap server
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+#. Get MAC address of LAN0:
+
+   .. code-block:: bash
+
+      ipmitool -U ADMIN -H $HOST_ADDRESS raw 0x30 0x21 | tail -c 18
+
+#. Reboot into PXE for next boot only:
+
+   .. code-block:: bash
+
+      ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN chassis bootdev pxe
+      ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN power reset
+
+#. For live watching SOL (Serial-over-LAN console):
+
+   .. code-block:: bash
+
+      ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN sol activate
+      ipmitool -I lanplus -H $HOST_ADDRESS -U ADMIN sol deactivate
+
+Ansible machine
+~~~~~~~~~~~~~~~
+
+Prerequisites for running Ansible
+..................................
+
+- Ansible can run on any machine that has direct SSH connectivity to target
+  machines that will be provisioned (does not need to be PXE server).
+- User `testuser` with password `Csit1234` is created with home folder
+  initialized on all target machines that will be provisioned.
+- SSH keys for no pass access are copied to all target machines that will be
+  provisioned: `ssh-copy-id x.x.x.x`.
+- Inventory directory is created with same or similar content as
+  `inventories/lf_inventory` in `inventories/` directory (`sample_inventory`
+  can be used).
+- Group variables in `ansible/inventories/<inventory>/group_vars/all.yaml` are
+  adjusted per environment. Special attention to `proxy_env` variable.
+- Host variables in `ansible/inventories/<inventory>/host_vars/x.x.x.x.yaml` are
+  defined.
+
+Ansible structure
+.................
+
+Ansible is defining roles `TG` (Traffic Generator), `SUT` (System Under Test),
+`VPP_DEVICE` (vpp_device host for functional testing).
+
+Each Host has corresponding Ansible role mapped and is applied only if Host
+with that role is present in inventory file. As a part of optimization the role
+`common` contains Ansible tasks applied for all Hosts.
+
+.. note::
+
+   You may see `[WARNING]: Could not match supplied host pattern, ignoring:
+   <role>` in case you have not defined hosts for that particular role.
+
+Ansible structure is described below:
+
+.. code-block:: bash
+
+   .
+   ├── inventories                     # Contains all inventories.
+   │   ├── sample_inventory            # Sample, free for edits outside of LF.
+   │   │   ├── group_vars              # Variables applied for all hosts.
+   │   │   │   └── all.yaml
+   │   │   ├── hosts                   # Inventory list with sample hosts.
+   │   │   └── host_vars               # Variables applied for single host only.
+   │   │       └── 1.1.1.1.yaml        # Sample host with IP 1.1.1.1
+   │   └── lf_inventory                # Linux Foundation inventory.
+   │       ├── group_vars
+   │       │   └── all.yaml
+   │       ├── hosts
+   │       └── host_vars
+   ├── roles                           # CSIT roles.
+   │   ├── common                      # Role applied for all hosts.
+   │   ├── sut                         # Role applied for all SUTs only.
+   │   ├── tg                          # Role applied for all TGs only.
+   │   ├── tg_sut                      # Role applied for TGs and SUTs only.
+   │   └── vpp_device                  # Role applied for vpp_device only.
+   ├── site.yaml                       # Main playbook.
+   ├── sut.yaml                        # SUT playbook.
+   ├── tg.yaml                         # TG playbook.
+   ├── vault_pass                      # Main password for vault.
+   ├── vault.yml                       # Ansible vault storage.
+   └── vpp_device.yaml                 # vpp_device playbook.
+
+Running Ansible
+...............
+
+#. Go to ansible directory: `cd csit/resources/tools/testbed-setup/ansible`
+#. Run ansible on selected hosts:
+   `ansible-playbook --vault-id vault_pass --extra-vars '@vault.yml' --inventory <inventory_file> site.yaml --limit x.x.x.x`
+
+.. note::
+
+   In case you want to provision only particular role. You can use tags: `tg`,
+   `sut`, `vpp_device`.
+
+Reboot hosts
+------------
+
+Manually reboot hosts after Ansible provisioning succeeded.
diff --git a/resources/tools/testbed-setup/ansible/group_vars/all.yaml b/resources/tools/testbed-setup/ansible/group_vars/all.yaml
deleted file mode 100644 (file)
index c646e06..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
----
-# file: group_vars/all.yaml
-
-ansible_python_interpreter: "/usr/bin/python2.7"
-ansible_become_pass: '{{ inventory_sudo_pass }}'
diff --git a/resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml b/resources/tools/testbed-setup/ansible/inventories/lf_inventory/group_vars/all.yaml
new file mode 100644 (file)
index 0000000..c464ad8
--- /dev/null
@@ -0,0 +1,42 @@
+---
+# file: group_vars/all.yaml
+
+# General settings
+ansible_python_interpreter: '/usr/bin/python2.7'
+ansible_become_pass: '{{ inventory_sudo_pass }}'
+# Domain is used in /etc/hosts file on target machine.
+domain: 'linuxfoundation.org'
+# DNS is used in /etc/netplan/01-netcfg.yaml
+dns_servers: "[ 199.204.44.24, 199.204.47.54 ]"
+
+# Proxy settings: Uncomment and fill the proper values. These variables will be
+# set globally by writing into /etc/environment file on target machine.
+#proxy_env:
+#  http_proxy: http://proxy.com:80
+#  HTTP_PROXY: http://proxy.com:80
+#  https_proxy: http://proxy.com:80
+#  HTTPS_PROXY: http://proxy.com:80
+#  ftp_proxy: http://proxy.com:80
+#  FTP_PROXY: http://proxy.com:80
+#  no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+#  NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+
+# Docker settings.
+docker_edition: 'ce'
+docker_channel: 'edge'
+docker_version: '18.05.0'
+docker_users: ['testuser']
+docker_repository: 'deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}'
+docker_apt_package_name: '{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}'
+docker_daemon_environment_http:
+  - 'HTTP_PROXY={{ proxy_env.http_proxy }}'
+  - 'NO_PROXY={{ proxy_env.no_proxy }}'
+docker_daemon_environment_https:
+  - 'HTTPS_PROXY={{ proxy_env.https_proxy }}'
+  - 'NO_PROXY={{ proxy_env.no_proxy }}'
+
+# Kubernetes settings.
+kubernetes_channel: 'main'
+kubernetes_version: '1.11.0-00'
+kubernetes_repository: 'deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}'
+kubernetes_apt_package_name: '{{ kubernetes_version }}'
@@ -15,11 +15,6 @@ all:
             10.30.51.22: null #t2-sut2
             10.30.51.25: null #t3-sut1
             10.30.51.26: null #t3-sut2
-#        virl: # WARNING, DO NOT ENABLE VIRL UNLESS YOU KNOW WHAT YOU ARE DOING
-#          hosts:
-#            10.30.51.28: null #t4-virl1
-#            10.30.51.29: null #t4-virl2
-#            10.30.51.30: null #t4-virl3
     skylake:
       children:
         tg:
diff --git a/resources/tools/testbed-setup/ansible/inventories/sample_inventory/group_vars/all.yaml b/resources/tools/testbed-setup/ansible/inventories/sample_inventory/group_vars/all.yaml
new file mode 100644 (file)
index 0000000..8ffac21
--- /dev/null
@@ -0,0 +1,42 @@
+---
+# file: group_vars/all.yaml
+
+# General settings
+ansible_python_interpreter: '/usr/bin/python2.7'
+ansible_become_pass: '{{ inventory_sudo_pass }}'
+# Domain is used in /etc/hosts file on target machine.
+domain: 'linuxfoundation.org'
+# DNS is used in /etc/netplan/01-netcfg.yaml on target machine.
+dns_servers: "[ 199.204.44.24, 199.204.47.54 ]"
+
+# Proxy settings: Uncomment and fill the proper values. These variables will be
+# set globally by writing into /etc/environment file on target machine.
+#proxy_env:
+#  http_proxy: http://proxy.com:80
+#  HTTP_PROXY: http://proxy.com:80
+#  https_proxy: http://proxy.com:80
+#  HTTPS_PROXY: http://proxy.com:80
+#  ftp_proxy: http://proxy.com:80
+#  FTP_PROXY: http://proxy.com:80
+#  no_proxy: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+#  NO_PROXY: localhost,127.0.0.1,{{ ansible_default_ipv4.address }}
+
+# Docker settings.
+docker_edition: 'ce'
+docker_channel: 'edge'
+docker_version: '18.05.0'
+docker_users: ['testuser']
+docker_repository: 'deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}'
+docker_apt_package_name: '{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}'
+docker_daemon_environment_http:
+  - 'HTTP_PROXY={{ proxy_env.http_proxy }}'
+  - 'NO_PROXY={{ proxy_env.no_proxy }}'
+docker_daemon_environment_https:
+  - 'HTTPS_PROXY={{ proxy_env.https_proxy }}'
+  - 'NO_PROXY={{ proxy_env.no_proxy }}'
+
+# Kubernetes settings.
+kubernetes_channel: 'main'
+kubernetes_version: '1.11.0-00'
+kubernetes_repository: 'deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}'
+kubernetes_apt_package_name: '{{ kubernetes_version }}'
diff --git a/resources/tools/testbed-setup/ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml b/resources/tools/testbed-setup/ansible/inventories/sample_inventory/host_vars/1.1.1.1.yaml
new file mode 100644 (file)
index 0000000..6cefdb4
--- /dev/null
@@ -0,0 +1,6 @@
+---
+# file: host_vars/x.x.x.x.yaml
+
+hostname: "t1-tg1"
+isolcpus: "1-17,19-35"
+cfs_cpus: "0,18"
diff --git a/resources/tools/testbed-setup/ansible/inventories/sample_inventory/hosts b/resources/tools/testbed-setup/ansible/inventories/sample_inventory/hosts
new file mode 100644 (file)
index 0000000..da5d7f1
--- /dev/null
@@ -0,0 +1,11 @@
+all:
+  children:
+    skylake: # Architecture: (options) skylake, haswell, taishan...
+      children:
+        tg:
+          hosts:
+            1.1.1.1: null #t1-tg
+        sut:
+          hosts:
+            2.2.2.2: null #t1-sut1
+            3.3.3.3: null #t1-sut2
index 38d8188..1481536 100644 (file)
@@ -1,6 +1,14 @@
 ---
 # file: roles/common/tasks/main.yaml
 
+- name: Add permanent proxy settings
+  lineinfile:
+    path: "/etc/environment"
+    state: "present"
+    line: "{{ item.key }}={{ item.value }}"
+  with_dict: "{{ proxy_env }}"
+  when: proxy_env is defined
+
 - name: Ubuntu specific
   import_tasks: ubuntu.yaml
   when: ansible_distribution|lower == 'ubuntu'
@@ -14,7 +22,7 @@
   lineinfile:
     path: '/etc/hosts'
     regexp: '^{{ ansible_default_ipv4.address }}.+$'
-    line: '{{ ansible_default_ipv4.address }} {{ hostname }}.linuxfoundation.org'
+    line: '{{ ansible_default_ipv4.address }} {{ hostname }}.{{ domain }}'
   tags: set-hostname
 
 - name: Set sudoers admin
    mode: '644'
   notify: ['Update GRUB']
   tags: copy-grub
+
+- name: Add permanent proxy settings
+  lineinfile:
+    path: "/etc/environment"
+    state: "present"
+    line: "{{ item.key }}={{ item.value }}"
+  with_dict: "{{ proxy_env }}"
+  when: proxy_env is defined
index ff47ffc..ae606cd 100644 (file)
   tags: copy-apt-sources
   when: ansible_machine == 'x86_64'
 
-- name: Install python-apt
+- name: Install CSIT dependencies
   apt:
-    name: 'python-apt'
+    name: '{{ item }}'
     state: 'present'
-    update_cache: True
-  tags: install-python-apt
-
-- name: Install git
-  apt:
-    name: 'git'
-    state: 'present'
-    update_cache: True
-  tags: install-git
-
-- name: Install crudini
-  apt:
-    name: 'crudini'
-    state: 'present'
-    update_cache: True
-  tags: install-crudini
-
-- name: Install expect
-  apt:
-    name: 'expect'
-    state: 'present'
-    update_cache: True
-  tags: install-expect
-
-- name: Install socat
-  apt:
-    name: 'socat'
-    state: 'present'
-    update_cache: True
-  tags: install-socat
-
-- name: Install qemu
-  apt:
-    name: 'qemu-system'
-    state: 'present'
-    update_cache: True
-  tags: install-qemu
+    install_recommends: False
+  with_items:
+    - 'python-apt'
+    - 'python-setuptools'
+    - 'git'
+    - 'crudini'
+    - 'expect'
+    - 'socat'
+    - 'qemu-system'
+  tags: install-csit-dependencies
index 824f092..69ca529 100644 (file)
 ---
 # file: roles/sut/tasks/ubuntu.yaml
 
-- name: Install DKMS
-  apt:
-    name: 'dkms'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-dkms
-
-- name: Install pkg-config
-  apt:
-    name: 'pkg-config'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-pkg-config
-
-- name: Install libglib2.0-dev
-  apt:
-    name: 'libglib2.0-dev'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-libglib2.0-dev
-
-- name: Install autoconf
-  apt:
-    name: 'autoconf'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-autoconf
-
-- name: Install libtool
-  apt:
-    name: 'libtool'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-libtool
-
-- name: Install screen
-  apt:
-    name: 'screen'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-screen
-
-- name: Install libmbedcrypto1
-  apt:
-    name: 'libmbedcrypto1'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-libmbedcrypto1
-
-- name: Install libmbedtls10
-  apt:
-    name: 'libmbedtls10'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-libmbedtls10
-
-- name: Install libmbedx509-0
-  apt:
-    name: 'libmbedx509-0'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-libmbedx509-0
-
-- name: Install lxc
-  apt:
-    name: 'lxc'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-lxc
-
-- name: Install java
-  apt:
-    name: 'openjdk-8-jdk'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-java
-
-- name: Install Pixman (Qemu-dep)
-  apt:
-    name: 'libpixman-1-dev'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-pixman
-
-- name: Install python-cffi
-  apt:
-    name: 'python-cffi'
-    state: 'present'
-    update_cache: True
-  become: yes
-  tags: install-python-cffi
+- name: Install CSIT dependencies
+  apt:
+    name: '{{ item }}'
+    state: 'present'
+    install_recommends: False
+  with_items:
+    - 'dkms'
+    - 'pkg-config'
+    - 'libglib2.0-dev'
+    - 'autoconf'
+    - 'libtool'
+    - 'screen'
+    - 'libmbedcrypto1'
+    - 'libmbedtls10'
+    - 'libmbedx509-0'
+    - 'lxc'
+    - 'openjdk-8-jdk'
+    - 'libpixman-1-dev'
+    - 'python-cffi'
+  tags: install-csit-dependencies
index 54001a7..9fa19b5 100644 (file)
@@ -2,5 +2,5 @@
 # file: roles/tg/tasks/main.yaml
 
 - name: Ubuntu specific
-  import_tasks: ubuntu_x86_64.yaml
-  when: ansible_distribution|lower == 'ubuntu' and ansible_machine == 'x86_64'
+  import_tasks: ubuntu.yaml
+  when: ansible_distribution|lower == 'ubuntu'
diff --git a/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu.yaml b/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu.yaml
new file mode 100644 (file)
index 0000000..acb4f98
--- /dev/null
@@ -0,0 +1,11 @@
+---
+# file: roles/tg/tasks/ubuntu.yaml
+
+- name: Install CSIT dependencies
+  apt:
+    name: '{{ item }}'
+    state: 'present'
+    install_recommends: False
+  with_items:
+    - 'unzip'
+  tags: install-csit-dependencies
diff --git a/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml b/resources/tools/testbed-setup/ansible/roles/tg/tasks/ubuntu_x86_64.yaml
deleted file mode 100644 (file)
index 54a0468..0000000
+++ /dev/null
@@ -1,9 +0,0 @@
----
-# file: roles/tg/tasks/ubuntu_x86_64.yaml
-
-- name: Install Unzip
-  apt:
-    name: 'unzip'
-    state: 'present'
-    update_cache: True
-  tags: install-unzip
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml b/resources/tools/testbed-setup/ansible/roles/tg_sut/defaults/main.yaml
deleted file mode 100644 (file)
index 3a6f68f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
----
-docker_edition: "ce"
-docker_channel: "edge"
-
-docker_version: "18.05.0"
-docker_install_docker_compose: True
-docker_compose_version: "1.21.0"
-
-docker_users: ['testuser']
-
-docker_daemon_options: []
-# Can be used to set environment variables for the Docker daemon, such as:
-# docker_daemon_environment:
-#   - "HTTP_PROXY=http://proxy.example.com:3128/"
-#   - "HTTPS_PROXY=http://proxy.example.com:3128/"
-#   - "NO_PROXY=localhost,127.0.0.1"
-docker_daemon_environment: []
-
-docker_repository: "deb https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} {{ docker_channel }}"
-docker_apt_package_name: "{{ docker_version }}~{{ docker_edition }}~3-0~{{ ansible_distribution | lower }}"
-
-apt_cache_time: 86400
-
-kubernetes_channel: "main"
-kubernetes_version: "1.11.0-00"
-
-kubernetes_repository: "deb http://apt.kubernetes.io/ kubernetes-xenial {{ kubernetes_channel }}"
-kubernetes_apt_package_name: "{{ kubernetes_version }}"
index 2f34cfb..2602f93 100644 (file)
@@ -6,6 +6,7 @@ network:
   ethernets:
     {{ ansible_default_ipv4["interface"] }}:
       addresses: [ {{ (ansible_default_ipv4.address + '/' + ansible_default_ipv4.netmask) | ipaddr('host/prefix') }} ]
+      dhcp4: false
       gateway4: {{ ansible_default_ipv4["gateway"] }}
       nameservers:
-        addresses: [ 199.204.44.24, 199.204.47.54 ]
+        addresses: {{ dns_servers }}
index 11caf5d..0ce17e2 100644 (file)
@@ -1,13 +1,56 @@
-robotframework==2.9.2
-paramiko==1.16.0
-scp==0.10.2
-ipaddress==1.0.16
-interruptingcow==0.6
-PyYAML==3.11
-pykwalify==1.5.0
-scapy==2.3.1
-enum34==1.1.2
-requests==2.9.1
+# DISCLAIMER: BEFORE EDITING THIS FILE!
+#
+# This file has two closely related consequences.
+# The common part is that this file defines
+# the content of virtual Python environment
+# used when Robot tests are running.
+# The difference is in which machine the environment is created on.
+# For the Jenkins executor machines,
+# editing this file has no surprising consequences.
+#
+# But the environment is also being created
+# on virtual machines created by VIRL, which do have specific restrictions.
+# Complete package and setting snapshot (for both VIRL and Physical Testbed),
+# with network being limited in case of VIRL to local only.
+# This implies pip currently being set to site-packages only
+# for both VIRL and Physical Testbed.
+# So if the "virl image" applied on the machine
+# does not reflect the edits, some installations might fail.
+# Even if the failure may not directly impact execution,
+# this leads to inconsistency between the installed environment
+# and the code base, which may lead to fatal errors or uncaught exceptions.
+#
+# The "virl image" is built from files such as
+# resources/tools/disk-image-builder/ubuntu/lists/ubuntu-16.04.1_2017-10-21_2.0/pip-requirements.txt
+# but the build is not automated,
+# the new "virl image" has to be built and applied manually
+# before your edit can pass.
+# This also means that incompatible virl images
+# would make existing tests fail until your edit is merged.
+#
+# TODO: Automate the virtualenv download and distribution
+#   on all types of testbeds prior to the test execution.
+# TODO: Figure out a way to verify edits to this file automatically.
+
+# TODO: Split into directly needed packages and their dependencies.
+docopt==0.6.2  # for pykwalify
 ecdsa==0.13
+enum34==1.1.2
+ipaddress==1.0.16
+paramiko==1.16.0
+pexpect==4.6.0
 pycrypto==2.6.1
+pykwalify==1.5.0
 pypcap==1.1.5
+python-dateutil==2.4.2  # for pykwalify
+PyYAML==3.11
+requests==2.9.1
+robotframework==2.9.2
+scapy==2.3.1
+scp==0.10.2
+six==1.12.0  # for python-dateutil
+
+# The following is only there for PLRsearch.
+dill==0.2.8.2
+numpy==1.14.5
+scipy==1.1.0
index 8604185..2f75ae5 100644 (file)
 ---
 # file: roles/tg_sut/tasks/ubuntu.yaml
 
-- name: Install python-dev
+- name: Install CSIT dependencies
   apt:
-    name: 'python-dev'
+    name: '{{ item }}'
     state: 'present'
-    update_cache: True
-  tags: install-python-dev
-
-- name: Install python-virtualenv
-  apt:
-    name: 'python-virtualenv'
-    state: 'present'
-    update_cache: True
-  tags: install-python-virtualenv
-
-- name: Install python pip
-  apt:
-    name: 'python-pip'
-    state: 'present'
-    update_cache: True
-  tags: install-python-pip
-
-- name: Install libpcap-dev
-  apt:
-    name: 'libpcap-dev'
-    state: 'present'
-    update_cache: True
-  tags: install-libpcap-dev
-
-- name: Install cpufrequtils
-  apt:
-    name: 'cpufrequtils'
-    state: 'present'
-    update_cache: True
-  tags: install-cpufrequtils
-
-- name: Install cgroup-support
-  apt:
-    name: 'cgroup-bin'
-    state: 'present'
-    update_cache: True
-  tags: install-cgroup-support
-
-- name: Install zlib1g-dev
-  apt:
-    name: 'zlib1g-dev'
-    state: 'present'
-    update_cache: True
-  tags: install-zlib1g-dev
-
-- name: Install libnuma-dev
-  apt:
-    name: 'libnuma-dev'
-    state: 'present'
-    update_cache: True
-  tags: install-libnuma-dev
-
-- name: Install Docker and role dependencies
-  apt:
-    name: "{{ item }}"
-    state: "present"
     install_recommends: False
   with_items:
-    - "apt-transport-https"
-    - "ca-certificates"
-    - "software-properties-common"
-    - "cron"
-  tags: install-docker
-
-- name: Install Docker APT GPG key
+    - 'python-dev'
+    - 'python-virtualenv'
+    - 'python-pip'
+    - 'libpcap-dev'
+    - 'cpufrequtils'
+    - 'cgroup-bin'
+    - 'zlib1g-dev'
+    - 'apt-transport-https'
+    - 'ca-certificates'
+    - 'software-properties-common'
+    - 'cron'
+    - 'libnuma-dev'
+  tags: install-csit-dependencies
+
+- name: Add an Apt signing key, for docker-ce repository
   apt_key:
     url: https://download.docker.com/linux/ubuntu/gpg
-    state: "present"
+    state: 'present'
   tags: install-docker
 
 - name: Install Docker APT repository
   apt_repository:
-    repo: "{{ docker_repository }}"
-    state: "present"
+    repo: '{{ docker_repository }}'
+    state: 'present'
     update_cache: True
   tags: install-docker
 
 - name: Install Docker
   apt:
-    name: "docker-{{ docker_edition }}={{ docker_apt_package_name }}"
-    state: "present"
-    update_cache: True
-    install_recommends: False
-    cache_valid_time: "{{ apt_cache_time }}"
+    name: 'docker-{{ docker_edition }}={{ docker_apt_package_name }}'
+    state: 'present'
   tags: install-docker
 
-- name: Remove Upstart docker config file
+- name: Creates Docker service directory
   file:
-    path: "/etc/default/docker"
-    state: "absent"
-  tags: docker
+    path: '/etc/systemd/system/docker.service.d'
+    state: 'directory'
 
-- name: Ensure systemd directory exists
-  file:
-    path: "/etc/systemd/system"
-    state: "directory"
-    owner: "root"
-    group: "root"
-    mode: "0755"
-  tags: ensure-docker
+- name: Setup Docker http proxy
+  template:
+    src: 'templates/docker.service.proxy.http'
+    dest: '/etc/systemd/system/docker.service.d/http-proxy.conf'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
+  register: docker_register_systemd_service
+  when: proxy_env is defined and proxy_env.http_proxy is defined
+  tags: copy-docker
 
-- name: Copy systemd docker unit file
+- name: Setup Docker https proxy
   template:
-    src: "templates/docker.service.j2"
-    dest: "/etc/systemd/system/docker.service"
-    owner: "root"
-    group: "root"
-    mode: "0644"
+    src: 'templates/docker.service.proxy.https'
+    dest: '/etc/systemd/system/docker.service.d/https-proxy.conf'
+    owner: 'root'
+    group: 'root'
+    mode: '0644'
   register: docker_register_systemd_service
+  when: proxy_env is defined and proxy_env.https_proxy is defined
   tags: copy-docker
 
 - name: Reload systemd daemon
-  command: "systemctl daemon-reload"
-  notify: ["Restart Docker"]
+  command: 'systemctl daemon-reload'
+  notify: ['Restart Docker']
   when: (docker_register_systemd_service and
          docker_register_systemd_service is changed)
   tags: restart-docker
 
-- name: Set specific users to "docker" group
+- name: Set specific users to docker group
   user:
-    name: "{{ item }}"
-    groups: "docker"
+    name: '{{ item }}'
+    groups: 'docker'
     append: True
-  with_items: "{{ docker_users }}"
+  with_items: '{{ docker_users }}'
   when: docker_users
   tags: set-docker
 
-- name: Install kubernetes APT GPG key
+- name: Add an Apt signing key, for Kubernetes repository
   apt_key:
     url: https://packages.cloud.google.com/apt/doc/apt-key.gpg
-    state: "present"
+    state: 'present'
   tags: install-kubernetes
 
 - name: Install kubernetes APT repository
   apt_repository:
-    repo: "{{ kubernetes_repository }}"
-    state: "present"
-    update_cache: True
-  tags: install-kubernetes
-
-- name: Install kubeadm
-  apt:
-    name: "kubeadm={{ kubernetes_apt_package_name }}"
-    state: "present"
-    force: yes
-    update_cache: True
-    install_recommends: False
-    cache_valid_time: "{{ apt_cache_time }}"
-  tags: install-kubernetes
-
-- name: Install kubectl
-  apt:
-    name: "kubectl={{ kubernetes_apt_package_name }}"
-    state: "present"
-    force: yes
+    repo: '{{ kubernetes_repository }}'
+    state: 'present'
     update_cache: True
-    install_recommends: False
-    cache_valid_time: "{{ apt_cache_time }}"
   tags: install-kubernetes
 
-- name: Install kubelet
+- name: Install Kubernetes
   apt:
-    name: "kubelet={{ kubernetes_apt_package_name }}"
-    state: "present"
+    name: '{{ item }}={{ kubernetes_apt_package_name }}'
+    state: 'present'
     force: yes
-    update_cache: True
-    install_recommends: False
-    cache_valid_time: "{{ apt_cache_time }}"
+  with_items:
+    - 'kubeadm'
+    - 'kubectl'
+    - 'kubelet'
   tags: install-kubernetes
 
 - name: Apply kubelet parameter
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2 b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.j2
deleted file mode 100644 (file)
index 26a1bcf..0000000
+++ /dev/null
@@ -1,34 +0,0 @@
-# {{ ansible_managed }}
-
-[Unit]
-Description=Docker Application Container Engine
-Documentation=https://docs.docker.com
-After=network-online.target docker.socket
-Requires=docker.socket
-
-[Service]
-Type=notify
-# the default is not to use systemd for cgroups because the delegate issues still
-# exists and systemd currently does not support the cgroup feature set required
-# for containers run by docker
-{% if docker_daemon_environment %}
-Environment="{{ docker_daemon_environment | join('" "') }}"
-{% endif %}
-ExecStart=/usr/bin/dockerd {{ docker_daemon_options | join(" ") }}
-ExecReload=/bin/kill -s HUP $MAINPID
-# Having non-zero Limit*s causes performance problems due to accounting overhead
-# in the kernel. We recommend using cgroups to do container-local accounting.
-LimitNOFILE=infinity
-LimitNPROC=infinity
-LimitCORE=infinity
-# Uncomment TasksMax if your systemd version supports it.
-# Only systemd 226 and above support this version.
-TasksMax=infinity
-TimeoutStartSec=0
-# set delegate yes so that systemd does not reset the cgroups of docker containers
-Delegate=yes
-# kill only the docker process, not all processes in the cgroup
-KillMode=process
-
-[Install]
-WantedBy=multi-user.target
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.proxy.http b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.proxy.http
new file mode 100644 (file)
index 0000000..73ceba3
--- /dev/null
@@ -0,0 +1,4 @@
+# {{ ansible_managed }}
+
+[Service]
+Environment="{{ docker_daemon_environment_http | join('" "') }}"
diff --git a/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.proxy.https b/resources/tools/testbed-setup/ansible/roles/tg_sut/templates/docker.service.proxy.https
new file mode 100644 (file)
index 0000000..1c2097e
--- /dev/null
@@ -0,0 +1,4 @@
+# {{ ansible_managed }}
+
+[Service]
+Environment="{{ docker_daemon_environment_https | join('" "') }}"
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl b/resources/tools/testbed-setup/ansible/roles/virl/files/interfaces_virl
deleted file mode 100644 (file)
index 25ea35a..0000000
+++ /dev/null
@@ -1,17 +0,0 @@
-# This file describes the network interfaces available on your system
-# and how to activate them. For more information, see interfaces(5).
-
-# The loopback network interface
-auto lo
-iface lo inet loopback
-
-# The primary network interface
-auto br1
-iface br1 inet static
-    address {{ ansible_default_ipv4["address"] }}
-    netmask {{ ansible_default_ipv4["netmask"] }}
-    gateway {{ ansible_default_ipv4["gateway"] }}
-    dns-nameservers 199.204.44.24 199.204.47.54
-    bridge_maxwait 0
-    bridge_ports eth0 eth4
-    bridge_stp off
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch b/resources/tools/testbed-setup/ansible/roles/virl/files/nova_os_ip.patch
deleted file mode 100644 (file)
index a943dc9..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
---- api/metadata/base.py.old   2017-04-26 12:38:52.522991596 +0000
-+++ api/metadata/base.py       2017-04-26 10:06:46.396450566 +0000
-@@ -493,7 +493,7 @@
-                 path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
-                 yield (path, self.lookup(path))
-
--            if self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
-+            if False and self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
-                 path = 'openstack/%s/%s' % (version, NW_JSON_NAME)
-                 yield (path, self.lookup(path))
-
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt b/resources/tools/testbed-setup/ansible/roles/virl/files/requirements.txt
deleted file mode 100644 (file)
index 11caf5d..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-robotframework==2.9.2
-paramiko==1.16.0
-scp==0.10.2
-ipaddress==1.0.16
-interruptingcow==0.6
-PyYAML==3.11
-pykwalify==1.5.0
-scapy==2.3.1
-enum34==1.1.2
-requests==2.9.1
-ecdsa==0.13
-pycrypto==2.6.1
-pypcap==1.1.5
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64 b/resources/tools/testbed-setup/ansible/roles/virl/files/salt.b64
deleted file mode 100644 (file)
index 5e76a85..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-H4sIAF7JK1cAA+1a32/bNhD2a/RXcMmDkyGy9VuDsQwDhhUoNuxhHfbSFQYj0TZrSVRJyklQ9H/f
-UZZsGbUj27HVduOXBzvkmUfyuzveURI4kcPeeWEBwtBXn3boW83PGj3bCS3P9v3QcnuW7YJAD/ln
-nleJQkjMEeolLMIJjlOabZdr6/9GIRT/gi/OaQOH8O+HgeI/8GzNfxdY8S/OFwgO598LLE/z3wU2
-+Y/mjAhJ+GkNoeTf34d/LwwskLPB/V3NfxfYwX82EaYgfEH4QCTihToUwYFl7eTfBrJr/l0X+Hds
-R/FvnWSFLfif80+zKCliMjIQMlFN/6BBP81gg5JkZ3/EsgmdFpwYX3opGkdgh//HmUix+HAC5++1
-+7/lr+O/4zng/5YfeNr/u8B2/6/p1879X0fr+X+CVHD//M9XH8r/IRHU+V8XaOd/FQOOPgza4n/o
-N/j3FP+uHdo6/ncBxbQK/hOakEFMOYkk40+qRR0IBdjACHHGZNUw5azIN1pSBqcHAvc2DDXWUEQc
-y2h2gjHtMAyrQTlrG29BebI5XqOlOUcioyF5zBmX63WnOMNTEo82xAPPq/6XJM0TLKHtPc3e46pV
-sIJH0HapPGc03Oo7anRRq7s0DOgaL7vGvMgymk2VTtVCIzJoNCkFGU5heDXanPCMJNWghlHkMcxm
-3FhFlMbq15u/XPZPBDI53nl+t/t/VQC8IBUs/d/zdtd/QbjK/3xL5X9OoOK/9v/z4zPzQhXfypby
-+bSu/9beIeY0H4MgnTyN0F+8IFU7JxNOxGyEXuFEkBYb/tLL1qjQ7v/LGPYSHeUZ/1z9t87/LNsK
-1P2PH+j7v06wL//VYXOUjpb4D1lf2OBfxX/XcXT93wmu0EZGhOSMIBxFRAgEib/kLEEJFRJNGC9z
-JfEE9pEK9DCj0Qyl+AndkyrTILFxdXEhGfrj1RsUJZRkUgwQekNqAXHt3wyMK+MK/fqIIaUi5agg
-vXAQzuLymzuC7tIgZywlAi0xY0Kqo8S+5g+34imLbjM2FsW95ISMoxmJ5jcrGeeasx0yW3V7K41g
-9F6lEE2FGM75vU9XGieCxnfWbcSZEGkmt42+Hqae/efDbJtUM3G++PgRKV98289pkmA+mBLZf3fd
-V+nsKHHGGZEPjM/7t5XUlGM4omupDYG+HToDOxjYA2voeH10c4M+fVITgTmobHssPhRYzLbs5X5z
-cFon4TRm4Zx2Fm7bTrhbdsI9dA51+XFx8b0yq1bxL+3OB+P5+9/TPAc85Pmf44Xq/sfS5383aOH/
-5Zc/vfb7Hyewa/4D2/JU/ed7vj7/u0B5+tOMymHzzv9MNyO1VS1TyqbCS6PtHqSS/vYi7NeNFv9/
-efHXO6T+g/iv3v9zbNvT8b8T7MV/01OP0NFW/4V2UPPvh56t6j/f0vVfJ7hCC5qOBIEKT94VudoM
-iaS4cxCRIyMmUJXQXFKWocu/X//5ex2GLw1cyBlUUJe/YA4Wk6HflpaDfqxt6OeIioiBzaQ/QXRf
-DgzDQHBPyIIk6K3jev476GD5Zvt3yw6DE5Hjh8wwyCOJ0LAQfCjuaVZbIzJj9I9R12sm3rNoGU/h
-vHrAT621S0NwXUhBYlJVDy/R3V44NSXXBdRR2t19V75NcF04bddtZsxUhbdYfgXKWLKA70JyGkmT
-8RhMwjSBttikGVjFBEdECZPHiORy3XaXMGjNaWyqkHM3lGk+VPM369gDXSCgItBSYmMWy6uqu+f2
-YULBTscql1gK79iFLWL9HwblX//z1e+hVxCYc9yueJvcM5pjlsKv71hO1P18NC/D0xHJURn2VQ54
-+tCyQnn+H/j+t+Xo878LrPj/ut7/Lus/zf/5sck/rA6O+kF8hve/D+I/cAJL898FdvAvIYUrV3xs
-zt9E2/N/y6uf/7hh4Dtl/aff/+4GKpcq77TL90jQPRZkee+CTLSuC/Wti4aGhoaGhoaGhoaGhoaG
-hoaGhoaGhsa3hH8BeOCWxQBQAAA=
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/etc/salt/minion.d/testlocal.conf
deleted file mode 100644 (file)
index fce910c..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-file_roots:
-   base:
-     - /srv/salt/
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server.sls
deleted file mode 100644 (file)
index bf3e47f..0000000
+++ /dev/null
@@ -1,3 +0,0 @@
-include:
-  - ckoester.nfs-server.install
-  - ckoester.nfs-server.configure
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/configure.sls
deleted file mode 100644 (file)
index ce362a3..0000000
+++ /dev/null
@@ -1,31 +0,0 @@
-/nfs:
-  file.directory:
-    - user: root
-    - group: root
-    - mode: 755
-
-/nfs/scratch:
-  file.directory:
-    - user: root
-    - group: root
-    - mode: 1777
-
-/nfs/ro:
-  file.directory:
-    - user: virl
-    - group: virl
-    - mode: 755
-
-/etc/exports:
-  file.managed:
-    - mode: 644
-    - template: jinja
-    - source: "salt://ckoester/nfs-server/files/exports"
-
-nfs_server_running:
-  service.running:
-    - name: nfs-kernel-server
-
-update_exports:
-  cmd.run:
-    - name: exportfs -ra
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/files/exports
deleted file mode 100644 (file)
index 23802be..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-# /etc/exports: the access control list for filesystems which may be exported
-#              to NFS clients.  See exports(5).
-#
-# Example for NFSv2 and NFSv3:
-# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
-#
-# Example for NFSv4:
-# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
-# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
-#
-/nfs/scratch   {{ salt['pillar.get']('virl:l2_network', salt['grains.get']('l2_network', '172.16.1.0/24' )) }}(rw,no_root_squash,no_subtree_check) {{ salt['pillar.get']('virl:l2_network2', salt['grains.get']('l2_network2', '172.16.2.0/24' )) }}(rw,no_root_squash,no_subtree_check) {{ salt['pillar.get']('virl:l3_network', salt['grains.get']('l3_network', '172.16.3.0/24' )) }}(rw,no_root_squash,no_subtree_check)
-/nfs/ro                *(ro,no_root_squash,no_subtree_check)
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls b/resources/tools/testbed-setup/ansible/roles/virl/files/salt/srv/salt/ckoester/nfs-server/install.sls
deleted file mode 100644 (file)
index 0f13634..0000000
+++ /dev/null
@@ -1,5 +0,0 @@
-nfs-kernel-server install:
-  pkg.installed:
-    - skip_verify: True
-    - refresh: False
-    - name: nfs-kernel-server
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in b/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_jenkins-in
deleted file mode 100644 (file)
index 1797c2c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-jenkins-in ALL=(root) NOPASSWD: /bin/rm -fr /scratch/*, /bin/rm -fr /nfs/scratch/*
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl b/resources/tools/testbed-setup/ansible/roles/virl/files/sudoers_virl
deleted file mode 100644 (file)
index e0cf48a..0000000
+++ /dev/null
@@ -1 +0,0 @@
-virl ALL=(root) NOPASSWD:ALL
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0 b/resources/tools/testbed-setup/ansible/roles/virl/files/ttyS0
deleted file mode 100644 (file)
index 0ed8550..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-# ttyS0 - getty
-#
-# This service maintains a getty on ttyS0 from the point the system is
-# started until it is shut down again.
-
-start on stopped rc RUNLEVEL=[12345]
-stop on runlevel [!12345]
-
-respawn
-exec /sbin/getty -L 115200 ttyS0 vt102
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl
deleted file mode 100644 (file)
index b4c3de7..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
------BEGIN RSA PRIVATE KEY-----
-MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
-v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
-vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
-4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
-1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
-8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
-0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
-EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
-0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
-MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
-p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
-79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
-OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
-GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
-bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
-8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
-f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
-UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
-+m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
-D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
-sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
-g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
-qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
-EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
-BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
------END RSA PRIVATE KEY-----
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/id_rsa_virl.pub
deleted file mode 100644 (file)
index 0ef508c..0000000
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBQOVOnNIenAtCi1k4VLgCBw80RYKc/UAHaFYWa8j7wpqH+Bu/yu5lT1GqE+znq3IrPFuG82RjhVsfkXUaKUdm7eYurMzgp+CEyZp4wSM7VPsFhh2+4F9O8iW6WN9Da8CWdisPCf4KXEmTzWbSkOC+ssRfipVWHawaHtQSRjLmvzYM1rbhWPM8HUn9zxr3M/wWhlSFgjMXgUu9EyNQAOlsxeAkywalpkubP3lqnQSi3u9vUJzUu8X3uLIEKu0g5IpEoDnRmEsaMwqY7CdhT5w/nvXy1umugNWLpUk6jvN65fJ60yzxgGX2RZuVYb0Go/2Ny0W+yaLkbuFeCrVjy74P virl@tb4-virl
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ifup
deleted file mode 100644 (file)
index a4a743a..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-#! /bin/sh
-# Reload the OpenSSH server when an interface comes up, to allow it to start
-# listening on new addresses.
-
-set -e
-
-# Don't bother to restart sshd when lo is configured.
-if [ "$IFACE" = lo ]; then
-       exit 0
-fi
-
-# Only run from ifup.
-if [ "$MODE" != start ]; then
-       exit 0
-fi
-
-if [ "$IFACE" = br1 ]; then
-       /sbin/ip route delete default
-       /sbin/ip route add default via 10.30.51.1
-       /sbin/ifconfig br1:0 {{ virl_l2_ip }} netmask 255.255.255.0
-       exit 0
-fi
-
-
-exit 0
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/ssh_environment
deleted file mode 100644 (file)
index 5ec594d..0000000
+++ /dev/null
@@ -1 +0,0 @@
-PATH=/home/jenkins-in/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper b/resources/tools/testbed-setup/ansible/roles/virl/files/virl/virl-bootstrap-wrapper
deleted file mode 100644 (file)
index dc7ead8..0000000
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/expect
-
-if ![ string equal $::env(USER) "root"] {
-  puts "Please re-run this script as root."
-  exit 1
-}
-
-log_file /tmp/virl-bootstrap.log
-set timeout 3600
-
-spawn  ./virl-bootstrap.py
-expect "Which step are you on"
-send "1\r"
-
-expect "Salt master"
-send "vsm-md.virl.info\r"
-
-expect "Which step are you on"
-send "2\r"
-
-expect "Salt id"
-send "{{ hostname }}\r"
-expect "Salt domain name"
-send "linuxfoundation.org\r"
-
-expect "Which step are you on"
-send "3\r"
-
-expect "System hostname"
-send "{{ hostname }}\r"
-expect "System Domain name"
-send "linuxfoundation.org\r"
-
-expect "Which step are you on"
-send "4\r"
-
-puts "*******************STEP 6*************************************************************************************************************************************************"
-expect "Which step are you on"
-send "6\r"
-expect "Salt installed"
-
-puts "*******************STEP 8*************************************************************************************************************************************************"
-expect "Which step are you on"
-send "8\r"
-
-expect "True"
-
-puts "*******************STEP 9*************************************************************************************************************************************************"
-expect "Which step are you on"
-send "9\r"
-
-expect "Failed:     0"
-
-puts "*******************STEP 11*************************************************************************************************************************************************"
-expect "Which step are you on"
-send "11\r"
-
-expect eof
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/02-virl-bootstrap.yaml
deleted file mode 100644 (file)
index 9ffb40c..0000000
+++ /dev/null
@@ -1,216 +0,0 @@
----
-  - name: install virl-bootstrap
-    git:
-        repo: 'https://github.com/VIRL-Open/virl-bootstrap.git'
-        dest: /home/virl/virl-bootstrap
-        version: xenial
-    become_user: virl
-  - name: copy vsetting file to /etc
-    shell: /usr/bin/install -m 666 /home/virl/virl-bootstrap/vsettings.ini /etc/virl.ini
-    become: yes
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT hostname {{ hostname }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT domain_name linuxfoundation.org
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT using_dhcp_on_the_public_port False
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT public_port dummy3
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT Static_IP 10.30.49.28
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT public_network 10.30.49.0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT public_netmask {{ ansible_default_ipv4["netmask"] }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT public_gateway 10.30.49.1
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT proxy False
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT ntp_server pool.ntp.org
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT first_nameserver 199.204.44.24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT second_nameserver 199.204.47.54
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_master vsm-md.virl.info
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_id {{ hostname }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_domain linuxfoundation.org
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT salt_masterless false
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_port {{ virl_public_port }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network {{ virl_l2_network }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_mask 255.255.255.0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network_gateway {{ virl_l2_gateway }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_start_address {{ virl_l2_start }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_end_address {{ virl_l2_end }}
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_address {{ ansible_default_ipv4["address"] }}/24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT first_flat_nameserver 199.204.44.24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT second_flat_nameserver 199.204.47.54
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_port2_enabled True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_port2 dummy0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network2 172.16.2.0/24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_mask2 255.255.255.0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_network_gateway2 172.16.2.1
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_start_address2 172.16.2.50
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_end_address2 172.16.2.253
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l2_address2 172.16.2.254/24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT first_flat2_nameserver 199.204.44.24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT second_flat2_nameserver 199.204.47.54
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_port dummy1
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_network 172.16.3.0/24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_mask 255.255.255.0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_network_gateway 172.16.3.1
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_floating_start_address 172.16.3.50
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_floating_end_address 172.16.3.253
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT l3_address 172.16.3.254/24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT first_snat_nameserver 199.204.44.24
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT second_snat_nameserver 199.204.47.54
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT ramdisk True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT ank 19401
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT ank_live 19402
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_webservices 19399
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_user_management 19400
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_apache_port 80
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_webmux 19403
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT Start_of_serial_port_range 17000
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT End_of_serial_port_range 18000
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT serial_port 19406
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vnc_port 19407
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT location_region US
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vnc False
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT guest_account True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT user_list tb4-virl:Cisco1234
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT uwmadmin_password Cisco1234
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT password password
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT mysql_password password
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT keystone_service_token fkgjhsdflkjh
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT enable_cinder True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT cinder_file True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT cinder_size 20000
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT cinder_location /var/lib/cinder/cinder-volumes.lvm
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT dummy_int True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT this_node_is_the_controller True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_controller_hostname controller
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_controller_IP 172.16.10.250
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_port dummy2
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_IP 172.16.10.250
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_network 172.16.10.0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_netmask 255.255.255.0
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT internalnet_gateway 172.16.10.1
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT iosv True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT csr1000v True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT iosxrv432 False
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT iosxrv52 False
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT iosxrv True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT nxosv True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vpagent True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT iosvl2 True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT asav True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_server True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_iperf True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_routem True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT lxc_ostinato True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT server True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_mac True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_win32 True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_win64 True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT vmm_linux True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT virl_clients True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT ram_overcommit 2
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT web_editor True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT mitaka True
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT kilo False
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT libvirt_cpu_mode host-passthrough
-    become_user: virl
-  - command: crudini --inplace --set /etc/virl.ini DEFAULT neutron_bridge_flooding True
-    become_user: virl
-  - name: copy vinstall bootstrap wrapper script
-    template: src=files/virl/virl-bootstrap-wrapper dest=/home/virl/virl-bootstrap/virl-bootstrap-wrapper owner=virl group=virl mode=775
-    become_user: virl
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/03-virl-post-install.yaml
deleted file mode 100644 (file)
index 58b70c9..0000000
+++ /dev/null
@@ -1,92 +0,0 @@
----
-  - name: Add jenkins-in user
-    user: name=jenkins-in shell=/bin/bash comment="Jenkins user"
-  - name: Add jenkins-in user to sudoers
-    copy: src=files/sudoers_jenkins-in dest=/etc/sudoers.d/jenkins-in owner=root group=root mode=660
-  - name: Set Jenkins user authorized key
-    authorized_key: user=jenkins-in key="{{ lookup('file', '/home/testuser/.ssh/id_rsa.pub') }}"
-  - name: copy salt states for dnsmasq and nfs
-    synchronize: src=files/salt/ dest=/
-  - name: install NFS salt
-    shell: salt-call --local state.sls ckoester.nfs-server
-  - name: NFS symlink
-    shell: ln -s /nfs/scratch /scratch
-    args:
-      creates: /scratch
-  - name: Upate Nova CPU mode
-    ini_file: dest=/etc/nova/nova.conf section=libvirt option=cpu_mode value=host-passthrough
-  - name: Restart nova-compute service
-    service: name=nova-compute state=restarted
-  - name: Change listen interface in NTP settings
-    lineinfile: dest=/etc/ntp.conf state=present regexp='^interface listen 172.16.*' line='interface listen {{ ansible_default_ipv4["address"] }}'
-  - name: Restart NTP service
-    service: name=ntp state=restarted
-  - name: Permit SSH user environment
-    lineinfile: dest=/etc/ssh/sshd_config state=present regexp='PermitUserEnvironment.*' line='PermitUserEnvironment yes'
-  - name: Restart SSH daemon
-    service: name=ssh state=restarted
-  - name: clone csit git repository
-    git: repo=https://gerrit.fd.io/r/csit
-         dest=/home/jenkins-in/git/csit
-    become: jenkins-in
-  - name: Link testcase-infra directory
-    command: ln -sf /home/jenkins-in/git/csit/resources/tools/virl /home/jenkins-in/testcase-infra
-    args:
-      creates: /home/jenkins-in/testcase-infra
-    become: jenkins-in
-  - name: Create bin directory
-    file: path=/home/jenkins-in/bin state=directory mode=0755
-    become: jenkins-in
-  - name: Link start-testcase executable
-    command: ln -sf /home/jenkins-in/testcase-infra/bin/start-testcase /home/jenkins-in/bin/start-testcase
-    args:
-      creates: /home/jenkins-in/bin/start-testcase
-    become: jenkins-in
-  - name: Link stop-testcase executable
-    command: ln -sf /home/jenkins-in/testcase-infra/bin/stop-testcase /home/jenkins-in/bin/stop-testcase
-    args:
-      creates: /home/jenkins-in/bin/stop-testcase
-    become: jenkins-in
-  - name: Link start-testcase-DMM executable
-    command: ln -sf /home/jenkins-in/testcase-infra/bin/start-testcase-DMM /home/jenkins-in/bin/start-testcase-DMM
-    args:
-      creates: /home/jenkins-in/bin/start-testcase-DMM
-    become: jenkins-in
-  - name: Link kill-idle-testcases executable
-    command: ln -sf /home/jenkins-in/testcase-infra/bin/kill-idle-testcases /home/jenkins-in/bin/kill-idle-testcases
-    args:
-      creates: /home/jenkins-in/bin/kill-idle-testcases
-    become: jenkins-in
-  - name: Copy SSH private key
-    copy: src=files/virl/id_rsa_virl dest=/home/jenkins-in/.ssh/id_rsa_virl mode=600
-    become: jenkins-in
-  - name: Copy SSH public key
-    copy: src=files/virl/id_rsa_virl.pub dest=/home/jenkins-in/.ssh/id_rsa_virl.pub mode=644
-    become: jenkins-in
-  - name: Copy SSH environment
-    copy: src=files/virl/ssh_environment dest=/home/jenkins-in/.ssh/environment mode=644
-    become: jenkins-in
-  - name: Add ~/bin to path
-    lineinfile: dest=/home/jenkins-in/.bashrc state=present line='PATH=${HOME}/bin:$PATH'
-    become: jenkins-in
-  - name: Update own IP address in start script
-    shell: sed -i /home/jenkins-in/testcase-infra/bin/start-testcase -e 's/10.30.51.28/{{ ansible_default_ipv4["address"] }}/'
-    become: jenkins-in
-  - name: Add authorized key
-    lineinfile: dest=/home/jenkins-in/.ssh/authorized_keys line='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD4gderzsZyoxHULjuvPHoJuKnkaGrykqtuoqs/k1/jUdxitPoY5eX2cVYqww7MiUif7zLsiXbt5mHtyxAYCluDxAuIcy1xgSZY3MpmmSqDie4A/FdVfCUqCcpf3TZKsRP0an1MNrKIe0JFZV+uU889IDRQRdboGMs3+4cn5b9fOutpv71qwFVrTm9PZbqfQonrrN8Jp4Mz3XaZDpK22xwDAWhYOZ0eV6CJWquUgbYAHE6/HHMvd0zeJKaWZCXO/1tOGOj6cjgoViHqbnCtmYCjmv/ir0IglzbUdWdOqQY5YkhnPonveV48lVKrmBipqgbDezAUQD8wOQ7HttpYpKgt jenkins-in@tb4-virl'
-    become: jenkins-in
-# All of the below will fail if VIRL user/project already exist
-  - name: Create VIRL project
-    shell: virl_uwm_client project-edit --name tb4-virl --enabled True  -i 400 -r 1024000 -c 500
-    ignore_errors: true
-    become: virl
-#  - name: Delete VIRL project user
-#    shell: virl_uwm_client user-delete --name tb4-virl
-#    ignore_errors: true
-#  - name: Recreate VIRL project user
-#    shell: virl_uwm_client user-create --name tb4-virl --role admin --project tb4-virl --set-password Cisco1234
-#    ignore_errors: true
-  - name: Create VPP flavor
-    shell: virl_uwm_client flavor-create --name vPP --ram 4096 --vcpus 2 --disk 0
-    ignore_errors: true
-    become: virl
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/04-disk-image.yaml
deleted file mode 100644 (file)
index 254c05e..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
----
-  - name: Create server image directory
-    file: path=/home/virl/virl-server-image state=directory mode=0755
-    become: virl
-  - name: Copy UBUNTU server image
-    copy: src=/home/virl/virl-server-image/packer-csit-ubuntu-16.04.1_2017-10-21_2.0-qemu dest=/home/virl/virl-server-image/packer-csit-ubuntu-16.04.1_2017-10-21_2.0-qemu owner=virl group=virl mode=644
-    become: virl
-  - name: Copy CENTOS server image
-    copy: src=/home/virl/virl-server-image/packer-csit-centos-7.3-1611_2017-02-23_1.4-qemu dest=/home/virl/virl-server-image/packer-csit-centos-7.3-1611_2017-02-23_1.4-qemu owner=virl group=virl mode=644
-    become: virl
-  - name: Import UBUNTU server image into glance
-    shell: virl_uwm_client image-create --subtype server --version csit-ubuntu-16.04.1_2017-10-21_2.0 --image-on-server /home/virl/virl-server-image/packer-csit-ubuntu-16.04.1_2017-10-21_2.0-qemu
-    become: virl
-  - name: Import CENTOS server image into glance
-    shell: virl_uwm_client image-create --subtype server --version csit-centos-7.3-1611_2017-02-23_1.4 --image-on-server /home/virl/virl-server-image/packer-csit-centos-7.3-1611_2017-02-23_1.4-qemu
-    become: virl
-  - name: Create common directory
-    file: path=/nfs/common state=directory mode=0755
-  - name: Create Nested VM directory
-    file: path=/nfs/common/nested-vm state=directory mode=0755
-  - name: Copy Nested VM image
-    copy: src=/home/virl/virl-server-image/csit-nested-1.6.img dest=/nfs/common/nested-vm/csit-nested-1.6.img owner=virl group=virl mode=644
-  - name: NFS symlink
-    file:
-        src: /nfs/common/nested-vm-current.img.disabled
-        dest: /nfs/common/nested-vm/csit-nested-1.6.img
-        state: link
diff --git a/resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml b/resources/tools/testbed-setup/ansible/roles/virl/tasks/main.yaml
deleted file mode 100644 (file)
index 8bca04e..0000000
+++ /dev/null
@@ -1,78 +0,0 @@
----
-- name: Interfaces file
-  template:
-    src: "files/interfaces_virl"
-    dest: "/etc/network/interfaces owner=root group=root mode=644"
-  tags: interfaces
-
-- name: Add VIRL user
-  user:
-    name: virl
-    shell: "/bin/bash"
-    comment: "VIRL user"
-    password: "$6$mJPlK5FKLar6$xxXPP0LRhC7T1yhHtym18Z3nKZweTtfTxzi1WpvVHJ0U/Em1UWcHqfMhRApFp0rsY9jHRuL6FfeFbKPN..uDK."
-  tags: users
-
-- name: Add VIRL user to sudoers
-  copy:
-    src: "files/sudoers_virl"
-    dest: "/etc/sudoers.d/virl owner=root group=root mode=660"
-  tags: sudoers
-
-- name: Add post up for additional address
-  copy:
-    src: "files/ifup"
-    dest: "/etc/network/if-up.d/virl owner=root group=root mode=755"
-  tags: interfaces
-
-- name: Set VIRL user authorized key
-  authorized_key:
-    user: virl
-    key: "{{ lookup('file', '/home/testuser/.ssh/id_rsa.pub') }}"
-  tags: users
-
-- name: Install bridge-utils
-  apt:
-    name: "bridge-utils"
-    state: "present"
-  tags: apt-install-bridge-utils
-
-- name: Old interface naming
-  command: "ln -s /dev/null /etc/udev/rules.d/80-net-setup-link.rules"
-  tags: interfaces
-
-- name: Update init for old interface naming
-  command: "update-initramfs -u"
-  tags: interfaces
-
-- name: QEMU log garbage collector
-  cron:
-    minute: "0"
-    hour: "0"
-    name: "QEMU log garbage collector"
-    job: "find /var/log/libvirt/qemu -type f -mtime +14 -name 'instance*.log' -delete"
-  tags: cron
-
-- name: VPP deb package garbage collector
-  cron:
-    minute: "0"
-    hour: "0"
-    name: "VPP deb package garbage collector"
-    job: "find /tmp -type f -atime +14 -name '*.deb' -delete"
-  tags: cron
-
-- name: VPP rpm package garbage collector
-  cron:
-    minute: "0"
-    hour: "0"
-    name: "VPP rpm package garbage collector"
-    job: "find /tmp -type f -atime +14 -name '*.rpm' -delete"
-  tags: cron
-
-- name: NFS scratch dir garbage collector
-  cron:
-    minute: "0"
-    hour: "0"
-    name: "NFS scratch dir garbage collector"
-    job: "find /nfs/scratch/ -type d -mtime +1 -name 'session-*' -exec rm -r '{}' \\;"
-  tags: cron
index 1f59505..f2f68c6 100644 (file)
@@ -9,6 +9,3 @@
 
 - import_playbook: vpp_device.yaml
   tags: vpp-device
-
-#- import_playbook: virl.yaml
-#  tags: virl
diff --git a/resources/tools/testbed-setup/ansible/virl.yaml b/resources/tools/testbed-setup/ansible/virl.yaml
deleted file mode 100644 (file)
index 5e57d58..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# file: virl.yaml
-
-- hosts: virl
-  remote_user: testuser
-  become: yes
-  become_user: root
-  roles:
-    - common
-    - virl

©2016 FD.io a Linux Foundation Collaborative Project. All Rights Reserved.
Linux Foundation is a registered trademark of The Linux Foundation. Linux is a registered trademark of Linus Torvalds.
Please see our privacy policy and terms of use.