Add documentation and files related to initial host setup 28/1028/4
author Carsten Koester <ckoester@cisco.com>
Sun, 8 May 2016 05:27:47 +0000 (01:27 -0400)
committer Dave Wallace <dwallacelf@gmail.com>
Thu, 2 Jun 2016 17:20:05 +0000 (17:20 +0000)
Change-Id: I73deeb79e57ac7eca208faa49d04be37c7034163
Signed-off-by: Carsten Koester <ckoester@cisco.com>
33 files changed:
resources/tools/testbed-setup/README.md [new file with mode: 0644]
resources/tools/testbed-setup/boot-screens_txt.cfg [new file with mode: 0644]
resources/tools/testbed-setup/cimc/cimc.py [new file with mode: 0755]
resources/tools/testbed-setup/cimc/cimclib.py [new file with mode: 0755]
resources/tools/testbed-setup/ks.cfg [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/01-host-setup.yaml [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/02-virl-bootstrap.yaml [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/03-virl-post-install.yaml [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/04-disk-image.yaml [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/05-ckoester.yaml [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/apt-sources.list [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/cpufrequtils [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/grub [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/hostname [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/hosts [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/interfaces [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/irqbalance [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/salt.b64 [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/salt/etc/salt/minion.d/testlocal.conf [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server.sls [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/configure.sls [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/files/exports [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/install.sls [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/sudoers [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/sudoers_jenkins-in [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/sudoers_virl [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/ttyS0 [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/virl/id_rsa_virl [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/virl/id_rsa_virl.pub [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/virl/ssh_environment [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/files/virl/virl-bootstrap-wrapper [new file with mode: 0644]
resources/tools/testbed-setup/playbooks/reboot.yaml [new file with mode: 0644]
resources/tools/testbed-setup/syslinux.cfg [new file with mode: 0644]

diff --git a/resources/tools/testbed-setup/README.md b/resources/tools/testbed-setup/README.md
new file mode 100644 (file)
index 0000000..a83fe14
--- /dev/null
@@ -0,0 +1,147 @@
+# Testbed Setup
+
+## Introduction
+
+This directory contains the *high-level* process to set up a hardware
+machine as a CSIT testbed, either for use as a physical testbed host or
+as a VIRL server.
+
+Code in this directory is NOT executed as part of a regular CSIT test case
+but is stored here merely for archiving and documentation purposes.
+
+
+## Setting up a hardware host
+
+The documentation below consists of brief bullet points only and assumes an
+understanding of PXE boot and Ansible.
+
+This process is specific to the LF lab; both the examples given here and the
+associated code assume that they are run in the LF environment. If run
+elsewhere, the IP addresses and other parameters will need to be changed.
+
+The process below assumes that there is a host used for bootstrapping (referred
+to as the "PXE bootstrap server" below), and that the directory containing this
+README is available on the PXE bootstrap server in ~testuser/host-setup.
+
+### Prepare the PXE bootstrap server (one-time)
+
+  - `sudo apt-get install isc-dhcp-server tftpd-hpa nginx-light ansible`
+  - `cd ~testuser/host-setup`
+  - `wget 'http://releases.ubuntu.com/14.04/ubuntu-14.04.4-server-amd64.iso'`
+  - `sudo mkdir /mnt/cdrom`
+  - `sudo mount -o loop ubuntu-14.04.4-server-amd64.iso /mnt/cdrom/`
+  - `sudo cp -r /mnt/cdrom/install/netboot/* /var/lib/tftpboot/`
+  - `sudo mkdir /usr/share/nginx/html/ubuntu`
+  - `sudo cp -r /mnt/cdrom/* /usr/share/nginx/html/ubuntu/`
+  - `sudo umount /mnt/cdrom`
+  - edit ks.cfg and replace the IP address with that of your PXE bootstrap server
+  - `sudo cp ks.cfg /usr/share/nginx/html/ks.cfg`
+  - edit boot-screens_txt.cfg and replace the IP address with that of your PXE bootstrap server
+  - `sudo cp boot-screens_txt.cfg /var/lib/tftpboot/ubuntu-installer/amd64/boot-screens/txt.cfg`
+  - `sudo cp syslinux.cfg /var/lib/tftpboot/ubuntu-installer/amd64/boot-screens/syslinux.cfg`
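+
+Neither the DHCP server nor the TFTP server configuration is included in this
+directory. A minimal sketch of the relevant `dhcpd.conf` fragment, assuming
+ISC dhcpd serving the LF management network and the netboot files copied above
+(adapt the addresses to your environment):
+
+~~~
+subnet 10.30.51.0 netmask 255.255.255.0 {
+  option routers 10.30.51.1;
+  # PXE bootstrap server: serves pxelinux.0 via tftpd-hpa
+  next-server 10.30.51.28;
+  filename "pxelinux.0";
+}
+~~~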
+
+### New testbed host - manual preparation
+
+- set CIMC address
+- set CIMC username, password and hostname
+
+### Bootstrap the host
+
+From the PXE bootstrap server:
+
+  - `cd ~testuser/host-setup/cimc`
+  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -i`
+  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -s '<biosVfIntelHyperThreadingTech rn="Intel-HyperThreading-Tech" vpIntelHyperThreadingTech="disabled" />' -s '<biosVfEnhancedIntelSpeedStepTech rn="Enhanced-Intel-SpeedStep-Tech" vpEnhancedIntelSpeedStepTech="disabled" />' -s '<biosVfIntelTurboBoostTech rn="Intel-Turbo-Boost-Tech" vpIntelTurboBoostTech="disabled" />'`
+  - add the host's LOM MAC address to the DHCP server configuration (see the example host entry below)
+  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -pxe`
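+
+The per-host DHCP entry is not part of this repository; a sketch of what it
+might look like, again assuming ISC dhcpd (the MAC address below is a
+placeholder for the LOM MAC address reported by `cimc.py -i`, and the IP
+address is the one you will later list in the Ansible inventory):
+
+~~~
+host t1-sut1 {
+  hardware ethernet 00:11:22:33:44:55;
+  fixed-address 10.30.51.17;
+}
+~~~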
+
+While Ubuntu install is running:
+
+  - create RAID array. Reboot if needed.
+      - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d --wipe`
+      - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -r -rl 1 -rs <disk size> -rd '[1,2]'`
+        Alternatively, create the RAID array manually.
+
+  - `./cimc.py -u admin -p Cisco1234 $CIMC_ADDRESS -d -hdd`
+
+When installation is finished:
+
+  - `ssh-copy-id <>`
+  - `cd ~testuser/host-setup/playbooks`
+  - edit /etc/ansible/hosts; add the hosts you are installing. *REMOVE ANY HOSTS YOU ARE NOT CURRENTLY INSTALLING*.
+
+    Example for physical testbed hosts:
+    ~~~
+    [tg]
+    10.30.51.16 hostname=t1-tg1
+
+    [sut]
+    10.30.51.17 hostname=t1-sut1
+    10.30.51.18 hostname=t1-sut2
+    ~~~
+
+    Example for VIRL hosts -- use the "virl" group and specify the flat network start and end addresses:
+
+    ~~~
+    [virl]
+    10.30.51.28 hostname=t4-virl1 virl_l2_start=10.30.51.31 virl_l2_end=10.30.51.105
+    ~~~
+
+  - `ansible-playbook --ask-sudo-pass 01-host-setup.yaml`
+  - `ansible-playbook reboot.yaml`
+
+For non-VIRL hosts, stop here.
+
+
+### VIRL installation
+
+After the host has rebooted:
+
+  - `ansible-playbook 02-virl-bootstrap.yaml`
+  - ssh to host
+      - `sudo -s`
+      - `cd virl-bootstrap`
+      - `./virl-bootstrap-wrapper`
+
+        This command will error out when run the first time, as the VIRL host is not yet licensed.
+
+        Make sure we contact all three VIRL SALT masters:
+
+      - `for a in 1 2 4 ; do sudo salt-call --master us-${a}.virl.info test.ping ; done`
+
+      - Contact the VIRL team, provide the hostname and domain (linuxfoundation.org), and ask them
+        to accept the key
+
+      - After the key has been accepted, verify that connectivity with the SALT master is now OK:
+
+        `for a in 1 2 4 ; do sudo salt-call --master us-${a}.virl.info test.ping ; done`
+
+      - `./virl-bootstrap-wrapper`
+      - `reboot`
+
+After reboot, ssh to host again
+  - as VIRL user, NOT AS ROOT:
+     - `vinstall all`
+     - `sudo reboot`
+
+After reboot, ssh to host again
+  - as VIRL user:
+      - `sudo salt-call state.sls virl.routervms.all`
+      - `sudo salt-call state.sls virl.vmm.vmmall`
+
+Back on the PXE bootstrap server:
+
+  - obtain the current server disk image and place it into
+    `files/virl-server-image/` as `server.qcow2`
+
+    TO-DO: Need to find a place to store this image
+
+  - `ansible-playbook 03-virl-post-install.yaml`
+
+  - Run the following command ONLY ONCE. Otherwise it will create
+    duplicates of the VIRL disk image:
+
+    `ansible-playbook 04-disk-image.yaml`
+
+The VIRL host should now be operational. Test, and when ready, create a ~jenkins-in/status file with the appropriate status.
diff --git a/resources/tools/testbed-setup/boot-screens_txt.cfg b/resources/tools/testbed-setup/boot-screens_txt.cfg
new file mode 100644 (file)
index 0000000..8736116
--- /dev/null
@@ -0,0 +1,11 @@
+default install
+label install
+       menu label ^Install
+       menu default
+       kernel ubuntu-installer/amd64/linux
+       append ks=http://10.30.51.28/ks.cfg ksdevice=bootif initrd=ubuntu-installer/amd64/initrd.gz console=tty0 console=ttyS0,115200n8 --- quiet 
+       IPAPPEND 2
+label cli
+       menu label ^Command-line install
+       kernel ubuntu-installer/amd64/linux
+       append tasks=standard pkgsel/language-pack-patterns= pkgsel/install-language-support=false vga=788 initrd=ubuntu-installer/amd64/initrd.gz --- quiet 
diff --git a/resources/tools/testbed-setup/cimc/cimc.py b/resources/tools/testbed-setup/cimc/cimc.py
new file mode 100755 (executable)
index 0000000..2e0fc42
--- /dev/null
@@ -0,0 +1,95 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cimclib
+import argparse
+
+parser = argparse.ArgumentParser()
+parser.add_argument("ip", help="CIMC IP address")
+parser.add_argument("-u", "--username", help="CIMC username (admin)",
+                    default="admin")
+parser.add_argument("-p", "--password", help="CIMC password (cisco123)",
+                    default="cisco123")
+parser.add_argument("-d", "--debug", help="Enable debugging", action="count",
+                    default=0)
+
+parser.add_argument("-i", "--initialize",
+                    help="Initialize args.ip: Power-Off, reset BIOS defaults, Enable console redir, get LOM MAC addr",
+                    action='store_true')
+parser.add_argument("-s", "--set",
+                    help="Set specific BIOS settings", action='append')
+parser.add_argument("--wipe", help="Delete all virtual drives",
+                    action='store_true')
+parser.add_argument("-r", "--raid", help="Create RAID array",
+                    action='store_true')
+parser.add_argument("-rl", "--raid-level", help="RAID level", default='10')
+parser.add_argument("-rs", "--raid-size", help="RAID size", default=3*571250)
+parser.add_argument("-rd", "--raid-disks",
+                    help="RAID disks ('[1,2][3,4][5,6]')",
+                    default='[1,2][3,4][5,6]')
+parser.add_argument("-pxe", "--boot-pxe", help="Reboot using PXE",
+                    action='store_true')
+parser.add_argument("-hdd", "--boot-hdd", help="Boot using HDD on next boot",
+                    action='store_true')
+parser.add_argument("-poff", "--power-off", help="Power Off",
+                    action='store_true')
+parser.add_argument("-pon", "--power-on", help="Power On", action='store_true')
+parser.add_argument("-m", "--mac-table",
+                    help="Show interface MAC address table",
+                    action='store_true')
+
+args = parser.parse_args()
+
+cookie = cimclib.login(args.ip, args.username, args.password)
+
+if args.wipe:
+    cimclib.deleteAllVirtualDrives(args.ip, cookie, args.debug)
+
+if args.raid:
+    cimclib.createRaid(args.ip, cookie, "raid-virl", args.raid_level, args.raid_size, args.raid_disks, args.debug)
+
+if args.initialize:
+    cimclib.powerOff(args.ip, cookie)
+    cimclib.restoreBiosDefaultSettings(args.ip, cookie, args.debug)
+    cimclib.enableConsoleRedir(args.ip, cookie, args.debug)
+    cimclib.powerOn(args.ip, cookie, args.debug)
+    cimclib.bootIntoUefi(args.ip, cookie, args.debug)
+    lom_mac = cimclib.getLOMMacAddress(args.ip, cookie, args.debug)
+    print "Host {} LOM MAC address: {}".format(args.ip, lom_mac)
+
+if args.set:
+    cimclib.setBiosSettings(args.ip, cookie, args.set, args.debug)
+
+if args.boot_pxe:
+    cimclib.bootPXE(args.ip, cookie, args.debug)
+
+if args.boot_hdd:
+    cimclib.bootHDDPXE(args.ip, cookie, args.debug)
+
+if args.power_off:
+    cimclib.powerOff(args.ip, cookie, args.debug)
+
+if args.power_on:
+    cimclib.powerOn(args.ip, cookie, args.debug)
+
+if args.mac_table:
+    maclist = cimclib.getMacAddresses(args.ip, cookie, args.debug)
+
+    for k in sorted(maclist.keys()):
+        print "{}:".format(k)
+        for p in sorted(maclist[k].keys()):
+            print "  {} - {}".format(p, maclist[k][p])
+
+cimclib.logout(args.ip, cookie)
diff --git a/resources/tools/testbed-setup/cimc/cimclib.py b/resources/tools/testbed-setup/cimc/cimclib.py
new file mode 100755 (executable)
index 0000000..f91832e
--- /dev/null
@@ -0,0 +1,414 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Cisco and/or its affiliates.
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at:
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import requests
+import xml.etree.ElementTree as et
+import re
+
+from requests.packages.urllib3.exceptions import InsecureRequestWarning
+requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
+
+BASEDN = "sys/rack-unit-1"
+
+###
+### Helper function - iterate through a list in fixed-size chunks
+###
+def chunks(lst, chunksize):
+    """Yield successive chunksize-sized chunks from lst."""
+    for i in range(0, len(lst), chunksize):
+        yield lst[i:i+chunksize]
+
+###
+### Helper function: Perform an XML request to CIMC
+###
+def xml_req(ip, xml, debug=False):
+    if debug:
+        print "DEBUG: XML-REQUEST:"
+        et.dump(xml)
+    headers = {'Content-Type': 'text/xml'}
+    req = requests.post('https://' + ip + '/nuova', headers=headers,
+                        verify=False, data=et.tostring(xml))
+    resp = et.fromstring(req.content)
+    if debug:
+        print "DEBUG: XML-RESPONSE:"
+        et.dump(resp)
+
+    if resp.tag == 'error':
+        if debug:
+            print "XML response contains error:"
+            et.dump(resp)
+        raise RuntimeError('XML response contains error')
+    return resp
+
+###
+### Authenticate (Log-In) to CIMC and obtain a cookie
+###
+def login(ip, username, password):
+    reqxml = et.Element('aaaLogin',
+                        attrib={'inName':username, 'inPassword':password})
+    respxml = xml_req(ip, reqxml)
+    try:
+        cookie = respxml.attrib['outCookie']
+    except:
+        print "Cannot find cookie in CIMC server response."
+        print "CIMC server output:"
+        et.dump(respxml)
+        raise
+
+    return cookie
+
+###
+### Log out from CIMC.
+###
+### Note: There is a maximum session limit in CIMC and sessions take a long
+### time (10 minutes) to time out. Therefore, calling this function during
+### testing is essential, otherwise one will quickly exhaust all available
+### sessions.
+###
+def logout(ip, cookie):
+    reqxml = et.Element('aaaLogout', attrib={'cookie': cookie,
+                                             'inCookie': cookie})
+    xml_req(ip, reqxml)
+
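+###
+### Illustrative usage sketch (not exercised anywhere in this repository; the
+### IP address and credentials below are placeholders). Always pair login()
+### with logout(), otherwise the CIMC session limit is quickly exhausted:
+###
+###     cookie = login('10.30.51.100', 'admin', 'Cisco1234')
+###     try:
+###         powerOn('10.30.51.100', cookie)
+###     finally:
+###         logout('10.30.51.100', cookie)
+###
+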
+###
+### Power off the host
+###
+def powerOff(ip, cookie, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': BASEDN})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    et.SubElement(inconfig, 'computeRackUnit',
+                  attrib={'adminPower': 'down', 'dn': BASEDN})
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Power on the host
+###
+def powerOn(ip, cookie, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': BASEDN})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    et.SubElement(inconfig, 'computeRackUnit',
+                  attrib={'adminPower': 'up', 'dn': BASEDN})
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Restore BIOS to default settings
+###
+def restoreBiosDefaultSettings(ip, cookie, debug=False):
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'biosPlatformDefaults'})
+    respxml = xml_req(ip, reqxml, debug)
+
+    configs = respxml.find('outConfigs')
+    defaults = configs.find('biosPlatformDefaults')
+
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'dn': "{}/bios/bios-settings".format(BASEDN)})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    biosset = et.SubElement(inconfig, 'biosSettings')
+    biosset.extend(defaults)
+
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Apply specified BIOS settings.
+###
+### These must be a list of strings in XML format. Not currently very
+### user friendly. Format can either be obtained from CIMC
+### documentation, or by setting them manually and then fetching
+### BIOS settings via CIMC XML API.
+###
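+### Example setting string (taken from the README in this directory):
+###
+###   '<biosVfIntelHyperThreadingTech rn="Intel-HyperThreading-Tech"
+###    vpIntelHyperThreadingTech="disabled" />'
+###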
+def setBiosSettings(ip, cookie, settings, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'dn': "{}/bios/bios-settings".format(BASEDN)})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    biosset = et.SubElement(inconfig, 'biosSettings')
+    print "Applying settings:"
+    print settings
+    for s in settings:
+        x = et.fromstring(s)
+        et.dump(x)
+        biosset.append(et.fromstring(s))
+
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Delete any existing virtual drives
+###
+### WARNING: THIS WILL ERASE ALL DATA ON ALL DISKS, WITHOUT ANY CONFIRMATION
+### QUESTION.
+###
+### The server must be POWERED ON for this to succeed.
+###
+def deleteAllVirtualDrives(ip, cookie, debug=False):
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'storageController'})
+    respxml = xml_req(ip, reqxml, debug)
+
+    configs = respxml.find('outConfigs')
+    for sc in configs.iter('storageController'):
+        if debug:
+            print "DEBUG: SC DN {} ID {}".format(sc.attrib['dn'],
+                                                 sc.attrib['id'])
+        reqxml = et.Element('configConfMo',
+                            attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                    'dn': sc.attrib['dn']})
+        inconfig = et.SubElement(reqxml, 'inConfig')
+        et.SubElement(inconfig, 'storageController',
+                      attrib={'adminAction': 'delete-all-vds-reset-pds',
+                              'dn': sc.attrib['dn']})
+        xml_req(ip, reqxml, debug)
+
+###
+### Create a single RAID-10 across all drives.
+###
+### The server must be POWERED ON for this to succeed.
+###
+def createRaid10_all(ip, cookie, debug=False):
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'storageController'})
+    respxml = xml_req(ip, reqxml, debug)
+
+    configs = respxml.find('outConfigs')
+    for sc in configs.iter('storageController'):
+        if debug:
+            print "DEBUG: SC DN {} ID {}".format(sc.attrib['dn'],
+                                                 sc.attrib['id'])
+        #
+        # Find disk size and number of disks
+        #
+        disks = []
+        total_size = 0
+        for pd in sc.iter('storageLocalDisk'):
+            if debug:
+                print "DEBUG: PD {} size {}".format(pd.attrib['id'],
+                                                    pd.attrib['coercedSize'])
+            disks.append(pd.attrib['id'])
+            total_size += int(pd.attrib['coercedSize'].split(' ')[0])
+
+        #
+        # Create a RAID10 array of all available disks, as in:
+        # [1,2][3,4][5,6][7,8][9,10][11,12][13,14][15,16][17,18]
+        #
+        raid_size = total_size/2
+        raid_span = ''
+        for p in list(chunks(disks, 2)):
+            raid_span += "[{},{}]".format(p[0], p[1])
+
+        reqxml = et.Element('configConfMo',
+                            attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                    'dn': sc.attrib['dn']})
+        inconfig = et.SubElement(reqxml, 'inConfig')
+        et.SubElement(inconfig,
+                      'storageVirtualDriveCreatorUsingUnusedPhysicalDrive',
+                      attrib={'virtualDriveName': 'raid10-all',
+                              'size': str(raid_size)+' MB',
+                              'raidLevel': '10', 'driveGroup': raid_span,
+                              'adminState': 'trigger'})
+
+        xml_req(ip, reqxml, debug)
+
+###
+### Create a single RAID array from the unused drives provided.
+###
+### The server must be POWERED ON for this to succeed.
+###
+def createRaid(ip, cookie, name, raidlevel, size, drives, debug=False):
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'storageController'})
+    respxml = xml_req(ip, reqxml, debug)
+
+    configs = respxml.find('outConfigs')
+    for sc in configs.iter('storageController'):
+        if debug:
+            print "DEBUG: SC DN {} ID {}".format(sc.attrib['dn'],
+                                                 sc.attrib['id'])
+
+        reqxml = et.Element('configConfMo',
+                            attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                    'dn': sc.attrib['dn']})
+        inconfig = et.SubElement(reqxml, 'inConfig')
+        et.SubElement(inconfig,
+                      'storageVirtualDriveCreatorUsingUnusedPhysicalDrive',
+                      attrib={'virtualDriveName': name,
+                              'size': str(size)+' MB',
+                              'raidLevel': raidlevel,
+                              'driveGroup': drives,
+                              'adminState': 'trigger'})
+
+        xml_req(ip, reqxml, debug)
+
+###
+### Enable Serial-Over-LAN (SOL) console and redirect BIOS output to
+### serial console
+###
+def enableConsoleRedir(ip, cookie, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': "{}/bios/bios-settings".format(BASEDN)})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    bs = et.SubElement(inconfig, 'biosSettings',
+                       attrib={'dn': "{}/bios/bios-settings".format(BASEDN)})
+    et.SubElement(bs,
+                  'biosVfConsoleRedirection',
+                  attrib={'vpConsoleRedirection': 'com-0',
+                          'vpBaudRate': '115200'})
+    respxml = xml_req(ip, reqxml, debug)
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': BASEDN+'/sol-if'})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    et.SubElement(inconfig, 'solIf',
+                  attrib={'dn': BASEDN+'/sol-if', 'adminState': 'enable',
+                          'speed': '115200', 'comport': 'com0'})
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Boot into UEFI bootloader (we may use this to "park" the host in
+### powered-on state)
+###
+def bootIntoUefi(ip, cookie, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': BASEDN+'/boot-policy'})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    bootDef = et.SubElement(inconfig, 'lsbootDef',
+                            attrib={'dn': BASEDN+'/boot-policy',
+                                    'rebootOnUpdate': 'yes'})
+    et.SubElement(bootDef, 'lsbootEfi',
+                  attrib={'rn': 'efi-read-only', 'order': '1',
+                          'type': 'efi'})
+
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Boot via PXE. Reboot immediately.
+###
+def bootPXE(ip, cookie, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': BASEDN+'/boot-policy'})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    bootDef = et.SubElement(inconfig, 'lsbootDef',
+                            attrib={'dn': BASEDN+'/boot-policy',
+                                    'rebootOnUpdate': 'yes'})
+    et.SubElement(bootDef, 'lsbootLan',
+                  attrib={'rn': 'lan-read-only', 'order': '1',
+                          'type': 'lan', 'prot': 'pxe'})
+
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+
+###
+### Boot via Local HDD first, then via PXE. Do not reboot immediately.
+###
+def bootHDDPXE(ip, cookie, debug=False):
+    reqxml = et.Element('configConfMo',
+                        attrib={'cookie': cookie, 'inHierarchical': 'false',
+                                'dn': BASEDN+'/boot-policy'})
+    inconfig = et.SubElement(reqxml, 'inConfig')
+    bootDef = et.SubElement(inconfig, 'lsbootDef',
+                            attrib={'dn': BASEDN+'/boot-policy',
+                                    'rebootOnUpdate': 'no'})
+    storage = et.SubElement(bootDef, 'lsbootStorage',
+                            attrib={'rn': 'storage-read-write',
+                                    'access': 'read-write',
+                                    'order': '1', 'type': 'storage'})
+    et.SubElement(storage, 'lsbootLocalStorage',
+                  attrib={'rn': 'local-storage'})
+    et.SubElement(bootDef, 'lsbootLan',
+                  attrib={'rn': 'lan-read-only', 'order': '2',
+                          'type': 'lan', 'prot': 'pxe'})
+
+    respxml = xml_req(ip, reqxml, debug)
+    return respxml
+
+###
+### Return LOM port 1 MAC address
+###
+def getLOMMacAddress(ip, cookie, debug=False):
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'networkAdapterUnit'})
+    respxml = xml_req(ip, reqxml, debug)
+    reqxml = et.Element('configResolveDn',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'dn': BASEDN+'/network-adapter-L/eth-1'})
+    respxml = xml_req(ip, reqxml, debug)
+
+    oc = respxml.find('outConfig')
+    netw = oc.find('networkAdapterEthIf')
+    if debug:
+        print "DEBUG: MAC address is {}".format(netw.get('mac'))
+    return netw.get('mac')
+
+###
+### Return all port MAC addresses
+###
+def getMacAddresses(ip, cookie, debug=False):
+    maclist = {}
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'networkAdapterUnit'})
+    respxml = xml_req(ip, reqxml, debug)
+    oc = respxml.find('outConfigs')
+    for adapter in oc.iter('networkAdapterUnit'):
+        if debug:
+            print "DEBUG: ADAPTER SLOT {} MODEL {}".format(adapter.attrib['slot'],
+                                                           adapter.attrib['model'])
+        slot = adapter.attrib['slot']
+        maclist[slot] = {}
+        for port in adapter.iter('networkAdapterEthIf'):
+            if debug:
+                print "DEBUG:    SLOT {} PORT {} MAC {}".format(slot,
+                                                                port.attrib['id'],
+                                                                port.attrib['mac'])
+            maclist[slot][port.attrib['id']] = port.attrib['mac'].lower()
+
+    reqxml = et.Element('configResolveClass',
+                        attrib={'cookie': cookie, 'inHierarchical': 'true',
+                                'classId': 'adaptorUnit'})
+    respxml = xml_req(ip, reqxml, debug)
+    oc = respxml.find('outConfigs')
+    for adapter in oc.iter('adaptorUnit'):
+        if debug:
+            print "DEBUG: VIC ADAPTER SLOT {} MODEL {}".format(adapter.attrib['pciSlot'],
+                                                               adapter.attrib['model'])
+        slot = adapter.attrib['pciSlot']
+        maclist[slot] = {}
+        for port in adapter.iter('adaptorHostEthIf'):
+            portnum = int(re.sub('eth([0-9]+)', '\\1', port.attrib['name']))+1
+            if debug:
+                print "DEBUG:    VIC SLOT {} PORT {} MAC {}".format(slot,
+                                                                    portnum,
+                                                                    port.attrib['mac'])
+            maclist[slot][portnum] = port.attrib['mac'].lower()
+
+    return maclist
diff --git a/resources/tools/testbed-setup/ks.cfg b/resources/tools/testbed-setup/ks.cfg
new file mode 100644 (file)
index 0000000..6759b1f
--- /dev/null
@@ -0,0 +1,81 @@
+#
+#Generic Kickstart template for Ubuntu
+#Platform: x86 and x86-64
+#
+
+#System language
+lang en_US
+
+#Language modules to install
+langsupport en_US
+
+#System keyboard
+keyboard us
+
+#System mouse
+mouse
+
+#System timezone
+timezone America/Los_Angeles
+
+#Root password
+rootpw --disabled
+
+#Initial user (user with sudo capabilities)
+user testuser --fullname "Test User" --password Cisco1234
+
+#Reboot after installation
+reboot
+
+#Use text mode install
+text
+
+#Install OS instead of upgrade
+install
+
+#Installation media
+#cdrom
+#nfs --server=server.com --dir=/path/to/ubuntu/
+url --url http://10.30.51.28/ubuntu
+
+#System bootloader configuration
+bootloader --location=mbr
+
+#Clear the Master Boot Record
+zerombr yes
+
+#Partition clearing information
+clearpart --all --initlabel
+
+#Basic disk partition
+part /boot --fstype ext4 --size 256 --asprimary
+part / --fstype ext4 --size 1 --grow --asprimary
+part swap --size 1024
+
+#Advanced partition
+#part /boot --fstype=ext4 --size=500 --asprimary
+#part pv.aQcByA-UM0N-siuB-Y96L-rmd3-n6vz-NMo8Vr --grow --size=1
+#volgroup vg_mygroup --pesize=4096 pv.aQcByA-UM0N-siuB-Y96L-rmd3-n6vz-NMo8Vr
+#logvol / --fstype=ext4 --name=lv_root --vgname=vg_mygroup --grow --size=10240 --maxsize=20480
+#logvol swap --name=lv_swap --vgname=vg_mygroup --grow --size=1024 --maxsize=8192
+
+#System authorization information
+auth  --useshadow  --enablemd5
+
+#Network information
+network --bootproto=dhcp
+
+#Firewall configuration
+firewall --disabled --ssh
+
+#Do not configure the X Window System
+skipx
+
+###
+
+preseed live-installer/net-image string http://10.30.51.28/ubuntu/install/filesystem.squashfs
+preseed user-setup/allow-password-weak boolean true
+
+
+%packages
+openssh-server
diff --git a/resources/tools/testbed-setup/playbooks/01-host-setup.yaml b/resources/tools/testbed-setup/playbooks/01-host-setup.yaml
new file mode 100644 (file)
index 0000000..bc7eb86
--- /dev/null
@@ -0,0 +1,79 @@
+---
+- hosts: all
+  remote_user: testuser
+  sudo: yes
+  tasks:
+  - name: copy sudoers file
+    copy: src=files/sudoers dest=/etc/sudoers.d/testuser owner=root group=root mode=660
+  - name: copy hosts file
+    template: src=files/hosts dest=/etc/hosts owner=root group=root mode=644
+  - name: copy hostname file
+    template: src=files/hostname dest=/etc/hostname owner=root group=root mode=644
+  - name: interfaces file
+    template: src=files/interfaces dest=/etc/network/interfaces owner=root group=root mode=644
+  - name: copy ttyS0 file
+    template: src=files/ttyS0 dest=/etc/init/ttyS0.conf owner=root group=root mode=644
+  - name: start ttyS0
+    service: name=ttyS0 state=started
+  - name: copy grub file
+    template: src=files/grub dest=/etc/default/grub owner=root group=root mode=644
+  - name: update grub
+    command: update-grub
+  - name: copy apt sources file
+    template: src=files/apt-sources.list dest=/etc/apt/sources.list
+  - name: update repositories
+    apt: update_cache=yes
+  - name: install git
+    apt: name=git state=present
+  - name: install crudini
+    apt: name=crudini state=present
+  - name: install expect
+    apt: name=expect state=present
+- hosts: virl
+  remote_user: testuser
+  sudo: yes
+  tasks:
+  - name: Add VIRL user
+    user: name=virl shell=/bin/bash comment="VIRL user" password="$6$mJPlK5FKLar6$xxXPP0LRhC7T1yhHtym18Z3nKZweTtfTxzi1WpvVHJ0U/Em1UWcHqfMhRApFp0rsY9jHRuL6FfeFbKPN..uDK."
+  - name: Add VIRL user to sudoers
+    copy: src=files/sudoers_virl dest=/etc/sudoers.d/virl owner=root group=root mode=660
+  - name: Set VIRL user authorized key
+    authorized_key: user=virl key="{{ lookup('file', '/home/testuser/.ssh/id_rsa.pub') }}"
+  - name: install qemu (as a workaround)
+    apt: name=qemu-system-x86 state=present
+- hosts: tg:sut
+  remote_user: testuser
+  sudo: yes
+  tasks:
+  - name: Install cpufrequtils
+    apt: name=cpufrequtils state=present
+  - name: Set cpufrequtils defaults
+    copy: src=files/cpufrequtils dest=/etc/default/cpufrequtils owner=root group=root mode=0644
+  - name: Disable IRQ load balancing
+    copy: src=files/irqbalance dest=/etc/default/irqbalance owner=root group=root mode=0644
+  - name: Disable ondemand
+    shell: update-rc.d ondemand disable
+  - name: Install 4.2 kernel
+    apt: name=linux-generic-lts-xenial state=present
+- hosts: tg
+  remote_user: testuser
+  sudo: yes
+  tasks:
+  - name: Install zlib1g-dev
+    apt: name=zlib1g-dev state=present
+  - name: Install unzip
+    apt: name=unzip state=present
+  - name: pstate parameter
+    lineinfile: dest=/etc/default/grub regexp=^GRUB_CMDLINE_LINUX= line=GRUB_CMDLINE_LINUX="\"intel_pstate=disable\""
+  - name: update grub
+    command: update-grub
+- hosts: sut
+  remote_user: testuser
+  sudo: yes
+  tasks:
+  - name: Install dkms
+    apt: name=dkms state=present
+  - name: isolcpus and pstate parameter
+    lineinfile: dest=/etc/default/grub regexp=^GRUB_CMDLINE_LINUX= line=GRUB_CMDLINE_LINUX="\"isolcpus={{ isolcpus }} intel_pstate=disable\""
+  - name: update grub
+    command: update-grub
diff --git a/resources/tools/testbed-setup/playbooks/02-virl-bootstrap.yaml b/resources/tools/testbed-setup/playbooks/02-virl-bootstrap.yaml
new file mode 100644 (file)
index 0000000..037f07a
--- /dev/null
@@ -0,0 +1,109 @@
+---
+- hosts: virl
+  remote_user: virl
+  tasks:
+  - name: install virl-bootstrap
+    git: repo=https://github.com/VIRL-Open/virl-bootstrap.git
+         dest=/home/virl/virl-bootstrap
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=hostname value={{ hostname }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=domain_name value=linuxfoundation.org
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=using_dhcp_on_the_public_port value=False
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=public_port value={{ ansible_default_ipv4["interface"] }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=Static_IP value={{ ansible_default_ipv4["address"] }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=public_network value={{ ansible_default_ipv4["network"] }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=public_netmask value={{ ansible_default_ipv4["netmask"] }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=public_gateway value={{ ansible_default_ipv4["gateway"] }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=proxy value=False
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=ntp_server value=pool.ntp.org
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=first_nameserver value=199.204.44.24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=second_nameserver value=199.204.47.54
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=salt_master value=us-1.virl.info,us-2.virl.info,us-4.virl.info
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=salt_id value={{ hostname }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=salt_domain value=linuxfoundation.org
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=salt_masterless value=false
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_port value={{ ansible_default_ipv4["interface"] }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_network value=10.30.51.0/24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_mask value=255.255.255.0
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_network_gateway value=10.30.51.1
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_start_address value={{ virl_l2_start }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_end_address value={{ virl_l2_end }}
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_address value={{ ansible_default_ipv4["address"] }}/24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=first_flat_nameserver value=199.204.44.24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=second_flat_nameserver value=199.204.47.54
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_port2_enabled value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_port2 value=dummy0
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_network2 value=172.16.2.0/24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_mask2 value=255.255.255.0
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_network_gateway2 value=172.16.2.1
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_start_address2 value=172.16.2.50
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_end_address2 value=172.16.2.253
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l2_address2 value=172.16.2.254/24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=first_flat2_nameserver value=199.204.44.24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=second_flat2_nameserver value=199.204.47.54
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_port value=dummy1
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_network value=172.16.3.0/24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_mask value=255.255.255.0
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_network_gateway value=172.16.3.1
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_floating_start_address value=172.16.3.50
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_floating_end_address value=172.16.3.253
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=l3_address value=172.16.3.254/24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=first_snat_nameserver value=199.204.44.24
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=second_snat_nameserver value=199.204.47.54
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=ramdisk value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=ank value=19401
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=ank_live value=19402
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=virl_webservices value=19399
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=virl_user_management value=19400
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=virl_apache_port value=80
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=virl_webmux value=19403
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=Start_of_serial_port_range value=17000
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=End_of_serial_port_range value=18000
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=serial_port value=19406
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vnc_port value=19407
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=location_region value=US
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vnc value=False
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=guest_account value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=user_list value=tb4-virl:Cisco1234
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=uwmadmin_password value=Cisco1234
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=password value=password
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=mysql_password value=password
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=keystone_service_token value=fkgjhsdflkjh
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=enable_cinder value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=cinder_file value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=cinder_size value=20000
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=cinder_location value=/var/lib/cinder/cinder-volumes.lvm
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=dummy_int value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=this_node_is_the_controller value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_controller_hostname value=controller
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_controller_IP value=172.16.10.250
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_port value=dummy2
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_IP value=172.16.10.250
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_network value=172.16.10.0
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_netmask value=255.255.255.0
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=internalnet_gateway value=172.16.10.1
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=iosv value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=csr1000v value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=iosxrv432 value=False
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=iosxrv52 value=False
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=iosxrv value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=nxosv value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vpagent value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=iosvl2 value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=asav value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=lxc_server value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=lxc_iperf value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=lxc_routem value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=lxc_ostinato value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=server value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vmm_mac value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vmm_win32 value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vmm_win64 value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=vmm_linux value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=virl_clients value=True
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=ram_overcommit value=2
+  - ini_file: dest=/home/virl/virl-bootstrap/vsettings.ini section=DEFAULT option=web_editor value=True
+  - name: copy vsetting file to /etc
+    sudo: yes
+    shell: cp /home/virl/virl-bootstrap/vsettings.ini /etc/virl.ini
+  - name: copy vinstall bootstrap wrapper script
+    template: src=files/virl/virl-bootstrap-wrapper dest=/home/virl/virl-bootstrap/virl-bootstrap-wrapper owner=virl group=virl mode=775
diff --git a/resources/tools/testbed-setup/playbooks/03-virl-post-install.yaml b/resources/tools/testbed-setup/playbooks/03-virl-post-install.yaml
new file mode 100644 (file)
index 0000000..59bcfe9
--- /dev/null
@@ -0,0 +1,79 @@
+---
+- hosts: virl
+  remote_user: testuser
+  sudo: yes
+  tasks:
+  - name: Add jenkins-in user
+    user: name=jenkins-in shell=/bin/bash comment="Jenkins user"
+  - name: Add jenkins-in user to sudoers
+    copy: src=files/sudoers_jenkins-in dest=/etc/sudoers.d/jenkins-in owner=root group=root mode=660
+  - name: Set Jenkins user authorized key
+    authorized_key: user=jenkins-in key="{{ lookup('file', '/home/testuser/.ssh/id_rsa.pub') }}"
+  - name: copy salt states for dnsmasq and nfs
+    synchronize: src=files/salt/ dest=/
+  - name: apply NFS server salt state
+    shell: salt-call --local state.sls ckoester.nfs-server
+  - name: NFS symlink
+    shell: ln -s /nfs/scratch /scratch
+    args:
+      creates: /scratch
+  - name: update Nova CPU mode
+    ini_file: dest=/etc/nova/nova.conf section=libvirt option=cpu_mode value=host-passthrough
+  - name: Restart nova-compute service
+    service: name=nova-compute state=restarted
+  - name: Permit SSH user environment
+    lineinfile: dest=/etc/ssh/sshd_config state=present regexp='PermitUserEnvironment.*' line='PermitUserEnvironment yes'
+  - name: Restart SSH daemon
+    service: name=ssh state=restarted
+- hosts: virl
+  remote_user: jenkins-in
+  tasks:
+  - name: clone csit git repository
+    git: repo=https://gerrit.fd.io/r/csit
+         dest=/home/jenkins-in/git/csit
+  - name: Link testcase-infra directory
+    command: ln -sf /home/jenkins-in/git/csit/resources/tools/virl /home/jenkins-in/testcase-infra
+    args:
+      creates: /home/jenkins-in/testcase-infra
+  - name: Create bin directory
+    file: path=/home/jenkins-in/bin state=directory mode=0755
+  - name: Link start-testcase executable
+    command: ln -sf /home/jenkins-in/testcase-infra/bin/start-testcase /home/jenkins-in/bin/start-testcase
+    args:
+      creates: /home/jenkins-in/bin/start-testcase
+  - name: Link stop-testcase executable
+    command: ln -sf /home/jenkins-in/testcase-infra/bin/stop-testcase /home/jenkins-in/bin/stop-testcase
+    args:
+      creates: /home/jenkins-in/bin/stop-testcase
+  - name: Link kill-idle-testcases executable
+    command: ln -sf /home/jenkins-in/testcase-infra/bin/kill-idle-testcases /home/jenkins-in/bin/kill-idle-testcases
+    args:
+      creates: /home/jenkins-in/bin/kill-idle-testcases
+  - name: Copy SSH private key
+    copy: src=files/virl/id_rsa_virl dest=/home/jenkins-in/.ssh/id_rsa_virl mode=600
+  - name: Copy SSH public key
+    copy: src=files/virl/id_rsa_virl.pub dest=/home/jenkins-in/.ssh/id_rsa_virl.pub mode=644
+  - name: Copy SSH environment
+    copy: src=files/virl/ssh_environment dest=/home/jenkins-in/.ssh/environment mode=644
+  - name: Add ~/bin to path
+    lineinfile: dest=/home/jenkins-in/.bashrc state=present line='PATH=${HOME}/bin:$PATH'
+  - name: Update own IP address in start script
+    shell: sed -i /home/jenkins-in/testcase-infra/bin/start-testcase -e 's/10.30.51.28/{{ ansible_default_ipv4["address"] }}/'
+  - name: Add authorized key
+    lineinfile: dest=/home/jenkins-in/.ssh/authorized_keys line='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQD4gderzsZyoxHULjuvPHoJuKnkaGrykqtuoqs/k1/jUdxitPoY5eX2cVYqww7MiUif7zLsiXbt5mHtyxAYCluDxAuIcy1xgSZY3MpmmSqDie4A/FdVfCUqCcpf3TZKsRP0an1MNrKIe0JFZV+uU889IDRQRdboGMs3+4cn5b9fOutpv71qwFVrTm9PZbqfQonrrN8Jp4Mz3XaZDpK22xwDAWhYOZ0eV6CJWquUgbYAHE6/HHMvd0zeJKaWZCXO/1tOGOj6cjgoViHqbnCtmYCjmv/ir0IglzbUdWdOqQY5YkhnPonveV48lVKrmBipqgbDezAUQD8wOQ7HttpYpKgt jenkins-in@tb4-virl'
+# All of the below will fail if VIRL user/project already exist
+- hosts: virl
+  remote_user: virl
+  tasks:
+  - name: Create VIRL project
+    shell: virl_uwm_client project-create --name tb4-virl --enabled True --user-password Cisco1234
+    ignore_errors: true
+#  - name: Delete VIRL project user
+#    shell: virl_uwm_client user-delete --name tb4-virl
+#    ignore_errors: true
+#  - name: Recreate VIRL project user
+#    shell: virl_uwm_client user-create --name tb4-virl --role admin --project tb4-virl --set-password Cisco1234
+#    ignore_errors: true
+  - name: Create VPP flavor
+    shell: virl_uwm_client flavor-create --name vPP --ram 4096 --vcpus 2 --disk 0
+    ignore_errors: true
diff --git a/resources/tools/testbed-setup/playbooks/04-disk-image.yaml b/resources/tools/testbed-setup/playbooks/04-disk-image.yaml
new file mode 100644 (file)
index 0000000..884a8db
--- /dev/null
@@ -0,0 +1,11 @@
+---
+- hosts: virl
+  remote_user: virl
+  tasks:
+  - name: Create server image directory
+    file: path=/home/virl/virl-server-image state=directory mode=0755
+  - name: Copy server image
+    copy: src=files/virl-server-image/server.qcow2 dest=/home/virl/virl-server-image/server.qcow2 mode=644
+# TODO: Need to make this conditional, and find a good source for the image.
+  - name: Import server image into glance
+    shell: virl_uwm_client image-create --subtype server --version java-nested --release 14.04.03 --image-on-server /home/virl/virl-server-image/server.qcow2
diff --git a/resources/tools/testbed-setup/playbooks/05-ckoester.yaml b/resources/tools/testbed-setup/playbooks/05-ckoester.yaml
new file mode 100644 (file)
index 0000000..02d632e
--- /dev/null
@@ -0,0 +1,17 @@
+- hosts: virl
+  remote_user: jenkins-in
+  tasks:
+  - name: Copy some VPP packages across for testing
+    synchronize: src=/home/jenkins-in/vpp-pkgs dest=/home/jenkins-in/
+  - name: Install Carsten's public key
+    lineinfile: dest=/home/jenkins-in/.ssh/authorized_keys line='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiXwDJc8sFpMrn/pEtaT9Ug6BmkFDtJVOX1bCPZWyg650IUBgrIo9cEcpwJNI53p4roTsUmOcYA391jaC5Z9cH5JngUFH0OlyLGdbzlKREzD7PeKGGn7RQFDhZS+L7I+9ayaTn+RJi5EI6b/uWeP0tEvCrFDRJJPu3I9Ohgkbc7CfzeXuX57t9rMbYduvsDpOLQ6s14mSy6T7qGexyE5mQIkSZ4XfWxOuT1nDvMsIw7ej86pKXE/faqvKczoqMa9SUnxo27b4Oe62+KlGt0iXDCYvK9tiDOU8Mi/ZGOM1k2utZOVeD4r7b8rWFbX1aw35roPUPdFEH+ROUGPljrlJT cloud-user@test-ubuntu-1404'
+- hosts: virl
+  remote_user: virl
+  tasks:
+  - name: Install Carsten's public key
+    lineinfile: dest=/home/virl/.ssh/authorized_keys line='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiXwDJc8sFpMrn/pEtaT9Ug6BmkFDtJVOX1bCPZWyg650IUBgrIo9cEcpwJNI53p4roTsUmOcYA391jaC5Z9cH5JngUFH0OlyLGdbzlKREzD7PeKGGn7RQFDhZS+L7I+9ayaTn+RJi5EI6b/uWeP0tEvCrFDRJJPu3I9Ohgkbc7CfzeXuX57t9rMbYduvsDpOLQ6s14mSy6T7qGexyE5mQIkSZ4XfWxOuT1nDvMsIw7ej86pKXE/faqvKczoqMa9SUnxo27b4Oe62+KlGt0iXDCYvK9tiDOU8Mi/ZGOM1k2utZOVeD4r7b8rWFbX1aw35roPUPdFEH+ROUGPljrlJT cloud-user@test-ubuntu-1404'
+- hosts: virl
+  remote_user: testuser
+  tasks:
+  - name: Install Carsten's public key
+    lineinfile: dest=/home/testuser/.ssh/authorized_keys line='ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDiXwDJc8sFpMrn/pEtaT9Ug6BmkFDtJVOX1bCPZWyg650IUBgrIo9cEcpwJNI53p4roTsUmOcYA391jaC5Z9cH5JngUFH0OlyLGdbzlKREzD7PeKGGn7RQFDhZS+L7I+9ayaTn+RJi5EI6b/uWeP0tEvCrFDRJJPu3I9Ohgkbc7CfzeXuX57t9rMbYduvsDpOLQ6s14mSy6T7qGexyE5mQIkSZ4XfWxOuT1nDvMsIw7ej86pKXE/faqvKczoqMa9SUnxo27b4Oe62+KlGt0iXDCYvK9tiDOU8Mi/ZGOM1k2utZOVeD4r7b8rWFbX1aw35roPUPdFEH+ROUGPljrlJT cloud-user@test-ubuntu-1404'
diff --git a/resources/tools/testbed-setup/playbooks/files/apt-sources.list b/resources/tools/testbed-setup/playbooks/files/apt-sources.list
new file mode 100644 (file)
index 0000000..b8ed057
--- /dev/null
@@ -0,0 +1,61 @@
+# deb http://us.archive.ubuntu.com/ubuntu/ trusty main restricted
+
+# deb http://us.archive.ubuntu.com/ubuntu/ trusty-updates main restricted
+# deb http://security.ubuntu.com/ubuntu trusty-security main restricted
+
+# See http://help.ubuntu.com/community/UpgradeNotes for how to upgrade to
+# newer versions of the distribution.
+deb http://us.archive.ubuntu.com/ubuntu/ trusty main restricted
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty main restricted
+
+## Major bug fix updates produced after the final release of the
+## distribution.
+deb http://us.archive.ubuntu.com/ubuntu/ trusty-updates main restricted
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty-updates main restricted
+
+## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu
+## team. Also, please note that software in universe WILL NOT receive any
+## review or updates from the Ubuntu security team.
+deb http://us.archive.ubuntu.com/ubuntu/ trusty universe
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty universe
+deb http://us.archive.ubuntu.com/ubuntu/ trusty-updates universe
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty-updates universe
+
+## N.B. software from this repository is ENTIRELY UNSUPPORTED by the Ubuntu 
+## team, and may not be under a free licence. Please satisfy yourself as to 
+## your rights to use the software. Also, please note that software in 
+## multiverse WILL NOT receive any review or updates from the Ubuntu
+## security team.
+deb http://us.archive.ubuntu.com/ubuntu/ trusty multiverse
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty multiverse
+deb http://us.archive.ubuntu.com/ubuntu/ trusty-updates multiverse
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty-updates multiverse
+
+## N.B. software from this repository may not have been tested as
+## extensively as that contained in the main release, although it includes
+## newer versions of some applications which may provide useful features.
+## Also, please note that software in backports WILL NOT receive any review
+## or updates from the Ubuntu security team.
+deb http://us.archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse
+deb-src http://us.archive.ubuntu.com/ubuntu/ trusty-backports main restricted universe multiverse
+
+deb http://security.ubuntu.com/ubuntu trusty-security main restricted
+deb-src http://security.ubuntu.com/ubuntu trusty-security main restricted
+deb http://security.ubuntu.com/ubuntu trusty-security universe
+deb-src http://security.ubuntu.com/ubuntu trusty-security universe
+deb http://security.ubuntu.com/ubuntu trusty-security multiverse
+deb-src http://security.ubuntu.com/ubuntu trusty-security multiverse
+
+## Uncomment the following two lines to add software from Canonical's
+## 'partner' repository.
+## This software is not part of Ubuntu, but is offered by Canonical and the
+## respective vendors as a service to Ubuntu users.
+# deb http://archive.canonical.com/ubuntu trusty partner
+# deb-src http://archive.canonical.com/ubuntu trusty partner
+
+## Uncomment the following two lines to add software from Ubuntu's
+## 'extras' repository.
+## This software is not part of Ubuntu, but is offered by third-party
+## developers who want to ship their latest software.
+# deb http://extras.ubuntu.com/ubuntu trusty main
+# deb-src http://extras.ubuntu.com/ubuntu trusty main
diff --git a/resources/tools/testbed-setup/playbooks/files/cpufrequtils b/resources/tools/testbed-setup/playbooks/files/cpufrequtils
new file mode 100644 (file)
index 0000000..03070fe
--- /dev/null
@@ -0,0 +1 @@
+GOVERNOR="performance"
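Installed as /etc/default/cpufrequtils, this pins the CPU frequency governor to "performance". A minimal check after boot, assuming cpufreq scaling is available on the host:

    # Every CPU should report the performance governor.
    cat /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor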
diff --git a/resources/tools/testbed-setup/playbooks/files/grub b/resources/tools/testbed-setup/playbooks/files/grub
new file mode 100644 (file)
index 0000000..d4e27b3
--- /dev/null
@@ -0,0 +1,34 @@
+# If you change this file, run 'update-grub' afterwards to update
+# /boot/grub/grub.cfg.
+# For full documentation of the options in this file, see:
+#   info -f grub -n 'Simple configuration'
+
+GRUB_DEFAULT=0
+GRUB_TIMEOUT=10
+GRUB_DISTRIBUTOR=`lsb_release -i -s 2> /dev/null || echo Debian`
+GRUB_CMDLINE_LINUX_DEFAULT="console=tty0 console=ttyS0,115200n8"
+GRUB_CMDLINE_LINUX=""
+
+# Uncomment to enable BadRAM filtering, modify to suit your needs
+# This works with Linux (no patch required) and with any kernel that obtains
+# the memory map information from GRUB (GNU Mach, kernel of FreeBSD ...)
+#GRUB_BADRAM="0x01234567,0xfefefefe,0x89abcdef,0xefefefef"
+
+# Uncomment to disable graphical terminal (grub-pc only)
+#GRUB_TERMINAL=console
+GRUB_TERMINAL=serial
+GRUB_SERIAL_COMMAND="serial --speed=115200 --unit=0 --word=8 --parity=no --stop=1"
+
+# The resolution used on graphical terminal
+# note that you can use only modes which your graphic card supports via VBE
+# you can see them in real GRUB with the command `vbeinfo'
+#GRUB_GFXMODE=640x480
+
+# Uncomment if you don't want GRUB to pass "root=UUID=xxx" parameter to Linux
+#GRUB_DISABLE_LINUX_UUID=true
+
+# Uncomment to disable generation of recovery mode menu entries
+#GRUB_DISABLE_RECOVERY="true"
+
+# Uncomment to get a beep at grub start
+#GRUB_INIT_TUNE="480 440 1"
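As the header comment says, these settings only take effect once the GRUB configuration is regenerated; a minimal sketch, assuming the file has been installed as /etc/default/grub:

    # Rebuild /boot/grub/grub.cfg so the serial-console and kernel options apply.
    sudo update-grub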
diff --git a/resources/tools/testbed-setup/playbooks/files/hostname b/resources/tools/testbed-setup/playbooks/files/hostname
new file mode 100644 (file)
index 0000000..56baac7
--- /dev/null
@@ -0,0 +1 @@
+{{ hostname }}
diff --git a/resources/tools/testbed-setup/playbooks/files/hosts b/resources/tools/testbed-setup/playbooks/files/hosts
new file mode 100644 (file)
index 0000000..82dee7b
--- /dev/null
@@ -0,0 +1,7 @@
+127.0.0.1      localhost
+{{ ansible_default_ipv4["address"] }}  {{ hostname }}.linuxfoundation.org      {{ hostname }}
+
+# The following lines are desirable for IPv6 capable hosts
+::1     localhost ip6-localhost ip6-loopback
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
diff --git a/resources/tools/testbed-setup/playbooks/files/interfaces b/resources/tools/testbed-setup/playbooks/files/interfaces
new file mode 100644 (file)
index 0000000..734d8cd
--- /dev/null
@@ -0,0 +1,14 @@
+# This file describes the network interfaces available on your system
+# and how to activate them. For more information, see interfaces(5).
+
+# The loopback network interface
+auto lo
+iface lo inet loopback
+
+# The primary network interface
+auto {{ ansible_default_ipv4["interface"] }}
+iface {{ ansible_default_ipv4["interface"] }} inet static
+    address {{ ansible_default_ipv4["address"] }}
+    netmask {{ ansible_default_ipv4["netmask"] }}
+    gateway {{ ansible_default_ipv4["gateway"] }}
+    dns-nameservers 199.204.44.24 199.204.47.54
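The hostname, hosts and interfaces files are Jinja2 templates filled from Ansible facts (ansible_default_ipv4) and a per-host hostname variable. A minimal sketch of rendering one of them ad hoc, run from the playbooks directory so the relative src path resolves; the inventory name, host pattern and hostname value are assumptions:

    # Hypothetical ad-hoc render of the interfaces template onto one host.
    ansible tb-host -i production -b -m template \
      -a "src=files/interfaces dest=/etc/network/interfaces" \
      -e hostname=tb-host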
diff --git a/resources/tools/testbed-setup/playbooks/files/irqbalance b/resources/tools/testbed-setup/playbooks/files/irqbalance
new file mode 100644 (file)
index 0000000..84fb5f1
--- /dev/null
@@ -0,0 +1,6 @@
+#Configuration for the irqbalance daemon
+#Should irqbalance be enabled?
+ENABLED="0"
+#Balance the IRQs only once?
+ONESHOT="0"
diff --git a/resources/tools/testbed-setup/playbooks/files/salt.b64 b/resources/tools/testbed-setup/playbooks/files/salt.b64
new file mode 100644 (file)
index 0000000..5e76a85
--- /dev/null
@@ -0,0 +1,27 @@
+H4sIAF7JK1cAA+1a32/bNhD2a/RXcMmDkyGy9VuDsQwDhhUoNuxhHfbSFQYj0TZrSVRJyklQ9H/f
+UZZsGbUj27HVduOXBzvkmUfyuzveURI4kcPeeWEBwtBXn3boW83PGj3bCS3P9v3QcnuW7YJAD/ln
+nleJQkjMEeolLMIJjlOabZdr6/9GIRT/gi/OaQOH8O+HgeI/8GzNfxdY8S/OFwgO598LLE/z3wU2
++Y/mjAhJ+GkNoeTf34d/LwwskLPB/V3NfxfYwX82EaYgfEH4QCTihToUwYFl7eTfBrJr/l0X+Hds
+R/FvnWSFLfif80+zKCliMjIQMlFN/6BBP81gg5JkZ3/EsgmdFpwYX3opGkdgh//HmUix+HAC5++1
++7/lr+O/4zng/5YfeNr/u8B2/6/p1879X0fr+X+CVHD//M9XH8r/IRHU+V8XaOd/FQOOPgza4n/o
+N/j3FP+uHdo6/ncBxbQK/hOakEFMOYkk40+qRR0IBdjACHHGZNUw5azIN1pSBqcHAvc2DDXWUEQc
+y2h2gjHtMAyrQTlrG29BebI5XqOlOUcioyF5zBmX63WnOMNTEo82xAPPq/6XJM0TLKHtPc3e46pV
+sIJH0HapPGc03Oo7anRRq7s0DOgaL7vGvMgymk2VTtVCIzJoNCkFGU5heDXanPCMJNWghlHkMcxm
+3FhFlMbq15u/XPZPBDI53nl+t/t/VQC8IBUs/d/zdtd/QbjK/3xL5X9OoOK/9v/z4zPzQhXfypby
++bSu/9beIeY0H4MgnTyN0F+8IFU7JxNOxGyEXuFEkBYb/tLL1qjQ7v/LGPYSHeUZ/1z9t87/LNsK
+1P2PH+j7v06wL//VYXOUjpb4D1lf2OBfxX/XcXT93wmu0EZGhOSMIBxFRAgEib/kLEEJFRJNGC9z
+JfEE9pEK9DCj0Qyl+AndkyrTILFxdXEhGfrj1RsUJZRkUgwQekNqAXHt3wyMK+MK/fqIIaUi5agg
+vXAQzuLymzuC7tIgZywlAi0xY0Kqo8S+5g+34imLbjM2FsW95ISMoxmJ5jcrGeeasx0yW3V7K41g
+9F6lEE2FGM75vU9XGieCxnfWbcSZEGkmt42+Hqae/efDbJtUM3G++PgRKV98289pkmA+mBLZf3fd
+V+nsKHHGGZEPjM/7t5XUlGM4omupDYG+HToDOxjYA2voeH10c4M+fVITgTmobHssPhRYzLbs5X5z
+cFon4TRm4Zx2Fm7bTrhbdsI9dA51+XFx8b0yq1bxL+3OB+P5+9/TPAc85Pmf44Xq/sfS5383aOH/
+5Zc/vfb7Hyewa/4D2/JU/ed7vj7/u0B5+tOMymHzzv9MNyO1VS1TyqbCS6PtHqSS/vYi7NeNFv9/
+efHXO6T+g/iv3v9zbNvT8b8T7MV/01OP0NFW/4V2UPPvh56t6j/f0vVfJ7hCC5qOBIEKT94VudoM
+iaS4cxCRIyMmUJXQXFKWocu/X//5ex2GLw1cyBlUUJe/YA4Wk6HflpaDfqxt6OeIioiBzaQ/QXRf
+DgzDQHBPyIIk6K3jev476GD5Zvt3yw6DE5Hjh8wwyCOJ0LAQfCjuaVZbIzJj9I9R12sm3rNoGU/h
+vHrAT621S0NwXUhBYlJVDy/R3V44NSXXBdRR2t19V75NcF04bddtZsxUhbdYfgXKWLKA70JyGkmT
+8RhMwjSBttikGVjFBEdECZPHiORy3XaXMGjNaWyqkHM3lGk+VPM369gDXSCgItBSYmMWy6uqu+f2
+YULBTscql1gK79iFLWL9HwblX//z1e+hVxCYc9yueJvcM5pjlsKv71hO1P18NC/D0xHJURn2VQ54
++tCyQnn+H/j+t+Xo878LrPj/ut7/Lus/zf/5sck/rA6O+kF8hve/D+I/cAJL898FdvAvIYUrV3xs
+zt9E2/N/y6uf/7hh4Dtl/aff/+4GKpcq77TL90jQPRZkee+CTLSuC/Wti4aGhoaGhoaGhoaGhoaG
+hoaGhoaGhsa3hH8BeOCWxQBQAAA=
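salt.b64 appears to be a base64-encoded, gzip-compressed tar archive (the leading H4sI is the base64 form of the gzip magic bytes) mirroring the files/salt tree below. A minimal sketch of inspecting it:

    # List the archive contents without extracting anything.
    base64 -d salt.b64 | tar tzv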
diff --git a/resources/tools/testbed-setup/playbooks/files/salt/etc/salt/minion.d/testlocal.conf b/resources/tools/testbed-setup/playbooks/files/salt/etc/salt/minion.d/testlocal.conf
new file mode 100644 (file)
index 0000000..fce910c
--- /dev/null
@@ -0,0 +1,3 @@
+file_roots:
+   base:
+     - /srv/salt/
diff --git a/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server.sls b/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server.sls
new file mode 100644 (file)
index 0000000..bf3e47f
--- /dev/null
@@ -0,0 +1,3 @@
+include:
+  - ckoester.nfs-server.install
+  - ckoester.nfs-server.configure
diff --git a/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/configure.sls b/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/configure.sls
new file mode 100644 (file)
index 0000000..ce362a3
--- /dev/null
@@ -0,0 +1,31 @@
+/nfs:
+  file.directory:
+    - user: root
+    - group: root
+    - mode: 755
+
+/nfs/scratch:
+  file.directory:
+    - user: root
+    - group: root
+    - mode: 1777
+
+/nfs/ro:
+  file.directory:
+    - user: virl
+    - group: virl
+    - mode: 755
+
+/etc/exports:
+  file.managed:
+    - mode: 644
+    - template: jinja
+    - source: "salt://ckoester/nfs-server/files/exports"
+
+nfs_server_running:
+  service.running:
+    - name: nfs-kernel-server
+
+update_exports:
+  cmd.run:
+    - name: exportfs -ra
diff --git a/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/files/exports b/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/files/exports
new file mode 100644 (file)
index 0000000..23802be
--- /dev/null
@@ -0,0 +1,12 @@
+# /etc/exports: the access control list for filesystems which may be exported
+#              to NFS clients.  See exports(5).
+#
+# Example for NFSv2 and NFSv3:
+# /srv/homes       hostname1(rw,sync,no_subtree_check) hostname2(ro,sync,no_subtree_check)
+#
+# Example for NFSv4:
+# /srv/nfs4        gss/krb5i(rw,sync,fsid=0,crossmnt,no_subtree_check)
+# /srv/nfs4/homes  gss/krb5i(rw,sync,no_subtree_check)
+#
+/nfs/scratch   {{ salt['pillar.get']('virl:l2_network', salt['grains.get']('l2_network', '172.16.1.0/24' )) }}(rw,no_root_squash,no_subtree_check) {{ salt['pillar.get']('virl:l2_network2', salt['grains.get']('l2_network2', '172.16.2.0/24' )) }}(rw,no_root_squash,no_subtree_check) {{ salt['pillar.get']('virl:l3_network', salt['grains.get']('l3_network', '172.16.3.0/24' )) }}(rw,no_root_squash,no_subtree_check)
+/nfs/ro                *(ro,no_root_squash,no_subtree_check)
diff --git a/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/install.sls b/resources/tools/testbed-setup/playbooks/files/salt/srv/salt/ckoester/nfs-server/install.sls
new file mode 100644 (file)
index 0000000..0f13634
--- /dev/null
@@ -0,0 +1,5 @@
+nfs-kernel-server install:
+  pkg.installed:
+    - skip_verify: True
+    - refresh: False
+    - name: nfs-kernel-server
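With the file_roots override from testlocal.conf in place, the NFS server states can be applied masterless on the VIRL host; a minimal sketch, with the exported networks left at the defaults from the exports template:

    # Apply the ckoester.nfs-server include state without a Salt master.
    sudo salt-call --local state.sls ckoester.nfs-server
    # Optionally override one of the exported networks via a grain first.
    sudo salt-call --local grains.setval l2_network 172.16.1.0/24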
diff --git a/resources/tools/testbed-setup/playbooks/files/sudoers b/resources/tools/testbed-setup/playbooks/files/sudoers
new file mode 100644 (file)
index 0000000..367c390
--- /dev/null
@@ -0,0 +1 @@
+testuser ALL=(root) NOPASSWD:ALL
diff --git a/resources/tools/testbed-setup/playbooks/files/sudoers_jenkins-in b/resources/tools/testbed-setup/playbooks/files/sudoers_jenkins-in
new file mode 100644 (file)
index 0000000..1797c2c
--- /dev/null
@@ -0,0 +1 @@
+jenkins-in ALL=(root) NOPASSWD: /bin/rm -fr /scratch/*, /bin/rm -fr /nfs/scratch/*
diff --git a/resources/tools/testbed-setup/playbooks/files/sudoers_virl b/resources/tools/testbed-setup/playbooks/files/sudoers_virl
new file mode 100644 (file)
index 0000000..e0cf48a
--- /dev/null
@@ -0,0 +1 @@
+virl ALL=(root) NOPASSWD:ALL
diff --git a/resources/tools/testbed-setup/playbooks/files/ttyS0 b/resources/tools/testbed-setup/playbooks/files/ttyS0
new file mode 100644 (file)
index 0000000..0ed8550
--- /dev/null
@@ -0,0 +1,10 @@
+# ttyS0 - getty
+#
+# This service maintains a getty on ttyS0 from the point the system is
+# started until it is shut down again.
+
+start on stopped rc RUNLEVEL=[12345]
+stop on runlevel [!12345]
+
+respawn
+exec /sbin/getty -L 115200 ttyS0 vt102
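This Upstart job keeps a getty on the serial console. Once installed (presumably as /etc/init/ttyS0.conf), it can be started without waiting for a reboot; a minimal sketch:

    # Start the serial-console getty under Upstart (Ubuntu 14.04).
    sudo initctl start ttyS0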
diff --git a/resources/tools/testbed-setup/playbooks/files/virl/id_rsa_virl b/resources/tools/testbed-setup/playbooks/files/virl/id_rsa_virl
new file mode 100644 (file)
index 0000000..b4c3de7
--- /dev/null
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpgIBAAKCAQEAwUDlTpzSHpwLQotZOFS4AgcPNEWCnP1AB2hWFmvI+8Kah/gb
+v8ruZU9RqhPs56tyKzxbhvNkY4VbH5F1GilHZu3mLqzM4KfghMmaeMEjO1T7BYYd
+vuBfTvIluljfQ2vAlnYrDwn+ClxJk81m0pDgvrLEX4qVVh2sGh7UEkYy5r82DNa2
+4VjzPB1J/c8a9zP8FoZUhYIzF4FLvRMjUADpbMXgJMsGpaZLmz95ap0Eot7vb1Cc
+1LvF97iyBCrtIOSKRKA50ZhLGjMKmOwnYU+cP5718tbproDVi6VJOo7zeuXyetMs
+8YBl9kWblWG9BqP9jctFvsmi5G7hXgq1Y8u+DwIDAQABAoIBAQC/W4E0DHjLMny7
+0bvw2YKzD0Zw3fttdB94tkm4PdZv5MybooPnsAvLaXVV0hEdfVi5kzSWNl/LY/tN
+EP1BgGphc2QgB59/PPxGwFIjDCvUzlsZpynBHe+B/qh5ExNQcVvsIOqWI7DXlXaN
+0i/khOzmJ6HncRRah1spKimYRsaUUDskyg7q3QqMWVaqBbbMvLs/w7ZWd/zoDqCU
+MY/pCI6hkB3QbRo0OdiZLohphBl2ShABTwjvVyyKL5UA4jAEneJrhH5gWVLXnfgD
+p62W5CollKEYblC8mUkPxpP7Qo277zw3xaq+oktIZhc5SUEUd7nJZtNqVAHqkItW
+79VmpKyxAoGBAPfU+kqNPaTSvp+x1n5sn2SgipzDtgi9QqNmC4cjtrQQaaqI57SG
+OHw1jX8i7L2G1WvVtkHg060nlEVo5n65ffFOqeVBezLVJ7ghWI8U+oBiJJyQ4boD
+GJVNsoOSUQ0rtuGd9eVwfDk3ol9aCN0KK53oPfIYli29pyu4l095kg11AoGBAMef
+bPEMBI/2XmCPshLSwhGFl+dW8d+Klluj3CUQ/0vUlvma3dfBOYNsIwAgTP0iIUTg
+8DYE6KBCdPtxAUEI0YAEAKB9ry1tKR2NQEIPfslYytKErtwjAiqSi0heM6+zwEzu
+f54Z4oBhsMSL0jXoOMnu+NZzEc6EUdQeY4O+jhjzAoGBAIogC3dtjMPGKTP7+93u
+UE/XIioI8fWg9fj3sMka4IMu+pVvRCRbAjRH7JrFLkjbUyuMqs3Arnk9K+gbdQt/
++m95Njtt6WoFXuPCwgbM3GidSmZwYT4454SfDzVBYScEDCNm1FuR+8ov9bFLDtGT
+D4gsngnGJj1MDFXTxZEn4nzZAoGBAKCg4WmpUPaCuXibyB+rZavxwsTNSn2lJ83/
+sYJGBhf/raiV/FLDUcM1vYg5dZnu37RsB/5/vqxOLZGyYd7x+Jo5HkQGPnKgNwhn
+g8BkdZIRF8uEJqxOo0ycdOU7n/2O93swIpKWo5LIiRPuqqzj+uZKnAL7vuVdxfaY
+qVz2daMPAoGBALgaaKa3voU/HO1PYLWIhFrBThyJ+BQSQ8OqrEzC8AnegWFxRAM8
+EqrzZXl7ACUuo1dH0Eipm41j2+BZWlQjiUgq5uj8+yzy+EU1ZRRyJcOKzbDACeuD
+BpWWSXGBI5G4CppeYLjMUHZpJYeX1USULJQd2c4crLJKb76E8gz3Z9kN
+-----END RSA PRIVATE KEY-----
diff --git a/resources/tools/testbed-setup/playbooks/files/virl/id_rsa_virl.pub b/resources/tools/testbed-setup/playbooks/files/virl/id_rsa_virl.pub
new file mode 100644 (file)
index 0000000..0ef508c
--- /dev/null
@@ -0,0 +1 @@
+ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDBQOVOnNIenAtCi1k4VLgCBw80RYKc/UAHaFYWa8j7wpqH+Bu/yu5lT1GqE+znq3IrPFuG82RjhVsfkXUaKUdm7eYurMzgp+CEyZp4wSM7VPsFhh2+4F9O8iW6WN9Da8CWdisPCf4KXEmTzWbSkOC+ssRfipVWHawaHtQSRjLmvzYM1rbhWPM8HUn9zxr3M/wWhlSFgjMXgUu9EyNQAOlsxeAkywalpkubP3lqnQSi3u9vUJzUu8X3uLIEKu0g5IpEoDnRmEsaMwqY7CdhT5w/nvXy1umugNWLpUk6jvN65fJ60yzxgGX2RZuVYb0Go/2Ny0W+yaLkbuFeCrVjy74P virl@tb4-virl
diff --git a/resources/tools/testbed-setup/playbooks/files/virl/ssh_environment b/resources/tools/testbed-setup/playbooks/files/virl/ssh_environment
new file mode 100644 (file)
index 0000000..5ec594d
--- /dev/null
@@ -0,0 +1 @@
+PATH=/home/jenkins-in/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/games:/usr/local/games
diff --git a/resources/tools/testbed-setup/playbooks/files/virl/virl-bootstrap-wrapper b/resources/tools/testbed-setup/playbooks/files/virl/virl-bootstrap-wrapper
new file mode 100644 (file)
index 0000000..e2d0a5f
--- /dev/null
@@ -0,0 +1,59 @@
+#!/usr/bin/expect
+
+if ![ string equal $::env(USER) "root"] {
+  puts "Please re-run this script as root."
+  exit 1
+}
+
+log_file /tmp/virl-bootstrap.log
+set timeout 3600
+
+spawn  ./virl-bootstrap.py
+expect "Which step are you on"
+send "1\r"
+
+expect "Salt master"
+send "us-1.virl.info,us-2.virl.info,us-4.virl.info\r"
+
+expect "Which step are you on"
+send "2\r"
+
+expect "Salt id"
+send "{{ hostname }}\r"
+expect "Salt domain name"
+send "linuxfoundation.org\r"
+
+expect "Which step are you on"
+send "3\r"
+
+expect "System hostname"
+send "{{ hostname }}\r"
+expect "System Domain name"
+send "linuxfoundation.org\r"
+
+expect "Which step are you on"
+send "4\r"
+
+puts "*******************STEP 6*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "6\r"
+expect "Salt installed"
+
+puts "*******************STEP 8*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "8\r"
+
+expect "Determining pillar cache"
+expect "True"
+
+puts "*******************STEP 9*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "9\r"
+
+expect "Failed:     0"
+
+puts "*******************STEP 11*************************************************************************************************************************************************"
+expect "Which step are you on"
+send "11\r"
+
+expect eof
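The wrapper drives virl-bootstrap.py through its interactive steps and refuses to run as a non-root user. A minimal sketch of invoking the rendered script; the working directory containing virl-bootstrap.py is an assumption:

    # Run from the directory that holds virl-bootstrap.py; progress is logged
    # to /tmp/virl-bootstrap.log.
    sudo ./virl-bootstrap-wrapper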
diff --git a/resources/tools/testbed-setup/playbooks/reboot.yaml b/resources/tools/testbed-setup/playbooks/reboot.yaml
new file mode 100644 (file)
index 0000000..9e22dcb
--- /dev/null
@@ -0,0 +1,14 @@
+- hosts: all
+  remote_user: testuser
+  tasks:
+  - name: Reboot host
+    sudo: true
+    command: shutdown -r now "Ansible updates triggered"
+    async: 0
+    poll: 0
+    ignore_errors: true
+  - name: Wait for server to come back
+    local_action: wait_for host={{ inventory_hostname }}
+                  state=started
+    sudo: false
+
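A minimal sketch of triggering the reboot play; the inventory file name is an assumption, and the sudoers file above already grants testuser passwordless sudo:

    # Reboot every inventory host and wait for it to come back.
    ansible-playbook -i production reboot.yaml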
diff --git a/resources/tools/testbed-setup/syslinux.cfg b/resources/tools/testbed-setup/syslinux.cfg
new file mode 100644 (file)
index 0000000..4038c1e
--- /dev/null
@@ -0,0 +1,6 @@
+# D-I config version 2.0
+serial 0 115200
+include ubuntu-installer/amd64/boot-screens/menu.cfg
+default ubuntu-installer/amd64/boot-screens/vesamenu.c32
+prompt 0
+timeout 10