<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<topology xmlns="http://www.cisco.com/VIRL" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" schemaVersion="0.9" xsi:schemaLocation="http://www.cisco.com/VIRL https://raw.github.com/CiscoVIRL/schema/v0.9/virl.xsd">
    <extensions>
        <entry key="management_network" type="String">flat</entry>
    </extensions>
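    <!--
         Three nodes follow: tg1 (server subtype, six data-plane interfaces,
         presumably the traffic generator) and sut1/sut2 (vPP systems under
         test, four GigabitEthernet interfaces each). The $$VM_IMAGE$$,
         $$NFS_SERVER_SCRATCH$$ and $$NFS_SERVER_COMMON$$ tokens are
         placeholders, assumed to be substituted by the launch tooling before
         the topology is submitted to VIRL.
    -->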
    <node name="tg1" type="SIMPLE" subtype="server" location="570,238" vmImage="$$VM_IMAGE$$">
        <extensions>
            <entry key="config" type="String">#cloud-config
bootcmd:
- ln -s -t /etc/rc.d /etc/rc.local
hostname: tg1
manage_etc_hosts: true
nfs_server_scratch: $$NFS_SERVER_SCRATCH$$
nfs_server_common: $$NFS_SERVER_COMMON$$
runcmd:
- systemctl start getty@ttyS0.service
- systemctl start rc-local
- touch /tmp/before-sed
- sed -i 's/^\s*PasswordAuthentication\s\+no/PasswordAuthentication yes/' /etc/ssh/sshd_config
- echo "UseDNS no" &gt;&gt; /etc/ssh/sshd_config
- service ssh restart
- service sshd restart
users:
- default
- gecos: User configured by VIRL Configuration Engine 0.21.4
  lock-passwd: false
  name: cisco
  plain-text-passwd: cisco
  shell: /bin/bash
  ssh-authorized-keys:
  - VIRL-USER-SSH-PUBLIC-KEY
  - VIRL-USER-SSH-PUBLIC-KEY
  sudo: ALL=(ALL) NOPASSWD:ALL
write_files:
- path: /etc/init/ttyS0.conf
  owner: root:root
  content: |
    # ttyS0 - getty
    # This service maintains a getty on ttyS0 from the point the system is
    # started until it is shut down again.
    start on stopped rc or RUNLEVEL=[12345]
    stop on runlevel [!12345]
    respawn
    exec /sbin/getty -L 115200 ttyS0 vt102
  permissions: '0644'
- path: /etc/systemd/system/dhclient@.service
  content: |
    [Unit]
    Description=Run dhclient on %i interface
    After=network.target
    [Service]
    Type=oneshot
    ExecStart=/sbin/dhclient %i -pf /var/run/dhclient.%i.pid -lf /var/lib/dhclient/dhclient.%i.lease
    RemainAfterExit=yes
  owner: root:root
  permissions: '0644'
- path: /usr/local/sbin/cloud-instance-name
  content: |
    #!/usr/bin/python3.5
    import pickle
    print(pickle.loads(open('/var/lib/cloud/instance/obj.pkl', 'rb').read(), encoding="ASCII").metadata['name'])
  owner: root:root
  permissions: '0755'
- path: /etc/rc.local
  owner: root:root
  permissions: '0755'
  content: |-
    #!/bin/sh
    grep -q nfs_server_scratch /var/lib/cloud/instance/user-data.txt || exit 1
    grep -q nfs_server_common /var/lib/cloud/instance/user-data.txt || exit 1
    nfs_server_scratch=$(grep -E '^nfs_server_scratch:' /var/lib/cloud/instance/user-data.txt | awk '{ print $2 }')
    nfs_server_common=$(grep -E '^nfs_server_common:' /var/lib/cloud/instance/user-data.txt | awk '{ print $2 }')
    instance_name=$(/usr/local/sbin/cloud-instance-name | cut -f 3 -d '&lt;' | cut -f 1 -d '&gt;')
    echo My instance name is $instance_name

    MAXCOUNT=12
    RETRY=5

    mkdir -p /scratch
    mkdir -p /mnt/common

    echo "Mounting NFS directories"
    # Retry the scratch mount up to MAXCOUNT times, waiting 5 s between attempts.
    count=0
    while [ $count -lt $MAXCOUNT ] &amp;&amp; ! mount -t nfs "${nfs_server_scratch}/${instance_name}" /scratch
    do
      sleep 5
      count=$((count+1))
    done

    mount -t nfs "${nfs_server_common}" /mnt/common

    mkdir /scratch/$(hostname)
    cp /VERSION /scratch/$(hostname)/

    exit 0
</entry>
            <entry key="Auto-generate config" type="Boolean">false</entry>
        </extensions>
        <interface id="0" name="eth1"/>
        <interface id="1" name="eth2"/>
        <interface id="2" name="eth3"/>
        <interface id="3" name="eth4"/>
        <interface id="4" name="eth5"/>
        <interface id="5" name="eth6"/>
    </node>
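    <!--
         sut1 and sut2 are identical apart from hostname and canvas location:
         both use the vPP subtype and write the same dhclient unit, rc.local
         and hugepage sysctl settings via cloud-config.
    -->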
    <node name="sut1" type="SIMPLE" subtype="vPP" location="425,26" vmImage="$$VM_IMAGE$$">
        <extensions>
            <entry key="config" type="string">#cloud-config
bootcmd:
- ln -s -t /etc/rc.d /etc/rc.local
hostname: sut1
manage_etc_hosts: true
nfs_server_scratch: $$NFS_SERVER_SCRATCH$$
nfs_server_common: $$NFS_SERVER_COMMON$$
runcmd:
- systemctl start getty@ttyS0.service
- systemctl start rc-local
- sed -i '/^\s*PasswordAuthentication\s\+no/d' /etc/ssh/sshd_config
- echo "UseDNS no" &gt;&gt; /etc/ssh/sshd_config
- service ssh restart
- service sshd restart
- sed -i 's/no-pci//' /opt/cisco/vpe/etc/qn.conf
- sed -i 's/1024/1024 decimal-interface-names/g' /opt/cisco/vpe/etc/qn.conf
- ln -s /dev/null /etc/sysctl.d/80-vpp.conf
users:
- default
- gecos: User configured by VIRL Configuration Engine 0.21.4
  lock-passwd: false
  name: cisco
  plain-text-passwd: cisco
  shell: /bin/bash
  ssh-authorized-keys:
  - VIRL-USER-SSH-PUBLIC-KEY
  - VIRL-USER-SSH-PUBLIC-KEY
  sudo: ALL=(ALL) NOPASSWD:ALL
write_files:
- path: /etc/init/ttyS0.conf
  owner: root:root
  content: |
    # ttyS0 - getty
    # This service maintains a getty on ttyS0 from the point the system is
    # started until it is shut down again.
    start on stopped rc or RUNLEVEL=[12345]
    stop on runlevel [!12345]
    respawn
    exec /sbin/getty -L 115200 ttyS0 vt102
  permissions: '0644'
- path: /etc/systemd/system/dhclient@.service
  content: |
    [Unit]
    Description=Run dhclient on %i interface
    After=network.target
    [Service]
    Type=oneshot
    ExecStart=/sbin/dhclient %i -pf /var/run/dhclient.%i.pid -lf /var/lib/dhclient/dhclient.%i.lease
    RemainAfterExit=yes
  owner: root:root
  permissions: '0644'
- path: /usr/local/sbin/cloud-instance-name
  content: |
    #!/usr/bin/python3.5
    import pickle
    print(pickle.loads(open('/var/lib/cloud/instance/obj.pkl', 'rb').read(), encoding="ASCII").metadata['name'])
  owner: root:root
  permissions: '0755'
- path: /etc/rc.local
  owner: root:root
  permissions: '0755'
  content: |-
    #!/bin/sh
    grep -q nfs_server_scratch /var/lib/cloud/instance/user-data.txt || exit 1
    grep -q nfs_server_common /var/lib/cloud/instance/user-data.txt || exit 1
    nfs_server_scratch=$(grep -E '^nfs_server_scratch:' /var/lib/cloud/instance/user-data.txt | awk '{ print $2 }')
    nfs_server_common=$(grep -E '^nfs_server_common:' /var/lib/cloud/instance/user-data.txt | awk '{ print $2 }')
    instance_name=$(/usr/local/sbin/cloud-instance-name | cut -f 3 -d '&lt;' | cut -f 1 -d '&gt;')
    echo My instance name is $instance_name

    MAXCOUNT=12
    RETRY=5

    mkdir -p /scratch
    mkdir -p /mnt/common

    echo "Mounting NFS directories"
    # Retry the scratch mount up to MAXCOUNT times, waiting 5 s between attempts.
    count=0
    while [ $count -lt $MAXCOUNT ] &amp;&amp; ! mount -t nfs "${nfs_server_scratch}/${instance_name}" /scratch
    do
      sleep 5
      count=$((count+1))
    done

    mount -t nfs "${nfs_server_common}" /mnt/common

    # Overwrite nested VM image with latest as per NFS
    if [ -f /mnt/common/nested-vm-current.img ]
    then
      rm -f /var/lib/vm/vhost-nested.img
      cp /mnt/common/nested-vm-current.img /var/lib/vm/vhost-nested.img
    fi

    mkdir /scratch/$(hostname)
    cp /VERSION /scratch/$(hostname)/
    strings /var/lib/vm/vhost-nested.img | grep NESTED_VERSION= > /scratch/$(hostname)/NESTED_VERSION

    exit 0
- path: /etc/sysctl.d/90-csit.conf
  owner: root:root
  content: |
    # Number of 2MB hugepages desired
    vm.nr_hugepages=1024

    # Must be greater than or equal to (2 * vm.nr_hugepages).
    vm.max_map_count=20000

    # All groups allowed to access hugepages
    vm.hugetlb_shm_group=0

    # Shared Memory Max must be greater than or equal to the total size of hugepages.
    # For 2MB pages, TotalHugepageSize = vm.nr_hugepages * 2 * 1024 * 1024
    # If the existing kernel.shmmax setting (cat /proc/sys/kernel/shmmax)
    # is greater than the calculated TotalHugepageSize then set this parameter
    # to current shmmax value.
    kernel.shmmax=2147483648
</entry>
        </extensions>
        <interface id="0" name="GigabitEthernet0/4/0"/>
        <interface id="1" name="GigabitEthernet0/5/0"/>
        <interface id="2" name="GigabitEthernet0/6/0"/>
        <interface id="3" name="GigabitEthernet0/7/0"/>
    </node>
    <node name="sut2" type="SIMPLE" subtype="vPP" location="748,26" vmImage="$$VM_IMAGE$$">
        <extensions>
            <entry key="config" type="string">#cloud-config
bootcmd:
- ln -s -t /etc/rc.d /etc/rc.local
hostname: sut2
manage_etc_hosts: true
nfs_server_scratch: $$NFS_SERVER_SCRATCH$$
nfs_server_common: $$NFS_SERVER_COMMON$$
runcmd:
- systemctl start getty@ttyS0.service
- systemctl start rc-local
- sed -i '/^\s*PasswordAuthentication\s\+no/d' /etc/ssh/sshd_config
- echo "UseDNS no" &gt;&gt; /etc/ssh/sshd_config
- service ssh restart
- service sshd restart
- sed -i 's/no-pci//' /opt/cisco/vpe/etc/qn.conf
- sed -i 's/1024/1024 decimal-interface-names/g' /opt/cisco/vpe/etc/qn.conf
- ln -s /dev/null /etc/sysctl.d/80-vpp.conf
users:
- default
- gecos: User configured by VIRL Configuration Engine 0.21.4
  lock-passwd: false
  name: cisco
  plain-text-passwd: cisco
  shell: /bin/bash
  ssh-authorized-keys:
  - VIRL-USER-SSH-PUBLIC-KEY
  - VIRL-USER-SSH-PUBLIC-KEY
  sudo: ALL=(ALL) NOPASSWD:ALL
write_files:
- path: /etc/init/ttyS0.conf
  owner: root:root
  content: |
    # ttyS0 - getty
    # This service maintains a getty on ttyS0 from the point the system is
    # started until it is shut down again.
    start on stopped rc or RUNLEVEL=[12345]
    stop on runlevel [!12345]
    respawn
    exec /sbin/getty -L 115200 ttyS0 vt102
  permissions: '0644'
- path: /etc/systemd/system/dhclient@.service
  content: |
    [Unit]
    Description=Run dhclient on %i interface
    After=network.target
    [Service]
    Type=oneshot
    ExecStart=/sbin/dhclient %i -pf /var/run/dhclient.%i.pid -lf /var/lib/dhclient/dhclient.%i.lease
    RemainAfterExit=yes
  owner: root:root
  permissions: '0644'
- path: /usr/local/sbin/cloud-instance-name
  content: |
    #!/usr/bin/python3.5
    import pickle
    print(pickle.loads(open('/var/lib/cloud/instance/obj.pkl', 'rb').read(), encoding="ASCII").metadata['name'])
  owner: root:root
  permissions: '0755'
- path: /etc/rc.local
  owner: root:root
  permissions: '0755'
  content: |-
    #!/bin/sh
    grep -q nfs_server_scratch /var/lib/cloud/instance/user-data.txt || exit 1
    grep -q nfs_server_common /var/lib/cloud/instance/user-data.txt || exit 1
    nfs_server_scratch=$(grep -E '^nfs_server_scratch:' /var/lib/cloud/instance/user-data.txt | awk '{ print $2 }')
    nfs_server_common=$(grep -E '^nfs_server_common:' /var/lib/cloud/instance/user-data.txt | awk '{ print $2 }')
    instance_name=$(/usr/local/sbin/cloud-instance-name | cut -f 3 -d '&lt;' | cut -f 1 -d '&gt;')
    echo My instance name is $instance_name

    MAXCOUNT=12
    RETRY=5

    mkdir -p /scratch
    mkdir -p /mnt/common

    echo "Mounting NFS directories"
    # Retry the scratch mount up to MAXCOUNT times, waiting 5 s between attempts.
    count=0
    while [ $count -lt $MAXCOUNT ] &amp;&amp; ! mount -t nfs "${nfs_server_scratch}/${instance_name}" /scratch
    do
      sleep 5
      count=$((count+1))
    done

    mount -t nfs "${nfs_server_common}" /mnt/common

    # Overwrite nested VM image with latest as per NFS
    if [ -f /mnt/common/nested-vm-current.img ]
    then
      rm -f /var/lib/vm/vhost-nested.img
      cp /mnt/common/nested-vm-current.img /var/lib/vm/vhost-nested.img
    fi

    mkdir /scratch/$(hostname)
    cp /VERSION /scratch/$(hostname)/
    strings /var/lib/vm/vhost-nested.img | grep NESTED_VERSION= > /scratch/$(hostname)/NESTED_VERSION

    exit 0
- path: /etc/sysctl.d/90-csit.conf
  owner: root:root
  content: |
    # Number of 2MB hugepages desired
    vm.nr_hugepages=1024

    # Must be greater than or equal to (2 * vm.nr_hugepages).
    vm.max_map_count=20000

    # All groups allowed to access hugepages
    vm.hugetlb_shm_group=0

    # Shared Memory Max must be greater than or equal to the total size of hugepages.
    # For 2MB pages, TotalHugepageSize = vm.nr_hugepages * 2 * 1024 * 1024
    # If the existing kernel.shmmax setting (cat /proc/sys/kernel/shmmax)
    # is greater than the calculated TotalHugepageSize then set this parameter
    # to current shmmax value.
    kernel.shmmax=2147483648
</entry>
        </extensions>
        <interface id="0" name="GigabitEthernet0/4/0"/>
        <interface id="1" name="GigabitEthernet0/5/0"/>
        <interface id="2" name="GigabitEthernet0/6/0"/>
        <interface id="3" name="GigabitEthernet0/7/0"/>
    </node>
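    <!--
         Double-ring wiring (XPath node[n]/interface[m] indices are 1-based,
         while the id attributes above are 0-based):
           tg1 eth3, eth4  to  sut1 GigabitEthernet0/4/0, 0/5/0
           sut1 GigabitEthernet0/6/0, 0/7/0  to  sut2 GigabitEthernet0/6/0, 0/7/0
           tg1 eth5, eth6  to  sut2 GigabitEthernet0/4/0, 0/5/0
         tg1 eth1 and eth2 are left unconnected.
    -->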
    <connection dst="/virl:topology/virl:node[1]/virl:interface[3]" src="/virl:topology/virl:node[2]/virl:interface[1]"/>
    <connection dst="/virl:topology/virl:node[1]/virl:interface[4]" src="/virl:topology/virl:node[2]/virl:interface[2]"/>
    <connection dst="/virl:topology/virl:node[2]/virl:interface[3]" src="/virl:topology/virl:node[3]/virl:interface[3]"/>
    <connection dst="/virl:topology/virl:node[2]/virl:interface[4]" src="/virl:topology/virl:node[3]/virl:interface[4]"/>
    <connection dst="/virl:topology/virl:node[1]/virl:interface[5]" src="/virl:topology/virl:node[3]/virl:interface[1]"/>
    <connection dst="/virl:topology/virl:node[1]/virl:interface[6]" src="/virl:topology/virl:node[3]/virl:interface[2]"/>
</topology>