# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


class ContainerManager:
    """Container lifecycle management class."""

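    # A typical end-to-end lifecycle, as driven from test keywords (a hedged
    # sketch; the kwargs shown are illustrative, not exhaustive):
    #
    #   manager = ContainerManager(engine=u"Docker")
    #   manager.construct_containers(
    #       name=u"CNF", count=2, node=node, mnt=[u"/tmp/dir1:/mnt/dir1"]
    #   )
    #   manager.acquire_all_containers()
    #   manager.create_all_containers()
    #   manager.start_vpp_in_all_containers()
    #   manager.configure_vpp_in_all_containers(u"chain")
    #   ...
    #   manager.stop_all_containers()
    #   manager.destroy_all_containers()
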
    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        An ordinal number is automatically appended to the container name
        as a suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

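    # Naming sketch (name and count are illustrative): construct_containers(
    # name=u"DUT1_CNF", count=2, ...) stores two containers under the names
    # "DUT1_CNF1" and "DUT1_CNF2".
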
    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers. Supported
            values: chain, chain_functional, chain_ip4, pipeline_ip4,
            chain_vswitch, chain_ipsec and cross_horiz. Chain topologies use
            1 memif pair per container. Cross_horiz topology uses 1 memif and
            1 physical interface per container (only a single container can be
            configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                else:
                    raise RuntimeError(
                        f"Container name {self.engine.container.name} "
                        f"does not end with an instance number."
                    )
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented."
                )

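    # A worked example of the indexing above (illustrative): with 4 containers
    # spread over 2 DUTs, dut_cnt = 2 and mod = 2, so successive containers
    # get (mid1, sid1, sid2) = (1, 1, 2), (2, 3, 4), (1, 1, 2), (2, 3, 4),
    # i.e. memif IDs and socket IDs restart on each DUT.
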
    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

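    # The MAC derivation above, worked through (illustrative): with nodes=2,
    # the NF with mid1=1 faces the TG, so vif1_mac = tg_if1_mac; the NF with
    # mid1=2 peers with NF 1 and gets vif1_mac = 52:54:00:00:01:02, which is
    # exactly mac2 of the mid=1 instance.
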
    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
        else:
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_if_ip4} {tg_if_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_if2_ip4=tg_if_ip4,
            tg_if2_mac=tg_if_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy the container if it exists and create a new one.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
        )
        self.verify_vpp()
        self.adjust_privileges()

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp()

    # TODO Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vpp(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        :param retries: Check for VPP this many times. Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        for _ in range(retries + 1):
            try:
                self.execute(
                    u"vppctl show pci 2>&1 | "
                    u"fgrep -v 'Connection refused' | "
                    u"fgrep -v 'No such file or directory'"
                )
                break
            except RuntimeError:
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

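    # Note on the for/else above: the else branch runs only when the loop
    # exhausts all retries without hitting break, i.e. after roughly
    # retries * retry_wait (about 120 s with the defaults) of waiting.
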
    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute(u"chmod -R o+rwx /run/vpp")

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        if cpuset_cpus:
            # We will pop the first core from the list to be a main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_heapsize(u"4G")
        vpp_config.add_ip_heap_size(u"4G")
        vpp_config.add_statseg_size(u"4G")

        return vpp_config

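    # A worked example of the core split above (illustrative values): with
    # cpuset_cpus=[2, 3, 4], pop(0) makes core 2 the VPP main core and the
    # remaining "3,4" becomes the worker corelist. Note that the pop mutates
    # the caller's list.
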
    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

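    # The template mechanism above is plain string.Template substitution,
    # e.g. (an illustrative snippet, not one of the real template files):
    #
    #   src = Template(u"create interface memif id $mid1 socket-id $sid1")
    #   src.safe_substitute(mid1=1, sid1=1)
    #   # -> u"create interface memif id 1 socket-id 1"
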
    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup uses exclusive CPU/MEM. When a Docker/LXC
        container is initialized, a new cgroup /docker or /lxc is created under
        the cpuset parent tree. This newly created cgroup inherits the parent
        setting for the cpu/mem exclusive parameter and thus cannot be
        overridden within the /docker or /lxc cgroup. This function sets the
        cgroups to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including
        # isolated ones.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and show information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy the container if it exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to pull image for container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)


class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == u"node":
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        else:
            # Update the attribute based on its type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value
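
# Attribute semantics of Container, illustrated (a sketch; the mount paths
# are made up): assigning u"node" opens an SSH connection to that node as a
# side effect, and re-assigning an attribute whose current value is a list
# appends to it instead of replacing it:
#
#   container = Container()
#   container.mnt = [u"/tmp/dir1:/mnt/dir1"]
#   container.mnt = u"/tmp/dir2:/mnt/dir2"
#   # container.mnt == [u"/tmp/dir1:/mnt/dir1", u"/tmp/dir2:/mnt/dir2"]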