Remove supervisord dependency from containers
[csit.git] / resources / libraries / python / ContainerUtils.py
# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]


class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers, e.g.
            chain or cross_horiz. Chain topology uses 1 memif pair per
            container. Cross_horiz topology uses 1 memif and 1 physical
            interface per container (only a single container can be
            configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

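        # mid* are memif interface IDs and sid* are memif socket IDs; both
        # are numbered per DUT (via i % mod), so the k-th container on a
        # DUT gets one memif pair with socket IDs 2k+1 and 2k+2.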
        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                else:
                    raise RuntimeError(
                        f"Container name {self.engine.container.name} "
                        f"does not end with an instance number."
                    )
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

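        # The first NF in the chain resolves towards the TG if1 MAC and
        # the last one towards the TG if2 MAC; inner NFs use the synthetic
        # 52:54:00:00:xx:yy MAC of the neighbouring container.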
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
        else:
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

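        # Build the per-NF CLI: one master memif towards each side of the
        # service chain, bridged into bridge domains 1 and 2 respectively.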
        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip arp memif2/{i} {tg_if_ip4} {tg_if_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_if2_ip4=tg_if_ip4,
            tg_if2_mac=tg_if_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
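        # Containers on one node form a pipeline: the outer ends expose
        # per-container sockets towards the TG, while adjacent containers
        # share a memif-pipe-* socket, pairing the slave side (role2) of
        # one container with the master side (role1) of the next.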
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if it exists and create a new one.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
        )

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp()
        self.execute(u"cat /tmp/vppd.log")

    # TODO: Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vpp(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        :param retries: Check for VPP this number of times. Default: 120.
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        cmd = (u"vppctl show pci 2>&1 | "
               u"fgrep -v 'Connection refused' | "
               u"fgrep -v 'No such file or directory'")

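        # Retry until the command succeeds; the for-else branch runs only
        # if the loop completed without a break, i.e. VPP never came up.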
        for _ in range(retries + 1):
            try:
                self.execute(cmd)
                break
            except RuntimeError:
                sleep(retry_wait)
        else:
            msg = f"VPP did not come up in container: {self.container.name}"
            raise RuntimeError(msg)

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        if cpuset_cpus:
            # Pop the first core from the list to use as the main core;
            # note this mutates the caller's list.
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If there are more cores in the list, use the rest as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_heapsize(u"4G")
        vpp_config.add_ip_heap_size(u"4G")
        vpp_config.add_statseg_size(u"4G")

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_ia32_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

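        # safe_substitute() leaves any unmatched $placeholders in the
        # template untouched instead of raising KeyError.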
        with open(template, "r") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup uses exclusive CPU/MEM. When a
        Docker/LXC container is initialized, a new cgroup /docker or /lxc
        is created under the cpuset parent tree. This newly created cgroup
        inherits the parent's cpu/mem exclusive settings, which cannot be
        overridden within the /docker or /lxc cgroup itself. This function
        sets up the cgroups to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create cgroup.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

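        # The lxc-create "download" template fetches a prebuilt image;
        # --no-validate skips GPG validation of the downloaded image.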
776         cmd = f"lxc-create -t download --name {self.container.name} " \
777             f"-- {image} --no-validate"
778
779         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
780         if int(ret) != 0:
781             raise RuntimeError(u"Failed to create container.")
782
783         self._configure_cgroup(u"lxc")
784
785     def build(self):
786         """Build container (compile)."""
787         raise NotImplementedError
788
789     def create(self):
790         """Create/deploy an application inside a container on system.
791
792         :raises RuntimeError: If creating the container fails.
793         """
794         if self.container.mnt:
795             # LXC fix for tmpfs
796             # https://github.com/lxc/lxc/issues/434
797             mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
798             ret, _, _ = self.container.ssh.exec_command_sudo(
799                 f"sh -c \"echo '{mnt_e}' >> "
800                 f"/var/lib/lxc/{self.container.name}/config\""
801             )
802             if int(ret) != 0:
803                 raise RuntimeError(
804                     f"Failed to write {self.container.name} config."
805                 )
806
807             for mount in self.container.mnt:
808                 host_dir, guest_dir = mount.split(u":")
809                 options = u"bind,create=dir" if guest_dir.endswith(u"/") \
810                     else u"bind,create=file"
811                 entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
812                     f"none {options} 0 0"
813                 self.container.ssh.exec_command_sudo(
814                     f"sh -c \"mkdir -p {host_dir}\""
815                 )
816                 ret, _, _ = self.container.ssh.exec_command_sudo(
817                     f"sh -c \"echo '{entry}' "
818                     f">> /var/lib/lxc/{self.container.name}/config\""
819                 )
820                 if int(ret) != 0:
821                     raise RuntimeError(
822                         f"Failed to write {self.container.name} config."
823                     )
824
825         cpuset_cpus = u",".join(
826             f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
827             if self.container.cpuset_cpus else u""
828
829         ret, _, _ = self.container.ssh.exec_command_sudo(
830             f"lxc-start --name {self.container.name} --daemon"
831         )
832         if int(ret) != 0:
833             raise RuntimeError(
834                 f"Failed to start container {self.container.name}."
835             )
836         self._lxc_wait(u"RUNNING")
837
838         # Workaround for LXC to be able to allocate all cpus including isolated.
839         ret, _, _ = self.container.ssh.exec_command_sudo(
840             u"cgset --copy-from / lxc/"
841         )
842         if int(ret) != 0:
843             raise RuntimeError(u"Failed to copy cgroup to LXC")
844
845         ret, _, _ = self.container.ssh.exec_command_sudo(
846             f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
847         )
848         if int(ret) != 0:
849             raise RuntimeError(
850                 f"Failed to set cpuset.cpus to container {self.container.name}."
851             )
852
853     def execute(self, command):
854         """Start a process inside a running container.
855
856         Runs the specified command inside the container specified by name. The
857         container has to be running already.
858
859         :param command: Command to run inside container.
860         :type command: str
861         :raises RuntimeError: If running the command failed.
862         """
863         env = u"--keep-env " + u" ".join(
864             f"--set-var {env!s}" for env in self.container.env) \
865             if self.container.env else u""
866
867         cmd = f"lxc-attach {env} --name {self.container.name} " \
868             f"-- /bin/sh -c '{command}'"
869
870         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
871         if int(ret) != 0:
872             raise RuntimeError(
873                 f"Failed to run command inside container {self.container.name}."
874             )
875
876     def stop(self):
877         """Stop a container.
878
879         :raises RuntimeError: If stopping the container failed.
880         """
881         cmd = f"lxc-stop --name {self.container.name}"
882
883         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
884         if int(ret) != 0:
885             raise RuntimeError(
886                 f"Failed to stop container {self.container.name}."
887             )
888         self._lxc_wait(u"STOPPED|FROZEN")
889
890     def destroy(self):
891         """Destroy a container.
892
893         :raises RuntimeError: If destroying container failed.
894         """
895         cmd = f"lxc-destroy --force --name {self.container.name}"
896
897         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
898         if int(ret) != 0:
899             raise RuntimeError(
900                 f"Failed to destroy container {self.container.name}."
901             )
902
903     def info(self):
904         """Query and shows information about a container.
905
906         :raises RuntimeError: If getting info about a container failed.
907         """
908         cmd = f"lxc-info --name {self.container.name}"
909
910         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
911         if int(ret) != 0:
912             raise RuntimeError(
913                 f"Failed to get info about container {self.container.name}."
914             )
915
916     def system_info(self):
917         """Check the current kernel for LXC support.
918
919         :raises RuntimeError: If checking LXC support failed.
920         """
921         cmd = u"lxc-checkconfig"
922
923         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
924         if int(ret) != 0:
925             raise RuntimeError(u"Failed to check LXC support.")
926
927     def is_container_running(self):
928         """Check if container is running on node.
929
930         :returns: True if container is running.
931         :rtype: bool
932         :raises RuntimeError: If getting info about a container failed.
933         """
934         cmd = f"lxc-info --no-humanize --state --name {self.container.name}"
935
936         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
937         if int(ret) != 0:
938             raise RuntimeError(
939                 f"Failed to get info about container {self.container.name}."
940             )
941         return u"RUNNING" in stdout
942
943     def is_container_present(self):
944         """Check if container is existing on node.
945
946         :returns: True if container is present.
947         :rtype: bool
948         :raises RuntimeError: If getting info about a container failed.
949         """
950         cmd = f"lxc-info --no-humanize --name {self.container.name}"
951
952         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
953         return not ret
954
955     def _lxc_wait(self, state):
956         """Wait for a specific container state.
957
958         :param state: Specify the container state(s) to wait for.
959         :type state: str
960         :raises RuntimeError: If waiting for state of a container failed.
961         """
962         cmd = f"lxc-wait --name {self.container.name} --state '{state}'"
963
964         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
965         if int(ret) != 0:
966             raise RuntimeError(
967                 f"Failed to wait for state '{state}' "
968                 f"of container {self.container.name}."
969             )
970
971
972 class Docker(ContainerEngine):
973     """Docker implementation."""
974
975     # Implicit constructor is inherited.
976
977     def acquire(self, force=True):
978         """Pull an image or a repository from a registry.
979
980         :param force: Destroy a container if exists.
981         :type force: bool
982         :raises RuntimeError: If pulling a container failed.
983         """
984         if self.is_container_present():
985             if force:
986                 self.destroy()
987             else:
988                 return
989
990         if not self.container.image:
991             img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
992                 if Topology.get_node_arch(self.container.node) == u"aarch64" \
993                 else Constants.DOCKER_SUT_IMAGE_UBUNTU
994             setattr(self.container, u"image", img)
995
996         cmd = f"docker pull {self.container.image}"
997
998         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
999         if int(ret) != 0:
1000             raise RuntimeError(
1001                 f"Failed to create container {self.container.name}."
1002             )
1003
1004         if self.container.cpuset_cpus:
1005             self._configure_cgroup(u"docker")
1006
1007     def build(self):
1008         """Build container (compile)."""
1009         raise NotImplementedError
1010
1011     def create(self):
1012         """Create/deploy container.
1013
1014         :raises RuntimeError: If creating a container failed.
1015         """
1016         cpuset_cpus = u"--cpuset-cpus=" + u",".join(
1017             f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
1018             if self.container.cpuset_cpus else u""
1019
1020         cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
1021             if self.container.cpuset_mems is not None else u""
1022         # Temporary workaround - disabling due to bug in memif
1023         cpuset_mems = u""
1024
1025         env = u" ".join(f"--env {env!s}" for env in self.container.env) \
1026             if self.container.env else u""
1027
1028         command = str(self.container.command) if self.container.command else u""
1029
1030         publish = u" ".join(
1031             f"--publish  {var!s}" for var in self.container.publish
1032         ) if self.container.publish else u""
1033
1034         volume = u" ".join(
1035             f"--volume {mnt!s}" for mnt in self.container.mnt) \
1036             if self.container.mnt else u""
1037
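        # --cgroup-parent places the container under the cgroup prepared by
        # _configure_cgroup(u"docker"), so the relaxed cpuset settings apply.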
1038         cmd = f"docker run --privileged --detach --interactive --tty --rm " \
1039             f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
1040             f"{env} {volume} --name {self.container.name} " \
1041             f"{self.container.image} {command}"
1042
1043         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1044         if int(ret) != 0:
1045             raise RuntimeError(
1046                 f"Failed to create container {self.container.name}"
1047             )
1048
1049         self.info()
1050
1051     def execute(self, command):
1052         """Start a process inside a running container.
1053
1054         Runs the specified command inside the container specified by name. The
1055         container has to be running already.
1056
1057         :param command: Command to run inside container.
1058         :type command: str
1059         :raises RuntimeError: If running the command in a container failed.
1060         """
1061         cmd = f"docker exec --interactive {self.container.name} " \
1062             f"/bin/sh -c '{command}'"
1063
1064         ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
1065         if int(ret) != 0:
1066             raise RuntimeError(
1067                 f"Failed to execute command in container {self.container.name}."
1068             )
1069
1070     def stop(self):
1071         """Stop running container.
1072
1073         :raises RuntimeError: If stopping a container failed.
1074         """
1075         cmd = f"docker stop {self.container.name}"
1076
1077         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1078         if int(ret) != 0:
1079             raise RuntimeError(
1080                 f"Failed to stop container {self.container.name}."
1081             )
1082
1083     def destroy(self):
1084         """Remove a container.
1085
1086         :raises RuntimeError: If removing a container failed.
1087         """
1088         cmd = f"docker rm --force {self.container.name}"
1089
1090         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1091         if int(ret) != 0:
1092             raise RuntimeError(
1093                 f"Failed to destroy container {self.container.name}."
1094             )
1095
1096     def info(self):
1097         """Return low-level information on Docker objects.
1098
1099         :raises RuntimeError: If getting info about a container failed.
1100         """
1101         cmd = f"docker inspect {self.container.name}"
1102
1103         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1104         if int(ret) != 0:
1105             raise RuntimeError(
1106                 f"Failed to get info about container {self.container.name}."
1107             )
1108
1109     def system_info(self):
1110         """Display the docker system-wide information.
1111
1112         :raises RuntimeError: If displaying system information failed.
1113         """
1114         cmd = u"docker system info"
1115
1116         ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1117         if int(ret) != 0:
1118             raise RuntimeError(u"Failed to get system info.")
1119
1120     def is_container_present(self):
1121         """Check if container is present on node.
1122
1123         :returns: True if container is present.
1124         :rtype: bool
1125         :raises RuntimeError: If getting info about a container failed.
1126         """
1127         cmd = f"docker ps --all --quiet --filter name={self.container.name}"
1128
1129         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1130         if int(ret) != 0:
1131             raise RuntimeError(
1132                 f"Failed to get info about container {self.container.name}."
1133             )
1134         return bool(stdout)
1135
1136     def is_container_running(self):
1137         """Check if container is running on node.
1138
1139         :returns: True if container is running.
1140         :rtype: bool
1141         :raises RuntimeError: If getting info about a container failed.
1142         """
1143         cmd = f"docker ps --quiet --filter name={self.container.name}"
1144
1145         ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1146         if int(ret) != 0:
1147             raise RuntimeError(
1148                 f"Failed to get info about container {self.container.name}."
1149             )
1150         return bool(stdout)
1151
1152
1153 class Container:
1154     """Container class."""
1155
1156     def __getattr__(self, attr):
1157         """Get attribute custom implementation.
1158
1159         :param attr: Attribute to get.
1160         :type attr: str
1161         :returns: Attribute value or None.
1162         :rtype: any
1163         """
1164         try:
1165             return self.__dict__[attr]
1166         except KeyError:
1167             return None
1168
1169     def __setattr__(self, attr, value):
1170         """Set attribute custom implementation.
1171
1172         :param attr: Attribute to set.
1173         :param value: Value to set.
1174         :type attr: str
1175         :type value: any
1176         """
1177         try:
1178             # Check if attribute exists
1179             self.__dict__[attr]
1180         except KeyError:
1181             # Creating new attribute
1182             if attr == u"node":
1183                 self.__dict__[u"ssh"] = SSH()
1184                 self.__dict__[u"ssh"].connect(value)
1185             self.__dict__[attr] = value
1186         else:
1187             # Updating attribute base of type
1188             if isinstance(self.__dict__[attr], list):
1189                 self.__dict__[attr].append(value)
1190             else:
1191                 self.__dict__[attr] = value