Revert "fix(IPsecUtil): Delete keywords no longer used"
[csit.git] / resources / libraries / python / ContainerUtils.py
# Copyright (c) 2024 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
from resources.libraries.python.VPPUtil import VPPUtil


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


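# Typical usage from Robot keywords (illustrative sketch; exact kwargs and
# node objects come from the test suite, the names below are made up):
#
#   manager = ContainerManager(engine=u"Docker")
#   manager.construct_containers(
#       name=u"DUT1_CNF", count=2, node=node, mnt=[u"/tmp/dir:/mnt/dir/"]
#   )
#   manager.acquire_all_containers()
#   manager.create_all_containers()
#   manager.start_vpp_in_all_containers()
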
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self, verify=True):
        """Start VPP in all containers.

        :param verify: If true, verify that VPP becomes ready afterwards.
        :type verify: bool
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.start_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def _disconnect_papi_to_all_containers(self):
        """Disconnect any open PAPI connections to VPPs in containers.

        The current PAPI implementation caches open connections,
        so explicit disconnect is needed before VPP becomes inaccessible.

        Currently this is a protected method, as restart, stop and destroy
        are the only dangerous methods, and all are handled by ContainerManager.
        """
        for container_object in self.containers.values():
            PapiSocketExecutor.disconnect_by_node_and_socket(
                container_object.node,
                container_object.api_socket,
            )

    def restart_vpp_in_all_containers(self, verify=True):
        """Restart VPP in all containers.

        :param verify: If true, verify that VPP becomes ready afterwards.
        :type verify: bool
        """
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.restart_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        # For multiple containers, a separate loop per step is faster.
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vppctl()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.adjust_privileges()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp_papi()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers, e.g.
            chain or cross_horiz. Chain topology uses 1 memif pair per
            container. Cross_horiz topology uses 1 memif and 1 physical
            interface in container (only a single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    f"{container.node['host']}{container.node['port']}"
                    for container in self.containers.values()
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
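            # Illustrative example (values assumed, not from the suite):
            # with 4 containers spread over 2 DUTs, mod == 2, so the second
            # container on a DUT (i % mod == 1) gets memif ids
            # mid1 == mid2 == 2 and socket ids sid1 == 3, sid2 == 4.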
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if not idx_match:
                    raise RuntimeError(
                        f"No numeric suffix in container name: "
                        f"{self.engine.container.name}"
                    )
                idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            elif chain_topology == u"chain_dma":
                self._configure_vpp_chain_dma(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_chain_dma(self, **kwargs):
        """Configure VPP in chain topology with l2xc (dma).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dma_wqs = kwargs[u"dma_wqs"]
        self.engine.create_vpp_startup_config_dma(dma_wqs)

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_dma.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        :raises RuntimeError: If the container name matches neither DUT.
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        elif u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        else:
            raise RuntimeError(
                f"Unsupported container name: {self.engine.container.name}"
            )
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
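        # Illustrative example (assumed values): for mid1=2 with nodes=2,
        # (mid1 - 1) % nodes + 1 == 2 != 1, so vif1_mac becomes
        # 52:54:00:00:01:02, i.e. a memif MAC of the previous NF in the chain.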
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in an IPsec chain container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: If true, destroy an existing container before creating.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self, verify=True):
        """Start VPP inside a container.

        :param verify: If true, verify that VPP comes up.
        :type verify: bool
        """
        self.execute(u"/usr/bin/vpp -c /etc/vpp/startup.conf")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.CLI,
            self.container.name,
            self.container.cli_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            self.container.api_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            self.container.stats_socket,
        )
        if verify:
            self.verify_vpp()

    def restart_vpp(self, verify=True):
        """Restart VPP service inside a container.

        :param verify: If true, verify that VPP comes up.
        :type verify: bool
        """
        self.execute(u"pkill vpp")
        self.start_vpp(verify=verify)

    def verify_vpp(self):
        """Verify VPP is running and ready."""
        self.verify_vppctl()
        self.adjust_privileges()
        self.verify_vpp_papi()

    # TODO: Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vppctl(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        This function waits a while so VPP can start.
        PCI interfaces are listed for debug purposes.
        When the check passes, VPP API socket is created on remote side,
        but perhaps its directory does not have the correct access rights yet.

        :param retries: Check for VPP this many times. Default: 120.
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP does not come up in time.
        """
        for _ in range(retries + 1):
            try:
                # Execute puts the command into single quotes,
                # so inner arguments are enclosed in double quotes here.
                self.execute(
                    u'/usr/bin/vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
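            # This else branch runs only if the loop above never hit `break`,
            # i.e. vppctl did not answer within the retry budget.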
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute(u"chmod -R o+rwx /run/vpp")

    def verify_vpp_papi(self, retries=120, retry_wait=1):
        """Verify that VPP is available for PAPI.

        This also opens and caches PAPI connection for quick reuse.
        The connection is disconnected when ContainerManager decides to do so.

        :param retries: Check for VPP this many times. Default: 120.
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP PAPI does not come up in time.
        """
        # Wait for success.
        for _ in range(retries + 1):
            try:
                VPPUtil.vpp_show_version(
                    node=self.container.node,
                    remote_vpp_socket=self.container.api_socket,
                    log=False,
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP PAPI fails in container: {self.container.name}"
            )

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # Pop the first core from the list to be the main core.
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores are in the list, the rest are used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(self.container.page_size)
        vpp_config.add_default_hugepage_size(self.container.page_size)
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(self.container.page_size)
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

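    # For orientation, the generated startup.conf then contains sections like
    # the following (illustrative only; the exact text is produced by
    # VppConfigGenerator and the core numbers are assumed):
    #
    #   unix { cli-listen ... exec /tmp/running.exec }
    #   cpu { main-core 2 corelist-workers 3,4 }
    #   plugins { plugin default { disable } plugin memif_plugin.so { enable } }
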
    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_dma(self, dma_devices):
        """Create startup configuration of VPP DMA.

        :param dma_devices: DMA devices list.
        :type dma_devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"enable", u"dma_intel_plugin.so")
        vpp_config.add_dma_dev(dma_devices)

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')
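        # Illustrative example of the substitution: a template line such as
        #   create memif socket id 1 filename $socket1
        # becomes, with socket1=u"/mnt/dir/memif-nf1-1" (assumed value):
        #   create memif socket id 1 filename /mnt/dir/memif-nf1-1
        # Placeholders without a matching kwarg are left intact by
        # safe_substitute.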

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup uses exclusive CPU/MEM. When a Docker/LXC
        container is initialized, a new cgroup /docker or /lxc is created under
        the cpuset parent tree. This newly created cgroup inherits the parent
        setting for the cpu/mem exclusive parameter and thus it cannot be
        overridden within the /docker or /lxc cgroup. This function sets the
        cgroups up to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If creating the cgroup or applying settings
            via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create cgroup.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpus=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mems=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r jammy -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

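        # Illustrative resulting command for an x86_64 node without a custom
        # image (container name assumed):
        #   lxc-create -t download --name DUT1_CNF1 \
        #       -- -d ubuntu -r jammy -a amd64 --no-validate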
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If writing the container config or starting
            the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
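                # E.g. (illustrative) the mount u"/tmp/dir:/mnt/dir/" yields:
                #   lxc.mount.entry = /tmp/dir mnt/dir/ none bind,create=dir 0 0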
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and show information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy the container if it exists.
        :type force: bool
        :raises RuntimeError: If pulling the image failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        if "/" in self.container.image:
            cmd = f"docker pull {self.container.image}"
            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to pull image for container {self.container.name}."
                )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker.slice {cpuset_cpus} {cpuset_mems} " \
            f"{publish} {env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

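        # Illustrative assembled command (values assumed for the example):
        #   docker run --privileged --detach --interactive --tty --rm \
        #       --cgroup-parent docker.slice --cpuset-cpus=2,3 \
        #       --env MICROSERVICE_LABEL=DUT1_CNF1 --volume /tmp/dir:/mnt/dir \
        #       --name DUT1_CNF1 <image>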
        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)


class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == u"node":
                # Create and cache a connected SSH instance.
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            elif attr == u"name":
                # Precompute socket paths from the name so they are not
                # mutable state; this just saves some horizontal space
                # in callers.
                # TODO: Rename the dir so other apps can add sockets easily.
                # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
                path = f"/tmp/vpp_sockets/{value}"
                self.__dict__[u"socket_dir"] = path
                self.__dict__[u"api_socket"] = f"{path}/api.sock"
                self.__dict__[u"cli_socket"] = f"{path}/cli.sock"
                self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
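                # E.g. (illustrative) name u"DUT1_CNF1" yields:
                #   /tmp/vpp_sockets/DUT1_CNF1/api.sock   (PAPI)
                #   /tmp/vpp_sockets/DUT1_CNF1/cli.sock   (CLI)
                #   /tmp/vpp_sockets/DUT1_CNF1/stats.sock (stats segment)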
            self.__dict__[attr] = value
        else:
            # Update the existing attribute based on its type.
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value