# Copyright (c) 2022 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
from resources.libraries.python.VPPUtil import VPPUtil


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

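    # A minimal usage sketch (illustrative only; assumes a Robot-provided
    # topology dict in ``nodes`` and a pullable image, both hypothetical here):
    #
    #   manager = ContainerManager(engine=u"Docker")
    #   manager.construct_container(
    #       name=u"DUT1_CNF1", node=nodes[u"DUT1"], image=u"ubuntu:22.04",
    #       cpuset_cpus=[2, 3], mnt=[u"/tmp/dir1:/mnt/host/"],
    #   )
    #   manager.acquire_all_containers()
    #   manager.create_all_containers()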
    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i + 1)])
            # Create container
            self.construct_container(i=i, **kwargs)

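    # For example (an illustrative sketch; parameters are hypothetical):
    #
    #   manager.construct_containers(
    #       name=u"DUT1_CNF", count=2, node=nodes[u"DUT1"],
    #       image=u"ubuntu:22.04",
    #   )
    #   # Registers two containers named DUT1_CNF1 and DUT1_CNF2.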
    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

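    # Typical lifecycle driven from test keywords (illustrative order only):
    #
    #   manager.acquire_all_containers()    # pull/download images
    #   manager.create_all_containers()     # start the container instances
    #   manager.start_vpp_in_all_containers()
    #   ...                                 # run the test
    #   manager.stop_all_containers()
    #   manager.destroy_all_containers()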
    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self, verify=True):
        """Start VPP in all containers.

        :param verify: If true, verify VPP is running in all containers
            after starting.
        :type verify: bool
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.start_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def _disconnect_papi_to_all_containers(self):
        """Disconnect any open PAPI connections to VPPs in containers.

        The current PAPI implementation caches open connections,
        so explicit disconnect is needed before VPP becomes inaccessible.

        Currently this is a protected method, as restart, stop and destroy
        are the only dangerous methods, and all are handled by ContainerManager.
        """
        for container_object in self.containers.values():
            PapiSocketExecutor.disconnect_by_node_and_socket(
                container_object.node,
                container_object.api_socket,
            )

    def restart_vpp_in_all_containers(self, verify=True):
        """Restart VPP in all containers.

        :param verify: If true, verify VPP is running in all containers
            after restarting.
        :type verify: bool
        """
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.restart_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        # For multiple containers, multiple fors are faster.
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vppctl()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.adjust_privileges()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp_papi()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers. One of:
            chain, cross_horiz, chain_functional, chain_ip4, pipeline_ip4,
            chain_vswitch or chain_ipsec. Chain topologies use one memif pair
            per container. The cross_horiz topology uses one memif and one
            physical interface per container (only a single container can be
            configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if not idx_match:
                    raise RuntimeError(
                        f"Container name {self.engine.container.name} "
                        f"does not end with an instance number."
                    )
                idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

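    # Illustrative invocation (a sketch; extra kwargs such as interface keys
    # are normally supplied by Robot keywords and are topology dependent):
    #
    #   manager.configure_vpp_in_all_containers(chain_topology=u"chain")
    #   # Each container gets one memif pair cross-connected via l2xc.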
    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: If a container exists, destroy it and create a new one.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self, verify=True):
        """Start VPP inside a container.

        :param verify: If true, verify VPP is up after starting.
        :type verify: bool
        """
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.CLI,
            self.container.name,
            self.container.cli_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            self.container.api_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            self.container.stats_socket,
        )
        if verify:
            self.verify_vpp()

    def restart_vpp(self, verify=True):
        """Restart VPP service inside a container.

        :param verify: If true, verify VPP is up after restarting.
        :type verify: bool
        """
        self.execute(u"pkill vpp")
        self.start_vpp(verify=verify)

    def verify_vpp(self):
        """Verify VPP is running and ready."""
        self.verify_vppctl()
        self.adjust_privileges()
        self.verify_vpp_papi()

    # TODO Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vppctl(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        This function waits a while so VPP can start.
        PCI interfaces are listed for debug purposes.
        When the check passes, the VPP API socket is created on the remote
        side, but its directory may not have the correct access rights yet.

        :param retries: Check for VPP this many times. Default: 120
        :param retry_wait: Wait this many seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        for _ in range(retries + 1):
            try:
                # Execute puts the command into single quotes,
                # so inner arguments are enclosed in double quotes here.
                self.execute(
                    u'vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute(u"chmod -R o+rwx /run/vpp")

    def verify_vpp_papi(self, retries=120, retry_wait=1):
        """Verify that VPP is available for PAPI.

        This also opens and caches PAPI connection for quick reuse.
        The connection is disconnected when ContainerManager decides to do so.

        :param retries: Check for VPP this many times. Default: 120
        :param retry_wait: Wait this many seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        # Wait for success.
        for _ in range(retries + 1):
            try:
                VPPUtil.vpp_show_version(
                    node=self.container.node,
                    remote_vpp_socket=self.container.api_socket,
                    log=False,
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP PAPI fails in container: {self.container.name}"
            )

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # We will pop the first core from the list to be a main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(self.container.page_size)
        vpp_config.add_default_hugepage_size(self.container.page_size)
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(self.container.page_size)
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

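    # A rough sketch of the startup.conf fragment the base config above maps
    # to (illustrative only; exact rendering is up to VppConfigGenerator):
    #
    #   unix {
    #     cli-listen
    #     nodaemon
    #     exec /tmp/running.exec
    #   }
    #   cpu { main-core 2 corelist-workers 3,4 }
    #   plugins { plugin default { disable } plugin memif_plugin.so { enable } }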
    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

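    # A minimal sketch of the templating step above (the template text is
    # hypothetical; real templates live under RESOURCES_TPL_CONTAINER):
    #
    #   from string import Template
    #   src = Template(u"create interface memif socket-id 1 filename $socket1")
    #   text = src.safe_substitute(socket1=u"/mnt/host/memif-DUT1_CNF1-1")
    #   # safe_substitute() leaves any unmatched $placeholders intact.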
    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default, the cpuset cgroup uses exclusive CPU/MEM. When a
        Docker/LXC container is initialized, a new cgroup /docker or /lxc is
        created under the cpuset parent tree. The new cgroup inherits the
        parent's cpu/mem exclusive settings, and those cannot be overridden
        from within the /docker or /lxc cgroup itself. This function sets the
        cgroups up so that both engines can coexist.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r focal -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and show information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        if "/" in self.container.image:
            cmd = f"docker pull {self.container.image}"
            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to create container {self.container.name}."
                )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

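    # Illustrative shape of the command built above (values hypothetical,
    # empty optional fields collapse to extra spaces in the real string):
    #
    #   docker run --privileged --detach --interactive --tty --rm \
    #     --cgroup-parent docker --cpuset-cpus=2,3 \
    #     --env MICROSERVICE_LABEL=DUT1_CNF1 \
    #     --volume /tmp/dir1:/mnt/host/ --name DUT1_CNF1 ubuntu:22.04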
    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)


class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == u"node":
                # Create and cache a connected SSH instance.
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            elif attr == u"name":
                # Socket paths are derived from the name, so they do not
                # add mutable state; this just saves some horizontal space
                # in callers.
                # TODO: Rename the dir so other apps can add sockets easily.
                # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
                path = f"/tmp/vpp_sockets/{value}"
                self.__dict__[u"socket_dir"] = path
                self.__dict__[u"api_socket"] = f"{path}/api.sock"
                self.__dict__[u"cli_socket"] = f"{path}/cli.sock"
                self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
            self.__dict__[attr] = value
        else:
            # Update the attribute based on its current type.
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value
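

# Example of the attribute handling above (an illustrative sketch; setting
# ``node`` is omitted because it opens a real SSH connection):
#
#   c = Container()
#   c.name = u"DUT1_CNF1"        # also derives socket_dir and *_socket paths
#   c.mnt = [u"/tmp/a:/mnt/a"]   # first assignment stores the list
#   c.mnt = u"/tmp/b:/mnt/b"     # later assignments append to the list
#   print(c.image)               # unset attributes read as None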