# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
from resources.libraries.python.VPPUtil import VPPUtil


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

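    # A minimal usage sketch (assumptions hedged: "node" stands for a
    # topology node dict provided by the surrounding Robot Framework suite):
    #
    #   manager = ContainerManager(engine=u"Docker")
    #   manager.construct_container(
    #       name=u"nf1", node=node, mnt=[u"/tmp/dir:/mnt/host/"]
    #   )
    #   manager.acquire_all_containers()
    #   manager.create_all_containers()
    #   manager.start_vpp_in_all_containers()
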
    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

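    # For example, construct_containers(name=u"nf", count=2, ...) creates
    # two containers named "nf1" and "nf2".
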
    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self, verify=True):
        """Start VPP in all containers.

        :param verify: If true, verify that VPP is up in all containers.
        :type verify: bool
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.start_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def _disconnect_papi_to_all_containers(self):
        """Disconnect any open PAPI connections to VPPs in containers.

        The current PAPI implementation caches open connections,
        so explicit disconnect is needed before VPP becomes inaccessible.

        Currently this is a protected method, as restart, stop and destroy
        are the only dangerous methods, and all are handled by
        ContainerManager.
        """
        for container_object in self.containers.values():
            PapiSocketExecutor.disconnect_by_node_and_socket(
                container_object.node,
                container_object.api_socket,
            )

    def restart_vpp_in_all_containers(self, verify=True):
        """Restart VPP in all containers.

        :param verify: If true, verify that VPP is up in all containers.
        :type verify: bool
        """
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.restart_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        # For multiple containers, multiple fors are faster.
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vppctl()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.adjust_privileges()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp_papi()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers. One of:
            chain, cross_horiz, chain_functional, chain_ip4, pipeline_ip4,
            chain_vswitch or chain_ipsec. Chain topology uses 1 memif pair per
            container. Cross_horiz topology uses 1 memif and 1 physical
            interface in container (only a single container can be
            configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

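        # Worked example (assumed layout): with 4 containers spread evenly
        # over 2 DUTs, mod == 2, so i=0 yields mid1=mid2=1, sid1=1, sid2=2
        # and i=1 yields mid1=mid2=2, sid1=3, sid2=4; the pattern then
        # repeats on the second DUT.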
        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if not idx_match:
                    raise RuntimeError(
                        f"No NF instance index found in container name: "
                        f"{self.engine.container.name}"
                    )
                idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

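        # For i == 1 the generated CLI block reads (sketch):
        #   create interface memif id 1 socket-id 1 master
        #   set interface state memif1/1 up
        #   set interface l2 bridge memif1/1 1
        #   create interface memif id 1 socket-id 2 master
        #   set interface state memif2/1 up
        #   set interface l2 bridge memif2/1 2
        #   set ip neighbor memif2/1 <tg_pf_ip4> <tg_pf_mac> static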
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self, verify=True):
        """Start VPP inside a container."""
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            self.container.api_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            self.container.stats_socket,
        )
        if verify:
            self.verify_vpp()

    def restart_vpp(self, verify=True):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp(verify=verify)

    def verify_vpp(self):
        """Verify VPP is running and ready."""
        self.verify_vppctl()
        self.adjust_privileges()
        self.verify_vpp_papi()

    # TODO: Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vppctl(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        This function waits a while so VPP can start.
        PCI interfaces are listed for debug purposes.
        When the check passes, VPP API socket is created on remote side,
        but perhaps its directory does not have the correct access rights yet.

        :param retries: Check for VPP this many times. Default: 120.
        :param retry_wait: Wait this many seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP does not come up in time.
        """
        for _ in range(retries + 1):
            try:
                # Execute puts the command into single quotes,
                # so inner arguments are enclosed in double quotes here.
                self.execute(
                    u'vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
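        # Note: the else branch of a for loop runs only when the loop was
        # never broken out of, i.e. when every retry attempt failed.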
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute("chmod -R o+rwx /run/vpp")

    def verify_vpp_papi(self, retries=120, retry_wait=1):
        """Verify that VPP is available for PAPI.

        This also opens and caches PAPI connection for quick reuse.
        The connection is disconnected when ContainerManager decides to do so.

        :param retries: Check for VPP this many times. Default: 120.
        :param retry_wait: Wait this many seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP PAPI does not respond in time.
        """
        # Wait for success.
        for _ in range(retries + 1):
            try:
                VPPUtil.vpp_show_version(
                    node=self.container.node,
                    remote_vpp_socket=self.container.api_socket,
                    log=False,
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP PAPI fails in container: {self.container.name}"
            )

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # We will pop the first core from the list to be a main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores are in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(u"2M")
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(u"2M")
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

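    # A sketch of the startup.conf stanzas this base config expands to,
    # assuming cpuset_cpus == [2, 3, 4]; exact keys and rendering are
    # determined by VppConfigGenerator, so treat this as illustrative only:
    #
    #   unix {
    #     cli-listen ...
    #     nodaemon
    #     exec /tmp/running.exec
    #   }
    #   cpu {
    #     main-core 2
    #     corelist-workers 3,4
    #   }
    #   buffers { buffers-per-numa 215040 }
    #   plugins {
    #     plugin default { disable }
    #     plugin memif_plugin.so { enable }
    #     plugin perfmon_plugin.so { enable }
    #   }
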
    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

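    # create_vpp_exec_config() relies on string.Template placeholders in the
    # .exec templates; a minimal self-contained sketch of the substitution
    # step (template text is hypothetical):
    #
    #   from string import Template
    #   src = Template(u"create interface memif id $mid1 socket-id $sid1")
    #   print(src.safe_substitute(mid1=1, sid1=1))
    #   # -> create interface memif id 1 socket-id 1
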
    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup uses exclusive CPU/MEM. When a
        Docker/LXC container is initialized, a new cgroup /docker or /lxc
        is created under the cpuset parent tree. This newly created cgroup
        inherits the parent's cpu/mem exclusive setting, which therefore
        cannot be overridden within the /docker or /lxc cgroup itself.
        This function adjusts the cgroups to allow coexistence of both
        engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r focal -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including
        # isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and show information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        if "/" in self.container.image:
            cmd = f"docker pull {self.container.image}"
            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to create container {self.container.name}."
                )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)


class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Create a new attribute
            if attr == u"node":
                # Create and cache a connected SSH instance.
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            elif attr == u"name":
                # Pre-compute socket paths so they do not add mutable state;
                # this just saves some horizontal space in callers.
                # TODO: Rename the dir so other apps can add sockets easily.
                # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
                path = f"/tmp/vpp_sockets/{value}"
                self.__dict__[u"socket_dir"] = path
                self.__dict__[u"api_socket"] = f"{path}/api.sock"
                self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
            self.__dict__[attr] = value
        else:
            # Update the attribute based on its type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value
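

# Container is deliberately schema-less; a hedged illustration of the
# __setattr__/__getattr__ side effects above ("node_dict" is an assumed
# topology node dict):
#
#   c = Container()
#   c.node = node_dict   # also creates and connects c.ssh
#   c.name = u"nf1"      # also derives c.socket_dir and api/stats paths
#   c.mnt = [u"/tmp/dir:/mnt/host/"]
#   c.mnt = u"/tmp/dir2:/mnt/host2/"  # list attributes append on re-set
#   print(c.image)       # missing attributes read as None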