Introduce VPP-IPsec container tests.
[csit.git] / resources / libraries / python / ContainerUtils.py
1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
5 #
6 #     http://www.apache.org/licenses/LICENSE-2.0
7 #
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
13
14 """Library to manipulate Containers."""
15
16 from collections import OrderedDict, Counter
17 from io import open
18 from re import search
19 from string import Template
20 from time import sleep
21
22 from robot.libraries.BuiltIn import BuiltIn
23
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.CpuUtils import CpuUtils
26 from resources.libraries.python.ssh import SSH
27 from resources.libraries.python.topology import Topology, SocketType
28 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
29
30
31 __all__ = [
32     u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
33 ]
34
35 SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
36
37
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine class is looked up by name among the classes defined in
            # this module (e.g. LXC, Docker).
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # as a service
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not implemented, or if a
            container name lacks the numeric suffix required by chain_ipsec.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        # Number of containers per DUT; used to derive per-DUT memif/socket
        # identifiers below.
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                # NF instance index is the trailing digits of the container
                # name. Fail early with a clear message instead of hitting
                # UnboundLocalError on `idx` when the suffix is missing.
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match is None:
                    raise RuntimeError(
                        f"Container name {self.engine.container.name} "
                        f"does not end with a numeric suffix!"
                    )
                idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        :raises RuntimeError: If the container belongs to neither DUT1 nor
            DUT2 (interface variables would otherwise be unbound).
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        elif u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        else:
            raise RuntimeError(
                f"Container {self.engine.container.name} does not belong "
                f"to DUT1 nor DUT2!"
            )
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last NF in the chain faces the TG; others face a neighbour NF.
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
        else:
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        buffers = 215040
        if u"buffers" in kwargs:
            buffers = int(kwargs[u"buffers"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, buffers, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip arp memif2/{i} {tg_if_ip4} {tg_if_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_if2_ip4=tg_if_ip4,
            tg_if2_mac=tg_if_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        # Append the per-instance IPsec tunnel configuration generated on the
        # shared volume to the exec script VPP runs at startup.
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node in (kwargs[u"nodes"], 1) else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] if node in (kwargs[u"nodes"], 1) \
            else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
499
500
501 class ContainerEngine:
502     """Abstract class for container engine."""
503
504     def __init__(self):
505         """Init ContainerEngine object."""
506         self.container = None
507
508     def initialize(self):
509         """Initialize container object."""
510         self.container = Container()
511
    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
519
    def build(self):
        """Build container (compile).

        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
523
    def create(self):
        """Create/deploy container.

        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
527
    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
535
    def stop(self):
        """Stop container.

        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
539
    def destroy(self):
        """Destroy/remove container.

        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
543
    def info(self):
        """Report engine-specific information about container.

        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
547
    def system_info(self):
        """Report engine-specific system information.

        :raises NotImplementedError: Always; concrete engine must override.
        """
        raise NotImplementedError
551
    def install_supervisor(self):
        """Install and start supervisord inside a container.

        Acts only on LXC containers: installs the supervisor package via apt,
        writes a minimal supervisord configuration and starts the daemon.
        """
        # NOTE(review): Docker containers are skipped here — presumably their
        # images already ship with supervisor; confirm against the image
        # build definitions.
        if isinstance(self, LXC):
            # The initial sleep presumably gives the freshly started
            # container time to bring networking up before apt runs —
            # TODO confirm.
            self.execute(u"sleep 3; apt-get update")
            self.execute(u"apt-get install -y supervisor")
            # Minimal supervisord setup: unix socket for supervisorctl,
            # all state/log files under /tmp.
            config = \
                u"[unix_http_server]\n" \
                u"file  = /tmp/supervisor.sock\n\n" \
                u"[rpcinterface:supervisor]\n" \
                u"supervisor.rpcinterface_factory = " \
                u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
                u"[supervisorctl]\n" \
                u"serverurl = unix:///tmp/supervisor.sock\n\n" \
                u"[supervisord]\n" \
                u"pidfile = /tmp/supervisord.pid\n" \
                u"identifier = supervisor\n" \
                u"directory = /tmp\n" \
                u"logfile = /tmp/supervisord.log\n" \
                u"loglevel = debug\n" \
                u"nodaemon = false\n\n"
            # Write the configuration and launch the supervisord daemon.
            self.execute(
                f'echo "{config}" > {SUPERVISOR_CONF} && '
                f'supervisord -c {SUPERVISOR_CONF}'
            )
576
577     def start_vpp(self):
578         """Start VPP inside a container."""
579
580         config = \
581             u"[program:vpp]\n" \
582             u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
583             u"autostart = false\n" \
584             u"autorestart = false\n" \
585             u"redirect_stderr = true\n" \
586             u"priority = 1"
587         self.execute(
588             f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
589         )
590         self.execute(u"supervisorctl start vpp")
591
592         topo_instance = BuiltIn().get_library_instance(
593             u"resources.libraries.python.topology.Topology"
594         )
595         topo_instance.add_new_socket(
596             self.container.node,
597             SocketType.PAPI,
598             self.container.name,
599             f"/tmp/vpp_sockets/{self.container.name}/api.sock"
600         )
601         topo_instance.add_new_socket(
602             self.container.node,
603             SocketType.STATS,
604             self.container.name,
605             f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
606         )
607
608     def restart_vpp(self):
609         """Restart VPP service inside a container."""
610         self.execute(u"supervisorctl restart vpp")
611         self.execute(u"cat /tmp/supervisord.log")
612
613     # TODO Rewrite .execute to accept retries parameter and get rid of this
614     # function.
615     def verify_vpp(self, retries=120, retry_wait=1):
616         """Verify that VPP is installed and running inside container.
617
618         :param retries: Check for VPP for this number of times Default: 120
619         :param retry_wait: Wait for this number of seconds between retries.
620
621         """
622         cmd = (u"vppctl show pci 2>&1 | "
623                u"fgrep -v 'Connection refused' | "
624                u"fgrep -v 'No such file or directory'")
625
626         for _ in range(retries + 1):
627             try:
628                 self.execute(cmd)
629                 break
630             except RuntimeError:
631                 sleep(retry_wait)
632         else:
633             msg = f"VPP did not come up in container: {self.container.name}"
634             raise RuntimeError(msg)
635
636     def create_base_vpp_startup_config(self, cpuset_cpus=None):
637         """Create base startup configuration of VPP on container.
638
639         :returns: Base VPP startup configuration.
640         :rtype: VppConfigGenerator
641         """
642         if cpuset_cpus is None:
643             cpuset_cpus = self.container.cpuset_cpus
644
645         # Create config instance
646         vpp_config = VppConfigGenerator()
647         vpp_config.set_node(self.container.node)
648         vpp_config.add_unix_cli_listen()
649         vpp_config.add_unix_nodaemon()
650         vpp_config.add_unix_exec(u"/tmp/running.exec")
651         vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
652         vpp_config.add_statseg_per_node_counters(value=u"on")
653         # We will pop the first core from the list to be a main core
654         vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
655         # If more cores in the list, the rest will be used as workers.
656         if cpuset_cpus:
657             corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
658             vpp_config.add_cpu_corelist_workers(corelist_workers)
659
660         return vpp_config
661
662     def create_vpp_startup_config(self):
663         """Create startup configuration of VPP without DPDK on container.
664         """
665         vpp_config = self.create_base_vpp_startup_config()
666         vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")
667
668         # Apply configuration
669         self.execute(u"mkdir -p /etc/vpp/")
670         self.execute(
671             f'echo "{vpp_config.get_config_str()}" | '
672             f'tee /etc/vpp/startup.conf'
673         )
674
675     def create_vpp_startup_config_dpdk_dev(self, *devices):
676         """Create startup configuration of VPP with DPDK on container.
677
678         :param devices: List of PCI devices to add.
679         :type devices: list
680         """
681         vpp_config = self.create_base_vpp_startup_config()
682         vpp_config.add_dpdk_dev(*devices)
683         vpp_config.add_dpdk_no_tx_checksum_offload()
684         vpp_config.add_dpdk_log_level(u"debug")
685         vpp_config.add_plugin(u"disable", u"default")
686         vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
687         vpp_config.add_plugin(u"enable", u"memif_plugin.so")
688
689         # Apply configuration
690         self.execute(u"mkdir -p /etc/vpp/")
691         self.execute(
692             f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
693         )
694
695     def create_vpp_startup_config_func_dev(self):
696         """Create startup configuration of VPP on container for functional
697         vpp_device tests.
698         """
699         # Create config instance
700         vpp_config = VppConfigGenerator()
701         vpp_config.set_node(self.container.node)
702         vpp_config.add_unix_cli_listen()
703         vpp_config.add_unix_nodaemon()
704         vpp_config.add_unix_exec(u"/tmp/running.exec")
705         vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
706         vpp_config.add_statseg_per_node_counters(value=u"on")
707         vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")
708
709         # Apply configuration
710         self.execute(u"mkdir -p /etc/vpp/")
711         self.execute(
712             f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
713         )
714
715     def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, buffers,
716                                           *devices):
717         """Create startup configuration of VPP vswitch.
718
719         :param cpuset_cpus: CPU list to run on.
720         :param rxq: Number of interface RX queues.
721         :param buffers: Number of buffers per numa.
722         :param devices: List of PCI devices to add.
723         :type cpuset_cpus: list
724         :type rxq: int
725         :type buffers: int
726         :type devices: list
727         """
728         vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
729         vpp_config.add_dpdk_dev(*devices)
730         vpp_config.add_dpdk_log_level(u"debug")
731         vpp_config.add_plugin(u"disable", u"default")
732         vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
733         vpp_config.add_plugin(u"enable", u"memif_plugin.so")
734         vpp_config.add_dpdk_no_tx_checksum_offload()
735         vpp_config.add_buffers_per_numa(buffers)
736         vpp_config.add_dpdk_dev_default_rxq(rxq)
737
738         # Apply configuration
739         self.execute(u"mkdir -p /etc/vpp/")
740         self.execute(
741             f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
742         )
743
744     def create_vpp_startup_config_ipsec(self, cpuset_cpus):
745         """Create startup configuration of VPP with IPsec on container.
746
747         :param cpuset_cpus: CPU list to run on.
748         :type cpuset_cpus: list
749         """
750         vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
751         vpp_config.add_plugin(u"disable", u"default")
752         vpp_config.add_plugin(u"enable", u"memif_plugin.so")
753         vpp_config.add_plugin(u"enable", u"crypto_ia32_plugin.so")
754         vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
755         vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
756         vpp_config.add_heapsize(u"4G")
757         vpp_config.add_ip_heap_size(u"4G")
758         vpp_config.add_statseg_size(u"4G")
759
760         # Apply configuration
761         self.execute(u"mkdir -p /etc/vpp/")
762         self.execute(
763             f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
764         )
765
766     def create_vpp_exec_config(self, template_file, **kwargs):
767         """Create VPP exec configuration on container.
768
769         :param template_file: File name of a template script.
770         :param kwargs: Parameters for script.
771         :type template_file: str
772         :type kwargs: dict
773         """
774         running = u"/tmp/running.exec"
775
776         template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"
777
778         with open(template, "r") as src_file:
779             src = Template(src_file.read())
780             self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')
781
782     def is_container_running(self):
783         """Check if container is running."""
784         raise NotImplementedError
785
786     def is_container_present(self):
787         """Check if container is present."""
788         raise NotImplementedError
789
790     def _configure_cgroup(self, name):
791         """Configure the control group associated with a container.
792
793         By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
794         container is initialized a new cgroup /docker or /lxc is created under
795         cpuset parent tree. This newly created cgroup is inheriting parent
796         setting for cpu/mem exclusive parameter and thus cannot be overriden
797         within /docker or /lxc cgroup. This function is supposed to set cgroups
798         to allow coexistence of both engines.
799
800         :param name: Name of cgroup.
801         :type name: str
802         :raises RuntimeError: If applying cgroup settings via cgset failed.
803         """
804         ret, _, _ = self.container.ssh.exec_command_sudo(
805             u"cgset -r cpuset.cpu_exclusive=0 /"
806         )
807         if int(ret) != 0:
808             raise RuntimeError(u"Failed to apply cgroup settings.")
809
810         ret, _, _ = self.container.ssh.exec_command_sudo(
811             u"cgset -r cpuset.mem_exclusive=0 /"
812         )
813         if int(ret) != 0:
814             raise RuntimeError(u"Failed to apply cgroup settings.")
815
816         ret, _, _ = self.container.ssh.exec_command_sudo(
817             f"cgcreate -g cpuset:/{name}"
818         )
819         if int(ret) != 0:
820             raise RuntimeError(u"Failed to copy cgroup settings from root.")
821
822         ret, _, _ = self.container.ssh.exec_command_sudo(
823             f"cgset -r cpuset.cpu_exclusive=0 /{name}"
824         )
825         if int(ret) != 0:
826             raise RuntimeError(u"Failed to apply cgroup settings.")
827
828         ret, _, _ = self.container.ssh.exec_command_sudo(
829             f"cgset -r cpuset.mem_exclusive=0 /{name}"
830         )
831         if int(ret) != 0:
832             raise RuntimeError(u"Failed to apply cgroup settings.")
833
834
class LXC(ContainerEngine):
    """LXC implementation of the container engine."""

    # Constructor is inherited from ContainerEngine.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if not force:
                return
            self.destroy()

        if Topology.get_node_arch(self.container.node) == u"aarch64":
            target_arch = u"arm64"
        else:
            target_arch = u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        # Downloading a rootfs can be slow, hence the long timeout.
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-create -t download --name {self.container.name} "
            f"-- {image} --no-validate", timeout=1800
        )
        if int(rc):
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile).

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        config_path = f"/var/lib/lxc/{self.container.name}/config"

        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            tmpfs_entry = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            rc, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{tmpfs_entry}' >> {config_path}\""
            )
            if int(rc):
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                if guest_dir.endswith(u"/"):
                    options = u"bind,create=dir"
                else:
                    options = u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                # Make sure the host side of the bind mount exists.
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                rc, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' >> {config_path}\""
                )
                if int(rc):
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        if self.container.cpuset_cpus:
            cpuset_cpus = u",".join(
                str(cpu) for cpu in self.container.cpuset_cpus
            )
        else:
            cpuset_cpus = u""

        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        rc, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(rc):
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        if self.container.env:
            env = u"--keep-env " + u" ".join(
                f"--set-var {var!s}" for var in self.container.env
            )
        else:
            env = u""

        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-attach {env} --name {self.container.name} "
            f"-- /bin/sh -c '{command}; exit $?'", timeout=180
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-stop --name {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-destroy --force --name {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-info --name {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(u"lxc-checkconfig")
        if int(rc):
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        rc, stdout, _ = self.container.ssh.exec_command_sudo(
            f"lxc-info --no-humanize --state --name {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-info --no-humanize --name {self.container.name}"
        )
        return not rc

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-wait --name {self.container.name} --state '{state}'"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )
1059
1060
class Docker(ContainerEngine):
    """Docker implementation of the container engine."""

    # Constructor is inherited from ContainerEngine.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if not force:
                return
            self.destroy()

        if not self.container.image:
            # Pick the default SUT image matching the node architecture.
            if Topology.get_node_arch(self.container.node) == u"aarch64":
                img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM
            else:
                img = Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        # Image pulls can be slow, hence the long timeout.
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"docker pull {self.container.image}", timeout=1800
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile).

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        if self.container.cpuset_cpus:
            cpuset_cpus = u"--cpuset-cpus=" + u",".join(
                str(cpu) for cpu in self.container.cpuset_cpus
            )
        else:
            cpuset_cpus = u""

        # Temporary workaround - disabling due to bug in memif; this would
        # otherwise be derived from self.container.cpuset_mems.
        cpuset_mems = u""

        if self.container.env:
            env = u" ".join(f"--env {var!s}" for var in self.container.env)
        else:
            env = u""

        command = str(self.container.command) if self.container.command else u""

        if self.container.publish:
            publish = u" ".join(
                f"--publish  {var!s}" for var in self.container.publish
            )
        else:
            publish = u""

        if self.container.mnt:
            volume = u" ".join(
                f"--volume {mnt!s}" for mnt in self.container.mnt
            )
        else:
            volume = u""

        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"docker run --privileged --detach --interactive --tty --rm "
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} "
            f"{env} {volume} --name {self.container.name} "
            f"{self.container.image} {command}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"docker exec --interactive {self.container.name} "
            f"/bin/sh -c '{command}; exit $?'", timeout=180
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"docker stop {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"docker rm --force {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(
            f"docker inspect {self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        rc, _, _ = self.container.ssh.exec_command_sudo(u"docker system info")
        if int(rc):
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        rc, stdout, _ = self.container.ssh.exec_command_sudo(
            f"docker ps --all --quiet --filter name={self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty output means a matching container exists.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        rc, stdout, _ = self.container.ssh.exec_command_sudo(
            f"docker ps --quiet --filter name={self.container.name}"
        )
        if int(rc):
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty output means a matching container is running.
        return bool(stdout)
1243
1244
class Container:
    """Attribute bag describing a single container.

    Missing attributes read as None; assigning to an attribute that already
    holds a list appends instead of replacing; assigning ``node`` opens an
    SSH connection to that node as a side effect.
    """

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        # Only called when normal lookup fails, so unset attributes
        # silently read as None.
        return self.__dict__.get(attr, None)

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        if attr not in self.__dict__:
            # First assignment creates the attribute; setting the node
            # additionally establishes the SSH connection to it.
            if attr == u"node":
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        elif isinstance(self.__dict__[attr], list):
            # Re-assigning a list-valued attribute appends to it.
            self.__dict__[attr].append(value)
        else:
            self.__dict__[attr] = value