# Copyright (c) 2020 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from io import open
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


class ContainerManager:
    """Container lifecycle management class."""
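
    # A minimal usage sketch (assumptions: a Robot Framework topology dict
    # `nodes` is available and the image name is illustrative; this comment
    # is documentation only, not part of the library API):
    #
    #     manager = ContainerManager(engine=u"Docker")
    #     manager.construct_container(
    #         name=u"DUT1_CNF1", node=nodes[u"DUT1"], image=u"ubuntu:18.04",
    #         cpuset_cpus=[2, 3], mnt=[u"/tmp/vpp_sockets/DUT1_CNF1/:/run/vpp/"]
    #     )
    #     manager.acquire_all_containers()
    #     manager.create_all_containers()
    #     manager.execute_on_all_containers(u"mkdir -p /etc/vpp/")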

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        a suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i + 1)])
            # Create container
            self.construct_container(i=i, **kwargs)
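
    # A hedged example of the naming scheme (values illustrative):
    #
    #     manager.construct_containers(name=u"CNF", count=2, ...)
    #
    # constructs containers named u"CNF1" (i=0) and u"CNF2" (i=1).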

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers. It can
            be chain or cross_horiz. Chain topology uses 1 memif pair per
            container. Cross_horiz topology uses 1 memif and 1 physical
            interface in container (only a single container can be
            configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If the chain topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if not idx_match:
                    raise RuntimeError(
                        f"Container name {self.engine.container.name} "
                        f"does not end with an instance index."
                    )
                idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs
                )
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )
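
    # A worked example of the id computation above (illustrative): with four
    # containers spread across two DUT hosts, dut_cnt == 2 and mod == 2, so
    # successive containers get (mid1, sid1, sid2) of (1, 1, 2), (2, 3, 4),
    # (1, 1, 2) and (2, 3, 4), i.e. memif id numbering restarts on each DUT.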

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances)
        )

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy the container if it exists and create a new one.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
        )
        self.verify_vpp()
        self.adjust_privileges()

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp()

    # TODO: Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vpp(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        :param retries: Check for VPP this number of times. Default: 120.
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP does not come up inside the container.
        """
        for _ in range(retries + 1):
            try:
                self.execute(
                    u"vppctl show pci 2>&1 | "
                    u"fgrep -v 'Connection refused' | "
                    u"fgrep -v 'No such file or directory'"
                )
                break
            except RuntimeError:
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute(u"chmod -R o+rwx /run/vpp")

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # We will pop the first core from the list to be the main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(u"2M")
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(u"2M")
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config
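
    # A hedged sketch of the startup.conf produced above (exact rendering
    # depends on VppConfigGenerator; values illustrative for
    # cpuset_cpus=[2, 3, 4]):
    #
    #     unix {
    #       cli-listen /run/vpp/cli.sock
    #       nodaemon
    #       exec /tmp/running.exec
    #     }
    #     cpu {
    #       main-core 2
    #       corelist-workers 3,4
    #     }
    #     plugins {
    #       plugin default { disable }
    #       plugin memif_plugin.so { enable }
    #     }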

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')
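
    # A minimal sketch of the templating step in create_vpp_exec_config
    # (standalone and illustrative; the real templates live under
    # Constants.RESOURCES_TPL_CONTAINER):
    #
    #     from string import Template
    #     src = Template(u"create interface memif id $mid1 socket $socket1")
    #     src.safe_substitute(mid1=1, socket1=u"/run/vpp/memif-CNF1-1")
    #     # -> u"create interface memif id 1 socket /run/vpp/memif-CNF1-1"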

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup uses exclusive CPU/MEM. When a Docker/LXC
        container is initialized, a new cgroup /docker or /lxc is created under
        the cpuset parent tree. This newly created cgroup inherits the parent
        setting for the cpu/mem exclusive parameter and thus it cannot be
        overridden within the /docker or /lxc cgroup itself. This function sets
        the cgroups up to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the
            container config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including
        # isolated ones.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying the container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and show information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if it exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)


class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if the attribute already exists
            self.__dict__[attr]
        except KeyError:
            # Create a new attribute; setting the node also opens
            # an SSH connection to it.
            if attr == u"node":
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        else:
            # Update the attribute based on its type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value
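
# A hedged sketch of the attribute semantics implemented above (illustrative;
# assigning to an attribute that already holds a list appends to it, and
# reading an attribute that was never set returns None):
#
#     container = Container()
#     container.mnt = [u"/tmp/dir1:/mnt/dir1"]
#     container.mnt = u"/tmp/dir2:/mnt/dir2"  # appended; mnt now has 2 items
#     assert container.image is None          # unset attribute reads as None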