1 # Copyright (c) 2022 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library to manipulate Containers."""
from collections import OrderedDict, Counter
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
from resources.libraries.python.VPPUtil import VPPUtil
# Public API of this module.
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Path to supervisord configuration inside containers.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class.
        self.engine.initialize()
        # Set parameters.
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self, verify=True):
        """Start VPP in all containers.

        :param verify: If true, verify VPP after starting all containers.
        :type verify: bool
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.start_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def _disconnect_papi_to_all_containers(self):
        """Disconnect any open PAPI connections to VPPs in containers.

        The current PAPI implementation caches open connections,
        so explicit disconnect is needed before VPP becomes inaccessible.

        Currently this is a protected method, as restart, stop and destroy
        are the only dangerous methods, and all are handled by ContainerManager.
        """
        for container_object in self.containers.values():
            PapiSocketExecutor.disconnect_by_node_and_socket(
                container_object.node,
                container_object.api_socket,
            )

    def restart_vpp_in_all_containers(self, verify=True):
        """Restart VPP in all containers.

        :param verify: If true, verify VPP after restarting all containers.
        :type verify: bool
        """
        # PAPI connections must be closed before VPP goes down.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.restart_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        # For multiple containers, multiple fors are faster.
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vppctl()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.adjust_privileges()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp_papi()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # Container ordinals within one DUT drive memif/socket IDs.
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                # NF instance index is the trailing number of container name.
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First/last NF in the chain faces the TG; others face a neighbor NF.
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        # NOTE(review): the per-DUT MAC index constants below are
        # reconstructed — verify against upstream CSIT ContainerUtils.py.
        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self, verify=True):
        """Start VPP inside a container.

        :param verify: If true, verify VPP start.
        :type verify: bool
        """
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        # Register the container's VPP sockets in the topology so keywords
        # elsewhere can reach CLI/PAPI/stats of this VPP instance.
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.CLI,
            self.container.name,
            self.container.cli_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            self.container.api_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            self.container.stats_socket,
        )
        if verify:
            self.verify_vpp()

    def restart_vpp(self, verify=True):
        """Restart VPP service inside a container.

        :param verify: If true, verify VPP restart.
        :type verify: bool
        """
        self.execute(u"pkill vpp")
        self.start_vpp(verify=verify)

    def verify_vpp(self):
        """Verify VPP is running and ready."""
        self.verify_vppctl()
        self.adjust_privileges()
        self.verify_vpp_papi()

    # TODO Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vppctl(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        This function waits a while so VPP can start.
        PCI interfaces are listed for debug purposes.
        When the check passes, VPP API socket is created on remote side,
        but perhaps its directory does not have the correct access rights yet.

        :param retries: Check for VPP for this number of times Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP did not come up in time.
        """
        for _ in range(retries + 1):
            try:
                # Execute puts the command into single quotes,
                # so inner arguments are enclosed in qouble quotes here.
                self.execute(
                    u'vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            # Loop exhausted without a successful vppctl call.
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute("chmod -R o+rwx /run/vpp")

    def verify_vpp_papi(self, retries=120, retry_wait=1):
        """Verify that VPP is available for PAPI.

        This also opens and caches PAPI connection for quick reuse.
        The connection is disconnected when ContainerManager decides to do so.

        :param retries: Check for VPP for this number of times Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP PAPI is not usable in time.
        """
        for _ in range(retries + 1):
            try:
                VPPUtil.vpp_show_version(
                    node=self.container.node,
                    remote_vpp_socket=self.container.api_socket,
                    log=False,
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            # Loop exhausted without a successful PAPI call.
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP PAPI fails in container: {self.container.name}"
            )

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list.
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # We will pop the first core from the list to be a main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(self.container.page_size)
        vpp_config.add_default_hugepage_size(self.container.page_size)
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(self.container.page_size)
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r focal -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs:
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # Zero return code means the container exists.
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        # Only images with a registry/repository path need an explicit pull.
        if "/" in self.container.image:
            cmd = f"docker pull {self.container.image}"
            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to create container {self.container.name}."
                )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()
1143 def execute(self, command):
1144 """Start a process inside a running container.
1146 Runs the specified command inside the container specified by name. The
1147 container has to be running already.
1149 :param command: Command to run inside container.
1151 :raises RuntimeError: If running the command in a container failed.
1153 cmd = f"docker exec --interactive {self.container.name} " \
1154 f"/bin/sh -c '{command}'"
1156 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
1159 f"Failed to execute command in container {self.container.name}."
1163 """Stop running container.
1165 :raises RuntimeError: If stopping a container failed.
1167 cmd = f"docker stop {self.container.name}"
1169 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1172 f"Failed to stop container {self.container.name}."
1176 """Remove a container.
1178 :raises RuntimeError: If removing a container failed.
1180 cmd = f"docker rm --force {self.container.name}"
1182 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1185 f"Failed to destroy container {self.container.name}."
1189 """Return low-level information on Docker objects.
1191 :raises RuntimeError: If getting info about a container failed.
1193 cmd = f"docker inspect {self.container.name}"
1195 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1198 f"Failed to get info about container {self.container.name}."
1201 def system_info(self):
1202 """Display the docker system-wide information.
1204 :raises RuntimeError: If displaying system information failed.
1206 cmd = u"docker system info"
1208 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1210 raise RuntimeError(u"Failed to get system info.")
1212 def is_container_present(self):
1213 """Check if container is present on node.
1215 :returns: True if container is present.
1217 :raises RuntimeError: If getting info about a container failed.
1219 cmd = f"docker ps --all --quiet --filter name={self.container.name}"
1221 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1224 f"Failed to get info about container {self.container.name}."
1228 def is_container_running(self):
1229 """Check if container is running on node.
1231 :returns: True if container is running.
1233 :raises RuntimeError: If getting info about a container failed.
1235 cmd = f"docker ps --quiet --filter name={self.container.name}"
1237 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1240 f"Failed to get info about container {self.container.name}."
1246 """Container class."""
1248 def __getattr__(self, attr):
1249 """Get attribute custom implementation.
1251 :param attr: Attribute to get.
1253 :returns: Attribute value or None.
1257 return self.__dict__[attr]
1261 def __setattr__(self, attr, value):
1262 """Set attribute custom implementation.
1264 :param attr: Attribute to set.
1265 :param value: Value to set.
1270 # Check if attribute exists
1273 # Creating new attribute
1275 # Create and cache a connected SSH instance.
1276 self.__dict__[u"ssh"] = SSH()
1277 self.__dict__[u"ssh"].connect(value)
1278 elif attr == u"name":
1279 # Socket paths to not have mutable state,
1280 # this just saves some horizontal space in callers.
1281 # TODO: Rename the dir so other apps can add sockets easily.
1282 # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
1283 path = f"/tmp/vpp_sockets/{value}"
1284 self.__dict__[u"socket_dir"] = path
1285 self.__dict__[u"api_socket"] = f"{path}/api.sock"
1286 self.__dict__[u"cli_socket"] = f"{path}/cli.sock"
1287 self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
1288 self.__dict__[attr] = value
1290 # Updating attribute base of type
1291 if isinstance(self.__dict__[attr], list):
1292 self.__dict__[attr].append(value)
1294 self.__dict__[attr] = value