1 # Copyright (c) 2024 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library to manipulate Containers."""
from collections import OrderedDict, Counter
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
from resources.libraries.python.VPPUtil import VPPUtil
# Public API of this module.
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Path to supervisord configuration inside containers.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self, verify=True):
        """Start VPP in all containers.

        :param verify: If true, verify VPP came up in every container.
        :type verify: bool
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.start_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def _disconnect_papi_to_all_containers(self):
        """Disconnect any open PAPI connections to VPPs in containers.

        The current PAPI implementation caches open connections,
        so explicit disconnect is needed before VPP becomes inaccessible.

        Currently this is a protected method, as restart, stop and destroy
        are the only dangerous methods, and all are handled by
        ContainerManager.
        """
        for container_object in self.containers.values():
            PapiSocketExecutor.disconnect_by_node_and_socket(
                container_object.node,
                container_object.api_socket,
            )

    def restart_vpp_in_all_containers(self, verify=True):
        """Restart VPP in all containers.

        :param verify: If true, verify VPP came up in every container.
        :type verify: bool
        """
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.restart_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        # For multiple containers, multiple fors are faster.
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vppctl()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.adjust_privileges()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp_papi()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not supported.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    f"{container.node['host']}{container.node['port']}"
                    for container in self.containers.values()
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                # NF instance index is the numeric suffix of container name.
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            elif chain_topology == u"chain_dma":
                self._configure_vpp_chain_dma(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_chain_dma(self, **kwargs):
        """Configure VPP in chain topology with l2xc (dma).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dma_wqs = kwargs[u"dma_wqs"]
        self.engine.create_vpp_startup_config_dma(dma_wqs)

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_dma.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # NOTE(review): if the container name matches neither DUT1 nor DUT2,
        # if_pci/if_name stay unbound and the calls below raise NameError.
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_mode=u"interrupt"
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self, verify=True):
        """Start VPP inside a container.

        :param verify: If true, verify VPP came up.
        :type verify: bool
        """
        self.execute(
            u"/usr/bin/vpp -c /etc/vpp/startup.conf")
        # Register VPP sockets of this container in the topology,
        # so other keywords can find them by node and name.
        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.CLI,
            self.container.name,
            self.container.cli_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            self.container.api_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            self.container.stats_socket,
        )
        if verify:
            self.verify_vpp()

    def restart_vpp(self, verify=True):
        """Restart VPP service inside a container.

        :param verify: If true, verify VPP came up.
        :type verify: bool
        """
        self.execute(u"pkill vpp")
        self.start_vpp(verify=verify)

    def verify_vpp(self):
        """Verify VPP is running and ready."""
        self.verify_vppctl()
        self.adjust_privileges()
        self.verify_vpp_papi()

    # TODO Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vppctl(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        This function waits a while so VPP can start.
        PCI interfaces are listed for debug purposes.
        When the check passes, VPP API socket is created on remote side,
        but perhaps its directory does not have the correct access rights yet.

        :param retries: Check for VPP for this number of times Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP did not come up in time.
        """
        for _ in range(retries + 1):
            try:
                # Execute puts the command into single quotes,
                # so inner arguments are enclosed in qouble quotes here.
                self.execute(
                    u'/usr/bin/vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute("chmod -R o+rwx /run/vpp")

    def verify_vpp_papi(self, retries=120, retry_wait=1):
        """Verify that VPP is available for PAPI.

        This also opens and caches PAPI connection for quick reuse.
        The connection is disconnected when ContainerManager decides to do so.

        :param retries: Check for VPP for this number of times Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP PAPI did not come up in time.
        """
        for _ in range(retries + 1):
            try:
                VPPUtil.vpp_show_version(
                    node=self.container.node,
                    remote_vpp_socket=self.container.api_socket,
                    log=False,
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP PAPI fails in container: {self.container.name}"
            )

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list.
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # We will pop the first core from the list to be a main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(self.container.page_size)
        vpp_config.add_default_hugepage_size(self.container.page_size)
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(self.container.page_size)
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_dma(self, dma_devices):
        """Create startup configuration of VPP DMA.

        :param dma_devices: DMA devices list.
        :type dma_devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"enable", u"dma_intel_plugin.so")
        vpp_config.add_dma_dev(dma_devices)

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if ret != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpus=0 /{name}"
        )
        if ret != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mems=0 /{name}"
        )
        if ret != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")
867 class LXC(ContainerEngine):
868 """LXC implementation."""
870 # Implicit constructor is inherited.
872 def acquire(self, force=True):
873 """Acquire a privileged system object where configuration is stored.
875 :param force: If a container exists, destroy it and create a new
878 :raises RuntimeError: If creating the container or writing the container
881 if self.is_container_present():
887 target_arch = u"arm64" \
888 if Topology.get_node_arch(self.container.node) == u"aarch64" \
891 image = self.container.image if self.container.image \
892 else f"-d ubuntu -r jammy -a {target_arch}"
894 cmd = f"lxc-create -t download --name {self.container.name} " \
895 f"-- {image} --no-validate"
897 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
899 raise RuntimeError(u"Failed to create container.")
901 self._configure_cgroup(u"lxc")
904 """Build container (compile)."""
905 raise NotImplementedError
908 """Create/deploy an application inside a container on system.
910 :raises RuntimeError: If creating the container fails.
912 if self.container.mnt:
914 # https://github.com/lxc/lxc/issues/434
915 mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
916 ret, _, _ = self.container.ssh.exec_command_sudo(
917 f"sh -c \"echo '{mnt_e}' >> "
918 f"/var/lib/lxc/{self.container.name}/config\""
922 f"Failed to write {self.container.name} config."
925 for mount in self.container.mnt:
926 host_dir, guest_dir = mount.split(u":")
927 options = u"bind,create=dir" if guest_dir.endswith(u"/") \
928 else u"bind,create=file"
929 entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
930 f"none {options} 0 0"
931 self.container.ssh.exec_command_sudo(
932 f"sh -c \"mkdir -p {host_dir}\""
934 ret, _, _ = self.container.ssh.exec_command_sudo(
935 f"sh -c \"echo '{entry}' "
936 f">> /var/lib/lxc/{self.container.name}/config\""
940 f"Failed to write {self.container.name} config."
943 cpuset_cpus = u",".join(
944 f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
945 if self.container.cpuset_cpus else u""
947 ret, _, _ = self.container.ssh.exec_command_sudo(
948 f"lxc-start --name {self.container.name} --daemon"
952 f"Failed to start container {self.container.name}."
954 self._lxc_wait(u"RUNNING")
956 # Workaround for LXC to be able to allocate all cpus including isolated.
957 ret, _, _ = self.container.ssh.exec_command_sudo(
958 u"cgset --copy-from / lxc/"
961 raise RuntimeError(u"Failed to copy cgroup to LXC")
963 ret, _, _ = self.container.ssh.exec_command_sudo(
964 f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
968 f"Failed to set cpuset.cpus to container {self.container.name}."
971 def execute(self, command):
972 """Start a process inside a running container.
974 Runs the specified command inside the container specified by name. The
975 container has to be running already.
977 :param command: Command to run inside container.
979 :raises RuntimeError: If running the command failed.
981 env = u"--keep-env " + u" ".join(
982 f"--set-var {env!s}" for env in self.container.env) \
983 if self.container.env else u""
985 cmd = f"lxc-attach {env} --name {self.container.name} " \
986 f"-- /bin/sh -c '{command}'"
988 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
991 f"Failed to run command inside container {self.container.name}."
997 :raises RuntimeError: If stopping the container failed.
999 cmd = f"lxc-stop --name {self.container.name}"
1001 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1004 f"Failed to stop container {self.container.name}."
1006 self._lxc_wait(u"STOPPED|FROZEN")
1009 """Destroy a container.
1011 :raises RuntimeError: If destroying container failed.
1013 cmd = f"lxc-destroy --force --name {self.container.name}"
1015 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1018 f"Failed to destroy container {self.container.name}."
1022 """Query and shows information about a container.
1024 :raises RuntimeError: If getting info about a container failed.
1026 cmd = f"lxc-info --name {self.container.name}"
1028 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1031 f"Failed to get info about container {self.container.name}."
1034 def system_info(self):
1035 """Check the current kernel for LXC support.
1037 :raises RuntimeError: If checking LXC support failed.
1039 cmd = u"lxc-checkconfig"
1041 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1043 raise RuntimeError(u"Failed to check LXC support.")
1045 def is_container_running(self):
1046 """Check if container is running on node.
1048 :returns: True if container is running.
1050 :raises RuntimeError: If getting info about a container failed.
1052 cmd = f"lxc-info --no-humanize --state --name {self.container.name}"
1054 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1057 f"Failed to get info about container {self.container.name}."
1059 return u"RUNNING" in stdout
def is_container_present(self):
    """Check if container exists on node.

    :returns: True if container is present.
    :rtype: bool
    """
    cmd = f"lxc-info --no-humanize --name {self.container.name}"

    ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
    # NOTE(review): the return statement was dropped by the extraction;
    # lxc-info exits non-zero for a nonexistent container, so presence is
    # inferred from the exit code — confirm against upstream.
    return not int(ret)
def _lxc_wait(self, state):
    """Wait for a specific container state.

    :param state: Specify the container state(s) to wait for,
        e.g. u"STOPPED|FROZEN" (pipe-separated alternatives).
    :type state: str
    :raises RuntimeError: If waiting for state of a container failed.
    """
    cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

    ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
    # NOTE(review): guard restored per the uniform pattern in this file.
    if int(ret) != 0:
        raise RuntimeError(
            f"Failed to wait for state '{state}' "
            f"of container {self.container.name}."
        )
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        # NOTE(review): the destroy-or-return branch was dropped by the
        # extraction; restored to honor the documented `force` semantics.
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            # Pick the default SUT image matching the node architecture.
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        # Only registry-qualified names (containing "/") are pulled;
        # bare local image names are used as-is.
        if "/" in self.container.image:
            cmd = f"docker pull {self.container.image}"

            # Long timeout: pulling a large image can be slow.
            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to create container {self.container.name}."
                )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        # NOTE(review): the overriding assignment following this comment was
        # dropped by the extraction; restored so the workaround takes effect.
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker.slice {cpuset_cpus} {cpuset_mems} " \
            f"{publish} {env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        # NOTE(review): upstream pattern logs container details after create;
        # restored — confirm against the original file.
        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # --all includes stopped containers; --quiet prints IDs only, so a
        # non-empty stdout means the container exists.
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        # Without --all, docker ps lists running containers only.
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)
1272 """Container class."""
def __getattr__(self, attr):
    """Get attribute custom implementation.

    Only called when normal lookup fails, so missing attributes yield
    None instead of raising AttributeError.

    :param attr: Attribute to get.
    :type attr: str
    :returns: Attribute value or None.
    :rtype: any
    """
    # NOTE(review): the try/except frame was dropped by the extraction;
    # restored so unknown attributes return None per the docstring.
    try:
        return self.__dict__[attr]
    except KeyError:
        return None
def __setattr__(self, attr, value):
    """Set attribute custom implementation.

    New attributes are created (with special handling for u"node" and
    u"name"); existing list-valued attributes are appended to instead of
    being replaced.

    :param attr: Attribute to set.
    :param value: Value to set.
    :type attr: str
    :type value: any
    """
    try:
        # Check if attribute exists
        self.__dict__[attr]
    except KeyError:
        # Creating new attribute
        if attr == u"node":
            # Create and cache a connected SSH instance.
            self.__dict__[u"ssh"] = SSH()
            self.__dict__[u"ssh"].connect(value)
        elif attr == u"name":
            # Socket paths to not have mutable state,
            # this just saves some horizontal space in callers.
            # TODO: Rename the dir so other apps can add sockets easily.
            # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
            path = f"/tmp/vpp_sockets/{value}"
            self.__dict__[u"socket_dir"] = path
            self.__dict__[u"api_socket"] = f"{path}/api.sock"
            self.__dict__[u"cli_socket"] = f"{path}/cli.sock"
            self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
        self.__dict__[attr] = value
    else:
        # Updating attribute base of type
        if isinstance(self.__dict__[attr], list):
            self.__dict__[attr].append(value)
        else:
            self.__dict__[attr] = value