# Copyright (c) 2021 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Library to manipulate Containers."""

from collections import OrderedDict, Counter
from re import search
from string import Template
from time import sleep

from robot.libraries.BuiltIn import BuiltIn

from resources.libraries.python.Constants import Constants
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.PapiExecutor import PapiSocketExecutor
from resources.libraries.python.ssh import SSH
from resources.libraries.python.topology import Topology, SocketType
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
from resources.libraries.python.VPPUtil import VPPUtil


__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class.
        self.engine.initialize()

        # Set parameters.
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables.
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance.
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        a suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix.
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container.
            self.construct_container(i=i, **kwargs)

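    # A minimal usage sketch (illustrative, not executed; the "nodes"
    # dictionary and all keyword values below are hypothetical):
    #
    #   manager = ContainerManager(engine=u"Docker")
    #   manager.construct_containers(
    #       name=u"DUT1_CNF", count=2, node=nodes[u"DUT1"],
    #       mnt=[u"/tmp/vpp_sockets/DUT1_CNF:/root/sockets/"]
    #   )
    #   manager.acquire_all_containers()
    #   manager.create_all_containers()
    #   manager.start_vpp_in_all_containers()
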
    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self, verify=True):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.start_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def _disconnect_papi_to_all_containers(self):
        """Disconnect any open PAPI connections to VPPs in containers.

        The current PAPI implementation caches open connections,
        so explicit disconnect is needed before VPP becomes inaccessible.

        Currently this is a protected method, as restart, stop and destroy
        are the only dangerous methods, and all are handled by ContainerManager.
        """
        for container_object in self.containers.values():
            PapiSocketExecutor.disconnect_by_node_and_socket(
                container_object.node,
                container_object.api_socket,
            )

    def restart_vpp_in_all_containers(self, verify=True):
        """Restart VPP in all containers."""
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            # For multiple containers, delayed verify is faster.
            self.engine.restart_vpp(verify=False)
        if verify:
            self.verify_vpp_in_all_containers()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        # For multiple containers, multiple fors are faster.
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vppctl()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.adjust_privileges()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp_papi()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers; can be
            chain or cross_horiz. Chain topology uses 1 memif pair per
            container. Cross_horiz topology uses 1 memif and 1 physical
            interface in the container (only a single container can be
            configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information.
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_queues=1
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

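    # For n_instances == 1 (illustrative), one rendered instance block is:
    #   create interface memif id 1 socket-id 1 master
    #   set interface state memif1/1 up
    #   set interface l2 bridge memif1/1 1
    #   create interface memif id 1 socket-id 2 master
    #   set interface state memif2/1 up
    #   set interface l2 bridge memif2/1 2
    #   set ip neighbor memif2/1 <tg_pf_ip4> <tg_pf_mac> static
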
    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if u"DUT1" in self.engine.container.name:
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        # TODO: Rework if containers can be affected outside ContainerManager.
        self._disconnect_papi_to_all_containers()
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: If a container exists, destroy it and create a new one.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self, verify=True):
        """Start VPP inside a container."""
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            self.container.api_socket,
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            self.container.stats_socket,
        )
        if verify:
            self.verify_vpp()

    def restart_vpp(self, verify=True):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp(verify=verify)

    def verify_vpp(self):
        """Verify VPP is running and ready."""
        self.verify_vppctl()
        self.adjust_privileges()
        self.verify_vpp_papi()

    # TODO: Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vppctl(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        This function waits a while so VPP can start.
        PCI interfaces are listed for debug purposes.
        When the check passes, VPP API socket is created on remote side,
        but perhaps its directory does not have the correct access rights yet.

        :param retries: Check for VPP this many times. Default: 120.
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        for _ in range(retries + 1):
            try:
                # Execute puts the command into single quotes,
                # so inner arguments are enclosed in double quotes here.
                self.execute(
                    u'vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute(u"chmod -R o+rwx /run/vpp")

    def verify_vpp_papi(self, retries=120, retry_wait=1):
        """Verify that VPP is available for PAPI.

        This also opens and caches a PAPI connection for quick reuse.
        The connection is disconnected when ContainerManager decides to do so.

        :param retries: Check for VPP this many times. Default: 120.
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        for _ in range(retries + 1):
            try:
                VPPUtil.vpp_show_version(
                    node=self.container.node,
                    remote_vpp_socket=self.container.api_socket,
                    log=False,
                )
                break
            except (RuntimeError, AssertionError):
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP PAPI fails in container: {self.container.name}"
            )

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance.
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        if cpuset_cpus:
            # We will pop the first core from the list to be the main core.
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores are in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(self.container.page_size)
        vpp_config.add_default_hugepage_size(self.container.page_size)
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(self.container.page_size)
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

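    # Illustrative shape of the startup.conf this produces (assuming
    # VppConfigGenerator renders one section per add_* call; abbreviated):
    #   unix { cli-listen ... nodaemon exec /tmp/running.exec }
    #   socksvr { socket-name <Constants.SOCKSVR_PATH> }
    #   cpu { main-core <first cpu> corelist-workers <remaining cpus> }
    #   plugins { plugin default { disable } plugin memif_plugin.so { enable } ... }
    #   statseg { size 2G per-node-counters on ... }
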
    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration.
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration.
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_plugin(u"enable", u"perfmon_plugin.so")

        # Apply configuration.
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

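    # Example of the substitution above (illustrative): a template line
    #   create interface memif id ${mid1} socket ${socket1}
    # rendered with safe_substitute(mid1=1, socket1=u"/root/sockets/memif-1")
    # becomes
    #   create interface memif id 1 socket /root/sockets/memif-1
    # Any ${placeholder} without a matching kwarg is left untouched, which is
    # why safe_substitute is preferred over substitute here.
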
    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default, the cpuset cgroup uses exclusive CPU/MEM. When a Docker/LXC
        container is initialized, a new cgroup /docker or /lxc is created under
        the cpuset parent tree. This newly created cgroup inherits the parent
        setting for the cpu/mem exclusive parameter, which therefore cannot be
        overridden within the /docker or /lxc cgroup itself. This function sets
        the cgroups to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

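    # For name == u"docker" (illustrative), the commands issued on the node
    # via sudo are:
    #   cgset -r cpuset.cpu_exclusive=0 /
    #   cgset -r cpuset.mem_exclusive=0 /
    #   cgcreate -g cpuset:/docker
    #   cgset -r cpuset.cpu_exclusive=0 /docker
    #   cgset -r cpuset.mem_exclusive=0 /docker
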

class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r focal -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

872 """Build container (compile)."""
873 raise NotImplementedError
876 """Create/deploy an application inside a container on system.
878 :raises RuntimeError: If creating the container fails.
880 if self.container.mnt:
882 # https://github.com/lxc/lxc/issues/434
883 mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
884 ret, _, _ = self.container.ssh.exec_command_sudo(
885 f"sh -c \"echo '{mnt_e}' >> "
886 f"/var/lib/lxc/{self.container.name}/config\""
890 f"Failed to write {self.container.name} config."
893 for mount in self.container.mnt:
894 host_dir, guest_dir = mount.split(u":")
895 options = u"bind,create=dir" if guest_dir.endswith(u"/") \
896 else u"bind,create=file"
897 entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
898 f"none {options} 0 0"
899 self.container.ssh.exec_command_sudo(
900 f"sh -c \"mkdir -p {host_dir}\""
902 ret, _, _ = self.container.ssh.exec_command_sudo(
903 f"sh -c \"echo '{entry}' "
904 f">> /var/lib/lxc/{self.container.name}/config\""
908 f"Failed to write {self.container.name} config."
911 cpuset_cpus = u",".join(
912 f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
913 if self.container.cpuset_cpus else u""
915 ret, _, _ = self.container.ssh.exec_command_sudo(
916 f"lxc-start --name {self.container.name} --daemon"
920 f"Failed to start container {self.container.name}."
922 self._lxc_wait(u"RUNNING")
924 # Workaround for LXC to be able to allocate all cpus including isolated.
925 ret, _, _ = self.container.ssh.exec_command_sudo(
926 u"cgset --copy-from / lxc/"
929 raise RuntimeError(u"Failed to copy cgroup to LXC")
931 ret, _, _ = self.container.ssh.exec_command_sudo(
932 f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
936 f"Failed to set cpuset.cpus to container {self.container.name}."
    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and show information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        if u"/" in self.container.image:
            cmd = f"docker pull {self.container.image}"
            ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to create container {self.container.name}."
                )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

1094 """Build container (compile)."""
1095 raise NotImplementedError
1098 """Create/deploy container.
1100 :raises RuntimeError: If creating a container failed.
1102 cpuset_cpus = u"--cpuset-cpus=" + u",".join(
1103 f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
1104 if self.container.cpuset_cpus else u""
1106 cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
1107 if self.container.cpuset_mems is not None else u""
1108 # Temporary workaround - disabling due to bug in memif
1111 env = u" ".join(f"--env {env!s}" for env in self.container.env) \
1112 if self.container.env else u""
1114 command = str(self.container.command) if self.container.command else u""
1116 publish = u" ".join(
1117 f"--publish {var!s}" for var in self.container.publish
1118 ) if self.container.publish else u""
1121 f"--volume {mnt!s}" for mnt in self.container.mnt) \
1122 if self.container.mnt else u""
1124 cmd = f"docker run --privileged --detach --interactive --tty --rm " \
1125 f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
1126 f"{env} {volume} --name {self.container.name} " \
1127 f"{self.container.image} {command}"
1129 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1132 f"Failed to create container {self.container.name}"
    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

1170 """Remove a container.
1172 :raises RuntimeError: If removing a container failed.
1174 cmd = f"docker rm --force {self.container.name}"
1176 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1179 f"Failed to destroy container {self.container.name}."
1183 """Return low-level information on Docker objects.
1185 :raises RuntimeError: If getting info about a container failed.
1187 cmd = f"docker inspect {self.container.name}"
1189 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1192 f"Failed to get info about container {self.container.name}."
1195 def system_info(self):
1196 """Display the docker system-wide information.
1198 :raises RuntimeError: If displaying system information failed.
1200 cmd = u"docker system info"
1202 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1204 raise RuntimeError(u"Failed to get system info.")
    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

1240 """Container class."""
1242 def __getattr__(self, attr):
1243 """Get attribute custom implementation.
1245 :param attr: Attribute to get.
1247 :returns: Attribute value or None.
1251 return self.__dict__[attr]
1255 def __setattr__(self, attr, value):
1256 """Set attribute custom implementation.
1258 :param attr: Attribute to set.
1259 :param value: Value to set.
1264 # Check if attribute exists
1267 # Creating new attribute
1269 # Create and cache a connected SSH instance.
1270 self.__dict__[u"ssh"] = SSH()
1271 self.__dict__[u"ssh"].connect(value)
1272 elif attr == u"name":
1273 # Socket paths to not have mutable state,
1274 # this just saves some horizontal space in callers.
1275 # TODO: Rename the dir so other apps can add sockets easily.
1276 # E.g. f"/tmp/app_sockets/{value}/vpp_api.sock"
1277 path = f"/tmp/vpp_sockets/{value}"
1278 self.__dict__[u"socket_dir"] = path
1279 self.__dict__[u"api_socket"] = f"{path}/api.sock"
1280 self.__dict__[u"stats_socket"] = f"{path}/stats.sock"
1281 self.__dict__[attr] = value
1283 # Updating attribute base of type
1284 if isinstance(self.__dict__[attr], list):
1285 self.__dict__[attr].append(value)
1287 self.__dict__[attr] = value
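
# Illustrative behavior sketch of the attribute magic above (hypothetical
# values, not executed; setting "node" would also open an SSH connection):
#
#   container = Container()
#   container.name = u"DUT1_CNF1"
#   container.api_socket    # -> u"/tmp/vpp_sockets/DUT1_CNF1/api.sock"
#   container.image         # -> None (missing attributes read as None)
#   container.mnt = [u"/tmp/dir1:/mnt/dir1"]
#   container.mnt = u"/tmp/dir2:/mnt/dir2"  # appends, since mnt is a list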