1 # Copyright (c) 2020 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library to manipulate Containers."""
16 from collections import OrderedDict, Counter
19 from string import Template
20 from time import sleep
22 from robot.libraries.BuiltIn import BuiltIn
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.CpuUtils import CpuUtils
26 from resources.libraries.python.ssh import SSH
27 from resources.libraries.python.topology import Topology, SocketType
28 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
__all__ = [
    u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
]

# Path to the supervisord configuration file inside a container.
SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        try:
            # Engine class is looked up by name among module-level classes.
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()
        # Set parameters
        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        :raises RuntimeError: If chain topology is not implemented.
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                # NF instance index is the numeric suffix of container name.
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_queues=1
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
        else:
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_pf_ip4} {tg_pf_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf1_ip4"]
            tg_pf_mac = kwargs[u"tg_pf1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_pf_ip4 = kwargs[u"tg_pf2_ip4"]
            tg_pf_mac = kwargs[u"tg_pf2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_pf2_ip4=tg_pf_ip4,
            tg_pf2_mac=tg_pf_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_pf1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_pf2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError

    def start_vpp(self):
        """Start VPP inside a container."""
        self.execute(
            u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
            u">/tmp/vppd.log 2>&1 < /dev/null &")

        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.PAPI,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/api.sock"
        )
        topo_instance.add_new_socket(
            self.container.node,
            SocketType.STATS,
            self.container.name,
            f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
        )
        self.verify_vpp()
        self.adjust_privileges()

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp()

    # TODO Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vpp(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        :param retries: Check for VPP for this number of times Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        :raises RuntimeError: If VPP did not come up within the retry budget.
        """
        for _ in range(retries + 1):
            try:
                # Filter out transient errors while the socket is coming up.
                self.execute(
                    u"vppctl show pci 2>&1 | "
                    u"fgrep -v 'Connection refused' | "
                    u"fgrep -v 'No such file or directory'"
                )
                break
            except RuntimeError:
                sleep(retry_wait)
        else:
            # Dump the VPP daemon log to aid debugging before failing.
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute("chmod -R o+rwx /run/vpp")

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list.
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)

        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
        corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
        vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_main_heap_size(u"2G")
        vpp_config.add_main_heap_page_size(u"2M")
        vpp_config.add_statseg_size(u"2G")
        vpp_config.add_statseg_page_size(u"2M")
        vpp_config.add_statseg_per_node_counters(u"on")

        return vpp_config

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container.
        """
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")
class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
            )
            if int(ret) != 0:
                raise RuntimeError(
                    f"Failed to write {self.container.name} config."
                )

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                )
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                )
                if int(ret) != 0:
                    raise RuntimeError(
                        f"Failed to write {self.container.name} config."
                    )

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")

    def destroy(self):
        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        # lxc-info returns non-zero when the container does not exist.
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )
class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container.

        :raises RuntimeError: If creating a container failed.
        """
        cpuset_cpus = u"--cpuset-cpus=" + u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
            if self.container.cpuset_mems is not None else u""
        # Temporary workaround - disabling due to bug in memif
        cpuset_mems = u""

        env = u" ".join(f"--env {env!s}" for env in self.container.env) \
            if self.container.env else u""

        command = str(self.container.command) if self.container.command else u""

        publish = u" ".join(
            f"--publish {var!s}" for var in self.container.publish
        ) if self.container.publish else u""

        volume = u" ".join(
            f"--volume {mnt!s}" for mnt in self.container.mnt) \
            if self.container.mnt else u""

        cmd = f"docker run --privileged --detach --interactive --tty --rm " \
            f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
            f"{env} {volume} --name {self.container.name} " \
            f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )

    def stop(self):
        """Stop running container.

        :raises RuntimeError: If stopping a container failed.
        """
        cmd = f"docker stop {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )

    def destroy(self):
        """Remove a container.

        :raises RuntimeError: If removing a container failed.
        """
        cmd = f"docker rm --force {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to destroy container {self.container.name}."
            )

    def info(self):
        """Return low-level information on Docker objects.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker inspect {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        # Non-empty stdout means at least one matching container exists.
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)
class Container:
    """Container class."""

    def __getattr__(self, attr):
        """Get attribute custom implementation.

        Returns None instead of raising AttributeError for unset attributes,
        so optional container parameters can be probed safely.

        :param attr: Attribute to get.
        :type attr: str
        :returns: Attribute value or None.
        :rtype: any
        """
        try:
            return self.__dict__[attr]
        except KeyError:
            return None

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: any
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == u"node":
                # Open an SSH connection as soon as the node is assigned.
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute base of type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value