# Copyright (c) 2019 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
14 """Library to manipulate Containers."""
16 from collections import OrderedDict, Counter
19 from string import Template
20 from time import sleep
22 from robot.libraries.BuiltIn import BuiltIn
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.CpuUtils import CpuUtils
26 from resources.libraries.python.ssh import SSH
27 from resources.libraries.python.topology import Topology, SocketType
28 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
32 u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
35 SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"


class ContainerManager:
    """Container lifecycle management class."""

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not
            implemented.
        """
        try:
            self.engine = globals()[engine]()
        except KeyError:
            raise NotImplementedError(f"{engine} is not implemented.")
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
        try:
            return self.containers[name]
        except KeyError:
            raise RuntimeError(f"Failed to get container with name: {name}")
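
    # Illustrative usage sketch (not part of the library API); assumes a Robot
    # topology ``nodes`` dict is available, and the container name and mount
    # below are hypothetical:
    #
    #   manager = ContainerManager(engine=u"Docker")
    #   manager.construct_container(
    #       name=u"DUT1_CNF1", node=nodes[u"DUT1"],
    #       mnt=[u"/tmp/vpp_sockets/DUT1_CNF1:/root/sockets/"]
    #   )
    #   manager.acquire_all_containers()
    #   manager.create_all_containers()
    #   manager.start_vpp_in_all_containers()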

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Create base class
        self.engine.initialize()

        for key in kwargs:
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
        setattr(
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"
        )

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        a suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            # Create container
            self.construct_container(i=i, **kwargs)
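
    # For example, construct_containers(name=u"DUT1_CNF", count=2, ...) yields
    # two containers named "DUT1_CNF1" and "DUT1_CNF2".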

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.build()

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.create()

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        :param chain_topology: Topology used for chaining containers. Can be
            chain or cross_horiz. Chain topology uses 1 memif pair per
            container. Cross_horiz topology uses 1 memif and 1 physical
            interface in container (only a single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
        dut_cnt = len(
            Counter(
                [
                    self.containers[container].node[u"host"]
                    for container in self.containers
                ]
            )
        )
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            mid1 = i % mod + 1
            mid2 = i % mod + 1
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
                )
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                idx_match = search(r"\d+$", self.engine.container.name)
                if idx_match:
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
            else:
                raise RuntimeError(
                    f"Container topology {chain_topology} not implemented"
                )

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"
        )

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"
        )

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            rx_queues=1
        )

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )
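
    # The MAC scheme above encodes the NF index: e.g. for mid1 == 2 the local
    # memif MAC is "52:54:00:00:02:01", and unless that NF faces the TG its
    # vif1 peer MAC is "52:54:00:00:01:02".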

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
        if dut == u"DUT1":
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
        else:
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])

        n_instances = int(kwargs[u"n_instances"])
        rxq = 1
        if u"rxq" in kwargs:
            rxq = int(kwargs[u"rxq"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        )
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, if1_pci, if2_pci
        )

        instances = []
        for i in range(1, n_instances + 1):
            instances.append(
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip neighbor memif2/{i} {tg_if_ip4} {tg_if_mac} "
                f"static\n\n"
            )

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))
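
    # For n_instances == 1 the instances loop above emits this VPP CLI fragment
    # (with tg_if_ip4/tg_if_mac substituted):
    #
    #   create interface memif id 1 socket-id 1 master
    #   set interface state memif1/1 up
    #   set interface l2 bridge memif1/1 1
    #   create interface memif id 1 socket-id 2 master
    #   set interface state memif2/1 up
    #   set interface l2 bridge memif2/1 2
    #   set ip neighbor memif2/1 <tg_if_ip4> <tg_if_mac> static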

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]

        if dut == u"DUT1":
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            l_mac1 = 17
            l_mac2 = 18
            r_mac = 1
        else:
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
            l_mac1 = 1
            l_mac2 = 2
            r_mac = 17

        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            mid1=nf_instance,
            mid2=nf_instance,
            sid1=u"1",
            sid2=u"2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_if2_ip4=tg_if_ip4,
            tg_if2_mac=tg_if_mac,
            raddr_ip4=raddr_ip4,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
        )
        self.engine.execute(
            f"cat {kwargs[u'guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"
        )

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        role1 = u"master"
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac
        )
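
    # Worked example: with nodes == 2, the NF with mid1 == 1 (node == 1) uses
    # its own socket1 "memif-<container>-<sid1>" and socket2 "memif-pipe-2";
    # the NF with mid1 == 2 (node == 2) picks up socket1 "memif-pipe-2" and its
    # own socket2 "memif-<container>-<sid2>", stitching the pipeline together.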

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.stop()

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()


class ContainerEngine:
    """Abstract class for container engine."""

    def __init__(self):
        """Init ContainerEngine object."""
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

    def build(self):
        """Build container (compile)."""
        raise NotImplementedError

    def create(self):
        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

    def stop(self):
        """Stop container."""
        raise NotImplementedError

    def destroy(self):
        """Destroy/remove container."""
        raise NotImplementedError

    def info(self):
        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """System info."""
        raise NotImplementedError
547 """Start VPP inside a container."""
549 u"setsid /usr/bin/vpp -c /etc/vpp/startup.conf "
550 u">/tmp/vppd.log 2>&1 < /dev/null &")
552 topo_instance = BuiltIn().get_library_instance(
553 u"resources.libraries.python.topology.Topology"
555 topo_instance.add_new_socket(
559 f"/tmp/vpp_sockets/{self.container.name}/api.sock"
561 topo_instance.add_new_socket(
565 f"/tmp/vpp_sockets/{self.container.name}/stats.sock"
568 self.adjust_privileges()
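
    # The two sockets registered above let PAPI and stats clients reach this
    # container's VPP, e.g. for a container named "CNF1" (illustrative paths):
    #   /tmp/vpp_sockets/CNF1/api.sock
    #   /tmp/vpp_sockets/CNF1/stats.sock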

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"pkill vpp")
        self.start_vpp()

    # TODO: Rewrite to use the VPPUtil.py functionality and remove this.
    def verify_vpp(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        :param retries: Check for VPP for this number of times. Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        for _ in range(retries + 1):
            try:
                # Execute wraps the command in single quotes, so the inner
                # fgrep patterns use double quotes.
                self.execute(
                    u'vppctl show pci 2>&1 | '
                    u'fgrep -v "Connection refused" | '
                    u'fgrep -v "No such file or directory"'
                )
                break
            except RuntimeError:
                sleep(retry_wait)
        else:
            self.execute(u"cat /tmp/vppd.log")
            raise RuntimeError(
                f"VPP did not come up in container: {self.container.name}"
            )

    def adjust_privileges(self):
        """Adjust privileges to control VPP without sudo."""
        self.execute(u"chmod -R o+rwx /run/vpp")

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: List of CPU cores to allocate.
        :type cpuset_cpus: list
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        if cpuset_cpus:
            # We will pop the first core from the list to be the main core
            vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
            # If more cores are in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)
        vpp_config.add_buffers_per_numa(215040)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_heapsize(u"4G")
        vpp_config.add_ip_heap_size(u"4G")
        vpp_config.add_statseg_size(u"4G")

        return vpp_config
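
    # Sketch of the startup.conf this yields for cpuset_cpus == [2, 3, 4]
    # (excerpt only; the exact rendering is up to VppConfigGenerator):
    #
    #   unix { cli-listen ... nodaemon exec /tmp/running.exec }
    #   cpu { main-core 2 corelist-workers 3,4 }
    #   buffers { buffers-per-numa 215040 }
    #   plugins { plugin default { disable } plugin memif_plugin.so { enable } }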

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, *devices):
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param devices: PCI devices.
        :type cpuset_cpus: list
        :type rxq: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_dev_default_rxq(rxq)
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"enable", u"crypto_native_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
        self.execute(
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'
        )

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, u"rt") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')
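
    # string.Template sketch: a template line such as
    #   create interface memif id ${mid1} socket-id ${sid1}
    # rendered with safe_substitute(mid1=1, sid1=1) becomes
    #   create interface memif id 1 socket-id 1
    # and any unknown ${...} placeholders are left untouched.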

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When a
        Docker/LXC container is initialized, a new cgroup /docker or /lxc is
        created under the cpuset parent tree. This newly created cgroup
        inherits the parent setting for the cpu/mem exclusive parameter, which
        thus cannot be overridden within the /docker or /lxc cgroup itself.
        This function sets the cgroups to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to apply cgroup settings.")
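
    # Net effect for name == "docker" (each command run via sudo on the node):
    #   cgset -r cpuset.cpu_exclusive=0 /
    #   cgset -r cpuset.mem_exclusive=0 /
    #   cgcreate -g cpuset:/docker
    #   cgset -r cpuset.cpu_exclusive=0 /docker
    #   cgset -r cpuset.mem_exclusive=0 /docker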


class LXC(ContainerEngine):
    """LXC implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config fails.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
            else u"amd64"

        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")
793 """Build container (compile)."""
794 raise NotImplementedError
797 """Create/deploy an application inside a container on system.
799 :raises RuntimeError: If creating the container fails.
801 if self.container.mnt:
803 # https://github.com/lxc/lxc/issues/434
804 mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
805 ret, _, _ = self.container.ssh.exec_command_sudo(
806 f"sh -c \"echo '{mnt_e}' >> "
807 f"/var/lib/lxc/{self.container.name}/config\""
811 f"Failed to write {self.container.name} config."
814 for mount in self.container.mnt:
815 host_dir, guest_dir = mount.split(u":")
816 options = u"bind,create=dir" if guest_dir.endswith(u"/") \
817 else u"bind,create=file"
818 entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
819 f"none {options} 0 0"
820 self.container.ssh.exec_command_sudo(
821 f"sh -c \"mkdir -p {host_dir}\""
823 ret, _, _ = self.container.ssh.exec_command_sudo(
824 f"sh -c \"echo '{entry}' "
825 f">> /var/lib/lxc/{self.container.name}/config\""
829 f"Failed to write {self.container.name} config."

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to start container {self.container.name}."
            )
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
        )
        if int(ret) != 0:
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
        )
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to set cpuset.cpus to container {self.container.name}."
            )

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to run command inside container {self.container.name}."
            )

    def stop(self):
        """Stop a container.

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to stop container {self.container.name}."
            )
        self._lxc_wait(u"STOPPED|FROZEN")
898 """Destroy a container.
900 :raises RuntimeError: If destroying container failed.
902 cmd = f"lxc-destroy --force --name {self.container.name}"
904 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
907 f"Failed to destroy container {self.container.name}."
911 """Query and shows information about a container.
913 :raises RuntimeError: If getting info about a container failed.
915 cmd = f"lxc-info --name {self.container.name}"
917 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
920 f"Failed to get info about container {self.container.name}."

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container exists on node.

        :returns: True if container is present.
        :rtype: bool
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        return not ret

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
            )


class Docker(ContainerEngine):
    """Docker implementation."""

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
            if force:
                self.destroy()
            else:
                return

        if not self.container.image:
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}."
            )

        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")
1015 """Build container (compile)."""
1016 raise NotImplementedError
1019 """Create/deploy container.
1021 :raises RuntimeError: If creating a container failed.
1023 cpuset_cpus = u"--cpuset-cpus=" + u",".join(
1024 f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
1025 if self.container.cpuset_cpus else u""
1027 cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
1028 if self.container.cpuset_mems is not None else u""
1029 # Temporary workaround - disabling due to bug in memif
1032 env = u" ".join(f"--env {env!s}" for env in self.container.env) \
1033 if self.container.env else u""
1035 command = str(self.container.command) if self.container.command else u""
1037 publish = u" ".join(
1038 f"--publish {var!s}" for var in self.container.publish
1039 ) if self.container.publish else u""
1042 f"--volume {mnt!s}" for mnt in self.container.mnt) \
1043 if self.container.mnt else u""
1045 cmd = f"docker run --privileged --detach --interactive --tty --rm " \
1046 f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
1047 f"{env} {volume} --name {self.container.name} " \
1048 f"{self.container.image} {command}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to create container {self.container.name}"
            )

        self.info()

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command in a container failed.
        """
        cmd = f"docker exec --interactive {self.container.name} " \
            f"/bin/sh -c '{command}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to execute command in container {self.container.name}."
            )
1078 """Stop running container.
1080 :raises RuntimeError: If stopping a container failed.
1082 cmd = f"docker stop {self.container.name}"
1084 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1087 f"Failed to stop container {self.container.name}."
1091 """Remove a container.
1093 :raises RuntimeError: If removing a container failed.
1095 cmd = f"docker rm --force {self.container.name}"
1097 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1100 f"Failed to destroy container {self.container.name}."
1104 """Return low-level information on Docker objects.
1106 :raises RuntimeError: If getting info about a container failed.
1108 cmd = f"docker inspect {self.container.name}"
1110 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1113 f"Failed to get info about container {self.container.name}."

    def system_info(self):
        """Display the docker system-wide information.

        :raises RuntimeError: If displaying system information failed.
        """
        cmd = u"docker system info"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(u"Failed to get system info.")

    def is_container_present(self):
        """Check if container is present on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --all --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"docker ps --quiet --filter name={self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
        if int(ret) != 0:
            raise RuntimeError(
                f"Failed to get info about container {self.container.name}."
            )
        return bool(stdout)
1161 """Container class."""
1163 def __getattr__(self, attr):
1164 """Get attribute custom implementation.
1166 :param attr: Attribute to get.
1168 :returns: Attribute value or None.
1172 return self.__dict__[attr]

    def __setattr__(self, attr, value):
        """Set attribute custom implementation.

        :param attr: Attribute to set.
        :param value: Value to set.
        :type attr: str
        :type value: object
        """
        try:
            # Check if attribute exists
            self.__dict__[attr]
        except KeyError:
            # Creating new attribute
            if attr == u"node":
                # On first assignment of the node, open an SSH connection.
                self.__dict__[u"ssh"] = SSH()
                self.__dict__[u"ssh"].connect(value)
            self.__dict__[attr] = value
        else:
            # Updating attribute based on type
            if isinstance(self.__dict__[attr], list):
                self.__dict__[attr].append(value)
            else:
                self.__dict__[attr] = value
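
# Illustrative semantics of the attribute overrides above (hypothetical
# values):
#
#   c = Container()
#   c.node = nodes[u"DUT1"]  # first assignment also opens an SSH connection
#   c.mnt = [u"/tmp/a:/a"]   # new attribute is created
#   c.mnt = u"/tmp/b:/b"     # existing list attribute -> value is appended
#   c.image                  # missing attribute -> None, not AttributeError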