1 # Copyright (c) 2019 Cisco and/or its affiliates.
2 # Licensed under the Apache License, Version 2.0 (the "License");
3 # you may not use this file except in compliance with the License.
4 # You may obtain a copy of the License at:
6 # http://www.apache.org/licenses/LICENSE-2.0
8 # Unless required by applicable law or agreed to in writing, software
9 # distributed under the License is distributed on an "AS IS" BASIS,
10 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
11 # See the License for the specific language governing permissions and
12 # limitations under the License.
14 """Library to manipulate Containers."""
16 from collections import OrderedDict, Counter
19 from string import Template
20 from time import sleep
22 from robot.libraries.BuiltIn import BuiltIn
24 from resources.libraries.python.Constants import Constants
25 from resources.libraries.python.CpuUtils import CpuUtils
26 from resources.libraries.python.ssh import SSH
27 from resources.libraries.python.topology import Topology, SocketType
28 from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
32 u"ContainerManager", u"ContainerEngine", u"LXC", u"Docker", u"Container"
35 SUPERVISOR_CONF = u"/etc/supervisor/supervisord.conf"
class ContainerManager:
    """Container lifecycle management class.

    Drives a single engine instance (LXC or Docker) over an ordered set of
    Container objects; every ``*_all_containers`` method points the engine at
    each stored container in turn and invokes one engine operation.
    """
    # NOTE(review): this view of the file is missing interior source lines
    # (try/except wrappers, some assignments and closing parentheses).  Only
    # comments and docstrings were added/completed here; all remaining code
    # tokens are unchanged.

    def __init__(self, engine):
        """Initialize Container Manager class.

        :param engine: Container technology used (LXC/Docker/...).
        :type engine: str
        :raises NotImplementedError: If container technology is not implemented.
        """
        # Engine name is resolved against module globals, so it must match a
        # class defined in this module (e.g. u"LXC" or u"Docker").
            self.engine = globals()[engine]()
            raise NotImplementedError(f"{engine} is not implemented.")
        # Ordered so that iteration follows construction order.
        self.containers = OrderedDict()

    def get_container_by_name(self, name):
        """Get container instance.

        :param name: Container name.
        :type name: str
        :returns: Container instance.
        :rtype: Container
        :raises RuntimeError: If failed to get container with name.
        """
            return self.containers[name]
        raise RuntimeError(f"Failed to get container with name: {name}")

    def construct_container(self, **kwargs):
        """Construct container object on node with specified parameters.

        :param kwargs: Key-value pairs used to construct container.
        :type kwargs: dict
        """
        # Fresh Container instance for every construction.
        self.engine.initialize()
            setattr(self.engine.container, key, kwargs[key])

        # Set additional environmental variables
            self.engine.container, u"env",
            f"MICROSERVICE_LABEL={kwargs[u'name']}"

        # Store container instance
        self.containers[kwargs[u"name"]] = self.engine.container

    def construct_containers(self, **kwargs):
        """Construct 1..N container(s) on node with specified name.

        Ordinal number is automatically added to the name of container as
        a suffix.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        name = kwargs[u"name"]
        for i in range(kwargs[u"count"]):
            # Name will contain ordinal suffix
            kwargs[u"name"] = u"".join([name, str(i+1)])
            self.construct_container(i=i, **kwargs)

    def acquire_all_containers(self):
        """Acquire all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.acquire()

    def build_all_containers(self):
        """Build all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # NOTE(review): the engine build call is not visible in this view.

    def create_all_containers(self):
        """Create all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # NOTE(review): the engine create call is not visible in this view.

    def execute_on_container(self, name, command):
        """Execute command on container with name.

        :param name: Container name.
        :param command: Command to execute.
        :type name: str
        :type command: str
        """
        self.engine.container = self.get_container_by_name(name)
        self.engine.execute(command)

    def execute_on_all_containers(self, command):
        """Execute command on all containers.

        :param command: Command to execute.
        :type command: str
        """
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.execute(command)

    def start_vpp_in_all_containers(self):
        """Start VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # We need to install supervisor client/server system to control VPP
            # in containers
            self.engine.install_supervisor()
            self.engine.start_vpp()

    def restart_vpp_in_all_containers(self):
        """Restart VPP in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.restart_vpp()

    def verify_vpp_in_all_containers(self):
        """Verify that VPP is installed and running in all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.verify_vpp()

    def configure_vpp_in_all_containers(self, chain_topology, **kwargs):
        """Configure VPP in all containers.

        Dispatches to one private ``_configure_vpp_*`` helper per supported
        topology name.

        :param chain_topology: Topology used for chaining containers can be
            chain or cross_horiz. Chain topology is using 1 memif pair per
            container. Cross_horiz topology is using 1 memif and 1 physical
            interface in container (only single container can be configured).
        :param kwargs: Named parameters.
        :type chain_topology: str
        :type kwargs: dict
        """
        # Count number of DUTs based on node's host information
                    self.containers[container].node[u"host"]
                    for container in self.containers
        mod = len(self.containers) // dut_cnt

        for i, container in enumerate(self.containers):
            # Socket IDs are derived from the container's position within
            # its DUT's group (two sockets per container).
            sid1 = i % mod * 2 + 1
            sid2 = i % mod * 2 + 2
            self.engine.container = self.containers[container]
            # Guest-side path of the first mount entry ("host:guest").
            guest_dir = self.engine.container.mnt[0].split(u":")[1]

            if chain_topology == u"chain":
                self._configure_vpp_chain_l2xc(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
            elif chain_topology == u"cross_horiz":
                self._configure_vpp_cross_horiz(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
            elif chain_topology == u"chain_functional":
                self._configure_vpp_chain_functional(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
            elif chain_topology == u"chain_ip4":
                self._configure_vpp_chain_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
            elif chain_topology == u"pipeline_ip4":
                self._configure_vpp_pipeline_ip4(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs
            elif chain_topology == u"chain_vswitch":
                self._configure_vpp_chain_vswitch(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, **kwargs)
            elif chain_topology == u"chain_ipsec":
                # NF instance index is the trailing number in the name.
                idx_match = search(r"\d+$", self.engine.container.name)
                    idx = int(idx_match.group())
                self._configure_vpp_chain_ipsec(
                    mid1=mid1, mid2=mid2, sid1=sid1, sid2=sid2,
                    guest_dir=guest_dir, nf_instance=idx, **kwargs)
                    f"Container topology {chain_topology} not implemented"

    def _configure_vpp_chain_l2xc(self, **kwargs):
        """Configure VPP in chain topology with l2xc.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_l2xc.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}"

    def _configure_vpp_cross_horiz(self, **kwargs):
        """Configure VPP in cross horizontal topology (single memif).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        # Pick PCI address/name of the physical interface by DUT side.
        if u"DUT1" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if"])
        if u"DUT2" in self.engine.container.name:
            if_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if"])
            if_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if"])
        self.engine.create_vpp_startup_config_dpdk_dev(if_pci)
        self.engine.create_vpp_exec_config(
            u"memif_create_cross_horizon.exec",
            mid1=kwargs[u"mid1"], sid1=kwargs[u"sid1"], if_name=if_name,
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}"

    def _configure_vpp_chain_functional(self, **kwargs):
        """Configure VPP in chain topology with l2xc (functional).

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config_func_dev()
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_functional.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",

    def _configure_vpp_chain_ip4(self, **kwargs):
        """Configure VPP in chain topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()

        # First NF in the chain faces TG interface 1, last faces TG
        # interface 2; all other neighbours get synthetic locally-computed
        # MAC addresses.
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs['mid2'] + 1):02X}:01"
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid1']}",
            socket2=f"{kwargs[u'guest_dir']}/memif-"
            f"{self.engine.container.name}-{kwargs[u'sid2']}",
            mac1=f"52:54:00:00:{kwargs[u'mid1']:02X}:01",
            mac2=f"52:54:00:00:{kwargs[u'mid2']:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac

    def _configure_vpp_chain_vswitch(self, **kwargs):
        """Configure VPP as vswitch in container.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        dut = self.engine.container.name.split(u"_")[0]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut1_if1"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if2"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut1_if1"])
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            if1_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if2_pci = Topology.get_interface_pci_addr(
                self.engine.container.node, kwargs[u"dut2_if2"])
            if_red_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if1"])
            if_black_name = Topology.get_interface_name(
                self.engine.container.node, kwargs[u"dut2_if2"])
        n_instances = int(kwargs[u"n_instances"])
            rxq = int(kwargs[u"rxq"])
        if u"buffers" in kwargs:
            buffers = int(kwargs[u"buffers"])
        nodes = kwargs[u"nodes"]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=1, nf_chain=1,
            nf_node=1, vs_dtc=0, nf_dtc=8, nf_mtcr=1, nf_dtcr=1
        self.engine.create_vpp_startup_config_vswitch(
            cpuset_cpus, rxq, buffers, if1_pci, if2_pci
        # One memif master pair + bridge-domain entries per NF instance.
        for i in range(1, n_instances + 1):
                f"create interface memif id {i} socket-id 1 master\n"
                f"set interface state memif1/{i} up\n"
                f"set interface l2 bridge memif1/{i} 1\n"
                f"create interface memif id {i} socket-id 2 master\n"
                f"set interface state memif2/{i} up\n"
                f"set interface l2 bridge memif2/{i} 2\n"
                f"set ip arp memif2/{i} {tg_if_ip4} {tg_if_mac} "
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_vswitch_ipsec.exec",
            socket1=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs[u'guest_dir']}/{dut}_memif-vswitch-2",
            if_red_name=if_red_name,
            if_black_name=if_black_name,
            instances=u"\n\n".join(instances))

    def _configure_vpp_chain_ipsec(self, **kwargs):
        """Configure VPP in container with memifs.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        nf_nodes = int(kwargs[u"nf_nodes"])
        nf_instance = int(kwargs[u"nf_instance"])
        nodes = kwargs[u"nodes"]
        dut = self.engine.container.name.split(u"_")[0]
        cpuset_cpus = CpuUtils.get_affinity_nf(
            nodes, dut, nf_chains=1, nf_nodes=nf_nodes, nf_chain=1,
            nf_node=nf_instance, vs_dtc=10, nf_dtc=1, nf_mtcr=1, nf_dtcr=1)
        self.engine.create_vpp_startup_config_ipsec(cpuset_cpus)
        local_ip_base = kwargs[u"dut2_if1_ip4"].rsplit(u".", 1)[0]
            # Tunnel endpoints are offset by 100 per direction so the two
            # DUT sides mirror each other.
            tnl_local_ip = f"{local_ip_base}.{nf_instance + 100}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance}"
            remote_ip_base = kwargs[u"dut1_if1_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if1_ip4"]
            tg_if_mac = kwargs[u"tg_if1_mac"]
            raddr_ip4 = kwargs[u"laddr_ip4"]
            tnl_local_ip = f"{local_ip_base}.{nf_instance}"
            tnl_remote_ip = f"{local_ip_base}.{nf_instance + 100}"
            remote_ip_base = kwargs[u"dut2_if2_ip4"].rsplit(u".", 1)[0]
            tg_if_ip4 = kwargs[u"tg_if2_ip4"]
            tg_if_mac = kwargs[u"tg_if2_mac"]
            raddr_ip4 = kwargs[u"raddr_ip4"]
        self.engine.create_vpp_exec_config(
            u"memif_create_chain_ipsec.exec",
            socket1=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-1",
            socket2=f"{kwargs['guest_dir']}/{dut}_memif-vswitch-2",
            mac1=f"02:02:00:00:{l_mac1:02X}:{(nf_instance - 1):02X}",
            mac2=f"02:02:00:00:{l_mac2:02X}:{(nf_instance - 1):02X}",
            tg_if2_ip4=tg_if_ip4,
            tg_if2_mac=tg_if_mac,
            tnl_local_ip=tnl_local_ip,
            tnl_remote_ip=tnl_remote_ip,
            tnl_remote_mac=f"02:02:00:00:{r_mac:02X}:{(nf_instance - 1):02X}",
            remote_ip=f"{remote_ip_base}.{nf_instance}"
            # Append per-instance IPsec tunnel config to the exec script.
            f"cat {kwargs['guest_dir']}/ipsec_create_tunnel_cnf_"
            f"{dut}_{nf_instance}.config >> /tmp/running.exec"

    def _configure_vpp_pipeline_ip4(self, **kwargs):
        """Configure VPP in pipeline topology with ip4.

        :param kwargs: Named parameters.
        :type kwargs: dict
        """
        self.engine.create_vpp_startup_config()
        # Position of this container on its DUT (1-based).
        node = (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1
        mid1 = kwargs[u"mid1"]
        mid2 = kwargs[u"mid2"]
        # Last container in the pipeline acts as memif master.
        role2 = u"master" if node == kwargs[u"nodes"] else u"slave"
        kwargs[u"mid2"] = kwargs[u"mid2"] \
            if node == kwargs[u"nodes"] else kwargs[u"mid2"] + 1
        vif1_mac = kwargs[u"tg_if1_mac"] \
            if (kwargs[u"mid1"] - 1) % kwargs[u"nodes"] + 1 == 1 \
            else f"52:54:00:00:{(kwargs[u'mid1'] - 1):02X}:02"
        vif2_mac = kwargs[u"tg_if2_mac"] \
            if (kwargs[u"mid2"] - 1) % kwargs[u"nodes"] + 1 == kwargs[u"nodes"]\
            else f"52:54:00:00:{(kwargs[u'mid2'] + 1):02X}:01"
        # Edge containers use per-container sockets; interior hops share
        # "memif-pipe-<mid>" sockets with their neighbour.
        socket1 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid1']}" if node == 1 \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid1']}"
        socket2 = f"{kwargs[u'guest_dir']}/memif-{self.engine.container.name}-"\
            f"{kwargs[u'sid2']}" \
            if node == 1 and kwargs[u"nodes"] == 1 or node == kwargs[u"nodes"] \
            else f"{kwargs[u'guest_dir']}/memif-pipe-{kwargs[u'mid2']}"

        self.engine.create_vpp_exec_config(
            u"memif_create_pipeline_ip4.exec",
            mid1=kwargs[u"mid1"], mid2=kwargs[u"mid2"],
            sid1=kwargs[u"sid1"], sid2=kwargs[u"sid2"],
            socket1=socket1, socket2=socket2, role1=role1, role2=role2,
            mac1=f"52:54:00:00:{mid1:02X}:01",
            mac2=f"52:54:00:00:{mid2:02X}:02",
            vif1_mac=vif1_mac, vif2_mac=vif2_mac

    def stop_all_containers(self):
        """Stop all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            # NOTE(review): the engine stop call is not visible in this view.

    def destroy_all_containers(self):
        """Destroy all containers."""
        for container in self.containers:
            self.engine.container = self.containers[container]
            self.engine.destroy()
class ContainerEngine:
    """Abstract class for container engine.

    Subclasses (LXC, Docker) implement the lifecycle primitives; this class
    also carries shared helpers for supervisord and VPP configuration that
    operate through ``self.execute``.
    """
    # NOTE(review): this view of the file is missing interior source lines
    # (several ``def`` lines and statements).  Only comments and docstrings
    # were added/completed here; all remaining code tokens are unchanged.

        """Init ContainerEngine object."""
        # Current Container being operated on; set by ContainerManager.
        self.container = None

    def initialize(self):
        """Initialize container object."""
        self.container = Container()

    def acquire(self, force):
        """Acquire/download container.

        :param force: Destroy a container if exists and create.
        :type force: bool
        """
        raise NotImplementedError

        """Build container (compile)."""
        raise NotImplementedError

        """Create/deploy container."""
        raise NotImplementedError

    def execute(self, command):
        """Execute process inside container.

        :param command: Command to run inside container.
        :type command: str
        """
        raise NotImplementedError

        """Stop container."""
        raise NotImplementedError

        """Destroy/remove container."""
        raise NotImplementedError

        """Info about container."""
        raise NotImplementedError

    def system_info(self):
        """Return system info."""
        raise NotImplementedError

    def install_supervisor(self):
        """Install supervisord inside a container."""
        if isinstance(self, LXC):
            # LXC images are bare; install supervisor via apt first.
            self.execute(u"sleep 3; apt-get update")
            self.execute(u"apt-get install -y supervisor")
            u"[unix_http_server]\n" \
            u"file = /tmp/supervisor.sock\n\n" \
            u"[rpcinterface:supervisor]\n" \
            u"supervisor.rpcinterface_factory = " \
            u"supervisor.rpcinterface:make_main_rpcinterface\n\n" \
            u"[supervisorctl]\n" \
            u"serverurl = unix:///tmp/supervisor.sock\n\n" \
            u"pidfile = /tmp/supervisord.pid\n" \
            u"identifier = supervisor\n" \
            u"directory = /tmp\n" \
            u"logfile = /tmp/supervisord.log\n" \
            u"loglevel = debug\n" \
            u"nodaemon = false\n\n"
            # Write the config and launch the supervisord daemon.
            f'echo "{config}" > {SUPERVISOR_CONF} && '
            f'supervisord -c {SUPERVISOR_CONF}'

        """Start VPP inside a container."""
            u"command = /usr/bin/vpp -c /etc/vpp/startup.conf\n" \
            u"autostart = false\n" \
            u"autorestart = false\n" \
            u"redirect_stderr = true\n" \
            # Register the VPP program and reload supervisor config.
            f'echo "{config}" >> {SUPERVISOR_CONF} && supervisorctl reload'
        self.execute(u"supervisorctl start vpp")

        # Register the container's VPP API/stats sockets with the topology.
        topo_instance = BuiltIn().get_library_instance(
            u"resources.libraries.python.topology.Topology"
        topo_instance.add_new_socket(
            f"/tmp/vpp_sockets/{self.container.name}/api.sock"
        topo_instance.add_new_socket(
            f"/tmp/vpp_sockets/{self.container.name}/stats.sock"

    def restart_vpp(self):
        """Restart VPP service inside a container."""
        self.execute(u"supervisorctl restart vpp")
        # Dump the supervisor log for debugging the restart.
        self.execute(u"cat /tmp/supervisord.log")

    # TODO Rewrite .execute to accept retries parameter and get rid of this
    def verify_vpp(self, retries=120, retry_wait=1):
        """Verify that VPP is installed and running inside container.

        :param retries: Check for VPP for this number of times Default: 120
        :param retry_wait: Wait for this number of seconds between retries.
        :type retries: int
        :type retry_wait: int
        """
        # Filter out the transient errors vppctl prints before the VPP API
        # socket is ready, so success means a real response.
        cmd = (u"vppctl show pci 2>&1 | "
               u"fgrep -v 'Connection refused' | "
               u"fgrep -v 'No such file or directory'")

        for _ in range(retries + 1):
        # Reached only when all retries failed.
        msg = f"VPP did not come up in container: {self.container.name}"
        raise RuntimeError(msg)

    def create_base_vpp_startup_config(self, cpuset_cpus=None):
        """Create base startup configuration of VPP on container.

        :param cpuset_cpus: CPU list to run on; defaults to the container's
            own ``cpuset_cpus``.
        :returns: Base VPP startup configuration.
        :rtype: VppConfigGenerator
        """
        if cpuset_cpus is None:
            cpuset_cpus = self.container.cpuset_cpus

        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        # We will pop the first core from the list to be a main core
        vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
        # If more cores in the list, the rest will be used as workers.
            corelist_workers = u",".join(str(cpu) for cpu in cpuset_cpus)
            vpp_config.add_cpu_corelist_workers(corelist_workers)

    def create_vpp_startup_config(self):
        """Create startup configuration of VPP without DPDK on container."""
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
            f'echo "{vpp_config.get_config_str()}" | '
            f'tee /etc/vpp/startup.conf'

    def create_vpp_startup_config_dpdk_dev(self, *devices):
        """Create startup configuration of VPP with DPDK on container.

        :param devices: List of PCI devices to add.
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config()
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'

    def create_vpp_startup_config_func_dev(self):
        """Create startup configuration of VPP on container for functional
        vpp testing.
        """
        # Create config instance
        vpp_config = VppConfigGenerator()
        vpp_config.set_node(self.container.node)
        vpp_config.add_unix_cli_listen()
        vpp_config.add_unix_nodaemon()
        vpp_config.add_unix_exec(u"/tmp/running.exec")
        vpp_config.add_socksvr(socket=Constants.SOCKSVR_PATH)
        vpp_config.add_statseg_per_node_counters(value=u"on")
        vpp_config.add_plugin(u"disable", u"dpdk_plugin.so")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'

    def create_vpp_startup_config_vswitch(self, cpuset_cpus, rxq, buffers,
        """Create startup configuration of VPP vswitch.

        :param cpuset_cpus: CPU list to run on.
        :param rxq: Number of interface RX queues.
        :param buffers: Number of buffers per numa.
        :param devices: List of PCI devices to add.
        :type cpuset_cpus: list
        :type rxq: int
        :type buffers: int
        :type devices: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_dpdk_dev(*devices)
        vpp_config.add_dpdk_log_level(u"debug")
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"dpdk_plugin.so")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_dpdk_no_tx_checksum_offload()
        vpp_config.add_buffers_per_numa(buffers)
        vpp_config.add_dpdk_dev_default_rxq(rxq)

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'

    def create_vpp_startup_config_ipsec(self, cpuset_cpus):
        """Create startup configuration of VPP with IPsec on container.

        :param cpuset_cpus: CPU list to run on.
        :type cpuset_cpus: list
        """
        vpp_config = self.create_base_vpp_startup_config(cpuset_cpus)
        vpp_config.add_plugin(u"disable", u"default")
        vpp_config.add_plugin(u"enable", u"memif_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ia32_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_ipsecmb_plugin.so")
        vpp_config.add_plugin(u"enable", u"crypto_openssl_plugin.so")
        vpp_config.add_heapsize(u"4G")
        vpp_config.add_ip_heap_size(u"4G")
        vpp_config.add_statseg_size(u"4G")

        # Apply configuration
        self.execute(u"mkdir -p /etc/vpp/")
            f'echo "{vpp_config.get_config_str()}" | tee /etc/vpp/startup.conf'

    def create_vpp_exec_config(self, template_file, **kwargs):
        """Create VPP exec configuration on container.

        :param template_file: File name of a template script.
        :param kwargs: Parameters for script.
        :type template_file: str
        :type kwargs: dict
        """
        running = u"/tmp/running.exec"
        template = f"{Constants.RESOURCES_TPL_CONTAINER}/{template_file}"

        with open(template, "r") as src_file:
            src = Template(src_file.read())
            self.execute(f'echo "{src.safe_substitute(**kwargs)}" > {running}')

    def is_container_running(self):
        """Check if container is running."""
        raise NotImplementedError

    def is_container_present(self):
        """Check if container is present."""
        raise NotImplementedError

    def _configure_cgroup(self, name):
        """Configure the control group associated with a container.

        By default the cpuset cgroup is using exclusive CPU/MEM. When Docker/LXC
        container is initialized a new cgroup /docker or /lxc is created under
        cpuset parent tree. This newly created cgroup is inheriting parent
        setting for cpu/mem exclusive parameter and thus cannot be overriden
        within /docker or /lxc cgroup. This function is supposed to set cgroups
        to allow coexistence of both engines.

        :param name: Name of cgroup.
        :type name: str
        :raises RuntimeError: If applying cgroup settings via cgset failed.
        """
        # Drop exclusivity on the root cgroup first, then on the engine's
        # own cgroup, so both engines can share CPUs/memory nodes.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.cpu_exclusive=0 /"
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset -r cpuset.mem_exclusive=0 /"
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgcreate -g cpuset:/{name}"
            raise RuntimeError(u"Failed to copy cgroup settings from root.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.cpu_exclusive=0 /{name}"
            raise RuntimeError(u"Failed to apply cgroup settings.")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"cgset -r cpuset.mem_exclusive=0 /{name}"
            raise RuntimeError(u"Failed to apply cgroup settings.")
class LXC(ContainerEngine):
    """LXC implementation.

    All host interaction goes through ``self.container.ssh`` sudo commands
    wrapping the lxc-* command-line tools.
    """
    # NOTE(review): this view of the file is missing interior source lines
    # (some ``def`` lines, ``if``/``raise`` wrappers and closing parentheses).
    # Only comments and docstrings were added/completed here; all remaining
    # code tokens are unchanged.

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Acquire a privileged system object where configuration is stored.

        :param force: If a container exists, destroy it and create a new
            container.
        :type force: bool
        :raises RuntimeError: If creating the container or writing the container
            config file fails.
        """
        if self.is_container_present():
        # Map node architecture to the LXC download-template architecture.
        target_arch = u"arm64" \
            if Topology.get_node_arch(self.container.node) == u"aarch64" \
        # Default image is ubuntu bionic unless the container specifies one.
        image = self.container.image if self.container.image \
            else f"-d ubuntu -r bionic -a {target_arch}"

        cmd = f"lxc-create -t download --name {self.container.name} " \
            f"-- {image} --no-validate"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
            raise RuntimeError(u"Failed to create container.")

        self._configure_cgroup(u"lxc")

        """Build container (compile).

        TODO: Remove from parent class if no sibling implements this.
        """
        raise NotImplementedError

        """Create/deploy an application inside a container on system.

        :raises RuntimeError: If creating the container fails.
        """
        if self.container.mnt:
            # LXC fix for tmpfs
            # https://github.com/lxc/lxc/issues/434
            mnt_e = u"lxc.mount.entry = tmpfs run tmpfs defaults"
            ret, _, _ = self.container.ssh.exec_command_sudo(
                f"sh -c \"echo '{mnt_e}' >> "
                f"/var/lib/lxc/{self.container.name}/config\""
                    f"Failed to write {self.container.name} config."

            for mount in self.container.mnt:
                host_dir, guest_dir = mount.split(u":")
                # Trailing slash on the guest path means a directory mount.
                options = u"bind,create=dir" if guest_dir.endswith(u"/") \
                    else u"bind,create=file"
                entry = f"lxc.mount.entry = {host_dir} {guest_dir[1:]} " \
                    f"none {options} 0 0"
                self.container.ssh.exec_command_sudo(
                    f"sh -c \"mkdir -p {host_dir}\""
                ret, _, _ = self.container.ssh.exec_command_sudo(
                    f"sh -c \"echo '{entry}' "
                    f">> /var/lib/lxc/{self.container.name}/config\""
                        f"Failed to write {self.container.name} config."

        cpuset_cpus = u",".join(
            f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
            if self.container.cpuset_cpus else u""

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-start --name {self.container.name} --daemon"
                f"Failed to start container {self.container.name}."
        self._lxc_wait(u"RUNNING")

        # Workaround for LXC to be able to allocate all cpus including isolated.
        ret, _, _ = self.container.ssh.exec_command_sudo(
            u"cgset --copy-from / lxc/"
            raise RuntimeError(u"Failed to copy cgroup to LXC")

        ret, _, _ = self.container.ssh.exec_command_sudo(
            f"lxc-cgroup --name {self.container.name} cpuset.cpus {cpuset_cpus}"
                f"Failed to set cpuset.cpus to container {self.container.name}."

    def execute(self, command):
        """Start a process inside a running container.

        Runs the specified command inside the container specified by name. The
        container has to be running already.

        :param command: Command to run inside container.
        :type command: str
        :raises RuntimeError: If running the command failed.
        """
        # Forward the container's env vars into the attached shell.
        env = u"--keep-env " + u" ".join(
            f"--set-var {env!s}" for env in self.container.env) \
            if self.container.env else u""

        cmd = f"lxc-attach {env} --name {self.container.name} " \
            f"-- /bin/sh -c '{command}; exit $?'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
                f"Failed to run command inside container {self.container.name}."

        :raises RuntimeError: If stopping the container failed.
        """
        cmd = f"lxc-stop --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
                f"Failed to stop container {self.container.name}."
        self._lxc_wait(u"STOPPED|FROZEN")

        """Destroy a container.

        :raises RuntimeError: If destroying container failed.
        """
        cmd = f"lxc-destroy --force --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
                f"Failed to destroy container {self.container.name}."

        """Query and shows information about a container.

        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
                f"Failed to get info about container {self.container.name}."

    def system_info(self):
        """Check the current kernel for LXC support.

        :raises RuntimeError: If checking LXC support failed.
        """
        cmd = u"lxc-checkconfig"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
            raise RuntimeError(u"Failed to check LXC support.")

    def is_container_running(self):
        """Check if container is running on node.

        :returns: True if container is running.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --state --name {self.container.name}"

        ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
                f"Failed to get info about container {self.container.name}."
        return u"RUNNING" in stdout

    def is_container_present(self):
        """Check if container is existing on node.

        :returns: True if container is present.
        :rtype: bool
        :raises RuntimeError: If getting info about a container failed.
        """
        cmd = f"lxc-info --no-humanize --name {self.container.name}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)

    def _lxc_wait(self, state):
        """Wait for a specific container state.

        :param state: Specify the container state(s) to wait for.
        :type state: str
        :raises RuntimeError: If waiting for state of a container failed.
        """
        cmd = f"lxc-wait --name {self.container.name} --state '{state}'"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
            raise RuntimeError(
                f"Failed to wait for state '{state}' "
                f"of container {self.container.name}."
class Docker(ContainerEngine):
    """Docker implementation.

    All host interaction goes through ``self.container.ssh`` sudo commands
    wrapping the docker CLI.
    """
    # NOTE(review): this view of the file is missing interior source lines;
    # only comments and docstrings were added/completed here.

    # Implicit constructor is inherited.

    def acquire(self, force=True):
        """Pull an image or a repository from a registry.

        :param force: Destroy a container if exists.
        :type force: bool
        :raises RuntimeError: If pulling a container failed.
        """
        if self.is_container_present():
        if not self.container.image:
            # Choose the default SUT image by node architecture.
            img = Constants.DOCKER_SUT_IMAGE_UBUNTU_ARM \
                if Topology.get_node_arch(self.container.node) == u"aarch64" \
                else Constants.DOCKER_SUT_IMAGE_UBUNTU
            setattr(self.container, u"image", img)

        cmd = f"docker pull {self.container.image}"

        ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=1800)
                f"Failed to create container {self.container.name}."
        if self.container.cpuset_cpus:
            self._configure_cgroup(u"docker")
1097 """Build container (compile).
1099 TODO: Remove from parent class if no sibling implements this.
1101 raise NotImplementedError
1104 """Create/deploy container.
1106 :raises RuntimeError: If creating a container failed.
1108 cpuset_cpus = u"--cpuset-cpus=" + u",".join(
1109 f"{cpu!s}" for cpu in self.container.cpuset_cpus) \
1110 if self.container.cpuset_cpus else u""
1112 cpuset_mems = f"--cpuset-mems={self.container.cpuset_mems}" \
1113 if self.container.cpuset_mems is not None else u""
1114 # Temporary workaround - disabling due to bug in memif
1117 env = u" ".join(f"--env {env!s}" for env in self.container.env) \
1118 if self.container.env else u""
1120 command = str(self.container.command) if self.container.command else u""
1122 publish = u" ".join(
1123 f"--publish {var!s}" for var in self.container.publish
1124 ) if self.container.publish else u""
1127 f"--volume {mnt!s}" for mnt in self.container.mnt) \
1128 if self.container.mnt else u""
1130 cmd = f"docker run --privileged --detach --interactive --tty --rm " \
1131 f"--cgroup-parent docker {cpuset_cpus} {cpuset_mems} {publish} " \
1132 f"{env} {volume} --name {self.container.name} " \
1133 f"{self.container.image} {command}"
1135 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1138 f"Failed to create container {self.container.name}"
1143 def execute(self, command):
1144 """Start a process inside a running container.
1146 Runs the specified command inside the container specified by name. The
1147 container has to be running already.
1149 :param command: Command to run inside container.
1151 :raises RuntimeError: If running the command in a container failed.
1153 cmd = f"docker exec --interactive {self.container.name} " \
1154 f"/bin/sh -c '{command}; exit $?'"
1156 ret, _, _ = self.container.ssh.exec_command_sudo(cmd, timeout=180)
1159 f"Failed to execute command in container {self.container.name}."
1163 """Stop running container.
1165 :raises RuntimeError: If stopping a container failed.
1167 cmd = f"docker stop {self.container.name}"
1169 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1172 f"Failed to stop container {self.container.name}."
1176 """Remove a container.
1178 :raises RuntimeError: If removing a container failed.
1180 cmd = f"docker rm --force {self.container.name}"
1182 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1185 f"Failed to destroy container {self.container.name}."
1189 """Return low-level information on Docker objects.
1191 :raises RuntimeError: If getting info about a container failed.
1193 cmd = f"docker inspect {self.container.name}"
1195 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1198 f"Failed to get info about container {self.container.name}."
1201 def system_info(self):
1202 """Display the docker system-wide information.
1204 :raises RuntimeError: If displaying system information failed.
1206 cmd = u"docker system info"
1208 ret, _, _ = self.container.ssh.exec_command_sudo(cmd)
1210 raise RuntimeError(u"Failed to get system info.")
1212 def is_container_present(self):
1213 """Check if container is present on node.
1215 :returns: True if container is present.
1217 :raises RuntimeError: If getting info about a container failed.
1219 cmd = f"docker ps --all --quiet --filter name={self.container.name}"
1221 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1224 f"Failed to get info about container {self.container.name}."
1228 def is_container_running(self):
1229 """Check if container is running on node.
1231 :returns: True if container is running.
1233 :raises RuntimeError: If getting info about a container failed.
1235 cmd = f"docker ps --quiet --filter name={self.container.name}"
1237 ret, stdout, _ = self.container.ssh.exec_command_sudo(cmd)
1240 f"Failed to get info about container {self.container.name}."
1246 """Container class."""
1248 def __getattr__(self, attr):
1249 """Get attribute custom implementation.
1251 :param attr: Attribute to get.
1253 :returns: Attribute value or None.
1257 return self.__dict__[attr]
1261 def __setattr__(self, attr, value):
1262 """Set attribute custom implementation.
1264 :param attr: Attribute to set.
1265 :param value: Value to set.
1270 # Check if attribute exists
1273 # Creating new attribute
1275 self.__dict__[u"ssh"] = SSH()
1276 self.__dict__[u"ssh"].connect(value)
1277 self.__dict__[attr] = value
1279 # Updating attribute base of type
1280 if isinstance(self.__dict__[attr], list):
1281 self.__dict__[attr].append(value)
1283 self.__dict__[attr] = value