-# Copyright (c) 2017 Cisco and/or its affiliates.
+# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
from time import sleep
-from resources.libraries.python.constants import Constants
+from resources.libraries.python.Constants import Constants
from resources.libraries.python.topology import NodeType
-from resources.libraries.python.ssh import SSH
+from resources.libraries.python.ssh import SSH, exec_cmd_no_error
from resources.libraries.python.CpuUtils import CpuUtils
from resources.libraries.python.VppConfigGenerator import VppConfigGenerator
"""Initialize KubernetesUtils class."""
pass
+    @staticmethod
+    def load_docker_image_on_node(node, image_path):
+        """Load Docker container image from file on node.
+
+        :param node: DUT node.
+        :param image_path: Container image path.
+        :type node: dict
+        :type image_path: str
+        :raises RuntimeError: If loading image failed on node.
+        """
+        command = 'docker load -i {image_path}'.\
+            format(image_path=image_path)
+        message = 'Failed to load Docker image on {node}.'.\
+            format(node=node['host'])
+        exec_cmd_no_error(node, command, timeout=240, sudo=True,
+                          message=message)
+
+        # Best-effort cleanup: remove dangling (untagged) images left behind
+        # by the load. The command substitution is evaluated by the remote
+        # shell; NOTE the string has no format placeholder, so no .format()
+        # call is needed here.
+        command = "docker rmi $(sudo docker images -f 'dangling=true' -q)"
+        message = 'Failed to clean Docker images on {node}.'.\
+            format(node=node['host'])
+        try:
+            exec_cmd_no_error(node, command, timeout=240, sudo=True,
+                              message=message)
+        except RuntimeError:
+            # Cleanup is optional; e.g. no dangling images makes rmi fail.
+            pass
+
+    @staticmethod
+    def load_docker_image_on_all_duts(nodes, image_path):
+        """Load Docker container image from file on all DUTs.
+
+        :param nodes: Topology nodes.
+        :param image_path: Container image path.
+        :type nodes: dict
+        :type image_path: str
+        """
+        # Select DUT nodes first, then load the image on each of them.
+        duts = [dut for dut in nodes.values()
+                if dut['type'] == NodeType.DUT]
+        for dut in duts:
+            KubernetesUtils.load_docker_image_on_node(dut, image_path)
+
@staticmethod
def setup_kubernetes_on_node(node):
"""Set up Kubernetes on node.
:type rtype: str
:type name: str
:raises RuntimeError: If retrieving or deleting Kubernetes resource
- failed.
+ failed.
"""
ssh = SSH()
ssh.connect(node)
cmd = 'kubectl delete {nspace} {rtype} {name}'\
.format(nspace=nspace, rtype=rtype, name=name)
- (ret_code, _, _) = ssh.exec_command_sudo(cmd)
+ (ret_code, _, _) = ssh.exec_command_sudo(cmd, timeout=120)
if int(ret_code) != 0:
raise RuntimeError('Failed to delete Kubernetes resources '
'on {node}.'.format(node=node['host']))
- cmd = 'kubectl get {nspace} pods -a --no-headers'\
+ cmd = 'kubectl get {nspace} pods --no-headers'\
.format(nspace=nspace)
for _ in range(MAX_RETRY):
(ret_code, stdout, stderr) = ssh.exec_command_sudo(cmd)
nspace = '-n {nspace}'.format(nspace=nspace) if nspace \
else '--all-namespaces'
- cmd = 'kubectl get {nspace} pods -a --no-headers' \
+ cmd = 'kubectl get {nspace} pods --no-headers' \
.format(nspace=nspace)
for _ in range(MAX_RETRY):
(ret_code, stdout, _) = ssh.exec_command_sudo(cmd)
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
+ smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
+
cpuset_cpus = \
CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
cpu_node=kwargs['cpu_node'],
- skip_cnt=kwargs['cpu_skip'],
- cpu_cnt=kwargs['cpu_cnt'],
- smt_used=kwargs['smt_used'])
+ skip_cnt=2,
+ cpu_cnt=kwargs['phy_cores'],
+ smt_used=smt_used)
+ cpuset_main = \
+ CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
+ cpu_node=kwargs['cpu_node'],
+ skip_cnt=1,
+ cpu_cnt=1,
+ smt_used=smt_used)
# Create config instance
vpp_config = VppConfigGenerator()
vpp_config.set_node(kwargs['node'])
vpp_config.add_unix_cli_listen(value='0.0.0.0:5002')
vpp_config.add_unix_nodaemon()
- vpp_config.add_dpdk_socketmem('1024,1024')
- vpp_config.add_heapsize('3G')
+ vpp_config.add_heapsize('4G')
+ vpp_config.add_ip_heap_size('4G')
+ vpp_config.add_ip6_heap_size('4G')
vpp_config.add_ip6_hash_buckets('2000000')
- vpp_config.add_ip6_heap_size('3G')
- if kwargs['framesize'] < 1522:
+ if not kwargs['jumbo']:
vpp_config.add_dpdk_no_multi_seg()
- vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq'])
+ vpp_config.add_dpdk_no_tx_checksum_offload()
+ vpp_config.add_dpdk_dev_default_rxq(kwargs['rxq_count_int'])
vpp_config.add_dpdk_dev(kwargs['if1'], kwargs['if2'])
+ vpp_config.add_buffers_per_numa(kwargs['buffers_per_numa'])
# We will pop first core from list to be main core
- vpp_config.add_cpu_main_core(str(cpuset_cpus.pop(0)))
+ vpp_config.add_cpu_main_core(str(cpuset_main.pop(0)))
# if this is not only core in list, the rest will be used as workers.
if cpuset_cpus:
corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
- vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
+ vpp_config.write_config(filename=kwargs['filename'])
@staticmethod
def create_kubernetes_vnf_startup_config(**kwargs):
:param kwargs: Key-value pairs used to create configuration.
:param kwargs: dict
"""
+ smt_used = CpuUtils.is_smt_enabled(kwargs['node']['cpuinfo'])
skip_cnt = kwargs['cpu_skip'] + (kwargs['i'] - 1) * \
- (kwargs['cpu_cnt'] - 1)
+ (kwargs['phy_cores'] - 1)
cpuset_cpus = \
CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
cpu_node=kwargs['cpu_node'],
skip_cnt=skip_cnt,
- cpu_cnt=kwargs['cpu_cnt']-1,
- smt_used=kwargs['smt_used'])
+ cpu_cnt=kwargs['phy_cores']-1,
+ smt_used=smt_used)
cpuset_main = \
CpuUtils.cpu_slice_of_list_per_node(node=kwargs['node'],
cpu_node=kwargs['cpu_node'],
skip_cnt=1,
cpu_cnt=1,
- smt_used=kwargs['smt_used'])
+ smt_used=smt_used)
# Create config instance
vpp_config = VppConfigGenerator()
vpp_config.set_node(kwargs['node'])
corelist_workers = ','.join(str(cpu) for cpu in cpuset_cpus)
vpp_config.add_cpu_corelist_workers(corelist_workers)
vpp_config.add_plugin('disable', 'dpdk_plugin.so')
- vpp_config.apply_config(filename=kwargs['filename'], restart_vpp=False)
+ vpp_config.write_config(filename=kwargs['filename'])