#include <vppinfra/error.h>
#include <vppinfra/format.h>
#include <vppinfra/bitmap.h>
+#include <vppinfra/linux/sysfs.h>
+#include <vlib/unix/unix.h>
#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>
-#include <vlib/unix/physmem.h>
#include <vlib/pci/pci.h>
+#include <rte_ring.h>
+
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <dpdk/device/dpdk_priv.h>
dpdk_main_t dpdk_main;
+dpdk_config_main_t dpdk_config_main;
#define LINK_STATE_ELOGS 0
-#define DEFAULT_HUGE_DIR (VPP_RUN_DIR "/hugepages")
-
/* Port configuration, mildly modified Intel app values */
static struct rte_eth_conf port_conf_template = {
}
}
+/**
+ * Look up a registered mempool ops vtable by name.
+ *
+ * @param ops_name  NUL-terminated ops name, e.g. "ring_mp_mc".
+ * @return pointer into rte_mempool_ops_table on match, 0 if not found.
+ *
+ * Note: takes char * (not i8 *) so the argument matches both strcmp()
+ * and the string literals passed by callers.
+ */
+static struct rte_mempool_ops *
+get_ops_by_name (char *ops_name)
+{
+  u32 i;
+
+  for (i = 0; i < rte_mempool_ops_table.num_ops; i++)
+    {
+      if (!strcmp (ops_name, rte_mempool_ops_table.ops[i].name))
+	return &rte_mempool_ops_table.ops[i];
+    }
+
+  return 0;
+}
+
+/**
+ * Custom alloc handler for DPDK's ring-based mempool ops.
+ *
+ * Allocates the backing ring from clib memory (clib_mem_alloc_aligned)
+ * instead of a DPDK memzone, then initializes it in place.
+ *
+ * @param mp  mempool being set up; on success mp->pool_data points to
+ *            the initialized ring.
+ * @return 0 on success, negative errno-style value on failure.
+ *
+ * NOTE(review): the matching ring-ops free handler presumably expects a
+ * memzone-backed ring; confirm teardown path before relying on free.
+ */
+static int
+dpdk_ring_alloc (struct rte_mempool *mp)
+{
+  u32 rg_flags = 0, count;
+  i32 ret;
+  char rg_name[RTE_RING_NAMESIZE];
+  struct rte_ring *r;
+
+  /* build the ring name from the mempool name; reject truncation */
+  ret = snprintf (rg_name, sizeof (rg_name), RTE_MEMPOOL_MZ_FORMAT, mp->name);
+  if (ret < 0 || ret >= (i32) sizeof (rg_name))
+    return -ENAMETOOLONG;
+
+  /* map mempool single-producer/consumer flags onto ring flags */
+  if (mp->flags & MEMPOOL_F_SP_PUT)
+    rg_flags |= RING_F_SP_ENQ;
+  if (mp->flags & MEMPOOL_F_SC_GET)
+    rg_flags |= RING_F_SC_DEQ;
+
+  /* ring size must be a power of two large enough for mp->size objects */
+  count = rte_align32pow2 (mp->size + 1);
+  /*
+   * Allocate the ring that will be used to store objects.
+   * Ring functions will return appropriate errors if we are
+   * running as a secondary process etc., so no checks made
+   * in this function for that condition.
+   */
+  /* XXX can we get memory from the right socket? */
+  r = clib_mem_alloc_aligned (rte_ring_get_memsize (count),
+			      CLIB_CACHE_LINE_BYTES);
+
+  /* XXX rte_ring_lookup will not work */
+
+  ret = rte_ring_init (r, rg_name, count, rg_flags);
+  if (ret)
+    {
+      /* don't leak the clib allocation when ring init fails */
+      clib_mem_free (r);
+      return ret;
+    }
+
+  mp->pool_data = r;
+
+  return 0;
+}
+
static clib_error_t *
dpdk_lib_init (dpdk_main_t * dm)
{
"dpdk rx");
if (dm->conf->enable_tcp_udp_checksum)
- dm->buffer_flags_template &= ~(IP_BUFFER_L4_CHECKSUM_CORRECT
- | IP_BUFFER_L4_CHECKSUM_COMPUTED);
+ dm->buffer_flags_template &= ~(VNET_BUFFER_F_L4_CHECKSUM_CORRECT
+ | VNET_BUFFER_F_L4_CHECKSUM_COMPUTED);
/* vlib_buffer_t template */
vec_validate_aligned (dm->buffer_templates, tm->n_vlib_mains - 1,
case VNET_DPDK_PMD_IGB:
case VNET_DPDK_PMD_IXGBE:
case VNET_DPDK_PMD_I40E:
+ xd->port_type = port_type_from_speed_capa (&dev_info);
+ xd->flags |= DPDK_DEVICE_FLAG_TX_OFFLOAD |
+ DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM;
+
+ break;
case VNET_DPDK_PMD_CXGBE:
case VNET_DPDK_PMD_MLX4:
case VNET_DPDK_PMD_MLX5:
case VNET_DPDK_PMD_THUNDERX:
xd->port_type = VNET_DPDK_PORT_TYPE_ETH_VF;
+ xd->port_conf.rxmode.hw_strip_crc = 1;
break;
case VNET_DPDK_PMD_DPAA2:
hi = vnet_get_hw_interface (dm->vnet_main, xd->hw_if_index);
+ if (xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD)
+ hi->flags |= VNET_HW_INTERFACE_FLAG_SUPPORTS_TX_L4_CKSUM_OFFLOAD;
+
dpdk_device_setup (xd);
if (vec_len (xd->errors))
/* Chelsio T4/T5 */
else if (d->vendor_id == 0x1425 && (d->device_id & 0xe000) == 0x4000)
;
+ /* Mellanox */
+ else if (d->vendor_id == 0x15b3 && d->device_id >= 0x1013 && d->device_id <= 0x101a)
+ {
+ continue;
+ }
else
{
clib_warning ("Unsupported PCI device 0x%04x:0x%04x found "
u8 huge_dir = 0;
u8 file_prefix = 0;
u8 *socket_mem = 0;
+ u8 *huge_dir_path = 0;
+
+ huge_dir_path =
+ format (0, "%s/hugepages%c", vlib_unix_get_runtime_dir (), 0);
conf->device_config_index_by_pci_addr = hash_create (0, sizeof (uword));
log_level = RTE_LOG_NOTICE;
{
u32 x, *mem_by_socket = 0;
uword c = 0;
- u8 use_1g = 1;
- u8 use_2m = 1;
- u8 less_than_1g = 1;
int rv;
- umount (DEFAULT_HUGE_DIR);
+ umount ((char *) huge_dir_path);
/* Process "socket-mem" parameter value */
if (vec_len (socket_mem))
break;
vec_add1 (mem_by_socket, x);
-
- if (x > 1023)
- less_than_1g = 0;
}
/* Note: unformat_free vec_frees(in.buffer), aka socket_mem... */
unformat_free (&in);
clib_bitmap_foreach (c, tm->cpu_socket_bitmap, (
{
vec_validate(mem_by_socket, c);
- mem_by_socket[c] = 256; /* default per-socket mem */
+ mem_by_socket[c] = 64; /* default per-socket mem */
}
));
/* *INDENT-ON* */
}
- /* check if available enough 1GB pages for each socket */
/* *INDENT-OFF* */
clib_bitmap_foreach (c, tm->cpu_socket_bitmap, (
{
- int pages_avail, page_size, mem;
+ clib_error_t *e;
vec_validate(mem_by_socket, c);
- mem = mem_by_socket[c];
- page_size = 1024;
- pages_avail = vlib_sysfs_get_free_hugepages(c, page_size * 1024);
-
- if (pages_avail < 0 || page_size * pages_avail < mem)
- use_1g = 0;
-
- page_size = 2;
- pages_avail = vlib_sysfs_get_free_hugepages(c, page_size * 1024);
-
- if (pages_avail < 0 || page_size * pages_avail < mem)
- use_2m = 0;
+ e = clib_sysfs_prealloc_hugepages(c, 2 << 10, mem_by_socket[c] / 2);
+ if (e)
+ clib_error_report (e);
}));
/* *INDENT-ON* */
vec_free (mem_by_socket);
- /* Make sure VPP_RUN_DIR exists */
- error = unix_make_vpp_run_dir ();
+ error = vlib_unix_recursive_mkdir ((char *) huge_dir_path);
if (error)
- goto done;
-
- rv = mkdir (DEFAULT_HUGE_DIR, 0755);
- if (rv && errno != EEXIST)
{
- error = clib_error_return (0, "mkdir '%s' failed errno %d",
- DEFAULT_HUGE_DIR, errno);
goto done;
}
- if (use_1g && !(less_than_1g && use_2m))
- {
- rv =
- mount ("none", DEFAULT_HUGE_DIR, "hugetlbfs", 0, "pagesize=1G");
- }
- else if (use_2m)
- {
- rv = mount ("none", DEFAULT_HUGE_DIR, "hugetlbfs", 0, NULL);
- }
- else
- {
- return clib_error_return (0, "not enough free huge pages");
- }
+ rv = mount ("none", (char *) huge_dir_path, "hugetlbfs", 0, NULL);
if (rv)
{
tmp = format (0, "--huge-dir%c", 0);
vec_add1 (conf->eal_init_args, tmp);
- tmp = format (0, "%s%c", DEFAULT_HUGE_DIR, 0);
+ tmp = format (0, "%s%c", huge_dir_path, 0);
vec_add1 (conf->eal_init_args, tmp);
if (!file_prefix)
{
/* Set up DPDK eal and packet mbuf pool early. */
-#if RTE_VERSION >= RTE_VERSION_NUM(17, 5, 0, 0)
rte_log_set_global_level (log_level);
-#else
- rte_set_log_level (log_level);
-#endif
vm = vlib_get_main ();
conf->eal_init_args_str = format (conf->eal_init_args_str, "%s ",
conf->eal_init_args[i]);
+ clib_warning ("EAL init args: %s", conf->eal_init_args_str);
ret =
rte_eal_init (vec_len (conf->eal_init_args),
(char **) conf->eal_init_args);
/* lazy umount hugepages */
- umount2 (DEFAULT_HUGE_DIR, MNT_DETACH);
+ umount2 ((char *) huge_dir_path, MNT_DETACH);
+ rmdir ((char *) huge_dir_path);
+ vec_free (huge_dir_path);
if (ret < 0)
return clib_error_return (0, "rte_eal_init returned %d", ret);
fprintf (stdout, "DPDK physical memory layout:\n");
rte_dump_physmem_layout (stdout);
+ /* set custom ring memory allocator */
+ {
+ struct rte_mempool_ops *ops = NULL;
+
+ ops = get_ops_by_name ("ring_sp_sc");
+ ops->alloc = dpdk_ring_alloc;
+
+ ops = get_ops_by_name ("ring_mp_sc");
+ ops->alloc = dpdk_ring_alloc;
+
+ ops = get_ops_by_name ("ring_sp_mc");
+ ops->alloc = dpdk_ring_alloc;
+
+ ops = get_ops_by_name ("ring_mp_mc");
+ ops->alloc = dpdk_ring_alloc;
+ }
+
/* main thread 1st */
- error = vlib_buffer_pool_create (vm, conf->num_mbufs, rte_socket_id ());
+ error = dpdk_buffer_pool_create (vm, conf->num_mbufs, rte_socket_id ());
if (error)
return error;
for (i = 0; i < RTE_MAX_LCORE; i++)
{
- error = vlib_buffer_pool_create (vm, conf->num_mbufs,
+ error = dpdk_buffer_pool_create (vm, conf->num_mbufs,
rte_lcore_to_socket_id (i));
if (error)
return error;
/* Default vlib_buffer_t flags, DISABLES tcp/udp checksumming... */
dm->buffer_flags_template =
(VLIB_BUFFER_TOTAL_LENGTH_VALID | VLIB_BUFFER_EXT_HDR_VALID
- | IP_BUFFER_L4_CHECKSUM_COMPUTED | IP_BUFFER_L4_CHECKSUM_CORRECT);
+ | VNET_BUFFER_F_L4_CHECKSUM_COMPUTED |
+ VNET_BUFFER_F_L4_CHECKSUM_CORRECT);
dm->stat_poll_interval = DPDK_STATS_POLL_INTERVAL;
dm->link_state_poll_interval = DPDK_LINK_POLL_INTERVAL;