},
};
+VLIB_NODE_FUNCTION_MULTIARCH (handoff_dispatch_node, handoff_dispatch_node_fn)
+
clib_error_t *handoff_dispatch_init (vlib_main_t *vm)
{
handoff_dispatch_main_t * mp = &handoff_dispatch_main;
n_left -= 1;
b0 = vlib_get_buffer (vm, bi0);
- mb = ((struct rte_mbuf *)b0) - 1;
+ mb = rte_mbuf_from_vlib_buffer(b0);
dpdk_rx_next_and_error_from_mb_flags_x1 (xd, mb, b0,
&next0, &error0);
vlib_trace_buffer (vm, node, next0, b0, /* follow_chain */ 0);
t0->device_index = xd->device_index;
t0->buffer_index = bi0;
- memcpy (&t0->mb, mb, sizeof (t0->mb));
- memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
- memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));
+ clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
+ clib_memcpy (&t0->buffer, b0, sizeof (b0[0]) - sizeof (b0->pre_data));
+ clib_memcpy (t0->buffer.pre_data, b0->data, sizeof (t0->buffer.pre_data));
#ifdef RTE_LIBRTE_MBUF_EXT_RX_OLFLAGS
/*
dpdk_device_t * xd,
vlib_node_runtime_t * node,
u32 cpu_index,
- u16 queue_id)
+ u16 queue_id,
+ int use_efd)
{
u32 n_buffers;
u32 next_index = DPDK_RX_NEXT_ETHERNET_INPUT;
if (n_buffers == 0)
{
/* check if EFD (dpdk) is enabled */
- if (PREDICT_FALSE(dm->efd.enabled))
+ if (PREDICT_FALSE(use_efd && dm->efd.enabled))
{
/* reset a few stats */
xd->efd_agent.last_poll_time = 0;
/* Check for congestion if EFD (Early-Fast-Discard) is enabled
* in any mode (e.g. dpdk, monitor, or drop_all)
*/
- if (PREDICT_FALSE(dm->efd.enabled))
+ if (PREDICT_FALSE(use_efd && dm->efd.enabled))
{
/* update EFD counters */
dpdk_efd_update_counters(xd, n_buffers, dm->efd.enabled);
if (PREDICT_TRUE(n_buffers > 2))
{
struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
- vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
+ vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, STORE);
CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
}
ASSERT(mb);
- b0 = (vlib_buffer_t *)(mb+1);
+ b0 = vlib_buffer_from_rte_mbuf(mb);
/* check whether EFD is looking for packets to discard */
if (PREDICT_FALSE(efd_discard_burst))
{
vlib_thread_main_t * tm = vlib_get_thread_main();
-
+
if (PREDICT_TRUE(cntr_type = is_efd_discardable(tm, b0, mb)))
{
rte_pktmbuf_free(mb);
if (PREDICT_FALSE(mb->nb_segs > 1))
{
struct rte_mbuf *pfmb = mb->next;
- vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
+ vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
b_chain = b0;
{
ASSERT(mb_seg != 0);
- b_seg = (vlib_buffer_t *)(mb_seg+1);
+ b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
vlib_buffer_init_for_free_list (b_seg, fl);
b_seg->clone_count = 0;
{
xd = vec_elt_at_index(dm->devices, dq->device);
ASSERT(dq->queue_id == 0);
- n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0);
+ n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, 0, 0);
}
VIRL_SPEED_LIMIT()
vec_foreach (dq, dm->devices_by_cpu[cpu_index])
{
xd = vec_elt_at_index(dm->devices, dq->device);
- n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id);
+ n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 0);
}
VIRL_SPEED_LIMIT()
return n_rx_packets;
}
+/*
+ * dpdk_input_efd - input node function variant with EFD
+ * (Early-Fast-Discard) enabled.
+ *
+ * Polls every device/queue assigned to the calling cpu by invoking
+ * dpdk_device_input() with use_efd = 1, so the per-device EFD
+ * congestion checks and discard logic are compiled in on this path.
+ *
+ * Returns the total number of packets received across all polled
+ * devices on this cpu.
+ */
+uword
+dpdk_input_efd (vlib_main_t * vm,
+	      vlib_node_runtime_t * node,
+	      vlib_frame_t * f)
+{
+  dpdk_main_t * dm = &dpdk_main;
+  dpdk_device_t * xd;
+  uword n_rx_packets = 0;
+  dpdk_device_and_queue_t * dq;
+  u32 cpu_index = os_get_cpu_number();
+
+  /*
+   * Poll all devices on this cpu for input/interrupts.
+   */
+  vec_foreach (dq, dm->devices_by_cpu[cpu_index])
+    {
+      xd = vec_elt_at_index(dm->devices, dq->device);
+      /* use_efd = 1: enable Early-Fast-Discard processing */
+      n_rx_packets += dpdk_device_input (dm, xd, node, cpu_index, dq->queue_id, 1);
+    }
+
+  /* NOTE(review): VIRL_SPEED_LIMIT is an opaque macro here — presumably a
+   * simulation-environment rate limiter; confirm against its definition. */
+  VIRL_SPEED_LIMIT()
+
+  return n_rx_packets;
+}
+
+
VLIB_REGISTER_NODE (dpdk_input_node) = {
.function = dpdk_input,
.type = VLIB_NODE_TYPE_INPUT,
},
};
+
+/* handle dpdk_input_rss alternative function */
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input)
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_rss)
+VLIB_NODE_FUNCTION_MULTIARCH_CLONE(dpdk_input_efd)
+
+/* this macro defines dpdk_input_rss_multiarch_select() */
+CLIB_MULTIARCH_SELECT_FN(dpdk_input);
+CLIB_MULTIARCH_SELECT_FN(dpdk_input_rss);
+CLIB_MULTIARCH_SELECT_FN(dpdk_input_efd);
+
/*
* Override the next nodes for the dpdk input nodes.
* Must be invoked prior to VLIB_INIT_FUNCTION calls.
u64 hash_key;
hash_key = ip->src_address.as_u64[0] ^
- ip->src_address.as_u64[1] ^
- ip->dst_address.as_u64[0] ^
- ip->dst_address.as_u64[1] ^
+ rotate_left(ip->src_address.as_u64[1],13) ^
+ rotate_left(ip->dst_address.as_u64[0],26) ^
+ rotate_left(ip->dst_address.as_u64[1],39) ^
ip->protocol;
return hash_key;
u32 num_devices = 0;
uword * p;
u16 queue_id = 0;
- vlib_node_runtime_t * node_trace;
+ vlib_node_runtime_t * node_trace = 0;
u32 first_worker_index = 0;
u32 buffer_flags_template;
first_worker_index + num_workers - 1,
(vlib_frame_queue_t *)(~0));
- /* packet tracing is triggered on the dpdk-input node for ease-of-use */
- node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
-
buffer_flags_template = dm->buffer_flags_template;
/* And handle them... */
continue;
}
- vec_reset_length (xd->d_trace_buffers);
- trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
+ trace_cnt = n_trace = 0;
+ if (PREDICT_FALSE(vm->trace_main.trace_active_hint))
+ {
+ /*
+ * packet tracing is triggered on the dpdk-input node for
+ * ease-of-use. Re-fetch the node_runtime for dpdk-input
+ * in case it has changed.
+ */
+ node_trace = vlib_node_get_runtime (vm, dpdk_input_node.index);
+
+ vec_reset_length (xd->d_trace_buffers);
+ trace_cnt = n_trace = vlib_get_trace_count (vm, node_trace);
+ }
/*
* DAW-FIXME: VMXNET3 device stop/start doesn't work,
if (PREDICT_TRUE(n_buffers > 1))
{
struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
- vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
+ vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
}
- b0 = (vlib_buffer_t *)(mb+1);
+ b0 = vlib_buffer_from_rte_mbuf(mb);
/* check whether EFD is looking for packets to discard */
if (PREDICT_FALSE(efd_discard_burst))
if (PREDICT_FALSE(mb->nb_segs > 1))
{
struct rte_mbuf *pfmb = mb->next;
- vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
+ vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
b_chain = b0;
{
ASSERT(mb_seg != 0);
- b_seg = (vlib_buffer_t *)(mb_seg+1);
+ b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
vlib_buffer_init_for_free_list (b_seg, fl);
b_seg->clone_count = 0;
if (PREDICT_TRUE(n_buffers > 1))
{
struct rte_mbuf *pfmb = xd->rx_vectors[queue_id][mb_index+2];
- vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
+ vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
CLIB_PREFETCH (bp->data, CLIB_CACHE_LINE_BYTES, LOAD);
}
-
- b0 = (vlib_buffer_t *)(mb+1);
-
+
+ b0 = vlib_buffer_from_rte_mbuf(mb);
+
/* check whether EFD is looking for packets to discard */
if (PREDICT_FALSE(efd_discard_burst))
{
if (PREDICT_FALSE(mb->nb_segs > 1))
{
struct rte_mbuf *pfmb = mb->next;
- vlib_buffer_t *bp = (vlib_buffer_t *)(pfmb+1);
+ vlib_buffer_t *bp = vlib_buffer_from_rte_mbuf(pfmb);
CLIB_PREFETCH (pfmb, CLIB_CACHE_LINE_BYTES, LOAD);
CLIB_PREFETCH (bp, CLIB_CACHE_LINE_BYTES, STORE);
b_chain = b0;
{
ASSERT(mb_seg != 0);
- b_seg = (vlib_buffer_t *)(mb_seg+1);
+ b_seg = vlib_buffer_from_rte_mbuf(mb_seg);
vlib_buffer_init_for_free_list (b_seg, fl);
b_seg->clone_count = 0;