/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/buffer.h>
#include <dpdk/device/dpdk.h>
#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#include <vlib/unix/unix.h>
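
/* TX function error counters: the X-macro list below expands once into the
   dpdk_tx_func_error_t enum and once into the matching string table used
   when the counters are displayed. */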
#define foreach_dpdk_tx_func_error                          \
  _(BAD_RETVAL, "DPDK tx function returned an error")       \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};

static clib_error_t *
dpdk_add_del_mac_address (vnet_hw_interface_t * hi,
			  const u8 * address, u8 is_add)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  if (is_add)
    error = rte_eth_dev_mac_addr_add (xd->port_id,
				      (struct rte_ether_addr *) address, 0);
  else
    error = rte_eth_dev_mac_addr_remove (xd->port_id,
					 (struct rte_ether_addr *) address);
  if (error)
    return clib_error_return (0, "mac address add/del failed: %d", error);

  return NULL;
}
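
/* Device-class mac_addr_change_function callback: program the new primary
   MAC address into the NIC, then cache it in xd->default_mac_address. */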
static clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi,
		      const u8 * old_address, const u8 * address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->port_id, (void *) address);
  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  vec_add (xd->default_mac_address, address, sizeof (mac_address_t));
  return NULL;
}
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm, vlib_node_runtime_t * node,
		      dpdk_device_t * xd, u16 queue_id,
		      vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = vlib_get_buffer_index (vm, buffer);
  clib_memcpy_fast (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy_fast (&t0->buffer, buffer,
		    sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy_fast (t0->buffer.pre_data, buffer->data + buffer->current_data,
		    sizeof (t0->buffer.pre_data));
  clib_memcpy_fast (&t0->data, mb->buf_addr + mb->data_off,
		    sizeof (t0->data));
}
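
/* Rebuild the rte_mbuf metadata (lengths, data offset, segment chain) of a
   possibly chained vlib buffer so it is consistent before being handed to
   the PMD; buffers that did not originate from DPDK are reset first. */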
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
			int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;
  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    rte_pktmbuf_reset (mb);

  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
	rte_pktmbuf_reset (mb);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->ref_count > 1))
	mb->pool =
	  dpdk_no_cache_mempool_by_buffer_pool_index[b->buffer_pool_index];
    }
}

/*
 * This function calls DPDK's tx_burst function to transmit the packets.
 * It manages a lock per-device if the device does not support multiple
 * queues. It returns the number of packets untransmitted; if all packets
 * are transmitted (the normal case), the function returns 0.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
				dpdk_device_t * xd,
				struct rte_mbuf **mb, u32 n_left)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_tx_queue_t *txq;
  u32 n_retry = 16;
  int n_sent = 0;
  int queue_id;

  queue_id = vm->thread_index % xd->tx_q_used;
  txq = vec_elt_at_index (xd->tx_queues, queue_id);

  do
    {
      clib_spinlock_lock_if_init (&txq->lock);

      if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
	{
	  /* no wrap, transmit in one burst */
	  n_sent = rte_eth_tx_burst (xd->port_id, queue_id, mb, n_left);
	  n_retry--;
	}

      clib_spinlock_unlock_if_init (&txq->lock);

      if (PREDICT_FALSE (n_sent < 0))
	{
	  // emit non-fatal message, bump counter
	  vnet_main_t *vnm = dm->vnet_main;
	  vnet_interface_main_t *im = &vnm->interface_main;
	  u32 node_index;

	  node_index = vec_elt_at_index (im->hw_interfaces,
					 xd->hw_if_index)->tx_node_index;
	  vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
	  return n_left;	// untransmitted packets
	}
      n_left -= n_sent;
      mb += n_sent;
    }
  while (n_sent && n_left && (n_retry > 0));

  return n_left;
}

static_always_inline __clib_unused void
dpdk_prefetch_buffer (vlib_main_t * vm, struct rte_mbuf *mb)
{
  vlib_buffer_t *b = vlib_buffer_from_rte_mbuf (mb);
  CLIB_PREFETCH (mb, sizeof (struct rte_mbuf), STORE);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}
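
/* Translate the buffer's VPP offload requests (IP/TCP/UDP checksum, GSO)
   into DPDK PKT_TX_* ol_flags and l2/l3/l4 length fields on the mbuf. */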
static_always_inline void
dpdk_buffer_tx_offload (dpdk_device_t * xd, vlib_buffer_t * b,
			struct rte_mbuf *mb)
{
  int is_ip4 = b->flags & VNET_BUFFER_F_IS_IP4;
  u32 tso = b->flags & VNET_BUFFER_F_GSO, max_pkt_len;
  u32 ip_cksum, tcp_cksum, udp_cksum;
  u64 ol_flags;
  vnet_buffer_oflags_t oflags = 0;

  /* Is there any work for us? */
  if (PREDICT_TRUE (((b->flags & VNET_BUFFER_F_OFFLOAD) | tso) == 0))
    return;

  oflags = vnet_buffer (b)->oflags;
  ip_cksum = oflags & VNET_BUFFER_OFFLOAD_F_IP_CKSUM;
  tcp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_TCP_CKSUM;
  udp_cksum = oflags & VNET_BUFFER_OFFLOAD_F_UDP_CKSUM;

  mb->l2_len = vnet_buffer (b)->l3_hdr_offset - b->current_data;
  mb->l3_len = vnet_buffer (b)->l4_hdr_offset -
    vnet_buffer (b)->l3_hdr_offset;
  mb->outer_l3_len = 0;
  mb->outer_l2_len = 0;
  ol_flags = is_ip4 ? PKT_TX_IPV4 : PKT_TX_IPV6;
  ol_flags |= ip_cksum ? PKT_TX_IP_CKSUM : 0;
  ol_flags |= tcp_cksum ? PKT_TX_TCP_CKSUM : 0;
  ol_flags |= udp_cksum ? PKT_TX_UDP_CKSUM : 0;

  if (tso)
    {
      mb->l4_len = vnet_buffer2 (b)->gso_l4_hdr_sz;
      mb->tso_segsz = vnet_buffer2 (b)->gso_size;
      /* ensure packet is large enough to require tso */
      max_pkt_len = mb->l2_len + mb->l3_len + mb->l4_len + mb->tso_segsz;
      if (mb->tso_segsz != 0 && mb->pkt_len > max_pkt_len)
	ol_flags |= (tcp_cksum ? PKT_TX_TCP_SEG : PKT_TX_UDP_SEG);
    }

  mb->ol_flags |= ol_flags;

  /* we try to help the compiler here by using a local ol_flags with known
     state of all flags */
  if (xd->flags & DPDK_DEVICE_FLAG_INTEL_PHDR_CKSUM)
    rte_net_intel_cksum_flags_prepare (mb, ol_flags);
}

/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a per-thread array of
 * rte_mbuf pointers.
 */
VNET_DEVICE_CLASS_TX_FN (dpdk_device_class) (vlib_main_t * vm,
					     vlib_node_runtime_t * node,
					     vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 thread_index = vm->thread_index;
  int queue_id = thread_index;
  u32 tx_pkts = 0, all_or_flags = 0;
  dpdk_per_thread_data_t *ptd = vec_elt_at_index (dm->per_thread_data,
						  thread_index);
  struct rte_mbuf **mb;
  vlib_buffer_t *b[4];

  ASSERT (n_packets <= VLIB_FRAME_SIZE);

  /* calculate rte_mbuf pointers out of buffer indices */
  vlib_get_buffers_with_offset (vm, vlib_frame_vector_args (f),
				(void **) ptd->mbufs, n_packets,
				-(i32) sizeof (struct rte_mbuf));

  n_left = n_packets;
  mb = ptd->mbufs;
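
  /* The negative offset above works because, in this plugin's buffer
     layout, each rte_mbuf header immediately precedes its vlib_buffer_t in
     buffer memory. The loops below then walk the frame four (or two)
     packets at a time, prefetching ahead to hide metadata cache misses. */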
#if (CLIB_N_PREFETCHES >= 8)
  while (n_left >= 8)
    {
      u32 or_flags;

      dpdk_prefetch_buffer (vm, mb[4]);
      dpdk_prefetch_buffer (vm, mb[5]);
      dpdk_prefetch_buffer (vm, mb[6]);
      dpdk_prefetch_buffer (vm, mb[7]);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);
      b[2] = vlib_buffer_from_rte_mbuf (mb[2]);
      b[3] = vlib_buffer_from_rte_mbuf (mb[3]);

      or_flags = b[0]->flags | b[1]->flags | b[2]->flags | b[3]->flags;
      all_or_flags |= or_flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	  dpdk_validate_rte_mbuf (vm, b[2], 1);
	  dpdk_validate_rte_mbuf (vm, b[3], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	  dpdk_validate_rte_mbuf (vm, b[2], 0);
	  dpdk_validate_rte_mbuf (vm, b[3], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	  dpdk_buffer_tx_offload (xd, b[2], mb[2]);
	  dpdk_buffer_tx_offload (xd, b[3], mb[3]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	  if (b[2]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[2]);
	  if (b[3]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[3]);
	}

      mb += 4;
      n_left -= 4;
    }
#elif (CLIB_N_PREFETCHES >= 4)
  while (n_left >= 4)
    {
      vlib_buffer_t *b2, *b3;
      u32 or_flags;

      CLIB_PREFETCH (mb[2], CLIB_CACHE_LINE_BYTES, STORE);
      CLIB_PREFETCH (mb[3], CLIB_CACHE_LINE_BYTES, STORE);
      b2 = vlib_buffer_from_rte_mbuf (mb[2]);
      CLIB_PREFETCH (b2, CLIB_CACHE_LINE_BYTES, LOAD);
      b3 = vlib_buffer_from_rte_mbuf (mb[3]);
      CLIB_PREFETCH (b3, CLIB_CACHE_LINE_BYTES, LOAD);

      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      b[1] = vlib_buffer_from_rte_mbuf (mb[1]);

      or_flags = b[0]->flags | b[1]->flags;
      all_or_flags |= or_flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 1);
	  dpdk_validate_rte_mbuf (vm, b[1], 1);
	}
      else
	{
	  dpdk_validate_rte_mbuf (vm, b[0], 0);
	  dpdk_validate_rte_mbuf (vm, b[1], 0);
	}

      if (PREDICT_FALSE ((xd->flags & DPDK_DEVICE_FLAG_TX_OFFLOAD) &&
			 (or_flags & VNET_BUFFER_F_OFFLOAD)))
	{
	  dpdk_buffer_tx_offload (xd, b[0], mb[0]);
	  dpdk_buffer_tx_offload (xd, b[1], mb[1]);
	}

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	{
	  if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);
	  if (b[1]->flags & VLIB_BUFFER_IS_TRACED)
	    dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[1]);
	}

      mb += 2;
      n_left -= 2;
    }
#endif

  while (n_left > 0)
    {
      b[0] = vlib_buffer_from_rte_mbuf (mb[0]);
      all_or_flags |= b[0]->flags;

      dpdk_validate_rte_mbuf (vm, b[0], 1);
      dpdk_buffer_tx_offload (xd, b[0], mb[0]);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
	if (b[0]->flags & VLIB_BUFFER_IS_TRACED)
	  dpdk_tx_trace_buffer (dm, node, xd, queue_id, b[0]);

      mb++;
      n_left--;
    }

  /* transmit as many packets as possible */
  tx_pkts = n_packets = mb - ptd->mbufs;
  n_left = tx_burst_vector_internal (vm, xd, ptd->mbufs, n_packets);

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_left))
    {
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      tx_pkts -= n_left;
      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
			     VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, thread_index, xd->sw_if_index,
				     n_left);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
			n_left);

      while (n_left--)
	rte_pktmbuf_free (ptd->mbufs[n_packets - n_left - 1]);
    }

  return tx_pkts;
}

static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  rte_eth_stats_reset (xd->port_id);
  rte_eth_xstats_reset (xd->port_id);
}
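
/* Admin up/down callback: start or stop the underlying DPDK device and keep
   DPDK_DEVICE_FLAG_ADMIN_UP in sync with the requested state. */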
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);

  if (xd->flags & DPDK_DEVICE_FLAG_PMD_INIT_FAIL)
    return clib_error_return (0, "Interface not initialized");

  if (is_up)
    {
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
	dpdk_device_start (xd);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      f64 now = vlib_time_now (dm->vlib_main);
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      vnet_hw_interface_set_flags (vnm, xd->hw_if_index, 0);
      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) != 0)
	dpdk_device_stop (xd);
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;
    }

  return /* no error */ 0;
}

/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
			      u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}

static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
			     u32 hw_if_index,
			     struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANS only for IXGBE VF */
  if (xd->pmd != VNET_DPDK_PMD_IXGBEVF)
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->port_id);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->port_id, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->port_id,
				t->sub.eth.outer_vlan_id, is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
			       xd->port_id, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}

static clib_error_t *
dpdk_interface_set_rss_queues (struct vnet_main_t *vnm,
			       struct vnet_hw_interface_t *hi,
			       clib_bitmap_t * bitmap)
{
  dpdk_main_t *xm = &dpdk_main;
  u32 hw_if_index = hi->hw_if_index;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_error_t *err = 0;
  struct rte_eth_rss_reta_entry64 *reta_conf = NULL;
  struct rte_eth_dev_info dev_info;
  u16 *reta = NULL;
  u16 *valid_queue = NULL;
  u16 valid_queue_count = 0;
  u32 i, j;
  int ret;

  rte_eth_dev_info_get (xd->port_id, &dev_info);

  /* parameter check */
  if (clib_bitmap_count_set_bits (bitmap) == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  if (clib_bitmap_count_set_bits (bitmap) > dev_info.nb_rx_queues)
    {
      err = clib_error_return (0, "too many rss queues");
      goto done;
    }

  /* new RETA */
  reta = clib_mem_alloc (dev_info.reta_size * sizeof (*reta));
  if (reta == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }
  clib_memset (reta, 0, dev_info.reta_size * sizeof (*reta));

  valid_queue_count = 0;
  clib_bitmap_foreach (i, bitmap)  {
    if (i >= dev_info.nb_rx_queues)
      {
	err = clib_error_return (0, "illegal queue number");
	goto done;
      }
    reta[valid_queue_count++] = i;
  }

  /* check valid_queue_count not zero, make coverity happy */
  if (valid_queue_count == 0)
    {
      err = clib_error_return (0, "must assign at least one valid rss queue");
      goto done;
    }

  /* fill the rest of the RETA by cycling through the valid queues */
  valid_queue = reta;
  for (i = valid_queue_count, j = 0; i < dev_info.reta_size; i++, j++)
    {
      j = j % valid_queue_count;
      reta[i] = valid_queue[j];
    }

  /* update reta table */
  reta_conf =
    (struct rte_eth_rss_reta_entry64 *) clib_mem_alloc (dev_info.reta_size /
							RTE_RETA_GROUP_SIZE *
							sizeof (*reta_conf));
  if (reta_conf == NULL)
    {
      err = clib_error_return (0, "clib_mem_alloc failed");
      goto done;
    }
  clib_memset (reta_conf, 0,
	       dev_info.reta_size / RTE_RETA_GROUP_SIZE *
	       sizeof (*reta_conf));

  for (i = 0; i < dev_info.reta_size; i++)
    {
      uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
      uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;

      reta_conf[reta_id].mask = UINT64_MAX;
      reta_conf[reta_id].reta[reta_pos] = reta[i];
    }

  ret =
    rte_eth_dev_rss_reta_update (xd->port_id, reta_conf, dev_info.reta_size);
  if (ret)
    {
      err = clib_error_return (0, "rte_eth_dev_rss_reta_update err %d", ret);
      goto done;
    }

done:
  if (reta)
    clib_mem_free (reta);
  if (reta_conf)
    clib_mem_free (reta_conf);

  return err;
}

static clib_error_t *
dpdk_interface_rx_mode_change (vnet_main_t *vnm, u32 hw_if_index, u32 qid,
			       vnet_hw_if_rx_mode mode)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  clib_file_main_t *fm = &file_main;
  dpdk_rx_queue_t *rxq;
  clib_file_t *f;
  int rv = 0;

  if (!(xd->flags & DPDK_DEVICE_FLAG_INT_SUPPORTED))
    return clib_error_return (0, "unsupported op (is the interface up?)");

  if (mode == VNET_HW_IF_RX_MODE_POLLING &&
      !(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
    rv = rte_eth_dev_rx_intr_disable (xd->port_id, qid);
  else if (mode == VNET_HW_IF_RX_MODE_POLLING)
    {
      rxq = vec_elt_at_index (xd->rx_queues, qid);
      f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
      fm->file_update (f, UNIX_FILE_UPDATE_DELETE);
    }
  else if (!(xd->flags & DPDK_DEVICE_FLAG_INT_UNMASKABLE))
    rv = rte_eth_dev_rx_intr_enable (xd->port_id, qid);
  else
    {
      rxq = vec_elt_at_index (xd->rx_queues, qid);
      f = pool_elt_at_index (fm->file_pool, rxq->clib_file_index);
      fm->file_update (f, UNIX_FILE_UPDATE_ADD);
    }

  if (rv)
    return clib_error_return (0, "dpdk_interface_rx_mode_change err %d", rv);

  return 0;
}
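
/* Register the driver with the vnet device-class framework; the TX routine
   itself is attached separately via VNET_DEVICE_CLASS_TX_FN above. */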
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
  .mac_addr_add_del_function = dpdk_add_del_mac_address,
  .format_flow = format_dpdk_flow,
  .flow_ops_function = dpdk_flow_ops_fn,
  .set_rss_queues_function = dpdk_interface_set_rss_queues,
  .rx_mode_change_function = dpdk_interface_rx_mode_change,
};
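
/* Process node used to apply interface up/down flag changes from a
   dedicated process context with a generous stack. */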
#define UP_DOWN_FLAG_EVENT 1

static uword
admin_up_down_process (vlib_main_t * vm,
		       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index, flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);
      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
	{
	case UP_DOWN_FLAG_EVENT:
	  if (vec_len (event_data) == 2)
	    {
	      sw_if_index = event_data[0];
	      flags = event_data[1];
	      error = vnet_sw_interface_set_flags (vnet_get_main (),
						   sw_if_index, flags);
	      clib_error_report (error);
	    }
	  break;
	}

      vec_reset_length (event_data);
      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;			/* or not */
}

VLIB_REGISTER_NODE (admin_up_down_process_node) = {
  .function = admin_up_down_process,
  .type = VLIB_NODE_TYPE_PROCESS,
  .name = "admin-up-down-process",
  .process_log2_n_stack_bytes = 17,	// 2^17 = 128KB
};

/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */