/*
 * Copyright (c) 2015 Cisco and/or its affiliates.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at:
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include <vnet/vnet.h>
#include <vppinfra/vec.h>
#include <vppinfra/format.h>
#include <vlib/unix/cj.h>

#include <vnet/ethernet/ethernet.h>
#include <dpdk/device/dpdk.h>

#include <dpdk/device/dpdk_priv.h>
#include <vppinfra/error.h>
#define foreach_dpdk_tx_func_error                      \
  _(BAD_RETVAL, "DPDK tx function returned an error")   \
  _(RING_FULL, "Tx packet drops (ring full)")           \
  _(PKT_DROP, "Tx packet drops (dpdk tx failure)")      \
  _(REPL_FAIL, "Tx packet drops (replication failure)")

typedef enum
{
#define _(f,s) DPDK_TX_FUNC_ERROR_##f,
  foreach_dpdk_tx_func_error
#undef _
    DPDK_TX_FUNC_N_ERROR,
} dpdk_tx_func_error_t;

static char *dpdk_tx_func_error_strings[] = {
#define _(n,s) s,
  foreach_dpdk_tx_func_error
#undef _
};
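
/*
 * The _() macro trick above expands foreach_dpdk_tx_func_error twice:
 * once into enum members (DPDK_TX_FUNC_ERROR_BAD_RETVAL, ..., plus the
 * DPDK_TX_FUNC_N_ERROR sentinel) and once into the parallel string table
 * handed to the device class below. A counter is then bumped with e.g.:
 *
 *   vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_RING_FULL, 1);
 */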
clib_error_t *
dpdk_set_mac_address (vnet_hw_interface_t * hi, char *address)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_default_mac_addr_set (xd->device_index,
                                            (struct ether_addr *) address);

  if (error)
    return clib_error_return (0, "mac address set failed: %d", error);

  vec_reset_length (xd->default_mac_address);
  /* copy the 6-byte MAC explicitly; sizeof (address) would be the size
     of the pointer, not of the address */
  vec_add (xd->default_mac_address, address, 6);
  return 0;
}
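
/*
 * Minimal caller sketch (hypothetical values): vnet invokes this via the
 * mac_addr_change_function callback with the 6-byte MAC to program, e.g.:
 *
 *   u8 mac[6] = { 0x02, 0xfe, 0x00, 0x00, 0x00, 0x01 };
 *   clib_error_t *err = dpdk_set_mac_address (hi, (char *) mac);
 *   if (err)
 *     clib_error_report (err);
 */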
clib_error_t *
dpdk_set_mc_filter (vnet_hw_interface_t * hi,
                    struct ether_addr mc_addr_vec[], int naddr)
{
  int error;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hi->dev_instance);

  error = rte_eth_dev_set_mc_addr_list (xd->device_index, mc_addr_vec, naddr);

  if (error)
    return clib_error_return (0, "mc addr list failed: %d", error);

  return 0;
}
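
/*
 * Usage sketch, assuming the caller has collected the multicast MACs to
 * accept (the addresses here are made up):
 *
 *   struct ether_addr mc[2];
 *   clib_memcpy (mc[0].addr_bytes, "\x01\x00\x5e\x00\x00\x01", 6);
 *   clib_memcpy (mc[1].addr_bytes, "\x01\x00\x5e\x00\x00\x02", 6);
 *   clib_error_t *err = dpdk_set_mc_filter (hi, mc, 2);
 */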
struct rte_mbuf *
dpdk_replicate_packet_mb (vlib_buffer_t * b)
{
  dpdk_main_t *dm = &dpdk_main;
  struct rte_mbuf **mbufs = 0, *s, *d;
  u8 nb_segs;
  unsigned socket_id = rte_socket_id ();
  int i;

  ASSERT (dm->pktmbuf_pools[socket_id]);
  s = rte_mbuf_from_vlib_buffer (b);
  nb_segs = s->nb_segs;
  vec_validate (mbufs, nb_segs - 1);

  if (rte_pktmbuf_alloc_bulk (dm->pktmbuf_pools[socket_id], mbufs, nb_segs))
    {
      vec_free (mbufs);
      return 0;
    }

  /* copy the first segment's metadata and data (including headroom) */
  d = mbufs[0];
  d->nb_segs = s->nb_segs;
  d->data_len = s->data_len;
  d->pkt_len = s->pkt_len;
  d->data_off = s->data_off;
  clib_memcpy (d->buf_addr, s->buf_addr, RTE_PKTMBUF_HEADROOM + s->data_len);

  for (i = 1; i < nb_segs; i++)
    {
      /* chain the new segment, then copy the matching source segment */
      d->next = mbufs[i];
      d = mbufs[i];
      s = s->next;
      d->data_len = s->data_len;
      clib_memcpy (d->buf_addr, s->buf_addr,
                   RTE_PKTMBUF_HEADROOM + s->data_len);
    }

  d = mbufs[0];
  vec_free (mbufs);
  return d;
}
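
/*
 * Caller sketch: dpdk_buffer_recycle() below treats a NULL return as an
 * allocation failure and keeps the original buffer:
 *
 *   struct rte_mbuf *copy = dpdk_replicate_packet_mb (b);
 *   if (copy == 0)
 *     ... count DPDK_TX_FUNC_ERROR_REPL_FAIL, flag VLIB_BUFFER_REPL_FAIL ...
 *   else
 *     ... transmit the copy instead of the original ...
 */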
static void
dpdk_tx_trace_buffer (dpdk_main_t * dm,
                      vlib_node_runtime_t * node,
                      dpdk_device_t * xd,
                      u16 queue_id, u32 buffer_index, vlib_buffer_t * buffer)
{
  vlib_main_t *vm = vlib_get_main ();
  dpdk_tx_dma_trace_t *t0;
  struct rte_mbuf *mb;

  mb = rte_mbuf_from_vlib_buffer (buffer);

  t0 = vlib_add_trace (vm, node, buffer, sizeof (t0[0]));
  t0->queue_index = queue_id;
  t0->device_index = xd->device_index;
  t0->buffer_index = buffer_index;
  clib_memcpy (&t0->mb, mb, sizeof (t0->mb));
  clib_memcpy (&t0->buffer, buffer,
               sizeof (buffer[0]) - sizeof (buffer->pre_data));
  clib_memcpy (t0->buffer.pre_data, buffer->data + buffer->current_data,
               sizeof (t0->buffer.pre_data));
}
static_always_inline void
dpdk_validate_rte_mbuf (vlib_main_t * vm, vlib_buffer_t * b,
                        int maybe_multiseg)
{
  struct rte_mbuf *mb, *first_mb, *last_mb;

  /* buffer is coming from non-dpdk source so we need to init
     rte_mbuf header */
  if (PREDICT_FALSE ((b->flags & VLIB_BUFFER_EXT_HDR_VALID) == 0))
    {
      vlib_buffer_t *b2 = b;
      last_mb = mb = rte_mbuf_from_vlib_buffer (b2);
      rte_pktmbuf_reset (mb);
      while (maybe_multiseg && (b2->flags & VLIB_BUFFER_NEXT_PRESENT))
        {
          b2 = vlib_get_buffer (vm, b2->next_buffer);
          mb = rte_mbuf_from_vlib_buffer (b2);
          rte_pktmbuf_reset (mb);
        }
    }

  last_mb = first_mb = mb = rte_mbuf_from_vlib_buffer (b);
  first_mb->nb_segs = 1;
  mb->data_len = b->current_length;
  mb->pkt_len = maybe_multiseg ? vlib_buffer_length_in_chain (vm, b) :
    b->current_length;
  mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;

  while (maybe_multiseg && (b->flags & VLIB_BUFFER_NEXT_PRESENT))
    {
      b = vlib_get_buffer (vm, b->next_buffer);
      mb = rte_mbuf_from_vlib_buffer (b);
      last_mb->next = mb;
      last_mb = mb;
      mb->data_len = b->current_length;
      mb->pkt_len = b->current_length;
      mb->data_off = VLIB_BUFFER_PRE_DATA_SIZE + b->current_data;
      first_mb->nb_segs++;
      if (PREDICT_FALSE (b->n_add_refs))
        {
          rte_mbuf_refcnt_update (mb, b->n_add_refs);
          b->n_add_refs = 0;
        }
    }
}
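
/*
 * Worked example (illustrative sizes): a 3000-byte packet chained across
 * two vlib buffers with current_length 2048 and 952 leaves the mbufs as:
 *
 *   head mbuf:   nb_segs = 2, data_len = 2048, pkt_len = 3000
 *   second mbuf: data_len = 952, pkt_len = 952
 *
 * i.e. pkt_len on the head mbuf covers the whole chain while each
 * segment's data_len covers only its own bytes, per DPDK's multi-segment
 * mbuf convention.
 */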
/*
 * This function calls the dpdk's tx_burst function to transmit the packets
 * on the tx_vector. It manages a lock per-device if the device does not
 * support multiple queues. It returns the number of packets untransmitted
 * on the tx_vector. If all packets are transmitted (the normal case), the
 * function returns 0.
 *
 * The function assumes there is at least one packet on the tx_vector.
 */
static_always_inline
  u32 tx_burst_vector_internal (vlib_main_t * vm,
                                dpdk_device_t * xd,
                                struct rte_mbuf **tx_vector)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 n_packets;
  u32 tx_head;
  u32 tx_tail;
  u32 n_retry;
  int rv;
  int queue_id;
  tx_ring_hdr_t *ring;

  ring = vec_header (tx_vector, sizeof (*ring));

  n_packets = ring->tx_head - ring->tx_tail;

  tx_head = ring->tx_head % xd->nb_tx_desc;

  /*
   * Ensure rte_eth_tx_burst is not called with 0 packets, which can lead to
   * unpredictable results.
   */
  ASSERT (n_packets > 0);

  /*
   * Check for tx_vector overflow. If this fails it is a system configuration
   * error. The ring should be sized big enough to handle the largest
   * un-flowed-off burst from a traffic manager. A larger size also helps
   * performance a bit because it decreases the probability of having to
   * issue two tx_burst calls due to a ring wrap.
   */
  ASSERT (n_packets < xd->nb_tx_desc);
  ASSERT (ring->tx_tail == 0);

  n_retry = 16;
  queue_id = vm->thread_index;

  do
    {
      /* start the burst at the tail */
      tx_tail = ring->tx_tail % xd->nb_tx_desc;

      /*
       * This device only supports one TX queue,
       * and we're running multi-threaded...
       */
      if (PREDICT_FALSE (xd->lockp != 0))
        {
          queue_id = queue_id % xd->tx_q_used;
          while (__sync_lock_test_and_set (xd->lockp[queue_id], 1))
            /* zzzz */
            queue_id = (queue_id + 1) % xd->tx_q_used;
        }

      if (PREDICT_FALSE (xd->flags & DPDK_DEVICE_FLAG_HQOS))    /* HQoS ON */
        {
          /* no wrap, transmit in one burst */
          dpdk_device_hqos_per_worker_thread_t *hqos =
            &xd->hqos_wt[vm->thread_index];

          ASSERT (hqos->swq != NULL);

          dpdk_hqos_metadata_set (hqos,
                                  &tx_vector[tx_tail], tx_head - tx_tail);
          rv = rte_ring_sp_enqueue_burst (hqos->swq,
                                          (void **) &tx_vector[tx_tail],
                                          (uint16_t) (tx_head - tx_tail));
        }
      else if (PREDICT_TRUE (xd->flags & DPDK_DEVICE_FLAG_PMD))
        {
          /* no wrap, transmit in one burst */
          rv = rte_eth_tx_burst (xd->device_index,
                                 (uint16_t) queue_id,
                                 &tx_vector[tx_tail],
                                 (uint16_t) (tx_head - tx_tail));
        }
      else
        {
          ASSERT (0);
          rv = 0;
        }

      if (PREDICT_FALSE (xd->lockp != 0))
        *xd->lockp[queue_id] = 0;

      if (PREDICT_FALSE (rv < 0))
        {
          /* emit non-fatal message, bump counter */
          vnet_main_t *vnm = dm->vnet_main;
          vnet_interface_main_t *im = &vnm->interface_main;
          u32 node_index;

          node_index = vec_elt_at_index (im->hw_interfaces,
                                         xd->vlib_hw_if_index)->tx_node_index;

          vlib_error_count (vm, node_index, DPDK_TX_FUNC_ERROR_BAD_RETVAL, 1);
          clib_warning ("rte_eth_tx_burst[%d]: error %d", xd->device_index,
                        rv);
          return n_packets;     /* untransmitted packets */
        }
      ring->tx_tail += (u16) rv;
      n_packets -= (uint16_t) rv;
      n_retry--;                /* bound retries; otherwise the cap is dead */
    }
  while (rv && n_packets && (n_retry > 0));

  return n_packets;
}
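
/*
 * Worked example of the retry loop: with tx_head = 10 and tx_tail = 0,
 * the first rte_eth_tx_burst() call offers 10 mbufs. If the PMD accepts
 * only 7 (rv = 7), tx_tail advances to 7 and the loop retries with the
 * remaining 3, until everything is sent, the PMD accepts nothing
 * (rv = 0), or the retry budget runs out; the return value is whatever
 * is still unsent.
 */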
static_always_inline void
dpdk_prefetch_buffer_by_index (vlib_main_t * vm, u32 bi)
{
  vlib_buffer_t *b;
  struct rte_mbuf *mb;
  b = vlib_get_buffer (vm, bi);
  mb = rte_mbuf_from_vlib_buffer (b);
  CLIB_PREFETCH (mb, CLIB_CACHE_LINE_BYTES, LOAD);
  CLIB_PREFETCH (b, CLIB_CACHE_LINE_BYTES, LOAD);
}
static_always_inline void
dpdk_buffer_recycle (vlib_main_t * vm, vlib_node_runtime_t * node,
                     vlib_buffer_t * b, u32 bi, struct rte_mbuf **mbp)
{
  dpdk_main_t *dm = &dpdk_main;
  u32 my_cpu = vm->thread_index;
  struct rte_mbuf *mb_new;

  if (PREDICT_FALSE (b->flags & VLIB_BUFFER_RECYCLE) == 0)
    return;

  mb_new = dpdk_replicate_packet_mb (b);
  if (PREDICT_FALSE (mb_new == 0))
    {
      vlib_error_count (vm, node->node_index,
                        DPDK_TX_FUNC_ERROR_REPL_FAIL, 1);
      b->flags |= VLIB_BUFFER_REPL_FAIL;
    }
  else
    *mbp = mb_new;

  vec_add1 (dm->recycle[my_cpu], bi);
}
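
/*
 * Recycle semantics: a buffer flagged VLIB_BUFFER_RECYCLE is still owned
 * by its originator (e.g. a replicator), so tx must not hand the
 * underlying mbuf to the PMD. Instead a private copy is transmitted via
 * *mbp and the original buffer index is queued on dm->recycle[my_cpu],
 * which dpdk_interface_tx() frees back after the burst.
 */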
/*
 * Transmits the packets on the frame to the interface associated with the
 * node. It first copies packets on the frame to a tx_vector containing the
 * rte_mbuf pointers. It then passes this vector to tx_burst_vector_internal
 * which calls the dpdk tx_burst function.
 */
static uword
dpdk_interface_tx (vlib_main_t * vm,
                   vlib_node_runtime_t * node, vlib_frame_t * f)
{
  dpdk_main_t *dm = &dpdk_main;
  vnet_interface_output_runtime_t *rd = (void *) node->runtime_data;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, rd->dev_instance);
  u32 n_packets = f->n_vectors;
  u32 n_left;
  u32 *from;
  struct rte_mbuf **tx_vector;
  u16 i;
  u16 nb_tx_desc = xd->nb_tx_desc;
  int queue_id;
  u32 my_cpu;
  u32 tx_pkts = 0;
  tx_ring_hdr_t *ring;
  u32 n_on_ring;

  my_cpu = vm->thread_index;

  queue_id = my_cpu;

  tx_vector = xd->tx_vectors[queue_id];
  ring = vec_header (tx_vector, sizeof (*ring));

  n_on_ring = ring->tx_head - ring->tx_tail;
  from = vlib_frame_vector_args (f);

  ASSERT (n_packets <= VLIB_FRAME_SIZE);
  if (PREDICT_FALSE (n_on_ring + n_packets > nb_tx_desc))
    {
      /*
       * Overflowing the ring should never happen.
       * If it does then drop the whole frame.
       */
      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_RING_FULL,
                        n_packets);
      while (n_packets--)
        {
          u32 bi0 = from[n_packets];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          struct rte_mbuf *mb0 = rte_mbuf_from_vlib_buffer (b0);
          rte_pktmbuf_free (mb0);
        }
      return n_on_ring;
    }
  if (PREDICT_FALSE (dm->tx_pcap_enable))
    {
      n_left = n_packets;
      while (n_left > 0)
        {
          u32 bi0 = from[0];
          vlib_buffer_t *b0 = vlib_get_buffer (vm, bi0);
          if (dm->pcap_sw_if_index == 0 ||
              dm->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_TX])
            pcap_add_buffer (&dm->pcap_main, vm, bi0, 512);
          from++;
          n_left--;
        }
    }
  from = vlib_frame_vector_args (f);
  n_left = n_packets;
  i = ring->tx_head % nb_tx_desc;
  while (n_left >= 8)
    {
      u32 bi0, bi1, bi2, bi3;
      struct rte_mbuf *mb0, *mb1, *mb2, *mb3;
      vlib_buffer_t *b0, *b1, *b2, *b3;
      u32 or_flags;

      dpdk_prefetch_buffer_by_index (vm, from[4]);
      dpdk_prefetch_buffer_by_index (vm, from[5]);
      dpdk_prefetch_buffer_by_index (vm, from[6]);
      dpdk_prefetch_buffer_by_index (vm, from[7]);

      bi0 = from[0];
      bi1 = from[1];
      bi2 = from[2];
      bi3 = from[3];

      b0 = vlib_get_buffer (vm, bi0);
      b1 = vlib_get_buffer (vm, bi1);
      b2 = vlib_get_buffer (vm, bi2);
      b3 = vlib_get_buffer (vm, bi3);

      or_flags = b0->flags | b1->flags | b2->flags | b3->flags;

      if (or_flags & VLIB_BUFFER_NEXT_PRESENT)
        {
          dpdk_validate_rte_mbuf (vm, b0, 1);
          dpdk_validate_rte_mbuf (vm, b1, 1);
          dpdk_validate_rte_mbuf (vm, b2, 1);
          dpdk_validate_rte_mbuf (vm, b3, 1);
        }
      else
        {
          dpdk_validate_rte_mbuf (vm, b0, 0);
          dpdk_validate_rte_mbuf (vm, b1, 0);
          dpdk_validate_rte_mbuf (vm, b2, 0);
          dpdk_validate_rte_mbuf (vm, b3, 0);
        }

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      mb1 = rte_mbuf_from_vlib_buffer (b1);
      mb2 = rte_mbuf_from_vlib_buffer (b2);
      mb3 = rte_mbuf_from_vlib_buffer (b3);

      if (PREDICT_FALSE (or_flags & VLIB_BUFFER_RECYCLE))
        {
          dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);
          dpdk_buffer_recycle (vm, node, b1, bi1, &mb1);
          dpdk_buffer_recycle (vm, node, b2, bi2, &mb2);
          dpdk_buffer_recycle (vm, node, b3, bi3, &mb3);

          /* don't enqueue packets if replication failed as they must
             be sent back to recycle */
          if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb0;
          if (PREDICT_TRUE ((b1->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb1;
          if (PREDICT_TRUE ((b2->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb2;
          if (PREDICT_TRUE ((b3->flags & VLIB_BUFFER_REPL_FAIL) == 0))
            tx_vector[i++ % nb_tx_desc] = mb3;
        }
      else
        {
          if (PREDICT_FALSE (i + 3 >= nb_tx_desc))
            {
              tx_vector[i++ % nb_tx_desc] = mb0;
              tx_vector[i++ % nb_tx_desc] = mb1;
              tx_vector[i++ % nb_tx_desc] = mb2;
              tx_vector[i++ % nb_tx_desc] = mb3;
              i %= nb_tx_desc;
            }
          else
            {
              tx_vector[i++] = mb0;
              tx_vector[i++] = mb1;
              tx_vector[i++] = mb2;
              tx_vector[i++] = mb3;
            }
        }

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        {
          if (b0->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);
          if (b1->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi1, b1);
          if (b2->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi2, b2);
          if (b3->flags & VLIB_BUFFER_IS_TRACED)
            dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi3, b3);
        }

      from += 4;
      n_left -= 4;
    }
  while (n_left > 0)
    {
      u32 bi0;
      struct rte_mbuf *mb0;
      vlib_buffer_t *b0;

      bi0 = from[0];
      b0 = vlib_get_buffer (vm, bi0);

      dpdk_validate_rte_mbuf (vm, b0, 1);

      mb0 = rte_mbuf_from_vlib_buffer (b0);
      dpdk_buffer_recycle (vm, node, b0, bi0, &mb0);

      if (PREDICT_FALSE (node->flags & VLIB_NODE_FLAG_TRACE))
        if (b0->flags & VLIB_BUFFER_IS_TRACED)
          dpdk_tx_trace_buffer (dm, node, xd, queue_id, bi0, b0);

      if (PREDICT_TRUE ((b0->flags & VLIB_BUFFER_REPL_FAIL) == 0))
        {
          tx_vector[i % nb_tx_desc] = mb0;
          i++;
        }
      from++;
      n_left--;
    }
  /* account for additional packets in the ring */
  ring->tx_head += n_packets;
  n_on_ring = ring->tx_head - ring->tx_tail;

  /* transmit as many packets as possible */
  n_packets = tx_burst_vector_internal (vm, xd, tx_vector);

  /*
   * tx_pkts is the number of packets successfully transmitted.
   * This is the number originally on ring minus the number remaining on ring.
   */
  tx_pkts = n_on_ring - n_packets;

  /* If there is no callback then drop any non-transmitted packets */
  if (PREDICT_FALSE (n_packets))
    {
      vlib_simple_counter_main_t *cm;
      vnet_main_t *vnm = vnet_get_main ();

      cm = vec_elt_at_index (vnm->interface_main.sw_if_counters,
                             VNET_INTERFACE_COUNTER_TX_ERROR);

      vlib_increment_simple_counter (cm, my_cpu, xd->vlib_sw_if_index,
                                     n_packets);

      vlib_error_count (vm, node->node_index, DPDK_TX_FUNC_ERROR_PKT_DROP,
                        n_packets);

      while (n_packets--)
        rte_pktmbuf_free (tx_vector[ring->tx_tail + n_packets]);
    }

  /* Reset head/tail to avoid unnecessary wrap */
  ring->tx_head = 0;
  ring->tx_tail = 0;

  /* Recycle replicated buffers */
  if (PREDICT_FALSE (vec_len (dm->recycle[my_cpu])))
    {
      vlib_buffer_free (vm, dm->recycle[my_cpu],
                        vec_len (dm->recycle[my_cpu]));
      _vec_len (dm->recycle[my_cpu]) = 0;
    }

  ASSERT (ring->tx_head >= ring->tx_tail);

  return tx_pkts;
}
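
/*
 * End-to-end flow sketch for one frame: vnet hands this node a frame of
 * up to VLIB_FRAME_SIZE buffer indices; the loops above turn them into
 * rte_mbuf pointers on tx_vector (ring->tx_head advances),
 * tx_burst_vector_internal() drains the ring, and anything the PMD
 * refuses is counted against TX_ERROR and freed.
 */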
static void
dpdk_clear_hw_interface_counters (u32 instance)
{
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, instance);

  /*
   * Set the "last_cleared_stats" to the current stats, so that
   * things appear to clear from a display perspective.
   */
  dpdk_update_counters (xd, vlib_time_now (dm->vlib_main));

  clib_memcpy (&xd->last_cleared_stats, &xd->stats, sizeof (xd->stats));
  clib_memcpy (xd->last_cleared_xstats, xd->xstats,
               vec_len (xd->last_cleared_xstats) *
               sizeof (xd->last_cleared_xstats[0]));
}
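
/*
 * "Clearing" is therefore virtual: a display path is expected to show
 * e.g. stats.ipackets - last_cleared_stats.ipackets, so counters appear
 * to reset without disturbing the device's running totals (a sketch of
 * the convention, not code from this file).
 */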
static clib_error_t *
dpdk_interface_admin_up_down (vnet_main_t * vnm, u32 hw_if_index, u32 flags)
{
  vnet_hw_interface_t *hif = vnet_get_hw_interface (vnm, hw_if_index);
  uword is_up = (flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) != 0;
  dpdk_main_t *dm = &dpdk_main;
  dpdk_device_t *xd = vec_elt_at_index (dm->devices, hif->dev_instance);
  int rv = 0;

  if (is_up)
    {
      f64 now = vlib_time_now (dm->vlib_main);

      if ((xd->flags & DPDK_DEVICE_FLAG_ADMIN_UP) == 0)
        {
          rv = rte_eth_dev_start (xd->device_index);
          if (!rv && xd->default_mac_address)
            rv = rte_eth_dev_default_mac_addr_set (xd->device_index,
                                                   (struct ether_addr *)
                                                   xd->default_mac_address);
        }

      if (xd->flags & DPDK_DEVICE_FLAG_PROMISC)
        rte_eth_promiscuous_enable (xd->device_index);
      else
        rte_eth_promiscuous_disable (xd->device_index);

      rte_eth_allmulticast_enable (xd->device_index);
      xd->flags |= DPDK_DEVICE_FLAG_ADMIN_UP;
      dpdk_update_counters (xd, now);
      dpdk_update_link_state (xd, now);
    }
  else
    {
      xd->flags &= ~DPDK_DEVICE_FLAG_ADMIN_UP;

      rte_eth_allmulticast_disable (xd->device_index);
      vnet_hw_interface_set_flags (vnm, xd->vlib_hw_if_index, 0);
      rte_eth_dev_stop (xd->device_index);

      /* For bonded interface, stop slave links */
      if (xd->pmd == VNET_DPDK_PMD_BOND)
        {
          u8 slink[16];
          int nlink = rte_eth_bond_slaves_get (xd->device_index, slink, 16);
          while (nlink >= 1)
            {
              u8 dpdk_port = slink[--nlink];
              rte_eth_dev_stop (dpdk_port);
            }
        }
    }

  if (rv < 0)
    clib_warning ("rte_eth_dev_%s error: %d", is_up ? "start" : "stop", rv);

  return /* no error */ 0;
}
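
/*
 * This callback is reached via the device class's admin_up_down_function
 * when an operator toggles the interface; a hedged sketch:
 *
 *   vnet_sw_interface_set_flags (vnm, sw_if_index,
 *                                VNET_SW_INTERFACE_FLAG_ADMIN_UP);
 *
 * eventually lands here with VNET_SW_INTERFACE_FLAG_ADMIN_UP set in
 * flags; passing 0 brings the port back down.
 */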
/*
 * Dynamically redirect all pkts from a specific interface
 * to the specified node
 */
static void
dpdk_set_interface_next_node (vnet_main_t * vnm, u32 hw_if_index,
                              u32 node_index)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);

  /* Shut off redirection */
  if (node_index == ~0)
    {
      xd->per_interface_next_index = node_index;
      return;
    }

  xd->per_interface_next_index =
    vlib_node_add_next (xm->vlib_main, dpdk_input_node.index, node_index);
}
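
/*
 * Usage sketch: a feature node (the name here is hypothetical) can
 * capture all rx traffic from this interface with:
 *
 *   vnet_hw_interface_rx_redirect_to_node (vnm, hw_if_index,
 *                                          my_node.index);
 *
 * which dispatches to this callback; calling it again with ~0 restores
 * the normal next-node selection.
 */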
static clib_error_t *
dpdk_subif_add_del_function (vnet_main_t * vnm,
                             u32 hw_if_index,
                             struct vnet_sw_interface_t *st, int is_add)
{
  dpdk_main_t *xm = &dpdk_main;
  vnet_hw_interface_t *hw = vnet_get_hw_interface (vnm, hw_if_index);
  dpdk_device_t *xd = vec_elt_at_index (xm->devices, hw->dev_instance);
  vnet_sw_interface_t *t = (vnet_sw_interface_t *) st;
  int r, vlan_offload;
  u32 prev_subifs = xd->num_subifs;
  clib_error_t *err = 0;

  if (is_add)
    xd->num_subifs++;
  else if (xd->num_subifs)
    xd->num_subifs--;

  if ((xd->flags & DPDK_DEVICE_FLAG_PMD) == 0)
    goto done;

  /* currently we program VLANS only for IXGBE VF and I40E VF */
  if ((xd->pmd != VNET_DPDK_PMD_IXGBEVF) && (xd->pmd != VNET_DPDK_PMD_I40EVF))
    goto done;

  if (t->sub.eth.flags.no_tags == 1)
    goto done;

  if ((t->sub.eth.flags.one_tag != 1) || (t->sub.eth.flags.exact_match != 1))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "unsupported VLAN setup");
      goto done;
    }

  vlan_offload = rte_eth_dev_get_vlan_offload (xd->device_index);
  vlan_offload |= ETH_VLAN_FILTER_OFFLOAD;

  if ((r = rte_eth_dev_set_vlan_offload (xd->device_index, vlan_offload)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_set_vlan_offload[%d]: err %d",
                               xd->device_index, r);
      goto done;
    }

  if ((r =
       rte_eth_dev_vlan_filter (xd->device_index, t->sub.eth.outer_vlan_id,
                                is_add)))
    {
      xd->num_subifs = prev_subifs;
      err = clib_error_return (0, "rte_eth_dev_vlan_filter[%d]: err %d",
                               xd->device_index, r);
      goto done;
    }

done:
  if (xd->num_subifs)
    xd->flags |= DPDK_DEVICE_FLAG_HAVE_SUBIF;
  else
    xd->flags &= ~DPDK_DEVICE_FLAG_HAVE_SUBIF;

  return err;
}
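
/*
 * Example of a setup this path accepts: a one-tag, exact-match VLAN
 * sub-interface (e.g. dot1q 100 exact-match) on an IXGBE VF or I40E VF,
 * which programs VLAN id 100 into the device filter through
 * rte_eth_dev_vlan_filter() above; everything else is either ignored
 * (no_tags) or rejected as "unsupported VLAN setup".
 */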
VNET_DEVICE_CLASS (dpdk_device_class) = {
  .name = "dpdk",
  .tx_function = dpdk_interface_tx,
  .tx_function_n_errors = DPDK_TX_FUNC_N_ERROR,
  .tx_function_error_strings = dpdk_tx_func_error_strings,
  .format_device_name = format_dpdk_device_name,
  .format_device = format_dpdk_device,
  .format_tx_trace = format_dpdk_tx_dma_trace,
  .clear_counters = dpdk_clear_hw_interface_counters,
  .admin_up_down_function = dpdk_interface_admin_up_down,
  .subif_add_del_function = dpdk_subif_add_del_function,
  .rx_redirect_to_node = dpdk_set_interface_next_node,
  .mac_addr_change_function = dpdk_set_mac_address,
};

VLIB_DEVICE_TX_FUNCTION_MULTIARCH (dpdk_device_class, dpdk_interface_tx);
#define UP_DOWN_FLAG_EVENT 1

static uword
admin_up_down_process (vlib_main_t * vm,
                       vlib_node_runtime_t * rt, vlib_frame_t * f)
{
  clib_error_t *error = 0;
  uword event_type;
  uword *event_data = 0;
  u32 sw_if_index;
  u32 flags;

  while (1)
    {
      vlib_process_wait_for_event (vm);
      event_type = vlib_process_get_events (vm, &event_data);

      dpdk_main.admin_up_down_in_progress = 1;

      switch (event_type)
        {
        case UP_DOWN_FLAG_EVENT:
          if (vec_len (event_data) == 2)
            {
              sw_if_index = event_data[0];
              flags = event_data[1];
              error =
                vnet_sw_interface_set_flags (vnet_get_main (), sw_if_index,
                                             flags);
              clib_error_report (error);
            }
          break;
        }

      vec_reset_length (event_data);
      dpdk_main.admin_up_down_in_progress = 0;
    }
  return 0;                     /* or not */
}
VLIB_REGISTER_NODE (admin_up_down_process_node,static) = {
    .function = admin_up_down_process,
    .type = VLIB_NODE_TYPE_PROCESS,
    .name = "admin-up-down-process",
    .process_log2_n_stack_bytes = 17,   /* 2^17 bytes = 128 KiB */
};
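
/*
 * Sketch of how this process can be driven (two event-data words,
 * matching the vec_len (event_data) == 2 check above; exact caller
 * pattern is an assumption):
 *
 *   vlib_process_signal_event (vm, admin_up_down_process_node.index,
 *                              UP_DOWN_FLAG_EVENT, sw_if_index);
 *   vlib_process_signal_event (vm, admin_up_down_process_node.index,
 *                              UP_DOWN_FLAG_EVENT, flags);
 */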
/*
 * fd.io coding-style-patch-verification: ON
 *
 * Local Variables:
 * eval: (c-set-style "gnu")
 * End:
 */