2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * ethernet_node.c: ethernet packet processing
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vnet/pg/pg.h>
42 #include <vnet/ethernet/ethernet.h>
43 #include <vnet/ethernet/p2p_ethernet.h>
44 #include <vnet/devices/pipe/pipe.h>
45 #include <vppinfra/sparse_vec.h>
46 #include <vnet/l2/l2_bvi.h>
47 #include <vnet/classify/trace_classify.h>
/* Next nodes reachable directly from the ethernet-input node; the _()
   macro expands this list into the ETHERNET_INPUT_NEXT_* enum below. */
49 #define foreach_ethernet_input_next \
50 _ (PUNT, "error-punt") \
51 _ (DROP, "error-drop") \
52 _ (LLC, "llc-input") \
53 _ (IP4_INPUT, "ip4-input") \
54 _ (IP4_INPUT_NCS, "ip4-input-no-checksum")
/* Enum of next-node indices; ETHERNET_INPUT_N_NEXT is the count.
   NOTE(review): the enum opener (typedef enum {) is elided in this view
   of the file -- confirm against the full source. */
58 #define _(s,n) ETHERNET_INPUT_NEXT_##s,
59 foreach_ethernet_input_next
61 ETHERNET_INPUT_N_NEXT,
62 } ethernet_input_next_t;
68 ethernet_input_frame_t frame_data;
69 } ethernet_input_trace_t;
/* Trace formatter for ethernet-input packets: prints the frame flags,
   the hw/sw if-index pair when the frame carries a single sw_if_index,
   then the captured ethernet header bytes.  Standard vlib format
   callback signature: (vm, node, trace-record) pulled from va_list. */
72 format_ethernet_input_trace (u8 * s, va_list * va)
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
76 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
77 u32 indent = format_get_indent (s);
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
87 s = format (s, "%U", format_ethernet_header, t->packet_data);
92 extern vlib_node_registration_t ethernet_input_node;
/* Input-node variant: plain ethernet, bare-ethertype (packet arrived
   from LLC/SNAP processing, no full MAC header at current_data), or a
   not-L2 path.  Drives how much header parsing parse_header() does. */
96 ETHERNET_INPUT_VARIANT_ETHERNET,
97 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
98 ETHERNET_INPUT_VARIANT_NOT_L2,
99 } ethernet_input_variant_t;
102 // Parse the ethernet header to extract vlan tags and innermost ethertype
/* Records l2_hdr_offset in buffer metadata, advances past the ethernet
   (or bare-ethertype) header, then peels up to two VLAN tags.  Outputs:
   innermost ethertype in host order (*type), outer/inner VLAN ids, and
   SUBINT_CONFIG_* match flags for the subinterface lookup; also stores
   the parsed vlan count into the buffer.
   NOTE(review): several lines (blank lines / guards) are elided in this
   view of the file -- verify control flow against the full source. */
103 static_always_inline void
104 parse_header (ethernet_input_variant_t variant,
108 u16 * outer_id, u16 * inner_id, u32 * match_flags)
112 if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
113 || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
115 ethernet_header_t *e0;
117 e0 = (void *) (b0->data + b0->current_data);
119 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
120 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
122 vlib_buffer_advance (b0, sizeof (e0[0]));
124 *type = clib_net_to_host_u16 (e0->type);
126 else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
128 // here when prior node was LLC/SNAP processing
131 e0 = (void *) (b0->data + b0->current_data);
133 vlib_buffer_advance (b0, sizeof (e0[0]));
135 *type = clib_net_to_host_u16 (e0[0]);
138 // save for distinguishing between dot1q and dot1ad later
141 // default the tags to 0 (used if there is no corresponding tag)
145 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
148 // check for vlan encaps
149 if (ethernet_frame_is_tagged (*type))
151 ethernet_vlan_header_t *h0;
154 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
156 h0 = (void *) (b0->data + b0->current_data);
158 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
160 *outer_id = tag & 0xfff;
162 *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;
164 *type = clib_net_to_host_u16 (h0->type);
166 vlib_buffer_advance (b0, sizeof (h0[0]));
169 if (*type == ETHERNET_TYPE_VLAN)
171 // Double tagged packet
172 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
174 h0 = (void *) (b0->data + b0->current_data);
176 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
178 *inner_id = tag & 0xfff;
180 *type = clib_net_to_host_u16 (h0->type);
182 vlib_buffer_advance (b0, sizeof (h0[0]));
184 if (*type == ETHERNET_TYPE_VLAN)
186 // More than double tagged packet
187 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
189 vlib_buffer_advance (b0, sizeof (h0[0]));
190 vlan_count = 3; // "unknown" number, aka, 3-or-more
194 ethernet_buffer_set_vlan_count (b0, vlan_count);
197 // Determine the subinterface for this packet, given the result of the
198 // vlan table lookups and vlan header parsing. Check the most specific
/* Resolves *new_sw_if_index from the hw interface + vlan/qinq lookup
   results via eth_identify_subint(); for L3 subinterfaces additionally
   applies the my-mac unicast filter, and finally downgrades *error0 to
   ETHERNET_ERROR_DOWN when no subinterface matched (~0 index). */
200 static_always_inline void
201 identify_subint (vnet_hw_interface_t * hi,
204 main_intf_t * main_intf,
205 vlan_intf_t * vlan_intf,
206 qinq_intf_t * qinq_intf,
207 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
211 matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
212 qinq_intf, new_sw_if_index, error0, is_l2);
217 // Perform L3 my-mac filter
218 // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
219 // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
222 ethernet_header_t *e0;
223 e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
225 if (!(ethernet_address_cast (e0->dst_address)))
227 if (!ethernet_mac_address_equal ((u8 *) e0, hi->hw_address))
229 *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
234 // Check for down subinterface
235 *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
/* Picks *next0 for a parsed packet: drop on prior error; l2_next for L2
   interfaces (rewinding the buffer so the L2 header is preserved);
   cached next indices for IP4/IP6/MPLS; redirect node when L3 redirect
   is configured; otherwise the sparse per-ethertype table, with an LLC
   fallback for <0x600 "length" ethertypes.  Also sets l3_hdr_offset. */
239 static_always_inline void
240 determine_next_node (ethernet_main_t * em,
241 ethernet_input_variant_t variant,
243 u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
245 vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
246 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
248 if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
250 // some error occurred
251 *next0 = ETHERNET_INPUT_NEXT_DROP;
255 // record the L2 len and reset the buffer so the L2 header is preserved
256 u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
257 vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
258 *next0 = em->l2_next;
259 ASSERT (vnet_buffer (b0)->l2.l2_len ==
260 ethernet_buffer_header_size (b0));
261 vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));
263 // check for common IP/MPLS ethertypes
265 else if (type0 == ETHERNET_TYPE_IP4)
267 *next0 = em->l3_next.input_next_ip4;
269 else if (type0 == ETHERNET_TYPE_IP6)
271 *next0 = em->l3_next.input_next_ip6;
273 else if (type0 == ETHERNET_TYPE_MPLS)
275 *next0 = em->l3_next.input_next_mpls;
278 else if (em->redirect_l3)
280 // L3 Redirect is on, the cached common next nodes will be
281 // pointing to the redirect node, catch the uncommon types here
282 *next0 = em->redirect_l3_next;
286 // uncommon ethertype, check table
288 i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
289 *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
292 SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
294 // The table is not populated with LLC values, so check that now.
295 // If variant is variant_ethernet then we came from LLC processing. Don't
296 // go back there; drop instead by keeping the drop/bad table result.
297 if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
299 *next0 = ETHERNET_INPUT_NEXT_LLC;
305 /* following vector code relies on following assumptions */
/* The SIMD path in eth_input_adv_and_flags_x4() loads/stores the first
   8 bytes of vlib_buffer_t (current_data, current_length, flags) and
   writes l2_hdr_offset/l3_hdr_offset as one 32-bit store, so these
   field offsets must not change. */
306 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
307 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
308 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
309 STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
310 STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
311 "l3_hdr_offset must follow l2_hdr_offset");
/* Sets l2/l3 header offsets and offset-valid flags on 4 buffers and,
   for L3 interfaces, advances current_data past the ethernet header.
   Has an AVX2 path operating on packed buffer metadata and a scalar
   fallback.  NOTE(review): interior lines are elided in this view. */
313 static_always_inline void
314 eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
316 i16 adv = sizeof (ethernet_header_t);
317 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
318 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
320 #ifdef CLIB_HAVE_VEC256
321 /* to reduce number of small loads/stores we are loading first 64 bits
322 of each buffer metadata into 256-bit register so we can advance
323 current_data, current_length and flags.
324 Observed saving of this code is ~2 clocks per packet */
327 /* vector of signed 16 bit integers used in signed vector add operation
328 to advance current_data and current_length */
329 u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
331 adv, -adv, 0, 0, adv, -adv, 0, 0,
332 adv, -adv, 0, 0, adv, -adv, 0, 0
335 /* load 4 x 64 bits */
336 r = u64x4_gather (b[0], b[1], b[2], b[3]);
342 radv = (u64x4) ((i16x16) r + adv4);
344 /* write 4 x 64 bits */
345 u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
347 /* use old current_data as l2_hdr_offset and new current_data as
   l3_hdr_offset */
349 r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
351 /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
352 u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
353 u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
354 u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
355 u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
/* Sanity checks: on the L3 path current_data was advanced past the
   ethernet header, on the L2 path it still points at it. */
359 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
360 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
361 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
362 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
364 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
365 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
366 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
367 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
371 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
372 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
373 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
374 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);
376 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
377 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
378 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
379 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
/* Scalar fallback: same metadata updates without SIMD. */
383 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
384 vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
385 vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
386 vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
387 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
388 vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
389 vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
390 vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
394 vlib_buffer_advance (b[0], adv);
395 vlib_buffer_advance (b[1], adv);
396 vlib_buffer_advance (b[2], adv);
397 vlib_buffer_advance (b[3], adv);
400 b[0]->flags |= flags;
401 b[1]->flags |= flags;
402 b[2]->flags |= flags;
403 b[3]->flags |= flags;
408 vnet_buffer (b[0])->l2.l2_len = adv;
409 vnet_buffer (b[1])->l2.l2_len = adv;
410 vnet_buffer (b[2])->l2.l2_len = adv;
411 vnet_buffer (b[3])->l2.l2_len = adv;
/* Single-buffer variant of eth_input_adv_and_flags_x4(): sets the l2/l3
   header offsets and offset-valid flags and advances past the ethernet
   header for L3 interfaces.  NOTE(review): the is_l3 guards around the
   advance and l2_len lines appear elided in this view -- confirm. */
415 static_always_inline void
416 eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
418 i16 adv = sizeof (ethernet_header_t);
419 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
420 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
422 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
423 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
426 vlib_buffer_advance (b[0], adv);
427 b[0]->flags |= flags;
429 vnet_buffer (b[0])->l2.l2_len = adv;
/* Extracts per-packet parse inputs for buffer b[offset]: the (network
   order) ethertype into etype[], the 8 bytes following the ethernet
   header (potential VLAN tags) into tags[], and -- when dmac_check is
   set -- the first 8 bytes of the header (dst MAC + 2 src bytes, later
   masked with DMAC_MASK) into dmacs[].  SSE path loads type+tags in one
   unaligned 128-bit load. */
433 static_always_inline void
434 eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
435 u64 * dmacs, int offset, int dmac_check)
437 ethernet_header_t *e;
438 e = vlib_buffer_get_current (b[offset]);
439 #ifdef CLIB_HAVE_VEC128
440 u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
441 etype[offset] = ((u16x8) r)[3];
444 etype[offset] = e->type;
445 tags[offset] = *(u64 *) (e + 1);
449 dmacs[offset] = *(u64 *) e;
/* Maps a host-order ethertype to a next-node index: values < 0x600 are
   802.3 lengths and go to llc-input, everything else goes through the
   sparse per-ethertype table.
   NOTE(review): "ðernet_main" below looks like '&e' mis-encoded by
   extraction ("&ethernet_main") -- verify against upstream. */
452 static_always_inline u16
453 eth_input_next_by_type (u16 etype)
455 ethernet_main_t *em = ðernet_main;
457 return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
458 vec_elt (em->l3_next.input_next_by_type,
459 sparse_vec_index (em->l3_next.input_next_by_type, etype));
/* Tail of the per-tag lookup cache struct (leading fields elided in
   this view); n_packets/n_bytes accumulate rx counters for the cached
   sw_if_index until flushed by eth_input_update_if_counters(). */
469 u64 n_packets, n_bytes;
470 } eth_input_tag_lookup_t;
/* Flushes the rx packet/byte counters accumulated in a tag-lookup cache
   entry into the combined sw-interface counters; no-op when nothing was
   counted or no interface was resolved (~0). */
472 static_always_inline void
473 eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
474 eth_input_tag_lookup_t * l)
476 if (l->n_packets == 0 || l->sw_if_index == ~0)
480 l->n_bytes += l->n_packets * l->len;
482 vlib_increment_combined_counter
483 (vnm->interface_main.combined_sw_if_counters +
484 VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
485 l->n_packets, l->n_bytes);
/* Slow-path handler for one tagged packet (dot1q or dot1ad).  Caches
   the last tag lookup in *l and redoes the vlan/qinq subinterface
   resolution only when the masked tag differs from the cached one
   (mask covers 1 or 2 tags depending on n_tags).  Sets the packet's
   next node, error, rx sw_if_index, buffer advance and l2/l3 metadata,
   and accumulates per-interface counters in *l.
   NOTE(review): interior lines are elided in this view, and
   "ðernet_main" below looks like a mis-encoding of "&ethernet_main". */
488 static_always_inline void
489 eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
490 vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
491 u64 tag, u16 * next, vlib_buffer_t * b,
492 eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
493 int main_is_l3, int check_dmac)
495 ethernet_main_t *em = ðernet_main;
/* Cache miss: the tag (under the cached mask) changed, redo lookup. */
497 if ((tag ^ l->tag) & l->mask)
499 main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
502 vlan_table_t *vlan_table;
503 qinq_table_t *qinq_table;
504 u16 *t = (u16 *) & tag;
505 u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
506 u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
507 u32 matched, is_l2, new_sw_if_index;
509 vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
510 mif->dot1ad_vlans : mif->dot1q_vlans);
511 vif = &vlan_table->vlans[vlan1];
512 qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
513 qif = &qinq_table->vlans[vlan2];
514 l->err = ETHERNET_ERROR_NONE;
515 l->type = clib_net_to_host_u16 (t[1]);
/* Inner tag present: ethertype is after the second tag, match 2 tags. */
517 if (l->type == ETHERNET_TYPE_VLAN)
519 l->type = clib_net_to_host_u16 (t[3]);
521 matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
522 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
523 qif, &new_sw_if_index, &l->err,
531 new_sw_if_index = hi->sw_if_index;
532 l->err = ETHERNET_ERROR_NONE;
534 is_l2 = main_is_l3 == 0;
537 matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
538 SUBINT_CONFIG_MATCH_1_TAG, mif,
539 vif, qif, &new_sw_if_index,
/* Interface changed: flush counters accumulated for the previous one. */
543 if (l->sw_if_index != new_sw_if_index)
545 eth_input_update_if_counters (vm, vnm, l);
548 l->sw_if_index = new_sw_if_index;
/* Cache mask: compare the full 8 tag bytes for double-tagged flows,
   only the first 4 for single-tagged flows. */
551 l->mask = (l->n_tags == 2) ?
552 clib_net_to_host_u64 (0xffffffffffffffff) :
553 clib_net_to_host_u64 (0xffffffff00000000);
555 if (matched && l->sw_if_index == ~0)
556 l->err = ETHERNET_ERROR_DOWN;
558 l->len = sizeof (ethernet_header_t) +
559 l->n_tags * sizeof (ethernet_vlan_header_t);
561 l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
562 l->n_tags * sizeof (ethernet_vlan_header_t);
564 l->adv = is_l2 ? 0 : l->len;
566 if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
567 l->next = ETHERNET_INPUT_NEXT_DROP;
569 l->next = em->l2_next;
570 else if (l->type == ETHERNET_TYPE_IP4)
571 l->next = em->l3_next.input_next_ip4;
572 else if (l->type == ETHERNET_TYPE_IP6)
573 l->next = em->l3_next.input_next_ip6;
574 else if (l->type == ETHERNET_TYPE_MPLS)
575 l->next = em->l3_next.input_next_mpls;
576 else if (em->redirect_l3)
577 l->next = em->redirect_l3_next;
580 l->next = eth_input_next_by_type (l->type);
581 if (l->next == ETHERNET_INPUT_NEXT_PUNT)
582 l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
/* L3 destination (positive advance) with a bad dmac: my-mac filter. */
586 if (check_dmac && l->adv > 0 && dmac_bad)
588 l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
589 next[0] = ETHERNET_INPUT_NEXT_PUNT;
594 vlib_buffer_advance (b, l->adv);
595 vnet_buffer (b)->l2.l2_len = l->len;
596 vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;
598 if (l->err == ETHERNET_ERROR_NONE)
600 vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
601 ethernet_buffer_set_vlan_count (b, l->n_tags);
604 b->error = node->errors[l->err];
606 /* update counters */
608 l->n_bytes += vlib_buffer_length_in_chain (vm, b);
/* DMAC_MASK selects the 6 destination-MAC bytes of an 8-byte header
   load (network order); DMAC_IGBIT is the multicast (I/G) bit. */
611 #define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
612 #define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
614 #ifdef CLIB_HAVE_VEC256
/* Check 4 dmacs at once: nonzero result bytes mark unicast dmacs (I/G
   bit clear) that do not match the interface's primary MAC hwaddr. */
615 static_always_inline u32
616 is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
618 u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
619 r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
620 return u8x32_msb_mask ((u8x32) (r0));
/* A dmac is "bad" when it is unicast (I/G bit clear) and differs from
   the interface's (pre-masked) primary MAC; multicast/broadcast always
   passes this filter. */
623 static_always_inline u8
624 is_dmac_bad (u64 dmac, u64 hwaddr)
626 u64 r0 = dmac & DMAC_MASK;
627 return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
/* Secondary-MAC variant: only an exact mismatch counts as bad (no I/G
   exemption needed -- multicast was already cleared by the primary
   check). */
631 static_always_inline u8
632 is_sec_dmac_bad (u64 dmac, u64 hwaddr)
634 return ((dmac & DMAC_MASK) != hwaddr);
637 #ifdef CLIB_HAVE_VEC256
/* AVX2 variant of is_sec_dmac_bad for 4 packets; returns a per-byte
   mismatch mask. */
638 static_always_inline u32
639 is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
641 u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
642 r0 = (r0 != u64x4_splat (hwaddr));
643 return u8x32_msb_mask ((u8x32) (r0));
/* AND-accumulates one secondary-MAC check into the bad flag: a packet
   stays bad only if it matches none of the addresses tried so far. */
647 static_always_inline u8
648 eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
650 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
/* 4-packet secondary-MAC check; AND-accumulates into the per-packet bad
   flags and returns the combined 4 flags as one u32 (0 == all 4 now
   match some address). */
654 static_always_inline u32
655 eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
657 #ifdef CLIB_HAVE_VEC256
658 *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
660 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
661 dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
662 dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
663 dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
665 return *(u32 *) dmac_bad;
/* Populates dmacs_bad[i] for every packet in the frame: first against
   the interface's primary MAC (8 at a time on AVX2, 4 at a time
   scalar), then -- when have_sec_dmac is set and anything failed --
   retries the still-bad packets against each secondary address until
   every packet matches or the address list is exhausted.
   NOTE(review): loop headers/braces are elided in this view. */
668 static_always_inline void
669 eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
670 u64 * dmacs, u8 * dmacs_bad,
671 u32 n_packets, ethernet_interface_t * ei,
674 u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
676 u8 *dmac_bad = dmacs_bad;
678 i32 n_left = n_packets;
680 #ifdef CLIB_HAVE_VEC256
683 bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
684 bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);
694 bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
695 bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
696 bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
697 bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);
706 if (have_sec_dmac && bad)
710 vec_foreach (addr, ei->secondary_addrs)
712 u64 hwaddr = ((u64 *) addr)[0] & DMAC_MASK;
713 i32 n_left = n_packets;
715 u8 *dmac_bad = dmacs_bad;
724 /* skip any that have already matched */
733 n_bad = clib_min (4, n_left);
735 /* If >= 4 left, compare 4 together */
738 bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
743 /* handle individually */
746 bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
757 if (!bad) /* can stop looping if everything matched */
763 /* process frame of buffers, store ethertype into array and update
764 buffer metadata fields depending on interface being l2 or l3 assuming that
765 packets are untagged. For tagged packets those fields are updated later.
766 Optionally store Destination MAC address and tag data into arrays
767 for further processing */
769 STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
770 "VLIB_FRAME_SIZE must be power of 8");
/* Main batched worker for a single-interface frame: extracts ethertype,
   tags and (optionally) dmacs for every packet with prefetched 4-wide
   loops, runs the dmac filter, then classifies packets into a fastpath
   (untagged IP4/IP6/MPLS on L3, any untagged on L2 -- next chosen
   inline, AVX2-accelerated) and a slowpath index list (VLAN/dot1ad
   tagged or unknown ethertype) resolved per packet via
   eth_input_tag_lookup()/eth_input_next_by_type(), and finally enqueues
   the whole frame.  NOTE(review): many interior lines (loop headers,
   braces) are elided in this view; "ðernet_main" looks like a
   mis-encoding of "&ethernet_main". */
771 static_always_inline void
772 eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
773 vnet_hw_interface_t * hi,
774 u32 * buffer_indices, u32 n_packets, int main_is_l3,
775 int ip4_cksum_ok, int dmac_check)
777 ethernet_main_t *em = ðernet_main;
778 u16 nexts[VLIB_FRAME_SIZE], *next;
779 u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
780 u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
781 u8 dmacs_bad[VLIB_FRAME_SIZE];
782 u64 tags[VLIB_FRAME_SIZE], *tag = tags;
783 u16 slowpath_indices[VLIB_FRAME_SIZE];
785 u16 next_ip4, next_ip6, next_mpls, next_l2;
786 u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
787 u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
788 u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
789 u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
790 u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
791 i32 n_left = n_packets;
792 vlib_buffer_t *b[20];
794 ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);
796 from = buffer_indices;
/* Main 4-wide loop with two-stage prefetch: ph prefetches headers 16
   ahead, pd prefetches packet data 8 ahead. */
800 vlib_buffer_t **ph = b + 16, **pd = b + 8;
801 vlib_get_buffers (vm, from, b, 4);
802 vlib_get_buffers (vm, from + 8, pd, 4);
803 vlib_get_buffers (vm, from + 16, ph, 4);
805 vlib_prefetch_buffer_header (ph[0], LOAD);
806 vlib_prefetch_buffer_data (pd[0], LOAD);
807 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
809 vlib_prefetch_buffer_header (ph[1], LOAD);
810 vlib_prefetch_buffer_data (pd[1], LOAD);
811 eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
813 vlib_prefetch_buffer_header (ph[2], LOAD);
814 vlib_prefetch_buffer_data (pd[2], LOAD);
815 eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
817 vlib_prefetch_buffer_header (ph[3], LOAD);
818 vlib_prefetch_buffer_data (pd[3], LOAD);
819 eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
821 eth_input_adv_and_flags_x4 (b, main_is_l3);
/* 4-wide tail without prefetch. */
832 vlib_get_buffers (vm, from, b, 4);
833 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
834 eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
835 eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
836 eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
837 eth_input_adv_and_flags_x4 (b, main_is_l3);
/* Scalar tail for the last 0-3 packets. */
848 vlib_get_buffers (vm, from, b, 1);
849 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
850 eth_input_adv_and_flags_x1 (b, main_is_l3);
862 if (vec_len (ei->secondary_addrs))
863 eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
864 ei, 1 /* have_sec_dmac */ );
866 eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
867 ei, 0 /* have_sec_dmac */ );
870 next_ip4 = em->l3_next.input_next_ip4;
871 next_ip6 = em->l3_next.input_next_ip6;
872 next_mpls = em->l3_next.input_next_mpls;
873 next_l2 = em->l2_next;
/* When the driver verified the ip4 checksum, skip re-verification. */
875 if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
876 next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
878 #ifdef CLIB_HAVE_VEC256
879 u16x16 et16_ip4 = u16x16_splat (et_ip4);
880 u16x16 et16_ip6 = u16x16_splat (et_ip6);
881 u16x16 et16_mpls = u16x16_splat (et_mpls);
882 u16x16 et16_vlan = u16x16_splat (et_vlan);
883 u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
884 u16x16 next16_ip4 = u16x16_splat (next_ip4);
885 u16x16 next16_ip6 = u16x16_splat (next_ip6);
886 u16x16 next16_mpls = u16x16_splat (next_mpls);
887 u16x16 next16_l2 = u16x16_splat (next_l2);
889 u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
898 /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
899 are considered as slowpath, in l2 mode all untagged packets are
900 considered as fastpath */
903 #ifdef CLIB_HAVE_VEC256
907 u16x16 e16 = u16x16_load_unaligned (etype);
910 r += (e16 == et16_ip4) & next16_ip4;
911 r += (e16 == et16_ip6) & next16_ip6;
912 r += (e16 == et16_mpls) & next16_mpls;
915 r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
916 u16x16_store_unaligned (r, next);
/* Any zero next index means a slowpath packet in this group of 16. */
918 if (!u16x16_is_all_zero (r == zero))
920 if (u16x16_is_all_zero (r))
922 u16x16_store_unaligned (u16x16_splat (i) + stairs,
923 slowpath_indices + n_slowpath);
928 for (int j = 0; j < 16; j++)
930 slowpath_indices[n_slowpath++] = i + j;
/* Scalar classification fallback. */
941 if (main_is_l3 && etype[0] == et_ip4)
943 else if (main_is_l3 && etype[0] == et_ip6)
945 else if (main_is_l3 && etype[0] == et_mpls)
947 else if (main_is_l3 == 0 &&
948 etype[0] != et_vlan && etype[0] != et_dot1ad)
953 slowpath_indices[n_slowpath++] = i;
/* Slowpath: tagged packets and unknown ethertypes, resolved per index
   with small single-entry caches for dot1q/dot1ad lookups and the last
   unknown ethertype. */
964 vnet_main_t *vnm = vnet_get_main ();
966 u16 *si = slowpath_indices;
967 u32 last_unknown_etype = ~0;
968 u32 last_unknown_next = ~0;
969 eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
971 .tag = tags[si[0]] ^ -1LL,
975 clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));
980 u16 etype = etypes[i];
982 if (etype == et_vlan)
984 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
985 eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
986 &dot1q_lookup, dmacs_bad[i], 0,
987 main_is_l3, dmac_check);
990 else if (etype == et_dot1ad)
992 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
993 eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
994 &dot1ad_lookup, dmacs_bad[i], 1,
995 main_is_l3, dmac_check);
999 /* untagged packet with not well known ethertype */
1000 if (last_unknown_etype != etype)
1002 last_unknown_etype = etype;
1003 etype = clib_host_to_net_u16 (etype);
1004 last_unknown_next = eth_input_next_by_type (etype);
1006 if (dmac_check && main_is_l3 && dmacs_bad[i])
1008 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
1009 b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
1010 nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
1013 nexts[i] = last_unknown_next;
/* Flush any counters accumulated in the tag-lookup caches. */
1021 eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
1022 eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
1025 vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
/* Entry for a frame known to come from a single interface: inspects the
   untagged-subinterface config and promisc flag to decide whether the
   main interface is L3 and whether a DMAC check is required, then
   dispatches to eth_input_process_frame() with the right flags.
   NOTE(review): the if/else structure is partly elided in this view;
   "ðernet_main" looks like a mis-encoding of "&ethernet_main". */
1028 static_always_inline void
1029 eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
1030 vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
1033 ethernet_main_t *em = ðernet_main;
1034 ethernet_interface_t *ei;
1035 ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
1036 main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1037 subint_config_t *subint0 = &intf0->untagged_subint;
1039 int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
1040 int promisc = (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;
1044 /* main interface is L3, we don't expect tagged packets and interface
1045 is not in promisc mode, so we don't need to check DMAC */
1049 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1052 /* subinterfaces and promisc mode so DMAC check is needed */
1053 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1059 /* untagged packets are treated as L2 */
1061 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
/* Per-frame tracing helper: when node tracing is on, copies the packet
   header and frame metadata into a trace record for each traced buffer;
   when rx pcap capture is enabled, appends matching packets (optionally
   filtered through the classifier table and/or restricted to one
   sw_if_index) to the pcap capture buffer.
   NOTE(review): loop headers/braces are elided in this view. */
1067 static_always_inline void
1068 ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
1069 vlib_frame_t * from_frame)
1072 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1074 from = vlib_frame_vector_args (from_frame);
1075 n_left = from_frame->n_vectors;
1079 ethernet_input_trace_t *t0;
1080 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
1082 if (b0->flags & VLIB_BUFFER_IS_TRACED)
1084 t0 = vlib_add_trace (vm, node, b0,
1085 sizeof (ethernet_input_trace_t));
1086 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
1087 sizeof (t0->packet_data));
1088 t0->frame_flags = from_frame->flags;
1089 clib_memcpy_fast (&t0->frame_data,
1090 vlib_frame_scalar_args (from_frame),
1091 sizeof (ethernet_input_frame_t));
1098 /* rx pcap capture if enabled */
1099 if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
1102 vnet_pcap_t *pp = &vlib_global_main.pcap;
1104 from = vlib_frame_vector_args (from_frame);
1105 n_left = from_frame->n_vectors;
1108 int classify_filter_result;
1113 b0 = vlib_get_buffer (vm, bi0);
1114 if (pp->filter_classify_table_index != ~0)
1116 classify_filter_result =
1117 vnet_is_packet_traced_inline
1118 (b0, pp->filter_classify_table_index, 0 /* full classify */ );
1119 if (classify_filter_result)
1120 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1121 pp->max_bytes_per_pkt);
/* No classify filter: capture all, or only the configured interface. */
1125 if (pp->pcap_sw_if_index == 0 ||
1126 pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
1128 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1129 pp->max_bytes_per_pkt);
1135 static_always_inline void
1136 ethernet_input_inline (vlib_main_t * vm,
1137 vlib_node_runtime_t * node,
1138 u32 * from, u32 n_packets,
1139 ethernet_input_variant_t variant)
1141 vnet_main_t *vnm = vnet_get_main ();
1142 ethernet_main_t *em = ðernet_main;
1143 vlib_node_runtime_t *error_node;
1144 u32 n_left_from, next_index, *to_next;
1145 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
1146 u32 thread_index = vm->thread_index;
1147 u32 cached_sw_if_index = ~0;
1148 u32 cached_is_l2 = 0; /* shut up gcc */
1149 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
1150 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
1151 vlib_buffer_t **b = bufs;
1153 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
1154 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
1158 n_left_from = n_packets;
1160 next_index = node->cached_next_index;
1161 stats_sw_if_index = node->runtime_data[0];
1162 stats_n_packets = stats_n_bytes = 0;
1163 vlib_get_buffers (vm, from, bufs, n_left_from);
1165 while (n_left_from > 0)
1169 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1171 while (n_left_from >= 4 && n_left_to_next >= 2)
1174 vlib_buffer_t *b0, *b1;
1175 u8 next0, next1, error0, error1;
1176 u16 type0, orig_type0, type1, orig_type1;
1177 u16 outer_id0, inner_id0, outer_id1, inner_id1;
1178 u32 match_flags0, match_flags1;
1179 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
1180 new_sw_if_index1, len1;
1181 vnet_hw_interface_t *hi0, *hi1;
1182 main_intf_t *main_intf0, *main_intf1;
1183 vlan_intf_t *vlan_intf0, *vlan_intf1;
1184 qinq_intf_t *qinq_intf0, *qinq_intf1;
1186 ethernet_header_t *e0, *e1;
1188 /* Prefetch next iteration. */
1190 vlib_prefetch_buffer_header (b[2], STORE);
1191 vlib_prefetch_buffer_header (b[3], STORE);
1193 CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
1194 CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
1203 n_left_to_next -= 2;
1210 error0 = error1 = ETHERNET_ERROR_NONE;
1211 e0 = vlib_buffer_get_current (b0);
1212 type0 = clib_net_to_host_u16 (e0->type);
1213 e1 = vlib_buffer_get_current (b1);
1214 type1 = clib_net_to_host_u16 (e1->type);
1216 /* Set the L2 header offset for all packets */
1217 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1218 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
1219 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1220 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1222 /* Speed-path for the untagged case */
1223 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1224 && !ethernet_frame_is_any_tagged_x2 (type0,
1228 subint_config_t *subint0;
1229 u32 sw_if_index0, sw_if_index1;
1231 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1232 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1233 is_l20 = cached_is_l2;
1235 /* This is probably wholly unnecessary */
1236 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
1239 /* Now sw_if_index0 == sw_if_index1 */
1240 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1242 cached_sw_if_index = sw_if_index0;
1243 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1244 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1245 subint0 = &intf0->untagged_subint;
1246 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1249 if (PREDICT_TRUE (is_l20 != 0))
1251 vnet_buffer (b0)->l3_hdr_offset =
1252 vnet_buffer (b0)->l2_hdr_offset +
1253 sizeof (ethernet_header_t);
1254 vnet_buffer (b1)->l3_hdr_offset =
1255 vnet_buffer (b1)->l2_hdr_offset +
1256 sizeof (ethernet_header_t);
1257 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1258 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1259 next0 = em->l2_next;
1260 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
1261 next1 = em->l2_next;
1262 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
1266 if (!ethernet_address_cast (e0->dst_address) &&
1267 (hi->hw_address != 0) &&
1268 !ethernet_mac_address_equal ((u8 *) e0, hi->hw_address))
1269 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1270 if (!ethernet_address_cast (e1->dst_address) &&
1271 (hi->hw_address != 0) &&
1272 !ethernet_mac_address_equal ((u8 *) e1, hi->hw_address))
1273 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1274 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
1275 determine_next_node (em, variant, 0, type0, b0,
1277 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
1278 determine_next_node (em, variant, 0, type1, b1,
1284 /* Slow-path for the tagged case */
1286 parse_header (variant,
1289 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
1291 parse_header (variant,
1294 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
1296 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1297 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1299 eth_vlan_table_lookups (em,
1306 &main_intf0, &vlan_intf0, &qinq_intf0);
1308 eth_vlan_table_lookups (em,
1315 &main_intf1, &vlan_intf1, &qinq_intf1);
1317 identify_subint (hi0,
1322 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
1324 identify_subint (hi1,
1329 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
1331 // Save RX sw_if_index for later nodes
1332 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1334 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1335 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1337 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
1339 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
1340 if (((new_sw_if_index0 != ~0)
1341 && (new_sw_if_index0 != old_sw_if_index0))
1342 || ((new_sw_if_index1 != ~0)
1343 && (new_sw_if_index1 != old_sw_if_index1)))
1346 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
1347 - vnet_buffer (b0)->l2_hdr_offset;
1348 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
1349 - vnet_buffer (b1)->l2_hdr_offset;
1351 stats_n_packets += 2;
1352 stats_n_bytes += len0 + len1;
1355 (!(new_sw_if_index0 == stats_sw_if_index
1356 && new_sw_if_index1 == stats_sw_if_index)))
1358 stats_n_packets -= 2;
1359 stats_n_bytes -= len0 + len1;
1361 if (new_sw_if_index0 != old_sw_if_index0
1362 && new_sw_if_index0 != ~0)
1363 vlib_increment_combined_counter (vnm->
1364 interface_main.combined_sw_if_counters
1366 VNET_INTERFACE_COUNTER_RX,
1368 new_sw_if_index0, 1,
1370 if (new_sw_if_index1 != old_sw_if_index1
1371 && new_sw_if_index1 != ~0)
1372 vlib_increment_combined_counter (vnm->
1373 interface_main.combined_sw_if_counters
1375 VNET_INTERFACE_COUNTER_RX,
1377 new_sw_if_index1, 1,
1380 if (new_sw_if_index0 == new_sw_if_index1)
1382 if (stats_n_packets > 0)
1384 vlib_increment_combined_counter
1385 (vnm->interface_main.combined_sw_if_counters
1386 + VNET_INTERFACE_COUNTER_RX,
1389 stats_n_packets, stats_n_bytes);
1390 stats_n_packets = stats_n_bytes = 0;
1392 stats_sw_if_index = new_sw_if_index0;
1397 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1398 is_l20 = is_l21 = 0;
1400 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1402 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1406 b0->error = error_node->errors[error0];
1407 b1->error = error_node->errors[error1];
1409 // verify speculative enqueue
1410 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1411 n_left_to_next, bi0, bi1, next0,
1415 while (n_left_from > 0 && n_left_to_next > 0)
1420 u16 type0, orig_type0;
1421 u16 outer_id0, inner_id0;
1423 u32 old_sw_if_index0, new_sw_if_index0, len0;
1424 vnet_hw_interface_t *hi0;
1425 main_intf_t *main_intf0;
1426 vlan_intf_t *vlan_intf0;
1427 qinq_intf_t *qinq_intf0;
1428 ethernet_header_t *e0;
1431 // Prefetch next iteration
1432 if (n_left_from > 1)
1434 vlib_prefetch_buffer_header (b[1], STORE);
1435 CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
1443 n_left_to_next -= 1;
1448 error0 = ETHERNET_ERROR_NONE;
1449 e0 = vlib_buffer_get_current (b0);
1450 type0 = clib_net_to_host_u16 (e0->type);
1452 /* Set the L2 header offset for all packets */
1453 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1454 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1456 /* Speed-path for the untagged case */
1457 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1458 && !ethernet_frame_is_tagged (type0)))
1461 subint_config_t *subint0;
1464 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1465 is_l20 = cached_is_l2;
1467 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1469 cached_sw_if_index = sw_if_index0;
1470 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1471 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1472 subint0 = &intf0->untagged_subint;
1473 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1477 if (PREDICT_TRUE (is_l20 != 0))
1479 vnet_buffer (b0)->l3_hdr_offset =
1480 vnet_buffer (b0)->l2_hdr_offset +
1481 sizeof (ethernet_header_t);
1482 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1483 next0 = em->l2_next;
1484 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
1488 if (!ethernet_address_cast (e0->dst_address) &&
1489 (hi->hw_address != 0) &&
1490 !ethernet_mac_address_equal ((u8 *) e0, hi->hw_address))
1491 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1492 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
1493 determine_next_node (em, variant, 0, type0, b0,
1499 /* Slow-path for the tagged case */
1500 parse_header (variant,
1503 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
1505 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1507 eth_vlan_table_lookups (em,
1514 &main_intf0, &vlan_intf0, &qinq_intf0);
1516 identify_subint (hi0,
1521 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
1523 // Save RX sw_if_index for later nodes
1524 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1526 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1528 // Increment subinterface stats
1529 // Note that interface-level counters have already been incremented
1530 // prior to calling this function. Thus only subinterface counters
1531 // are incremented here.
1533 // Interface level counters include packets received on the main
1534 // interface and all subinterfaces. Subinterface level counters
1535 // include only those packets received on that subinterface
1536 // Increment stats if the subint is valid and it is not the main intf
1537 if ((new_sw_if_index0 != ~0)
1538 && (new_sw_if_index0 != old_sw_if_index0))
1541 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
1542 - vnet_buffer (b0)->l2_hdr_offset;
1544 stats_n_packets += 1;
1545 stats_n_bytes += len0;
1547 // Batch stat increments from the same subinterface so counters
1548 // don't need to be incremented for every packet.
1549 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1551 stats_n_packets -= 1;
1552 stats_n_bytes -= len0;
1554 if (new_sw_if_index0 != ~0)
1555 vlib_increment_combined_counter
1556 (vnm->interface_main.combined_sw_if_counters
1557 + VNET_INTERFACE_COUNTER_RX,
1558 thread_index, new_sw_if_index0, 1, len0);
1559 if (stats_n_packets > 0)
1561 vlib_increment_combined_counter
1562 (vnm->interface_main.combined_sw_if_counters
1563 + VNET_INTERFACE_COUNTER_RX,
1565 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1566 stats_n_packets = stats_n_bytes = 0;
1568 stats_sw_if_index = new_sw_if_index0;
1572 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1575 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1579 b0->error = error_node->errors[error0];
1581 // verify speculative enqueue
1582 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1583 to_next, n_left_to_next,
1587 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1590 // Increment any remaining batched stats
1591 if (stats_n_packets > 0)
1593 vlib_increment_combined_counter
1594 (vnm->interface_main.combined_sw_if_counters
1595 + VNET_INTERFACE_COUNTER_RX,
1596 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
1597 node->runtime_data[0] = stats_sw_if_index;
1601 VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1602 vlib_node_runtime_t * node,
1603 vlib_frame_t * frame)
1605 vnet_main_t *vnm = vnet_get_main ();
1606 u32 *from = vlib_frame_vector_args (frame);
1607 u32 n_packets = frame->n_vectors;
1609 ethernet_input_trace (vm, node, frame);
1611 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1613 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
1614 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
1615 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1616 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
1619 ethernet_input_inline (vm, node, from, n_packets,
1620 ETHERNET_INPUT_VARIANT_ETHERNET);
1624 VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1625 vlib_node_runtime_t * node,
1626 vlib_frame_t * from_frame)
1628 u32 *from = vlib_frame_vector_args (from_frame);
1629 u32 n_packets = from_frame->n_vectors;
1630 ethernet_input_trace (vm, node, from_frame);
1631 ethernet_input_inline (vm, node, from, n_packets,
1632 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1636 VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1637 vlib_node_runtime_t * node,
1638 vlib_frame_t * from_frame)
1640 u32 *from = vlib_frame_vector_args (from_frame);
1641 u32 n_packets = from_frame->n_vectors;
1642 ethernet_input_trace (vm, node, from_frame);
1643 ethernet_input_inline (vm, node, from, n_packets,
1644 ETHERNET_INPUT_VARIANT_NOT_L2);
1649 // Return the subinterface config struct for the given sw_if_index
1650 // Also return via parameter the appropriate match flags for the
1651 // configured number of tags.
1652 // On error (unsupported or not ethernet) return 0.
1653 static subint_config_t *
1654 ethernet_sw_interface_get_config (vnet_main_t * vnm,
1656 u32 * flags, u32 * unsupported)
1658 ethernet_main_t *em = ðernet_main;
1659 vnet_hw_interface_t *hi;
1660 vnet_sw_interface_t *si;
1661 main_intf_t *main_intf;
1662 vlan_table_t *vlan_table;
1663 qinq_table_t *qinq_table;
1664 subint_config_t *subint = 0;
1666 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1668 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1671 goto done; // non-ethernet interface
1674 // ensure there's an entry for the main intf (shouldn't really be necessary)
1675 vec_validate (em->main_intfs, hi->hw_if_index);
1676 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1678 // Locate the subint for the given ethernet config
1679 si = vnet_get_sw_interface (vnm, sw_if_index);
1681 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1683 p2p_ethernet_main_t *p2pm = &p2p_main;
1684 u32 p2pe_sw_if_index =
1685 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1686 if (p2pe_sw_if_index == ~0)
1688 pool_get (p2pm->p2p_subif_pool, subint);
1689 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1692 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1693 *flags = SUBINT_CONFIG_P2P;
1695 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1699 pipe = pipe_get (sw_if_index);
1700 subint = &pipe->subint;
1701 *flags = SUBINT_CONFIG_P2P;
1703 else if (si->sub.eth.flags.default_sub)
1705 subint = &main_intf->default_subint;
1706 *flags = SUBINT_CONFIG_MATCH_1_TAG |
1707 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1709 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1711 // if no flags are set then this is a main interface
1712 // so treat as untagged
1713 subint = &main_intf->untagged_subint;
1714 *flags = SUBINT_CONFIG_MATCH_0_TAG;
1719 // first get the vlan table
1720 if (si->sub.eth.flags.dot1ad)
1722 if (main_intf->dot1ad_vlans == 0)
1724 // Allocate a vlan table from the pool
1725 pool_get (em->vlan_pool, vlan_table);
1726 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1730 // Get ptr to existing vlan table
1732 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1737 if (main_intf->dot1q_vlans == 0)
1739 // Allocate a vlan table from the pool
1740 pool_get (em->vlan_pool, vlan_table);
1741 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1745 // Get ptr to existing vlan table
1747 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
1751 if (si->sub.eth.flags.one_tag)
1753 *flags = si->sub.eth.flags.exact_match ?
1754 SUBINT_CONFIG_MATCH_1_TAG :
1755 (SUBINT_CONFIG_MATCH_1_TAG |
1756 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1758 if (si->sub.eth.flags.outer_vlan_id_any)
1760 // not implemented yet
1766 // a single vlan, a common case
1768 &vlan_table->vlans[si->sub.eth.
1769 outer_vlan_id].single_tag_subint;
1776 *flags = si->sub.eth.flags.exact_match ?
1777 SUBINT_CONFIG_MATCH_2_TAG :
1778 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1780 if (si->sub.eth.flags.outer_vlan_id_any
1781 && si->sub.eth.flags.inner_vlan_id_any)
1783 // not implemented yet
1788 if (si->sub.eth.flags.inner_vlan_id_any)
1790 // a specific outer and "any" inner
1791 // don't need a qinq table for this
1793 &vlan_table->vlans[si->sub.eth.
1794 outer_vlan_id].inner_any_subint;
1795 if (si->sub.eth.flags.exact_match)
1797 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1801 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1802 SUBINT_CONFIG_MATCH_3_TAG;
1807 // a specific outer + specifc innner vlan id, a common case
1809 // get the qinq table
1810 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1812 // Allocate a qinq table from the pool
1813 pool_get (em->qinq_pool, qinq_table);
1814 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1815 qinq_table - em->qinq_pool;
1819 // Get ptr to existing qinq table
1821 vec_elt_at_index (em->qinq_pool,
1822 vlan_table->vlans[si->sub.
1826 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
1835 static clib_error_t *
1836 ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
1838 subint_config_t *subint;
1841 clib_error_t *error = 0;
1843 // Find the config for this subinterface
1845 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1850 // not implemented yet or not ethernet
1854 subint->sw_if_index =
1855 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1861 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1864 #ifndef CLIB_MARCH_VARIANT
1865 // Set the L2/L3 mode for the subinterface
1867 ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
1869 subint_config_t *subint;
1873 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
1875 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1877 // Find the config for this subinterface
1879 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1884 // unimplemented or not ethernet
1888 // Double check that the config we found is for our interface (or the interface is down)
1889 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1893 subint->flags |= SUBINT_CONFIG_L2;
1896 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
1897 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1901 subint->flags &= ~SUBINT_CONFIG_L2;
1904 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
1905 | SUBINT_CONFIG_MATCH_3_TAG);
1913 * Set the L2/L3 mode for the subinterface regardless of port
1916 ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
1917 u32 sw_if_index, u32 l2)
1919 subint_config_t *subint;
1923 /* Find the config for this subinterface */
1925 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1930 /* unimplemented or not ethernet */
1935 * Double check that the config we found is for our interface (or the
1936 * interface is down)
1938 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1942 subint->flags |= SUBINT_CONFIG_L2;
1946 subint->flags &= ~SUBINT_CONFIG_L2;
1954 static clib_error_t *
1955 ethernet_sw_interface_add_del (vnet_main_t * vnm,
1956 u32 sw_if_index, u32 is_create)
1958 clib_error_t *error = 0;
1959 subint_config_t *subint;
1961 u32 unsupported = 0;
1963 // Find the config for this subinterface
1965 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
1970 // not implemented yet or not ethernet
1973 // this is the NYI case
1974 error = clib_error_return (0, "not implemented yet");
1985 // Initialize the subint
1986 if (subint->flags & SUBINT_CONFIG_VALID)
1988 // Error vlan already in use
1989 error = clib_error_return (0, "vlan is already in use");
1993 // Note that config is L3 by default
1994 subint->flags = SUBINT_CONFIG_VALID | match_flags;
1995 subint->sw_if_index = ~0; // because interfaces are initially down
2002 VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
2004 static char *ethernet_error_strings[] = {
2005 #define ethernet_error(n,c,s) s,
2006 #include "error.def"
2007 #undef ethernet_error
2011 VLIB_REGISTER_NODE (ethernet_input_node) = {
2012 .name = "ethernet-input",
2013 /* Takes a vector of packets. */
2014 .vector_size = sizeof (u32),
2015 .scalar_size = sizeof (ethernet_input_frame_t),
2016 .n_errors = ETHERNET_N_ERROR,
2017 .error_strings = ethernet_error_strings,
2018 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2020 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2021 foreach_ethernet_input_next
2024 .format_buffer = format_ethernet_header_with_length,
2025 .format_trace = format_ethernet_input_trace,
2026 .unformat_buffer = unformat_ethernet_header,
2029 VLIB_REGISTER_NODE (ethernet_input_type_node) = {
2030 .name = "ethernet-input-type",
2031 /* Takes a vector of packets. */
2032 .vector_size = sizeof (u32),
2033 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2035 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2036 foreach_ethernet_input_next
2041 VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
2042 .name = "ethernet-input-not-l2",
2043 /* Takes a vector of packets. */
2044 .vector_size = sizeof (u32),
2045 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2047 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2048 foreach_ethernet_input_next
2054 #ifndef CLIB_MARCH_VARIANT
2056 ethernet_set_rx_redirect (vnet_main_t * vnm,
2057 vnet_hw_interface_t * hi, u32 enable)
2059 // Insure all packets go to ethernet-input (i.e. untagged ipv4 packets
2060 // don't go directly to ip4-input)
2061 vnet_hw_interface_rx_redirect_to_node
2062 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
2067 * Initialization and registration for the next_by_ethernet structure
2071 next_by_ethertype_init (next_by_ethertype_t * l3_next)
2073 l3_next->input_next_by_type = sparse_vec_new
2074 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
2075 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2077 vec_validate (l3_next->sparse_index_by_input_next_index,
2078 ETHERNET_INPUT_NEXT_DROP);
2079 vec_validate (l3_next->sparse_index_by_input_next_index,
2080 ETHERNET_INPUT_NEXT_PUNT);
2081 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2082 SPARSE_VEC_INVALID_INDEX;
2083 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2084 SPARSE_VEC_INVALID_INDEX;
2087 * Make sure we don't wipe out an ethernet registration by mistake
2088 * Can happen if init function ordering constraints are missing.
2092 ethernet_main_t *em = ðernet_main;
2093 ASSERT (em->next_by_ethertype_register_called == 0);
2099 // Add an ethertype -> next index mapping to the structure
2101 next_by_ethertype_register (next_by_ethertype_t * l3_next,
2102 u32 ethertype, u32 next_index)
2106 ethernet_main_t *em = ðernet_main;
2110 ethernet_main_t *em = ðernet_main;
2111 em->next_by_ethertype_register_called = 1;
2114 /* Setup ethernet type -> next index sparse vector mapping. */
2115 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2118 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2120 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2121 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
2123 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
2125 // do not allow the cached next index's to be updated if L3
2126 // redirect is enabled, as it will have overwritten them
2127 if (!em->redirect_l3)
2129 // Cache common ethertypes directly
2130 if (ethertype == ETHERNET_TYPE_IP4)
2132 l3_next->input_next_ip4 = next_index;
2134 else if (ethertype == ETHERNET_TYPE_IP6)
2136 l3_next->input_next_ip6 = next_index;
2138 else if (ethertype == ETHERNET_TYPE_MPLS)
2140 l3_next->input_next_mpls = next_index;
2147 ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
2149 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2150 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
2152 ethernet_setup_node (vm, ethernet_input_node.index);
2153 ethernet_setup_node (vm, ethernet_input_type_node.index);
2154 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2156 next_by_ethertype_init (&em->l3_next);
2158 // Initialize pools and vector for vlan parsing
2159 vec_validate (em->main_intfs, 10); // 10 main interfaces
2160 pool_alloc (em->vlan_pool, 10);
2161 pool_alloc (em->qinq_pool, 1);
2163 // The first vlan pool will always be reserved for an invalid table
2164 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
2165 // The first qinq pool will always be reserved for an invalid table
2166 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
2170 ethernet_register_input_type (vlib_main_t * vm,
2171 ethernet_type_t type, u32 node_index)
2173 ethernet_main_t *em = ðernet_main;
2174 ethernet_type_info_t *ti;
2178 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
2180 clib_error_report (error);
2183 ti = ethernet_get_type_info (em, type);
2186 clib_warning ("type_info NULL for type %d", type);
2189 ti->node_index = node_index;
2190 ti->next_index = vlib_node_add_next (vm,
2191 ethernet_input_node.index, node_index);
2192 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2193 ASSERT (i == ti->next_index);
2195 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2196 ASSERT (i == ti->next_index);
2198 // Add the L3 node for this ethertype to the next nodes structure
2199 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2201 // Call the registration functions for other nodes that want a mapping
2202 l2bvi_register_input_type (vm, type, node_index);
2206 ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
2208 ethernet_main_t *em = ðernet_main;
2212 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
2215 * Even if we never use these arcs, we have to align the next indices...
2217 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2219 ASSERT (i == em->l2_next);
2221 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2222 ASSERT (i == em->l2_next);
2225 // Register a next node for L3 redirect, and enable L3 redirect
2227 ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
2229 ethernet_main_t *em = ðernet_main;
2232 em->redirect_l3 = 1;
2233 em->redirect_l3_next = vlib_node_add_next (vm,
2234 ethernet_input_node.index,
2237 * Change the cached next nodes to the redirect node
2239 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2240 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2241 em->l3_next.input_next_mpls = em->redirect_l3_next;
2244 * Even if we never use these arcs, we have to align the next indices...
2246 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2248 ASSERT (i == em->redirect_l3_next);
2250 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2252 ASSERT (i == em->redirect_l3_next);
2257 * fd.io coding-style-patch-verification: ON
2260 * eval: (c-set-style "gnu")