2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * ethernet_node.c: ethernet packet processing
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vnet/pg/pg.h>
42 #include <vnet/ethernet/ethernet.h>
43 #include <vnet/ethernet/p2p_ethernet.h>
44 #include <vnet/devices/pipe/pipe.h>
45 #include <vppinfra/sparse_vec.h>
46 #include <vnet/l2/l2_bvi.h>
47 #include <vnet/classify/trace_classify.h>
/* List of ethernet-input's next-node dispositions; each _(SYMBOL, "node-name")
   entry maps an enum symbol to the graph-node name it feeds. */
49 #define foreach_ethernet_input_next \
50 _ (PUNT, "error-punt") \
51 _ (DROP, "error-drop") \
52 _ (LLC, "llc-input") \
53 _ (IP4_INPUT, "ip4-input") \
54 _ (IP4_INPUT_NCS, "ip4-input-no-checksum")
/* Generate ETHERNET_INPUT_NEXT_* enum constants from the list above. */
58 #define _(s,n) ETHERNET_INPUT_NEXT_##s,
59 foreach_ethernet_input_next
61 ETHERNET_INPUT_N_NEXT,
62 } ethernet_input_next_t;
68 ethernet_input_frame_t frame_data;
69 } ethernet_input_trace_t;
/* Trace formatter for ethernet-input: prints the frame flags (plus
   hw/sw if-index when the frame carries a single sw_if_index) followed by
   the captured ethernet header. Standard vlib format signature:
   (vm, node, trace) pulled from the va_list. */
72 format_ethernet_input_trace (u8 * s, va_list * va)
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
76 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
77 u32 indent = format_get_indent (s);
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
/* Only frames marked single-sw-if-index carry valid interface indices. */
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
87 s = format (s, "%U", format_ethernet_header, t->packet_data);
92 extern vlib_node_registration_t ethernet_input_node;
/* Input variants: plain ethernet, type-only (arriving from LLC/SNAP
   processing, header already consumed), and not-L2 (L3 main interface). */
96 ETHERNET_INPUT_VARIANT_ETHERNET,
97 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
98 ETHERNET_INPUT_VARIANT_NOT_L2,
99 } ethernet_input_variant_t;
102 // Parse the ethernet header to extract vlan tags and innermost ethertype
/* Parse the ethernet header of b0: record l2_hdr_offset, peel up to two
   VLAN tags (writing outer/inner VLAN ids), advance the buffer past the
   parsed headers, and return the innermost ethertype plus SUBINT_CONFIG_*
   match flags describing how many tags were seen (0/1/2/3-or-more). */
103 static_always_inline void
104 parse_header (ethernet_input_variant_t variant,
108 u16 * outer_id, u16 * inner_id, u32 * match_flags)
112 if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
113 || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
115 ethernet_header_t *e0;
117 e0 = (void *) (b0->data + b0->current_data);
/* Remember where the L2 header starts so it can be restored later. */
119 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
120 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
122 vlib_buffer_advance (b0, sizeof (e0[0]));
124 *type = clib_net_to_host_u16 (e0->type);
126 else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
128 // here when prior node was LLC/SNAP processing
131 e0 = (void *) (b0->data + b0->current_data);
133 vlib_buffer_advance (b0, sizeof (e0[0]));
135 *type = clib_net_to_host_u16 (e0[0]);
138 // save for distinguishing between dot1q and dot1ad later
141 // default the tags to 0 (used if there is no corresponding tag)
145 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
148 // check for vlan encaps
149 if (ethernet_frame_is_tagged (*type))
151 ethernet_vlan_header_t *h0;
154 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
156 h0 = (void *) (b0->data + b0->current_data);
158 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
/* Low 12 bits of the TCI are the VLAN id. */
160 *outer_id = tag & 0xfff;
162 *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;
164 *type = clib_net_to_host_u16 (h0->type);
166 vlib_buffer_advance (b0, sizeof (h0[0]));
169 if (*type == ETHERNET_TYPE_VLAN)
171 // Double tagged packet
172 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
174 h0 = (void *) (b0->data + b0->current_data);
176 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
178 *inner_id = tag & 0xfff;
180 *type = clib_net_to_host_u16 (h0->type);
182 vlib_buffer_advance (b0, sizeof (h0[0]));
184 if (*type == ETHERNET_TYPE_VLAN)
186 // More than double tagged packet
187 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
189 vlib_buffer_advance (b0, sizeof (h0[0]));
190 vlan_count = 3; // "unknown" number, aka, 3-or-more
/* Stash the tag count in the buffer for downstream nodes. */
194 ethernet_buffer_set_vlan_count (b0, vlan_count);
197 // Determine the subinterface for this packet, given the result of the
198 // vlan table lookups and vlan header parsing. Check the most specific
/* Map the parsed vlan match_flags onto a sub-interface via
   eth_identify_subint; for L3 (non-l2) results, enforce the my-mac filter
   on unicast dmacs and flag mismatches, then mark DOWN if no usable
   sub-interface was found (*new_sw_if_index == ~0). */
200 static_always_inline void
201 identify_subint (vnet_hw_interface_t * hi,
204 main_intf_t * main_intf,
205 vlan_intf_t * vlan_intf,
206 qinq_intf_t * qinq_intf,
207 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
211 matched = eth_identify_subint (hi, match_flags, main_intf, vlan_intf,
212 qinq_intf, new_sw_if_index, error0, is_l2);
217 // Perform L3 my-mac filter
218 // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
219 // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
222 ethernet_header_t *e0;
/* Re-read the original L2 header via the saved l2_hdr_offset (the buffer
   has already been advanced past it). */
223 e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
224 if (!(ethernet_address_cast (e0->dst_address)))
227 if (!ethernet_mac_address_equal ((u8 *) e0, hi->hw_address))
229 *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
234 // Check for down subinterface
235 *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
/* Choose the next graph node for a packet after header parsing: drop on
   error; for L2 paths rewind the buffer so the full L2 header is visible
   downstream; otherwise dispatch by ethertype (IP4/IP6/MPLS fast cases,
   optional L3 redirect, then the sparse-vec ethertype table, and finally
   LLC for sub-0x600 types). Sets l3_hdr_offset as a side effect. */
239 static_always_inline void
240 determine_next_node (ethernet_main_t * em,
241 ethernet_input_variant_t variant,
243 u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
245 vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
246 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
248 if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
250 // some error occurred
251 *next0 = ETHERNET_INPUT_NEXT_DROP;
255 // record the L2 len and reset the buffer so the L2 header is preserved
256 u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
257 vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
258 *next0 = em->l2_next;
259 ASSERT (vnet_buffer (b0)->l2.l2_len ==
260 ethernet_buffer_header_size (b0));
/* Negative advance: back the buffer up to the start of the L2 header. */
261 vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));
263 // check for common IP/MPLS ethertypes
265 else if (type0 == ETHERNET_TYPE_IP4)
267 *next0 = em->l3_next.input_next_ip4;
269 else if (type0 == ETHERNET_TYPE_IP6)
271 *next0 = em->l3_next.input_next_ip6;
273 else if (type0 == ETHERNET_TYPE_MPLS)
275 *next0 = em->l3_next.input_next_mpls;
278 else if (em->redirect_l3)
280 // L3 Redirect is on, the cached common next nodes will be
281 // pointing to the redirect node, catch the uncommon types here
282 *next0 = em->redirect_l3_next;
286 // uncommon ethertype, check table
288 i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
289 *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
292 SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
294 // The table is not populated with LLC values, so check that now.
295 // If variant is variant_ethernet then we came from LLC processing. Don't
296 // go back there; drop instead by keeping the drop/bad table result.
297 if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
299 *next0 = ETHERNET_INPUT_NEXT_LLC;
305 /* following vector code relies on following assumptions */
/* The SIMD gather/scatter below treats the first 8 bytes of vlib_buffer_t
   as {current_data:i16, current_length:i16, flags:u32}; these asserts pin
   that layout, and require l3_hdr_offset to sit 2 bytes after
   l2_hdr_offset so both can be written with one 32-bit store. */
306 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
307 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
308 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
309 STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
310 STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
311 "l3_hdr_offset must follow l2_hdr_offset");
/* For 4 buffers at once: set l2/l3 header offsets, advance past the
   ethernet header when the interface is L3, set the offset-valid flags,
   and record l2_len. Has an AVX2 path that rewrites
   current_data/current_length/flags for all 4 buffers with vector
   gather/scatter (relies on the layout asserts above). */
313 static_always_inline void
314 eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, int is_l3)
316 i16 adv = sizeof (ethernet_header_t);
317 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
318 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
320 #ifdef CLIB_HAVE_VEC256
321 /* to reduce number of small loads/stores we are loading first 64 bits
322 of each buffer metadata into 256-bit register so we can advance
323 current_data, current_length and flags.
324 Observed saving of this code is ~2 clocks per packet */
327 /* vector of signed 16 bit integers used in signed vector add operation
328 to advance current_data and current_length */
329 u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
/* +adv to current_data, -adv to current_length, leave flags words alone. */
331 adv, -adv, 0, 0, adv, -adv, 0, 0,
332 adv, -adv, 0, 0, adv, -adv, 0, 0
335 /* load 4 x 64 bits */
336 r = u64x4_gather (b[0], b[1], b[2], b[3]);
342 radv = (u64x4) ((i16x16) r + adv4);
344 /* write 4 x 64 bits */
/* Only the L3 path actually advances; L2 keeps original current_data. */
345 u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
347 /* use old current_data as l2_hdr_offset and new current_data as
   l3_hdr_offset */
349 r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
351 /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
352 u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
353 u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
354 u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
355 u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
/* Sanity-check the vector path produced the same offsets the scalar
   path below would have. */
359 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
360 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
361 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
362 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
364 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
365 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
366 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
367 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
371 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
372 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
373 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
374 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);
376 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
377 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
378 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
379 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
/* Scalar fallback: same metadata updates without SIMD. */
383 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
384 vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
385 vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
386 vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
387 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
388 vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
389 vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
390 vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
394 vlib_buffer_advance (b[0], adv);
395 vlib_buffer_advance (b[1], adv);
396 vlib_buffer_advance (b[2], adv);
397 vlib_buffer_advance (b[3], adv);
400 b[0]->flags |= flags;
401 b[1]->flags |= flags;
402 b[2]->flags |= flags;
403 b[3]->flags |= flags;
/* Untagged packet: L2 length is just the ethernet header. */
408 vnet_buffer (b[0])->l2.l2_len = adv;
409 vnet_buffer (b[1])->l2.l2_len = adv;
410 vnet_buffer (b[2])->l2.l2_len = adv;
411 vnet_buffer (b[3])->l2.l2_len = adv;
/* Single-buffer variant of eth_input_adv_and_flags_x4: set l2/l3 header
   offsets and flags, advance past the ethernet header on the L3 path,
   and record l2_len for the untagged case. */
415 static_always_inline void
416 eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, int is_l3)
418 i16 adv = sizeof (ethernet_header_t);
419 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
420 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
422 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
423 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
426 vlib_buffer_advance (b[0], adv);
427 b[0]->flags |= flags;
429 vnet_buffer (b[0])->l2.l2_len = adv;
/* Extract per-packet parse inputs from buffer b[offset] into parallel
   arrays: the (network-order) ethertype, the 8 bytes following the
   ethernet header (candidate vlan tags), and optionally the first 8 bytes
   of the header (dmac + start of smac) for DMAC filtering. */
433 static_always_inline void
434 eth_input_get_etype_and_tags (vlib_buffer_t ** b, u16 * etype, u64 * tags,
435 u64 * dmacs, int offset, int dmac_check)
437 ethernet_header_t *e;
438 e = vlib_buffer_get_current (b[offset]);
439 #ifdef CLIB_HAVE_VEC128
/* One 16-byte load covering type + following bytes; lane 3 is the type. */
440 u64x2 r = u64x2_load_unaligned (((u8 *) & e->type) - 6);
441 etype[offset] = ((u16x8) r)[3];
444 etype[offset] = e->type;
445 tags[offset] = *(u64 *) (e + 1);
449 dmacs[offset] = *(u64 *) e;
/* Slow-path next-node lookup by host-order ethertype: values below 0x600
   are 802.3 lengths and go to llc-input; otherwise consult the registered
   per-ethertype sparse vector. */
452 static_always_inline u16
453 eth_input_next_by_type (u16 etype)
/* NOTE(review): "ðernet_main" is mojibake for "&ethernet_main"
   (HTML entity corruption in this extraction) — restore before building. */
455 ethernet_main_t *em = ðernet_main;
457 return (etype < 0x600) ? ETHERNET_INPUT_NEXT_LLC :
458 vec_elt (em->l3_next.input_next_by_type,
459 sparse_vec_index (em->l3_next.input_next_by_type, etype));
/* Tail of the per-tag lookup cache state (eth_input_tag_lookup_t):
   accumulated packet/byte counts for the cached sub-interface. */
469 u64 n_packets, n_bytes;
470 } eth_input_tag_lookup_t;
/* Flush the cached rx counters onto the sub-interface's combined
   counters; no-op when nothing was accumulated or no valid interface. */
472 static_always_inline void
473 eth_input_update_if_counters (vlib_main_t * vm, vnet_main_t * vnm,
474 eth_input_tag_lookup_t * l)
476 if (l->n_packets == 0 || l->sw_if_index == ~0)
/* l->len bytes of stripped header per packet are added back here. */
480 l->n_bytes += l->n_packets * l->len;
482 vlib_increment_combined_counter
483 (vnm->interface_main.combined_sw_if_counters +
484 VNET_INTERFACE_COUNTER_RX, vm->thread_index, l->sw_if_index,
485 l->n_packets, l->n_bytes);
/* Slow-path handler for a tagged packet: resolve the (dot1q or dot1ad)
   vlan tag stack to a sub-interface, caching the full lookup result in
   *l so back-to-back packets with the same tags skip the tables
   ((tag ^ l->tag) & l->mask test). Applies the cached advance, l2_len,
   next node, error and rx sw_if_index to buffer b and accumulates
   counters. dmac_bad marks packets that already failed the DMAC filter. */
488 static_always_inline void
489 eth_input_tag_lookup (vlib_main_t * vm, vnet_main_t * vnm,
490 vlib_node_runtime_t * node, vnet_hw_interface_t * hi,
491 u64 tag, u16 * next, vlib_buffer_t * b,
492 eth_input_tag_lookup_t * l, u8 dmac_bad, int is_dot1ad,
493 int main_is_l3, int check_dmac)
/* NOTE(review): "ðernet_main" is mojibake for "&ethernet_main". */
495 ethernet_main_t *em = ðernet_main;
/* Cache miss: redo the full vlan/qinq table lookup. */
497 if ((tag ^ l->tag) & l->mask)
499 main_intf_t *mif = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
502 vlan_table_t *vlan_table;
503 qinq_table_t *qinq_table;
/* View the 8 tag bytes as 4 u16s: [0]=outer TCI, [1]=inner TPID/type,
   [2]=inner TCI, [3]=innermost type. */
504 u16 *t = (u16 *) & tag;
505 u16 vlan1 = clib_net_to_host_u16 (t[0]) & 0xFFF;
506 u16 vlan2 = clib_net_to_host_u16 (t[2]) & 0xFFF;
507 u32 matched, is_l2, new_sw_if_index;
509 vlan_table = vec_elt_at_index (em->vlan_pool, is_dot1ad ?
510 mif->dot1ad_vlans : mif->dot1q_vlans);
511 vif = &vlan_table->vlans[vlan1];
512 qinq_table = vec_elt_at_index (em->qinq_pool, vif->qinqs);
513 qif = &qinq_table->vlans[vlan2];
514 l->err = ETHERNET_ERROR_NONE;
515 l->type = clib_net_to_host_u16 (t[1]);
/* Second VLAN tag present: qinq (2-tag) match. */
517 if (l->type == ETHERNET_TYPE_VLAN)
519 l->type = clib_net_to_host_u16 (t[3]);
521 matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
522 SUBINT_CONFIG_MATCH_2_TAG, mif, vif,
523 qif, &new_sw_if_index, &l->err,
531 new_sw_if_index = hi->sw_if_index;
532 l->err = ETHERNET_ERROR_NONE;
534 is_l2 = main_is_l3 == 0;
/* Single-tag match. */
537 matched = eth_identify_subint (hi, SUBINT_CONFIG_VALID |
538 SUBINT_CONFIG_MATCH_1_TAG, mif,
539 vif, qif, &new_sw_if_index,
/* Interface changed: flush counters accumulated for the previous one. */
543 if (l->sw_if_index != new_sw_if_index)
545 eth_input_update_if_counters (vm, vnm, l);
548 l->sw_if_index = new_sw_if_index;
/* Cache key mask: compare both tags for qinq, only the first 4 bytes
   (outer tag + type) for single-tagged. */
551 l->mask = (l->n_tags == 2) ?
552 clib_net_to_host_u64 (0xffffffffffffffff) :
553 clib_net_to_host_u64 (0xffffffff00000000);
555 if (matched && l->sw_if_index == ~0)
556 l->err = ETHERNET_ERROR_DOWN;
558 l->len = sizeof (ethernet_header_t) +
559 l->n_tags * sizeof (ethernet_vlan_header_t);
/* L2 path rewinds to the ethernet header; L3 path advances past tags. */
561 l->adv = is_l2 ? -(int) sizeof (ethernet_header_t) :
562 l->n_tags * sizeof (ethernet_vlan_header_t);
564 l->adv = is_l2 ? 0 : l->len;
566 if (PREDICT_FALSE (l->err != ETHERNET_ERROR_NONE))
567 l->next = ETHERNET_INPUT_NEXT_DROP;
569 l->next = em->l2_next;
570 else if (l->type == ETHERNET_TYPE_IP4)
571 l->next = em->l3_next.input_next_ip4;
572 else if (l->type == ETHERNET_TYPE_IP6)
573 l->next = em->l3_next.input_next_ip6;
574 else if (l->type == ETHERNET_TYPE_MPLS)
575 l->next = em->l3_next.input_next_mpls;
576 else if (em->redirect_l3)
577 l->next = em->redirect_l3_next;
580 l->next = eth_input_next_by_type (l->type);
581 if (l->next == ETHERNET_INPUT_NEXT_PUNT)
582 l->err = ETHERNET_ERROR_UNKNOWN_TYPE;
/* DMAC filter applies only on the L3 path (positive advance). */
586 if (check_dmac && l->adv > 0 && dmac_bad)
588 l->err = ETHERNET_ERROR_L3_MAC_MISMATCH;
589 next[0] = ETHERNET_INPUT_NEXT_PUNT;
594 vlib_buffer_advance (b, l->adv);
595 vnet_buffer (b)->l2.l2_len = l->len;
596 vnet_buffer (b)->l3_hdr_offset = vnet_buffer (b)->l2_hdr_offset + l->len;
598 if (l->err == ETHERNET_ERROR_NONE)
600 vnet_buffer (b)->sw_if_index[VLIB_RX] = l->sw_if_index;
601 ethernet_buffer_set_vlan_count (b, l->n_tags);
604 b->error = node->errors[l->err];
606 /* update counters */
608 l->n_bytes += vlib_buffer_length_in_chain (vm, b);
/* DMAC_MASK selects the 6 MAC-address bytes of an 8-byte header load;
   DMAC_IGBIT is the individual/group (multicast) bit. Both are expressed
   in network byte order via clib_net_to_host_u64. */
611 #define DMAC_MASK clib_net_to_host_u64 (0xFFFFFFFFFFFF0000)
612 #define DMAC_IGBIT clib_net_to_host_u64 (0x0100000000000000)
614 #ifdef CLIB_HAVE_VEC256
/* 4-wide primary-address check: a dmac is "bad" when it is unicast
   (IG bit clear) and does not equal the interface address. Returns a
   per-byte msb mask (0xff per failing lane byte). */
615 static_always_inline u32
616 is_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
618 u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
619 r0 = (r0 != u64x4_splat (hwaddr)) & ((r0 & u64x4_splat (DMAC_IGBIT)) == 0);
620 return u8x32_msb_mask ((u8x32) (r0));
/* Scalar primary-address check; multicast/broadcast is never "bad". */
623 static_always_inline u8
624 is_dmac_bad (u64 dmac, u64 hwaddr)
626 u64 r0 = dmac & DMAC_MASK;
627 return (r0 != hwaddr) && ((r0 & DMAC_IGBIT) == 0);
/* Secondary-address check: plain inequality — the multicast exemption was
   already applied by the primary check. */
631 static_always_inline u8
632 is_sec_dmac_bad (u64 dmac, u64 hwaddr)
634 return ((dmac & DMAC_MASK) != hwaddr);
637 #ifdef CLIB_HAVE_VEC256
638 static_always_inline u32
639 is_sec_dmac_bad_x4 (u64 * dmacs, u64 hwaddr)
641 u64x4 r0 = u64x4_load_unaligned (dmacs) & u64x4_splat (DMAC_MASK);
642 r0 = (r0 != u64x4_splat (hwaddr));
643 return u8x32_msb_mask ((u8x32) (r0));
/* AND a secondary-address check into the existing bad flags: a packet
   stays "bad" only if it matches neither the primary nor any secondary
   address tried so far. x1 handles one packet, x4 handles four (vector
   path when AVX2 is available). */
647 static_always_inline u8
648 eth_input_sec_dmac_check_x1 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
650 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
654 static_always_inline u32
655 eth_input_sec_dmac_check_x4 (u64 hwaddr, u64 * dmac, u8 * dmac_bad)
657 #ifdef CLIB_HAVE_VEC256
658 *(u32 *) (dmac_bad + 0) &= is_sec_dmac_bad_x4 (dmac + 0, hwaddr);
660 dmac_bad[0] &= is_sec_dmac_bad (dmac[0], hwaddr);
661 dmac_bad[1] &= is_sec_dmac_bad (dmac[1], hwaddr);
662 dmac_bad[2] &= is_sec_dmac_bad (dmac[2], hwaddr);
663 dmac_bad[3] &= is_sec_dmac_bad (dmac[3], hwaddr);
/* Combined bad-ness of the 4 packets, read as one u32. */
665 return *(u32 *) dmac_bad;
/* Fill dmacs_bad[] for a whole frame: first check every dmac against the
   interface's primary address (vectorized 8-at-a-time on AVX2), then —
   only when some packet failed and have_sec_dmac is set — clear the bad
   flag for packets matching any configured secondary address, stopping
   early once every packet has matched. */
668 static_always_inline void
669 eth_input_process_frame_dmac_check (vnet_hw_interface_t * hi,
670 u64 * dmacs, u8 * dmacs_bad,
671 u32 n_packets, ethernet_interface_t * ei,
674 u64 hwaddr = (*(u64 *) hi->hw_address) & DMAC_MASK;
676 u8 *dmac_bad = dmacs_bad;
678 i32 n_left = n_packets;
680 #ifdef CLIB_HAVE_VEC256
/* Write 4 bad-flags at once and fold them into the running "bad" OR. */
683 bad |= *(u32 *) (dmac_bad + 0) = is_dmac_bad_x4 (dmac + 0, hwaddr);
684 bad |= *(u32 *) (dmac_bad + 4) = is_dmac_bad_x4 (dmac + 4, hwaddr);
694 bad |= dmac_bad[0] = is_dmac_bad (dmac[0], hwaddr);
695 bad |= dmac_bad[1] = is_dmac_bad (dmac[1], hwaddr);
696 bad |= dmac_bad[2] = is_dmac_bad (dmac[2], hwaddr);
697 bad |= dmac_bad[3] = is_dmac_bad (dmac[3], hwaddr);
/* Secondary pass only needed when at least one packet failed. */
706 if (have_sec_dmac && bad)
710 vec_foreach (addr, ei->secondary_addrs)
712 u64 hwaddr = ((u64 *) addr)[0] & DMAC_MASK;
713 i32 n_left = n_packets;
715 u8 *dmac_bad = dmacs_bad;
724 /* skip any that have already matched */
733 n_bad = clib_min (4, n_left);
735 /* If >= 4 left, compare 4 together */
738 bad |= eth_input_sec_dmac_check_x4 (hwaddr, dmac, dmac_bad);
743 /* handle individually */
746 bad |= eth_input_sec_dmac_check_x1 (hwaddr, dmac + adv,
757 if (!bad) /* can stop looping if everything matched */
763 /* process frame of buffers, store ethertype into array and update
764 buffer metadata fields depending on interface being l2 or l3 assuming that
765 packets are untagged. For tagged packets those fields are updated later.
766 Optionally store Destination MAC address and tag data into arrays
767 for further processing */
/* The quad-loops below assume the frame size is a multiple of 8. */
769 STATIC_ASSERT (VLIB_FRAME_SIZE % 8 == 0,
770 "VLIB_FRAME_SIZE must be power of 8");
/* Single-interface frame fast path. Three phases:
   1) extraction — pull ethertype/tags/dmacs into parallel arrays and
      set buffer metadata (quad-loop with prefetch, then quad, then single);
   2) optional DMAC filtering over the whole frame;
   3) dispatch — AVX2 or scalar fastpath picks next nodes for untagged
      IP4/IP6/MPLS (L3 mode) or all untagged packets (L2 mode); everything
      else is collected into slowpath_indices and resolved per-packet via
      the cached dot1q/dot1ad tag lookups or the ethertype table. */
771 static_always_inline void
772 eth_input_process_frame (vlib_main_t * vm, vlib_node_runtime_t * node,
773 vnet_hw_interface_t * hi,
774 u32 * buffer_indices, u32 n_packets, int main_is_l3,
775 int ip4_cksum_ok, int dmac_check)
/* NOTE(review): "ðernet_main" is mojibake for "&ethernet_main". */
777 ethernet_main_t *em = ðernet_main;
778 u16 nexts[VLIB_FRAME_SIZE], *next;
779 u16 etypes[VLIB_FRAME_SIZE], *etype = etypes;
780 u64 dmacs[VLIB_FRAME_SIZE], *dmac = dmacs;
781 u8 dmacs_bad[VLIB_FRAME_SIZE];
782 u64 tags[VLIB_FRAME_SIZE], *tag = tags;
783 u16 slowpath_indices[VLIB_FRAME_SIZE];
785 u16 next_ip4, next_ip6, next_mpls, next_l2;
/* Pre-swapped constants so comparisons work on network-order etypes. */
786 u16 et_ip4 = clib_host_to_net_u16 (ETHERNET_TYPE_IP4);
787 u16 et_ip6 = clib_host_to_net_u16 (ETHERNET_TYPE_IP6);
788 u16 et_mpls = clib_host_to_net_u16 (ETHERNET_TYPE_MPLS);
789 u16 et_vlan = clib_host_to_net_u16 (ETHERNET_TYPE_VLAN);
790 u16 et_dot1ad = clib_host_to_net_u16 (ETHERNET_TYPE_DOT1AD);
791 i32 n_left = n_packets;
/* b[0..3] current, b[8..11] data-prefetch window, b[16..19] header-prefetch. */
792 vlib_buffer_t *b[20];
794 ethernet_interface_t *ei = ethernet_get_interface (em, hi->hw_if_index);
796 from = buffer_indices;
800 vlib_buffer_t **ph = b + 16, **pd = b + 8;
801 vlib_get_buffers (vm, from, b, 4);
802 vlib_get_buffers (vm, from + 8, pd, 4);
803 vlib_get_buffers (vm, from + 16, ph, 4);
805 vlib_prefetch_buffer_header (ph[0], LOAD);
806 vlib_prefetch_buffer_data (pd[0], LOAD);
807 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
809 vlib_prefetch_buffer_header (ph[1], LOAD);
810 vlib_prefetch_buffer_data (pd[1], LOAD);
811 eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
813 vlib_prefetch_buffer_header (ph[2], LOAD);
814 vlib_prefetch_buffer_data (pd[2], LOAD);
815 eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
817 vlib_prefetch_buffer_header (ph[3], LOAD);
818 vlib_prefetch_buffer_data (pd[3], LOAD);
819 eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
821 eth_input_adv_and_flags_x4 (b, main_is_l3);
/* Quad-loop tail without prefetch. */
832 vlib_get_buffers (vm, from, b, 4);
833 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
834 eth_input_get_etype_and_tags (b, etype, tag, dmac, 1, dmac_check);
835 eth_input_get_etype_and_tags (b, etype, tag, dmac, 2, dmac_check);
836 eth_input_get_etype_and_tags (b, etype, tag, dmac, 3, dmac_check);
837 eth_input_adv_and_flags_x4 (b, main_is_l3);
/* Single-packet tail. */
848 vlib_get_buffers (vm, from, b, 1);
849 eth_input_get_etype_and_tags (b, etype, tag, dmac, 0, dmac_check);
850 eth_input_adv_and_flags_x1 (b, main_is_l3);
/* Phase 2: DMAC filtering, with or without secondary addresses. */
862 if (ei && vec_len (ei->secondary_addrs))
863 eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
864 ei, 1 /* have_sec_dmac */ );
866 eth_input_process_frame_dmac_check (hi, dmacs, dmacs_bad, n_packets,
867 ei, 0 /* have_sec_dmac */ );
870 next_ip4 = em->l3_next.input_next_ip4;
871 next_ip6 = em->l3_next.input_next_ip6;
872 next_mpls = em->l3_next.input_next_mpls;
873 next_l2 = em->l2_next;
/* Device verified the ip4 checksum: skip the checksum-verifying node. */
875 if (next_ip4 == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
876 next_ip4 = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
878 #ifdef CLIB_HAVE_VEC256
879 u16x16 et16_ip4 = u16x16_splat (et_ip4);
880 u16x16 et16_ip6 = u16x16_splat (et_ip6);
881 u16x16 et16_mpls = u16x16_splat (et_mpls);
882 u16x16 et16_vlan = u16x16_splat (et_vlan);
883 u16x16 et16_dot1ad = u16x16_splat (et_dot1ad);
884 u16x16 next16_ip4 = u16x16_splat (next_ip4);
885 u16x16 next16_ip6 = u16x16_splat (next_ip6);
886 u16x16 next16_mpls = u16x16_splat (next_mpls);
887 u16x16 next16_l2 = u16x16_splat (next_l2);
/* Sequential offsets used to materialize slowpath indices in one store. */
889 u16x16 stairs = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
898 /* fastpath - in l3 mode handles ip4, ip6 and mpls packets, other packets
899 are considered as slowpath, in l2 mode all untagged packets are
900 considered as fastpath */
903 #ifdef CLIB_HAVE_VEC256
/* 16 ethertypes per iteration; r holds the chosen next per lane,
   0 meaning "slowpath". */
907 u16x16 e16 = u16x16_load_unaligned (etype);
910 r += (e16 == et16_ip4) & next16_ip4;
911 r += (e16 == et16_ip6) & next16_ip6;
912 r += (e16 == et16_mpls) & next16_mpls;
/* L2 mode: any non-vlan/dot1ad packet goes straight to the L2 next. */
915 r = ((e16 != et16_vlan) & (e16 != et16_dot1ad)) & next16_l2;
916 u16x16_store_unaligned (r, next);
918 if (!u16x16_is_all_zero (r == zero))
/* All 16 lanes are slowpath: emit i..i+15 with a single vector store. */
920 if (u16x16_is_all_zero (r))
922 u16x16_store_unaligned (u16x16_splat (i) + stairs,
923 slowpath_indices + n_slowpath);
/* Mixed lanes: record slowpath indices one by one. */
928 for (int j = 0; j < 16; j++)
930 slowpath_indices[n_slowpath++] = i + j;
/* Scalar dispatch fallback. */
941 if (main_is_l3 && etype[0] == et_ip4)
943 else if (main_is_l3 && etype[0] == et_ip6)
945 else if (main_is_l3 && etype[0] == et_mpls)
947 else if (main_is_l3 == 0 &&
948 etype[0] != et_vlan && etype[0] != et_dot1ad)
953 slowpath_indices[n_slowpath++] = i;
/* Phase 3b: resolve the collected slowpath packets. */
964 vnet_main_t *vnm = vnet_get_main ();
966 u16 *si = slowpath_indices;
967 u32 last_unknown_etype = ~0;
968 u32 last_unknown_next = ~0;
/* Seed both tag-lookup caches so the first packet always misses. */
969 eth_input_tag_lookup_t dot1ad_lookup, dot1q_lookup = {
971 .tag = tags[si[0]] ^ -1LL,
975 clib_memcpy_fast (&dot1ad_lookup, &dot1q_lookup, sizeof (dot1q_lookup));
980 u16 etype = etypes[i];
982 if (etype == et_vlan)
984 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
985 eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
986 &dot1q_lookup, dmacs_bad[i], 0,
987 main_is_l3, dmac_check);
990 else if (etype == et_dot1ad)
992 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
993 eth_input_tag_lookup (vm, vnm, node, hi, tags[i], nexts + i, b,
994 &dot1ad_lookup, dmacs_bad[i], 1,
995 main_is_l3, dmac_check);
999 /* untagged packet with not well known ethertype */
/* Memoize the last ethertype-table lookup — slowpath runs tend to
   repeat the same unknown type. */
1000 if (last_unknown_etype != etype)
1002 last_unknown_etype = etype;
1003 etype = clib_host_to_net_u16 (etype);
1004 last_unknown_next = eth_input_next_by_type (etype);
1006 if (dmac_check && main_is_l3 && dmacs_bad[i])
1008 vlib_buffer_t *b = vlib_get_buffer (vm, buffer_indices[i]);
1009 b->error = node->errors[ETHERNET_ERROR_L3_MAC_MISMATCH];
1010 nexts[i] = ETHERNET_INPUT_NEXT_PUNT;
1013 nexts[i] = last_unknown_next;
/* Flush counters accumulated by the cached tag lookups. */
1021 eth_input_update_if_counters (vm, vnm, &dot1q_lookup);
1022 eth_input_update_if_counters (vm, vnm, &dot1ad_lookup);
1025 vlib_buffer_enqueue_to_next (vm, node, buffer_indices, nexts, n_packets);
/* Entry for a frame known to come from one interface: decide whether the
   main interface is L2 or L3 and whether DMAC checking is needed (L3 with
   sub-interfaces or promiscuous mode), then run eth_input_process_frame
   with the matching compile-time flags. */
1028 static_always_inline void
1029 eth_input_single_int (vlib_main_t * vm, vlib_node_runtime_t * node,
1030 vnet_hw_interface_t * hi, u32 * from, u32 n_pkts,
/* NOTE(review): "ðernet_main" is mojibake for "&ethernet_main". */
1033 ethernet_main_t *em = ðernet_main;
1034 ethernet_interface_t *ei;
1035 ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
1036 main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1037 subint_config_t *subint0 = &intf0->untagged_subint;
1039 int main_is_l3 = (subint0->flags & SUBINT_CONFIG_L2) == 0;
1040 int promisc = (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL) != 0;
1044 /* main interface is L3, we dont expect tagged packets and interface
1045 is not in promisc mode, so we don't need to check DMAC */
1049 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1052 /* subinterfaces and promisc mode so DMAC check is needed */
1053 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
1059 /* untagged packets are treated as L2 */
1061 eth_input_process_frame (vm, node, hi, from, n_pkts, is_l3,
/* Per-frame tracing and rx pcap capture. When node tracing is on, copy
   each traced buffer's leading bytes plus the frame flags/scalar data
   into a trace record. When rx pcap capture is enabled, add matching
   packets to the pcap file, honoring the global classify filter or the
   per-interface trace filter. */
1067 static_always_inline void
1068 ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
1069 vlib_frame_t * from_frame)
1072 if (PREDICT_FALSE ((node->flags & VLIB_NODE_FLAG_TRACE)))
1074 from = vlib_frame_vector_args (from_frame);
1075 n_left = from_frame->n_vectors;
1079 ethernet_input_trace_t *t0;
1080 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
1082 if (b0->flags & VLIB_BUFFER_IS_TRACED)
1084 t0 = vlib_add_trace (vm, node, b0,
1085 sizeof (ethernet_input_trace_t));
1086 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
1087 sizeof (t0->packet_data));
1088 t0->frame_flags = from_frame->flags;
/* Scalar frame args hold hw/sw if-index for single-interface frames. */
1089 clib_memcpy_fast (&t0->frame_data,
1090 vlib_frame_scalar_args (from_frame),
1091 sizeof (ethernet_input_frame_t));
1098 /* rx pcap capture if enabled */
1099 if (PREDICT_FALSE (vlib_global_main.pcap.pcap_rx_enable))
1102 vnet_pcap_t *pp = &vlib_global_main.pcap;
1104 from = vlib_frame_vector_args (from_frame);
1105 n_left = from_frame->n_vectors;
1108 int classify_filter_result;
1113 b0 = vlib_get_buffer (vm, bi0);
/* Global classify filter takes precedence over per-interface filters. */
1114 if (pp->filter_classify_table_index != ~0)
1116 classify_filter_result =
1117 vnet_is_packet_traced_inline
1118 (b0, pp->filter_classify_table_index, 0 /* full classify */ );
1119 if (classify_filter_result)
1120 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1121 pp->max_bytes_per_pkt);
/* sw_if_index 0 means "capture on any interface". */
1125 if (pp->pcap_sw_if_index == 0 ||
1126 pp->pcap_sw_if_index == vnet_buffer (b0)->sw_if_index[VLIB_RX])
1128 vnet_main_t *vnm = vnet_get_main ();
1129 vnet_hw_interface_t *hi =
1130 vnet_get_sup_hw_interface
1131 (vnm, vnet_buffer (b0)->sw_if_index[VLIB_RX]);
1133 /* Capture pkt if not filtered, or if filter hits */
1134 if (hi->trace_classify_table_index == ~0 ||
1135 vnet_is_packet_traced_inline
1136 (b0, hi->trace_classify_table_index,
1137 0 /* full classify */ ))
1138 pcap_add_buffer (&pp->pcap_main, vm, bi0,
1139 pp->max_bytes_per_pkt);
1145 static_always_inline void
1146 ethernet_input_inline (vlib_main_t * vm,
1147 vlib_node_runtime_t * node,
1148 u32 * from, u32 n_packets,
1149 ethernet_input_variant_t variant)
1151 vnet_main_t *vnm = vnet_get_main ();
1152 ethernet_main_t *em = ðernet_main;
1153 vlib_node_runtime_t *error_node;
1154 u32 n_left_from, next_index, *to_next;
1155 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
1156 u32 thread_index = vm->thread_index;
1157 u32 cached_sw_if_index = ~0;
1158 u32 cached_is_l2 = 0; /* shut up gcc */
1159 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
1160 vlib_buffer_t *bufs[VLIB_FRAME_SIZE];
1161 vlib_buffer_t **b = bufs;
1163 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
1164 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
1168 n_left_from = n_packets;
1170 next_index = node->cached_next_index;
1171 stats_sw_if_index = node->runtime_data[0];
1172 stats_n_packets = stats_n_bytes = 0;
1173 vlib_get_buffers (vm, from, bufs, n_left_from);
1175 while (n_left_from > 0)
1179 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
1181 while (n_left_from >= 4 && n_left_to_next >= 2)
1184 vlib_buffer_t *b0, *b1;
1185 u8 next0, next1, error0, error1;
1186 u16 type0, orig_type0, type1, orig_type1;
1187 u16 outer_id0, inner_id0, outer_id1, inner_id1;
1188 u32 match_flags0, match_flags1;
1189 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
1190 new_sw_if_index1, len1;
1191 vnet_hw_interface_t *hi0, *hi1;
1192 main_intf_t *main_intf0, *main_intf1;
1193 vlan_intf_t *vlan_intf0, *vlan_intf1;
1194 qinq_intf_t *qinq_intf0, *qinq_intf1;
1196 ethernet_header_t *e0, *e1;
1198 /* Prefetch next iteration. */
1200 vlib_prefetch_buffer_header (b[2], STORE);
1201 vlib_prefetch_buffer_header (b[3], STORE);
1203 CLIB_PREFETCH (b[2]->data, sizeof (ethernet_header_t), LOAD);
1204 CLIB_PREFETCH (b[3]->data, sizeof (ethernet_header_t), LOAD);
1213 n_left_to_next -= 2;
1220 error0 = error1 = ETHERNET_ERROR_NONE;
1221 e0 = vlib_buffer_get_current (b0);
1222 type0 = clib_net_to_host_u16 (e0->type);
1223 e1 = vlib_buffer_get_current (b1);
1224 type1 = clib_net_to_host_u16 (e1->type);
1226 /* Set the L2 header offset for all packets */
1227 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1228 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
1229 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1230 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1232 /* Speed-path for the untagged case */
1233 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1234 && !ethernet_frame_is_any_tagged_x2 (type0,
1238 subint_config_t *subint0;
1239 u32 sw_if_index0, sw_if_index1;
1241 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1242 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1243 is_l20 = cached_is_l2;
1245 /* This is probably wholly unnecessary */
1246 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
1249 /* Now sw_if_index0 == sw_if_index1 */
1250 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1252 cached_sw_if_index = sw_if_index0;
1253 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1254 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1255 subint0 = &intf0->untagged_subint;
1256 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1259 if (PREDICT_TRUE (is_l20 != 0))
1261 vnet_buffer (b0)->l3_hdr_offset =
1262 vnet_buffer (b0)->l2_hdr_offset +
1263 sizeof (ethernet_header_t);
1264 vnet_buffer (b1)->l3_hdr_offset =
1265 vnet_buffer (b1)->l2_hdr_offset +
1266 sizeof (ethernet_header_t);
1267 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1268 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1269 next0 = em->l2_next;
1270 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
1271 next1 = em->l2_next;
1272 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
1276 if (!ethernet_address_cast (e0->dst_address) &&
1277 (hi->hw_address != 0) &&
1278 !ethernet_mac_address_equal ((u8 *) e0, hi->hw_address))
1279 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1280 if (!ethernet_address_cast (e1->dst_address) &&
1281 (hi->hw_address != 0) &&
1282 !ethernet_mac_address_equal ((u8 *) e1, hi->hw_address))
1283 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1284 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
1285 determine_next_node (em, variant, 0, type0, b0,
1287 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
1288 determine_next_node (em, variant, 0, type1, b1,
1294 /* Slow-path for the tagged case */
1296 parse_header (variant,
1299 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
1301 parse_header (variant,
1304 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
1306 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1307 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
1309 eth_vlan_table_lookups (em,
1316 &main_intf0, &vlan_intf0, &qinq_intf0);
1318 eth_vlan_table_lookups (em,
1325 &main_intf1, &vlan_intf1, &qinq_intf1);
1327 identify_subint (hi0,
1332 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
1334 identify_subint (hi1,
1339 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
1341 // Save RX sw_if_index for later nodes
1342 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1344 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1345 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
1347 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
1349 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
1350 if (((new_sw_if_index0 != ~0)
1351 && (new_sw_if_index0 != old_sw_if_index0))
1352 || ((new_sw_if_index1 != ~0)
1353 && (new_sw_if_index1 != old_sw_if_index1)))
1356 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
1357 - vnet_buffer (b0)->l2_hdr_offset;
1358 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
1359 - vnet_buffer (b1)->l2_hdr_offset;
1361 stats_n_packets += 2;
1362 stats_n_bytes += len0 + len1;
1365 (!(new_sw_if_index0 == stats_sw_if_index
1366 && new_sw_if_index1 == stats_sw_if_index)))
1368 stats_n_packets -= 2;
1369 stats_n_bytes -= len0 + len1;
1371 if (new_sw_if_index0 != old_sw_if_index0
1372 && new_sw_if_index0 != ~0)
1373 vlib_increment_combined_counter (vnm->
1374 interface_main.combined_sw_if_counters
1376 VNET_INTERFACE_COUNTER_RX,
1378 new_sw_if_index0, 1,
1380 if (new_sw_if_index1 != old_sw_if_index1
1381 && new_sw_if_index1 != ~0)
1382 vlib_increment_combined_counter (vnm->
1383 interface_main.combined_sw_if_counters
1385 VNET_INTERFACE_COUNTER_RX,
1387 new_sw_if_index1, 1,
1390 if (new_sw_if_index0 == new_sw_if_index1)
1392 if (stats_n_packets > 0)
1394 vlib_increment_combined_counter
1395 (vnm->interface_main.combined_sw_if_counters
1396 + VNET_INTERFACE_COUNTER_RX,
1399 stats_n_packets, stats_n_bytes);
1400 stats_n_packets = stats_n_bytes = 0;
1402 stats_sw_if_index = new_sw_if_index0;
1407 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1408 is_l20 = is_l21 = 0;
1410 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1412 determine_next_node (em, variant, is_l21, type1, b1, &error1,
1416 b0->error = error_node->errors[error0];
1417 b1->error = error_node->errors[error1];
1419 // verify speculative enqueue
1420 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
1421 n_left_to_next, bi0, bi1, next0,
1425 while (n_left_from > 0 && n_left_to_next > 0)
1430 u16 type0, orig_type0;
1431 u16 outer_id0, inner_id0;
1433 u32 old_sw_if_index0, new_sw_if_index0, len0;
1434 vnet_hw_interface_t *hi0;
1435 main_intf_t *main_intf0;
1436 vlan_intf_t *vlan_intf0;
1437 qinq_intf_t *qinq_intf0;
1438 ethernet_header_t *e0;
1441 // Prefetch next iteration
1442 if (n_left_from > 1)
1444 vlib_prefetch_buffer_header (b[1], STORE);
1445 CLIB_PREFETCH (b[1]->data, CLIB_CACHE_LINE_BYTES, LOAD);
1453 n_left_to_next -= 1;
1458 error0 = ETHERNET_ERROR_NONE;
1459 e0 = vlib_buffer_get_current (b0);
1460 type0 = clib_net_to_host_u16 (e0->type);
1462 /* Set the L2 header offset for all packets */
1463 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1464 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1466 /* Speed-path for the untagged case */
1467 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1468 && !ethernet_frame_is_tagged (type0)))
1471 subint_config_t *subint0;
1474 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1475 is_l20 = cached_is_l2;
1477 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1479 cached_sw_if_index = sw_if_index0;
1480 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1481 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1482 subint0 = &intf0->untagged_subint;
1483 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1487 if (PREDICT_TRUE (is_l20 != 0))
1489 vnet_buffer (b0)->l3_hdr_offset =
1490 vnet_buffer (b0)->l2_hdr_offset +
1491 sizeof (ethernet_header_t);
1492 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1493 next0 = em->l2_next;
1494 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
1498 if (!ethernet_address_cast (e0->dst_address) &&
1499 (hi->hw_address != 0) &&
1500 !ethernet_mac_address_equal ((u8 *) e0, hi->hw_address))
1501 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1502 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
1503 determine_next_node (em, variant, 0, type0, b0,
1509 /* Slow-path for the tagged case */
1510 parse_header (variant,
1513 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
1515 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1517 eth_vlan_table_lookups (em,
1524 &main_intf0, &vlan_intf0, &qinq_intf0);
1526 identify_subint (hi0,
1531 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
1533 // Save RX sw_if_index for later nodes
1534 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1536 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1538 // Increment subinterface stats
1539 // Note that interface-level counters have already been incremented
1540 // prior to calling this function. Thus only subinterface counters
1541 // are incremented here.
1543 // Interface level counters include packets received on the main
1544 // interface and all subinterfaces. Subinterface level counters
1545 // include only those packets received on that subinterface
1546 // Increment stats if the subint is valid and it is not the main intf
1547 if ((new_sw_if_index0 != ~0)
1548 && (new_sw_if_index0 != old_sw_if_index0))
1551 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
1552 - vnet_buffer (b0)->l2_hdr_offset;
1554 stats_n_packets += 1;
1555 stats_n_bytes += len0;
1557 // Batch stat increments from the same subinterface so counters
1558 // don't need to be incremented for every packet.
1559 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1561 stats_n_packets -= 1;
1562 stats_n_bytes -= len0;
1564 if (new_sw_if_index0 != ~0)
1565 vlib_increment_combined_counter
1566 (vnm->interface_main.combined_sw_if_counters
1567 + VNET_INTERFACE_COUNTER_RX,
1568 thread_index, new_sw_if_index0, 1, len0);
1569 if (stats_n_packets > 0)
1571 vlib_increment_combined_counter
1572 (vnm->interface_main.combined_sw_if_counters
1573 + VNET_INTERFACE_COUNTER_RX,
1575 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1576 stats_n_packets = stats_n_bytes = 0;
1578 stats_sw_if_index = new_sw_if_index0;
1582 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1585 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1589 b0->error = error_node->errors[error0];
1591 // verify speculative enqueue
1592 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1593 to_next, n_left_to_next,
1597 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1600 // Increment any remaining batched stats
1601 if (stats_n_packets > 0)
1603 vlib_increment_combined_counter
1604 (vnm->interface_main.combined_sw_if_counters
1605 + VNET_INTERFACE_COUNTER_RX,
1606 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
1607 node->runtime_data[0] = stats_sw_if_index;
1611 VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1612 vlib_node_runtime_t * node,
1613 vlib_frame_t * frame)
1615 vnet_main_t *vnm = vnet_get_main ();
1616 u32 *from = vlib_frame_vector_args (frame);
1617 u32 n_packets = frame->n_vectors;
1619 ethernet_input_trace (vm, node, frame);
1621 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1623 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
1624 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
1625 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1626 eth_input_single_int (vm, node, hi, from, n_packets, ip4_cksum_ok);
1629 ethernet_input_inline (vm, node, from, n_packets,
1630 ETHERNET_INPUT_VARIANT_ETHERNET);
1634 VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1635 vlib_node_runtime_t * node,
1636 vlib_frame_t * from_frame)
1638 u32 *from = vlib_frame_vector_args (from_frame);
1639 u32 n_packets = from_frame->n_vectors;
1640 ethernet_input_trace (vm, node, from_frame);
1641 ethernet_input_inline (vm, node, from, n_packets,
1642 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
1646 VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1647 vlib_node_runtime_t * node,
1648 vlib_frame_t * from_frame)
1650 u32 *from = vlib_frame_vector_args (from_frame);
1651 u32 n_packets = from_frame->n_vectors;
1652 ethernet_input_trace (vm, node, from_frame);
1653 ethernet_input_inline (vm, node, from, n_packets,
1654 ETHERNET_INPUT_VARIANT_NOT_L2);
1659 // Return the subinterface config struct for the given sw_if_index
1660 // Also return via parameter the appropriate match flags for the
1661 // configured number of tags.
1662 // On error (unsupported or not ethernet) return 0.
1663 static subint_config_t *
1664 ethernet_sw_interface_get_config (vnet_main_t * vnm,
1666 u32 * flags, u32 * unsupported)
1668 ethernet_main_t *em = &ethernet_main;
1669 vnet_hw_interface_t *hi;
1670 vnet_sw_interface_t *si;
1671 main_intf_t *main_intf;
1672 vlan_table_t *vlan_table;
1673 qinq_table_t *qinq_table;
1674 subint_config_t *subint = 0;
1676 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1678 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1681 goto done; // non-ethernet interface
1684 // ensure there's an entry for the main intf (shouldn't really be necessary)
1685 vec_validate (em->main_intfs, hi->hw_if_index);
1686 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1688 // Locate the subint for the given ethernet config
1689 si = vnet_get_sw_interface (vnm, sw_if_index);
// P2P: one subint per (port, client MAC); allocate from the pool on first
// lookup miss, otherwise reuse the recorded pool index.
1691 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1693 p2p_ethernet_main_t *p2pm = &p2p_main;
1694 u32 p2pe_sw_if_index =
1695 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
1696 if (p2pe_sw_if_index == ~0)
1698 pool_get (p2pm->p2p_subif_pool, subint);
1699 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1702 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1703 *flags = SUBINT_CONFIG_P2P;
// Pipe interfaces carry their subint config inside the pipe object itself.
1705 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1709 pipe = pipe_get (sw_if_index);
1710 subint = &pipe->subint;
1711 *flags = SUBINT_CONFIG_P2P;
// The default subinterface catches any tag combination not matched elsewhere.
1713 else if (si->sub.eth.flags.default_sub)
1715 subint = &main_intf->default_subint;
1716 *flags = SUBINT_CONFIG_MATCH_1_TAG |
1717 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1719 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1721 // if no flags are set then this is a main interface
1722 // so treat as untagged
1723 subint = &main_intf->untagged_subint;
1724 *flags = SUBINT_CONFIG_MATCH_0_TAG;
// Tagged subinterfaces: select (or lazily allocate) the per-port vlan table,
// then the per-vlan qinq table if a specific inner vlan is configured.
1729 // first get the vlan table
1730 if (si->sub.eth.flags.dot1ad)
1732 if (main_intf->dot1ad_vlans == 0)
1734 // Allocate a vlan table from the pool
1735 pool_get (em->vlan_pool, vlan_table);
1736 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1740 // Get ptr to existing vlan table
1742 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
1747 if (main_intf->dot1q_vlans == 0)
1749 // Allocate a vlan table from the pool
1750 pool_get (em->vlan_pool, vlan_table);
1751 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1755 // Get ptr to existing vlan table
1757 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
// One-tag subinterface: exact_match narrows to exactly one tag, otherwise
// one-or-more tags match.
1761 if (si->sub.eth.flags.one_tag)
1763 *flags = si->sub.eth.flags.exact_match ?
1764 SUBINT_CONFIG_MATCH_1_TAG :
1765 (SUBINT_CONFIG_MATCH_1_TAG |
1766 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1768 if (si->sub.eth.flags.outer_vlan_id_any)
1770 // not implemented yet
1776 // a single vlan, a common case
1778 &vlan_table->vlans[si->sub.eth.
1779 outer_vlan_id].single_tag_subint;
// Two-tag subinterface cases follow.
1786 *flags = si->sub.eth.flags.exact_match ?
1787 SUBINT_CONFIG_MATCH_2_TAG :
1788 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1790 if (si->sub.eth.flags.outer_vlan_id_any
1791 && si->sub.eth.flags.inner_vlan_id_any)
1793 // not implemented yet
1798 if (si->sub.eth.flags.inner_vlan_id_any)
1800 // a specific outer and "any" inner
1801 // don't need a qinq table for this
1803 &vlan_table->vlans[si->sub.eth.
1804 outer_vlan_id].inner_any_subint;
1805 if (si->sub.eth.flags.exact_match)
1807 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1811 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1812 SUBINT_CONFIG_MATCH_3_TAG;
1817 // a specific outer + specific inner vlan id, a common case
1819 // get the qinq table
1820 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1822 // Allocate a qinq table from the pool
1823 pool_get (em->qinq_pool, qinq_table);
1824 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1825 qinq_table - em->qinq_pool;
1829 // Get ptr to existing qinq table
1831 vec_elt_at_index (em->qinq_pool,
1832 vlan_table->vlans[si->sub.
1836 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
1845 static clib_error_t *
1846 ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
1848 subint_config_t *subint;
1851 clib_error_t *error = 0;
1853 // Find the config for this subinterface
1855 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1860 // not implemented yet or not ethernet
1864 subint->sw_if_index =
1865 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
1871 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1874 #ifndef CLIB_MARCH_VARIANT
1875 // Set the L2/L3 mode for the subinterface
1877 ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
1879 subint_config_t *subint;
1883 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
1885 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1887 // Find the config for this subinterface
1889 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1894 // unimplemented or not ethernet
1898 // Double check that the config we found is for our interface (or the interface is down)
1899 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1903 subint->flags |= SUBINT_CONFIG_L2;
1906 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
1907 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1911 subint->flags &= ~SUBINT_CONFIG_L2;
1914 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
1915 | SUBINT_CONFIG_MATCH_3_TAG);
1923 * Set the L2/L3 mode for the subinterface regardless of port
1926 ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
1927 u32 sw_if_index, u32 l2)
1929 subint_config_t *subint;
1933 /* Find the config for this subinterface */
1935 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1940 /* unimplemented or not ethernet */
1945 * Double check that the config we found is for our interface (or the
1946 * interface is down)
1948 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1952 subint->flags |= SUBINT_CONFIG_L2;
1956 subint->flags &= ~SUBINT_CONFIG_L2;
// Sub-interface add/del callback: locates the subint config slot for a
// newly-created ethernet sub-interface and marks it valid.  Rejects
// duplicate vlan configs and not-yet-implemented tag combinations.
1964 static clib_error_t *
1965 ethernet_sw_interface_add_del (vnet_main_t * vnm,
1966 u32 sw_if_index, u32 is_create)
1968 clib_error_t *error = 0;
1969 subint_config_t *subint;
// match_flags receives the tag-match profile computed by get_config;
// unsupported is set for configs get_config cannot handle yet.
1971 u32 unsupported = 0;
1973 // Find the config for this subinterface
1975 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
1980 // not implemented yet or not ethernet
1983 // this is the NYI case
1984 error = clib_error_return (0, "not implemented yet");
1995 // Initialize the subint
1996 if (subint->flags & SUBINT_CONFIG_VALID)
1998 // Error vlan already in use
1999 error = clib_error_return (0, "vlan is already in use");
2003 // Note that config is L3 by default
2004 subint->flags = SUBINT_CONFIG_VALID | match_flags;
2005 subint->sw_if_index = ~0; // because interfaces are initially down
2012 VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
// Human-readable error strings, generated from error.def via the
// X-macro below; indexed to match the ethernet error enumeration.
2014 static char *ethernet_error_strings[] = {
2015 #define ethernet_error(n,c,s) s,
2016 #include "error.def"
2017 #undef ethernet_error
/* Registration for the primary "ethernet-input" node.  Carries per-frame
   scalar data (ethernet_input_frame_t) so drivers can flag
   single-interface frames. */
2021 VLIB_REGISTER_NODE (ethernet_input_node) = {
2022 .name = "ethernet-input",
2023 /* Takes a vector of packets. */
2024 .vector_size = sizeof (u32),
2025 .scalar_size = sizeof (ethernet_input_frame_t),
2026 .n_errors = ETHERNET_N_ERROR,
2027 .error_strings = ethernet_error_strings,
2028 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2030 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2031 foreach_ethernet_input_next
2034 .format_buffer = format_ethernet_header_with_length,
2035 .format_trace = format_ethernet_input_trace,
2036 .unformat_buffer = unformat_ethernet_header,
/* Registration for "ethernet-input-type"; shares the next-node layout of
   ethernet-input so the arcs line up (see ethernet_register_input_type). */
2039 VLIB_REGISTER_NODE (ethernet_input_type_node) = {
2040 .name = "ethernet-input-type",
2041 /* Takes a vector of packets. */
2042 .vector_size = sizeof (u32),
2043 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2045 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2046 foreach_ethernet_input_next
/* Registration for "ethernet-input-not-l2"; same next-node layout as the
   other ethernet input nodes so next indices stay aligned. */
2051 VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
2052 .name = "ethernet-input-not-l2",
2053 /* Takes a vector of packets. */
2054 .vector_size = sizeof (u32),
2055 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
2057 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
2058 foreach_ethernet_input_next
2064 #ifndef CLIB_MARCH_VARIANT
2066 ethernet_set_rx_redirect (vnet_main_t * vnm,
2067 vnet_hw_interface_t * hi, u32 enable)
2069 // Insure all packets go to ethernet-input (i.e. untagged ipv4 packets
2070 // don't go directly to ip4-input)
2071 vnet_hw_interface_rx_redirect_to_node
2072 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
2077 * Initialization and registration for the next_by_ethernet structure
2081 next_by_ethertype_init (next_by_ethertype_t * l3_next)
2083 l3_next->input_next_by_type = sparse_vec_new
2084 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
2085 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
2087 vec_validate (l3_next->sparse_index_by_input_next_index,
2088 ETHERNET_INPUT_NEXT_DROP);
2089 vec_validate (l3_next->sparse_index_by_input_next_index,
2090 ETHERNET_INPUT_NEXT_PUNT);
2091 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
2092 SPARSE_VEC_INVALID_INDEX;
2093 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
2094 SPARSE_VEC_INVALID_INDEX;
2097 * Make sure we don't wipe out an ethernet registration by mistake
2098 * Can happen if init function ordering constraints are missing.
2102 ethernet_main_t *em = ðernet_main;
2103 ASSERT (em->next_by_ethertype_register_called == 0);
2109 // Add an ethertype -> next index mapping to the structure
2111 next_by_ethertype_register (next_by_ethertype_t * l3_next,
2112 u32 ethertype, u32 next_index)
2116 ethernet_main_t *em = ðernet_main;
2120 ethernet_main_t *em = ðernet_main;
2121 em->next_by_ethertype_register_called = 1;
2124 /* Setup ethernet type -> next index sparse vector mapping. */
2125 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
2128 /* Rebuild next index -> sparse index inverse mapping when sparse vector
2130 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
2131 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
2133 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
2135 // do not allow the cached next index's to be updated if L3
2136 // redirect is enabled, as it will have overwritten them
2137 if (!em->redirect_l3)
2139 // Cache common ethertypes directly
2140 if (ethertype == ETHERNET_TYPE_IP4)
2142 l3_next->input_next_ip4 = next_index;
2144 else if (ethertype == ETHERNET_TYPE_IP6)
2146 l3_next->input_next_ip6 = next_index;
2148 else if (ethertype == ETHERNET_TYPE_MPLS)
2150 l3_next->input_next_mpls = next_index;
2157 ethernet_input_init (vlib_main_t * vm, ethernet_main_t * em)
2159 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
2160 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
2162 ethernet_setup_node (vm, ethernet_input_node.index);
2163 ethernet_setup_node (vm, ethernet_input_type_node.index);
2164 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
2166 next_by_ethertype_init (&em->l3_next);
2168 // Initialize pools and vector for vlan parsing
2169 vec_validate (em->main_intfs, 10); // 10 main interfaces
2170 pool_alloc (em->vlan_pool, 10);
2171 pool_alloc (em->qinq_pool, 1);
2173 // The first vlan pool will always be reserved for an invalid table
2174 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
2175 // The first qinq pool will always be reserved for an invalid table
2176 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
2180 ethernet_register_input_type (vlib_main_t * vm,
2181 ethernet_type_t type, u32 node_index)
2183 ethernet_main_t *em = ðernet_main;
2184 ethernet_type_info_t *ti;
2188 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
2190 clib_error_report (error);
2193 ti = ethernet_get_type_info (em, type);
2196 clib_warning ("type_info NULL for type %d", type);
2199 ti->node_index = node_index;
2200 ti->next_index = vlib_node_add_next (vm,
2201 ethernet_input_node.index, node_index);
2202 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2203 ASSERT (i == ti->next_index);
2205 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2206 ASSERT (i == ti->next_index);
2208 // Add the L3 node for this ethertype to the next nodes structure
2209 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
2211 // Call the registration functions for other nodes that want a mapping
2212 l2bvi_register_input_type (vm, type, node_index);
2216 ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
2218 ethernet_main_t *em = ðernet_main;
2222 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
2225 * Even if we never use these arcs, we have to align the next indices...
2227 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2229 ASSERT (i == em->l2_next);
2231 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2232 ASSERT (i == em->l2_next);
2235 // Register a next node for L3 redirect, and enable L3 redirect
2237 ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
2239 ethernet_main_t *em = ðernet_main;
2242 em->redirect_l3 = 1;
2243 em->redirect_l3_next = vlib_node_add_next (vm,
2244 ethernet_input_node.index,
2247 * Change the cached next nodes to the redirect node
2249 em->l3_next.input_next_ip4 = em->redirect_l3_next;
2250 em->l3_next.input_next_ip6 = em->redirect_l3_next;
2251 em->l3_next.input_next_mpls = em->redirect_l3_next;
2254 * Even if we never use these arcs, we have to align the next indices...
2256 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
2258 ASSERT (i == em->redirect_l3_next);
2260 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
2262 ASSERT (i == em->redirect_l3_next);
2267 * fd.io coding-style-patch-verification: ON
2270 * eval: (c-set-style "gnu")