2 * Copyright (c) 2018 Cisco and/or its affiliates.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at:
7 * http://www.apache.org/licenses/LICENSE-2.0
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
16 * ethernet_node.c: ethernet packet processing
18 * Copyright (c) 2008 Eliot Dresselhaus
20 * Permission is hereby granted, free of charge, to any person obtaining
21 * a copy of this software and associated documentation files (the
22 * "Software"), to deal in the Software without restriction, including
23 * without limitation the rights to use, copy, modify, merge, publish,
24 * distribute, sublicense, and/or sell copies of the Software, and to
25 * permit persons to whom the Software is furnished to do so, subject to
26 * the following conditions:
28 * The above copyright notice and this permission notice shall be
29 * included in all copies or substantial portions of the Software.
31 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
32 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
33 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
34 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
35 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
36 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
37 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
40 #include <vlib/vlib.h>
41 #include <vnet/pg/pg.h>
42 #include <vnet/ethernet/ethernet.h>
43 #include <vnet/ethernet/p2p_ethernet.h>
44 #include <vnet/devices/pipe/pipe.h>
45 #include <vppinfra/sparse_vec.h>
46 #include <vnet/l2/l2_bvi.h>
49 #define foreach_ethernet_input_next \
50 _ (PUNT, "error-punt") \
51 _ (DROP, "error-drop") \
52 _ (LLC, "llc-input") \
53 _ (IP4_INPUT, "ip4-input") \
54 _ (IP4_INPUT_NCS, "ip4-input-no-checksum")
58 #define _(s,n) ETHERNET_INPUT_NEXT_##s,
59 foreach_ethernet_input_next
61 ETHERNET_INPUT_N_NEXT,
62 } ethernet_input_next_t;
68 ethernet_input_frame_t frame_data;
69 } ethernet_input_trace_t;
/* Trace formatter for the ethernet-input node: prints the frame flags,
   the per-frame hw/sw interface indices when the frame was marked as
   carrying a single sw_if_index, then the captured ethernet header.
   NOTE(review): this chunk is an incomplete extraction (interior source
   lines are missing); code left byte-identical, comments only. */
72 format_ethernet_input_trace (u8 * s, va_list * va)
74 CLIB_UNUSED (vlib_main_t * vm) = va_arg (*va, vlib_main_t *);
75 CLIB_UNUSED (vlib_node_t * node) = va_arg (*va, vlib_node_t *);
76 ethernet_input_trace_t *t = va_arg (*va, ethernet_input_trace_t *);
77 u32 indent = format_get_indent (s);
81 s = format (s, "frame: flags 0x%x", t->frame_flags);
/* hw/sw if-index are only meaningful when the whole frame came from one
   interface (ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX set by the driver). */
82 if (t->frame_flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
83 s = format (s, ", hw-if-index %u, sw-if-index %u",
84 t->frame_data.hw_if_index, t->frame_data.sw_if_index);
85 s = format (s, "\n%U", format_white_space, indent);
87 s = format (s, "%U", format_ethernet_header, t->packet_data);
92 extern vlib_node_registration_t ethernet_input_node;
96 ETHERNET_INPUT_VARIANT_ETHERNET,
97 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE,
98 ETHERNET_INPUT_VARIANT_NOT_L2,
99 } ethernet_input_variant_t;
102 // Parse the ethernet header to extract vlan tags and innermost ethertype
/* Walks the L2 header of b0: records l2_hdr_offset, advances the buffer
   past the ethernet (and up to two VLAN) headers, and returns the
   innermost ethertype plus outer/inner VLAN ids and subint match flags.
   NOTE(review): incomplete extraction — braces and some declarations
   (e.g. *type out-param, tag/vlan_count locals) are not visible here;
   code left byte-identical, comments only. */
103 static_always_inline void
104 parse_header (ethernet_input_variant_t variant,
108 u16 * outer_id, u16 * inner_id, u32 * match_flags)
112 if (variant == ETHERNET_INPUT_VARIANT_ETHERNET
113 || variant == ETHERNET_INPUT_VARIANT_NOT_L2)
115 ethernet_header_t *e0;
117 e0 = (void *) (b0->data + b0->current_data);
119 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
120 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
122 vlib_buffer_advance (b0, sizeof (e0[0]));
124 *type = clib_net_to_host_u16 (e0->type);
126 else if (variant == ETHERNET_INPUT_VARIANT_ETHERNET_TYPE)
128 // here when prior node was LLC/SNAP processing
131 e0 = (void *) (b0->data + b0->current_data);
133 vlib_buffer_advance (b0, sizeof (e0[0]));
135 *type = clib_net_to_host_u16 (e0[0]);
138 // save for distinguishing between dot1q and dot1ad later
141 // default the tags to 0 (used if there is no corresponding tag)
145 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_0_TAG;
148 // check for vlan encaps
149 if (ethernet_frame_is_tagged (*type))
151 ethernet_vlan_header_t *h0;
154 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_1_TAG;
156 h0 = (void *) (b0->data + b0->current_data);
/* VLAN id is the low 12 bits of the TCI; priority/CFI are discarded. */
158 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
160 *outer_id = tag & 0xfff;
162 *match_flags &= ~SUBINT_CONFIG_MATCH_1_TAG;
164 *type = clib_net_to_host_u16 (h0->type);
166 vlib_buffer_advance (b0, sizeof (h0[0]));
169 if (*type == ETHERNET_TYPE_VLAN)
171 // Double tagged packet
172 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_2_TAG;
174 h0 = (void *) (b0->data + b0->current_data);
176 tag = clib_net_to_host_u16 (h0->priority_cfi_and_id);
178 *inner_id = tag & 0xfff;
180 *type = clib_net_to_host_u16 (h0->type);
182 vlib_buffer_advance (b0, sizeof (h0[0]));
184 if (*type == ETHERNET_TYPE_VLAN)
186 // More than double tagged packet
187 *match_flags = SUBINT_CONFIG_VALID | SUBINT_CONFIG_MATCH_3_TAG;
189 vlib_buffer_advance (b0, sizeof (h0[0]));
190 vlan_count = 3; // "unknown" number, aka, 3-or-more
194 ethernet_buffer_set_vlan_count (b0, vlan_count);
197 // Determine the subinterface for this packet, given the result of the
198 // vlan table lookups and vlan header parsing. Check the most specific
/* Resolves the matching subinterface via eth_identify_subint(), applies
   the L3 my-mac filter for unicast packets, and downgrades the error to
   ETHERNET_ERROR_DOWN when no valid subinterface was found.
   NOTE(review): incomplete extraction — parameter list (b0, match_flags)
   and braces are partially missing; code left byte-identical. */
200 static_always_inline void
201 identify_subint (vnet_hw_interface_t * hi,
204 main_intf_t * main_intf,
205 vlan_intf_t * vlan_intf,
206 qinq_intf_t * qinq_intf,
207 u32 * new_sw_if_index, u8 * error0, u32 * is_l2)
211 matched = eth_identify_subint (hi, b0, match_flags,
212 main_intf, vlan_intf, qinq_intf,
213 new_sw_if_index, error0, is_l2);
218 // Perform L3 my-mac filter
219 // A unicast packet arriving on an L3 interface must have a dmac matching the interface mac.
220 // This is required for promiscuous mode, else we will forward packets we aren't supposed to.
223 ethernet_header_t *e0;
224 e0 = (void *) (b0->data + vnet_buffer (b0)->l2_hdr_offset);
/* Only unicast dmacs are filtered; multicast/broadcast pass through. */
226 if (!(ethernet_address_cast (e0->dst_address)))
228 if (!eth_mac_equal ((u8 *) e0, hi->hw_address))
230 *error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
235 // Check for down subinterface
236 *error0 = (*new_sw_if_index) != ~0 ? (*error0) : ETHERNET_ERROR_DOWN;
/* Chooses the next node for a parsed packet: drop on error, the cached
   L2 next for L2 subinterfaces, fast-path nexts for IP4/IP6/MPLS, the
   L3 redirect next when configured, and otherwise a sparse-vec lookup
   by ethertype (falling back to llc-input for pre-1996 length fields).
   NOTE(review): incomplete extraction — the is_l20 branch condition and
   some locals (i0) are not visible; code left byte-identical. */
240 static_always_inline void
241 determine_next_node (ethernet_main_t * em,
242 ethernet_input_variant_t variant,
244 u32 type0, vlib_buffer_t * b0, u8 * error0, u8 * next0)
246 vnet_buffer (b0)->l3_hdr_offset = b0->current_data;
247 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
249 if (PREDICT_FALSE (*error0 != ETHERNET_ERROR_NONE))
251 // some error occurred
252 *next0 = ETHERNET_INPUT_NEXT_DROP;
256 // record the L2 len and reset the buffer so the L2 header is preserved
257 u32 eth_start = vnet_buffer (b0)->l2_hdr_offset;
258 vnet_buffer (b0)->l2.l2_len = b0->current_data - eth_start;
259 *next0 = em->l2_next;
260 ASSERT (vnet_buffer (b0)->l2.l2_len ==
261 ethernet_buffer_header_size (b0));
/* rewind so L2 nodes see the full ethernet (+vlan) header */
262 vlib_buffer_advance (b0, -(vnet_buffer (b0)->l2.l2_len));
264 // check for common IP/MPLS ethertypes
266 else if (type0 == ETHERNET_TYPE_IP4)
268 *next0 = em->l3_next.input_next_ip4;
270 else if (type0 == ETHERNET_TYPE_IP6)
272 *next0 = em->l3_next.input_next_ip6;
274 else if (type0 == ETHERNET_TYPE_MPLS)
276 *next0 = em->l3_next.input_next_mpls;
279 else if (em->redirect_l3)
281 // L3 Redirect is on, the cached common next nodes will be
282 // pointing to the redirect node, catch the uncommon types here
283 *next0 = em->redirect_l3_next;
287 // uncommon ethertype, check table
289 i0 = sparse_vec_index (em->l3_next.input_next_by_type, type0);
290 *next0 = vec_elt (em->l3_next.input_next_by_type, i0);
293 SPARSE_VEC_INVALID_INDEX ? ETHERNET_ERROR_UNKNOWN_TYPE : *error0;
295 // The table is not populated with LLC values, so check that now.
296 // If variant is variant_ethernet then we came from LLC processing. Don't
297 // go back there; drop instead using by keeping the drop/bad table result.
/* type < 0x600 means an 802.3 length field, i.e. LLC encapsulation */
298 if ((type0 < 0x600) && (variant == ETHERNET_INPUT_VARIANT_ETHERNET))
300 *next0 = ETHERNET_INPUT_NEXT_LLC;
307 ETYPE_ID_UNKNOWN = 0,
/* Applies the same buffer advance and flag mask/or to every buffer in
   'from': an 8-wide unrolled body followed by a 1-at-a-time remainder.
   Used to rewind buffers (negative advance) and clear/set header-valid
   flags before handing packets to the slow path.
   NOTE(review): incomplete extraction — the surrounding while-loop
   headers and pointer-increment lines are missing; byte-identical. */
315 eth_input_advance_and_flags (vlib_main_t * vm, u32 * from, u32 n_left,
316 i16 advance, u32 and_flags, u32 or_flags)
321 vlib_get_buffers (vm, from, b, 8);
322 vlib_buffer_advance (b[0], advance);
323 vlib_buffer_advance (b[1], advance);
324 vlib_buffer_advance (b[2], advance);
325 vlib_buffer_advance (b[3], advance);
326 vlib_buffer_advance (b[4], advance);
327 vlib_buffer_advance (b[5], advance);
328 vlib_buffer_advance (b[6], advance);
329 vlib_buffer_advance (b[7], advance);
330 b[0]->flags = (b[0]->flags & and_flags) | or_flags;
331 b[1]->flags = (b[1]->flags & and_flags) | or_flags;
332 b[2]->flags = (b[2]->flags & and_flags) | or_flags;
333 b[3]->flags = (b[3]->flags & and_flags) | or_flags;
334 b[4]->flags = (b[4]->flags & and_flags) | or_flags;
335 b[5]->flags = (b[5]->flags & and_flags) | or_flags;
336 b[6]->flags = (b[6]->flags & and_flags) | or_flags;
337 b[7]->flags = (b[7]->flags & and_flags) | or_flags;
/* remainder loop: one buffer per iteration */
344 vlib_get_buffers (vm, from, b, 1);
345 vlib_buffer_advance (b[0], advance);
346 b[0]->flags = (b[0]->flags & and_flags) | or_flags;
355 u16 etypes[VLIB_FRAME_SIZE];
356 u32 bufs_by_etype[ETYPE_N_IDS][VLIB_FRAME_SIZE];
357 u16 n_bufs_by_etype[ETYPE_N_IDS];
360 /* following vector code relies on following assumptions */
361 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_data, 0);
362 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, current_length, 2);
363 STATIC_ASSERT_OFFSET_OF (vlib_buffer_t, flags, 4);
364 STATIC_ASSERT (STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l2_hdr_offset) ==
365 STRUCT_OFFSET_OF (vnet_buffer_opaque_t, l3_hdr_offset) - 2,
366 "l3_hdr_offset must follow l2_hdr_offset");
/* 4-wide buffer metadata update: advances current_data/current_length,
   sets the given flags, and records l2_hdr_offset/l3_hdr_offset for four
   buffers. Has an AVX2 gather/scatter fast path and a scalar fallback.
   For L2 (is_l3 == 0) the buffer is NOT advanced and l2_len is set to
   the header size instead.
   NOTE(review): incomplete extraction — #else/#endif lines, the scalar
   is_l3 branch headers, and some declarations (r, adv4) are missing;
   code left byte-identical, comments only. */
368 static_always_inline void
369 eth_input_adv_and_flags_x4 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
371 #ifdef CLIB_HAVE_VEC256
372 /* to reduce number of small loads/stores we are loading first 64 bits
373 of each buffer metadata into 256-bit register so we can advance
374 current_data, current_length and flags.
375 Observed saving of this code is ~2 clocks per packet */
378 /* vector if signed 16 bit integers used in signed vector add operation
379 to advnce current_data and current_length */
380 u32x8 flags4 = { 0, flags, 0, flags, 0, flags, 0, flags };
382 adv, -adv, 0, 0, adv, -adv, 0, 0,
383 adv, -adv, 0, 0, adv, -adv, 0, 0
386 /* load 4 x 64 bits */
387 r = u64x4_gather (b[0], b[1], b[2], b[3]);
393 radv = (u64x4) ((i16x16) r + adv4);
395 /* write 4 x 64 bits */
396 u64x4_scatter (is_l3 ? radv : r, b[0], b[1], b[2], b[3]);
398 /* use old current_data as l2_hdr_offset and new current_data as
400 r = (u64x4) u16x16_blend (r, radv << 16, 0xaa);
402 /* store both l2_hdr_offset and l3_hdr_offset in single store operation */
403 u32x8_scatter_one ((u32x8) r, 0, &vnet_buffer (b[0])->l2_hdr_offset);
404 u32x8_scatter_one ((u32x8) r, 2, &vnet_buffer (b[1])->l2_hdr_offset);
405 u32x8_scatter_one ((u32x8) r, 4, &vnet_buffer (b[2])->l2_hdr_offset);
406 u32x8_scatter_one ((u32x8) r, 6, &vnet_buffer (b[3])->l2_hdr_offset);
/* debug-build sanity checks: L3 path advanced to the l3 header ... */
410 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l3_hdr_offset);
411 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l3_hdr_offset);
412 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l3_hdr_offset);
413 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l3_hdr_offset);
415 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l2_hdr_offset == adv);
416 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l2_hdr_offset == adv);
417 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l2_hdr_offset == adv);
418 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l2_hdr_offset == adv);
/* ... L2 path left current_data at the l2 header */
422 ASSERT (b[0]->current_data == vnet_buffer (b[0])->l2_hdr_offset);
423 ASSERT (b[1]->current_data == vnet_buffer (b[1])->l2_hdr_offset);
424 ASSERT (b[2]->current_data == vnet_buffer (b[2])->l2_hdr_offset);
425 ASSERT (b[3]->current_data == vnet_buffer (b[3])->l2_hdr_offset);
427 ASSERT (b[0]->current_data - vnet_buffer (b[0])->l3_hdr_offset == -adv);
428 ASSERT (b[1]->current_data - vnet_buffer (b[1])->l3_hdr_offset == -adv);
429 ASSERT (b[2]->current_data - vnet_buffer (b[2])->l3_hdr_offset == -adv);
430 ASSERT (b[3]->current_data - vnet_buffer (b[3])->l3_hdr_offset == -adv);
/* scalar fallback: same metadata updates without vector intrinsics */
434 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
435 vnet_buffer (b[1])->l2_hdr_offset = b[1]->current_data;
436 vnet_buffer (b[2])->l2_hdr_offset = b[2]->current_data;
437 vnet_buffer (b[3])->l2_hdr_offset = b[3]->current_data;
438 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
439 vnet_buffer (b[1])->l3_hdr_offset = b[1]->current_data + adv;
440 vnet_buffer (b[2])->l3_hdr_offset = b[2]->current_data + adv;
441 vnet_buffer (b[3])->l3_hdr_offset = b[3]->current_data + adv;
445 vlib_buffer_advance (b[0], adv);
446 vlib_buffer_advance (b[1], adv);
447 vlib_buffer_advance (b[2], adv);
448 vlib_buffer_advance (b[3], adv);
451 b[0]->flags |= flags;
452 b[1]->flags |= flags;
453 b[2]->flags |= flags;
454 b[3]->flags |= flags;
/* L2 path: adv equals the L2 header length here */
459 vnet_buffer (b[0])->l2.l2_len = adv;
460 vnet_buffer (b[1])->l2.l2_len = adv;
461 vnet_buffer (b[2])->l2.l2_len = adv;
462 vnet_buffer (b[3])->l2.l2_len = adv;
/* Single-buffer variant of eth_input_adv_and_flags_x4: records L2/L3
   header offsets, and (per the missing is_l3 branch — TODO confirm
   against full source) advances + flags for L3 or sets l2_len for L2.
   NOTE(review): incomplete extraction; code left byte-identical. */
466 static_always_inline void
467 eth_input_adv_and_flags_x1 (vlib_buffer_t ** b, i16 adv, u32 flags, int is_l3)
469 vnet_buffer (b[0])->l2_hdr_offset = b[0]->current_data;
470 vnet_buffer (b[0])->l3_hdr_offset = b[0]->current_data + adv;
473 vlib_buffer_advance (b[0], adv);
474 b[0]->flags |= flags;
476 vnet_buffer (b[0])->l2.l2_len = adv;
/* Fast-path frame processor for single-interface frames: walks the
   vector 4 packets at a time (with a 2-stage prefetch pipeline of
   buffer headers and packet data), collects each packet's ethertype
   into 'etype', and updates buffer metadata via the x4/x1 helpers.
   NOTE(review): incomplete extraction — loop headers, etype stores
   (etype[N] = e->type), and pointer increments are missing from this
   view; code left byte-identical, comments only. */
479 static_always_inline void
480 eth_input_process_frame (vlib_main_t * vm, u32 * from, u16 * etype,
481 u32 n_left, int is_l3)
483 vlib_buffer_t *b[16];
484 ethernet_header_t *e;
485 int adv = sizeof (ethernet_header_t);
487 u32 flags = VNET_BUFFER_F_L2_HDR_OFFSET_VALID |
488 VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
/* prefetch stage: ph = headers 12 ahead, pd = data 8 ahead */
492 vlib_buffer_t **ph = b + 12, **pd = b + 8;
493 vlib_get_buffers (vm, from, b, 4);
494 vlib_get_buffers (vm, from + 8, b + 8, 8);
496 vlib_prefetch_buffer_header (ph[0], LOAD);
497 vlib_prefetch_buffer_data (pd[0], LOAD);
498 e = vlib_buffer_get_current (b[0]);
501 vlib_prefetch_buffer_header (ph[1], LOAD);
502 vlib_prefetch_buffer_data (pd[1], LOAD);
503 e = vlib_buffer_get_current (b[1]);
506 vlib_prefetch_buffer_header (ph[2], LOAD);
507 vlib_prefetch_buffer_data (pd[2], LOAD);
508 e = vlib_buffer_get_current (b[2]);
511 vlib_prefetch_buffer_header (ph[3], LOAD);
512 vlib_prefetch_buffer_data (pd[3], LOAD);
513 e = vlib_buffer_get_current (b[3]);
516 eth_input_adv_and_flags_x4 (b, adv, flags, is_l3);
/* 4-wide tail without prefetch */
525 vlib_get_buffers (vm, from, b, 4);
527 e = vlib_buffer_get_current (b[0]);
530 e = vlib_buffer_get_current (b[1]);
533 e = vlib_buffer_get_current (b[2]);
536 e = vlib_buffer_get_current (b[3]);
539 eth_input_adv_and_flags_x4 (b, adv, flags, is_l3);
/* scalar remainder */
548 vlib_get_buffers (vm, from, b, 1);
550 e = vlib_buffer_get_current (b[0]);
553 eth_input_adv_and_flags_x1 (b, adv, flags, is_l3);
/* Two-pass sort of a frame by ethertype. Pass 1 rewrites d->etypes in
   place from network-order ethertypes to small ETYPE_ID_* ids (SIMD
   compare-and-mask where available, scalar otherwise). Pass 2 scatters
   buffer indices into d->bufs_by_etype[id], copying 16/8 at a time when
   a whole vector of consecutive packets shares one id.
   NOTE(review): incomplete extraction — loop headers, #endif lines, and
   the scalar advance of etype/from/n_left are missing from this view;
   code left byte-identical, comments only. */
562 static_always_inline void
563 eth_input_sort (vlib_main_t * vm, u32 * from, u32 n_packets,
564 eth_input_data_t * d)
566 u16 *etype = d->etypes;
567 i32 n_left = n_packets;
569 #if defined (CLIB_HAVE_VEC256)
571 u16x16 et16_ip4 = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
572 u16x16 et16_ip6 = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
573 u16x16 et16_mpls = u16x16_splat (clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
574 u16x16 id16_ip4 = u16x16_splat (ETYPE_ID_IP4);
575 u16x16 id16_ip6 = u16x16_splat (ETYPE_ID_IP6);
576 u16x16 id16_mpls = u16x16_splat (ETYPE_ID_MPLS);
/* each compare yields all-ones lanes; AND with the id and accumulate —
   non-matching lanes stay 0 == ETYPE_ID_UNKNOWN */
581 e16 = u16x16_load_unaligned (etype);
582 r += (e16 == et16_ip4) & id16_ip4;
583 r += (e16 == et16_ip6) & id16_ip6;
584 r += (e16 == et16_mpls) & id16_mpls;
585 u16x16_store_unaligned (r, etype);
589 #elif defined (CLIB_HAVE_VEC128)
591 u16x8 et8_ip4 = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP4));
592 u16x8 et8_ip6 = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_IP6));
593 u16x8 et8_mpls = u16x8_splat (clib_host_to_net_u16 (ETHERNET_TYPE_MPLS));
594 u16x8 id8_ip4 = u16x8_splat (ETYPE_ID_IP4);
595 u16x8 id8_ip6 = u16x8_splat (ETYPE_ID_IP6);
596 u16x8 id8_mpls = u16x8_splat (ETYPE_ID_MPLS);
601 e8 = u16x8_load_unaligned (etype);
602 r += (e8 == et8_ip4) & id8_ip4;
603 r += (e8 == et8_ip6) & id8_ip6;
604 r += (e8 == et8_mpls) & id8_mpls;
605 u16x8_store_unaligned (r, etype);
/* scalar mapping fallback */
612 if (etype[0] == ETHERNET_TYPE_IP4)
613 etype[0] = ETYPE_ID_IP4;
614 else if (etype[0] == ETHERNET_TYPE_IP6)
615 etype[0] = ETYPE_ID_IP6;
616 else if (etype[0] == ETHERNET_TYPE_MPLS)
617 etype[0] = ETYPE_ID_MPLS;
619 etype[0] = ETYPE_ID_UNKNOWN;
/* pass 2: bucket buffer indices per etype id */
629 clib_memset_u16 (d->n_bufs_by_etype, 0, ETYPE_N_IDS);
634 y = d->n_bufs_by_etype[x];
636 #ifdef CLIB_HAVE_VEC256
637 if (n_left >= 16 && u16x16_is_all_equal (u16x16_load_unaligned (etype),
640 clib_memcpy_fast (&d->bufs_by_etype[x][y], from, 16 * sizeof (u32));
641 d->n_bufs_by_etype[x] += 16;
650 #ifdef CLIB_HAVE_VEC128
651 if (n_left >= 8 && u16x8_is_all_equal (u16x8_load_unaligned (etype),
654 clib_memcpy_fast (&d->bufs_by_etype[x][y], from, 8 * sizeof (u32));
655 d->n_bufs_by_etype[x] += 8;
/* one-at-a-time fallback */
664 d->bufs_by_etype[x][y] = from[0];
665 d->n_bufs_by_etype[x]++;
/* Adds a trace record (packet data snapshot + frame flags/scalar args)
   for every traced buffer in the frame; no-op when tracing is off.
   NOTE(review): incomplete extraction — declarations of from/n_left and
   the loop header/advance are missing; code left byte-identical. */
674 static_always_inline void
675 ethernet_input_trace (vlib_main_t * vm, vlib_node_runtime_t * node,
676 vlib_frame_t * from_frame)
679 if ((node->flags & VLIB_NODE_FLAG_TRACE) == 0)
682 from = vlib_frame_vector_args (from_frame);
683 n_left = from_frame->n_vectors;
687 ethernet_input_trace_t *t0;
688 vlib_buffer_t *b0 = vlib_get_buffer (vm, from[0]);
690 if (b0->flags & VLIB_BUFFER_IS_TRACED)
692 t0 = vlib_add_trace (vm, node, b0, sizeof (ethernet_input_trace_t));
693 clib_memcpy_fast (t0->packet_data, b0->data + b0->current_data,
694 sizeof (t0->packet_data));
695 t0->frame_flags = from_frame->flags;
696 clib_memcpy_fast (&t0->frame_data,
697 vlib_frame_scalar_args (from_frame),
698 sizeof (ethernet_input_frame_t));
/* Slow-path worker shared by the ethernet-input node variants.
   Dual-loop vlib node body: the x2 loop handles pairs of packets, the
   x1 loop the remainder. Each packet takes either the untagged
   speed-path (cached per-interface L2/L3 decision, inline my-mac
   filter) or the tagged slow-path (parse_header -> vlan table lookups
   -> identify_subint), then batched subinterface RX counters are
   updated and the packet is enqueued via determine_next_node.
   NOTE(review): this block is a heavily sampled extraction — many
   lines (locals bi0/bi1/next0, loop bookkeeping, closing braces, the
   error_node fallback for the ETHERNET variant) are missing, and
   '&ethernet_main' appears mojibake'd as 'ðernet_main'. Code is
   left byte-identical; comments only. */
705 static_always_inline void
706 ethernet_input_inline (vlib_main_t * vm,
707 vlib_node_runtime_t * node,
708 u32 * from, u32 n_packets,
709 ethernet_input_variant_t variant)
711 vnet_main_t *vnm = vnet_get_main ();
712 ethernet_main_t *em = ðernet_main;
713 vlib_node_runtime_t *error_node;
714 u32 n_left_from, next_index, *to_next;
715 u32 stats_sw_if_index, stats_n_packets, stats_n_bytes;
716 u32 thread_index = vm->thread_index;
717 u32 cached_sw_if_index = ~0;
718 u32 cached_is_l2 = 0; /* shut up gcc */
719 vnet_hw_interface_t *hi = NULL; /* used for main interface only */
/* non-ethernet variants charge errors to the ethernet-input node */
721 if (variant != ETHERNET_INPUT_VARIANT_ETHERNET)
722 error_node = vlib_node_get_runtime (vm, ethernet_input_node.index);
726 n_left_from = n_packets;
728 next_index = node->cached_next_index;
/* resume stats batching from where the previous frame left off */
729 stats_sw_if_index = node->runtime_data[0];
730 stats_n_packets = stats_n_bytes = 0;
732 while (n_left_from > 0)
736 vlib_get_next_frame (vm, node, next_index, to_next, n_left_to_next);
/* ---- x2 loop: two packets per iteration ---- */
738 while (n_left_from >= 4 && n_left_to_next >= 2)
741 vlib_buffer_t *b0, *b1;
742 u8 next0, next1, error0, error1;
743 u16 type0, orig_type0, type1, orig_type1;
744 u16 outer_id0, inner_id0, outer_id1, inner_id1;
745 u32 match_flags0, match_flags1;
746 u32 old_sw_if_index0, new_sw_if_index0, len0, old_sw_if_index1,
747 new_sw_if_index1, len1;
748 vnet_hw_interface_t *hi0, *hi1;
749 main_intf_t *main_intf0, *main_intf1;
750 vlan_intf_t *vlan_intf0, *vlan_intf1;
751 qinq_intf_t *qinq_intf0, *qinq_intf1;
753 ethernet_header_t *e0, *e1;
755 /* Prefetch next iteration. */
757 vlib_buffer_t *b2, *b3;
759 b2 = vlib_get_buffer (vm, from[2]);
760 b3 = vlib_get_buffer (vm, from[3]);
762 vlib_prefetch_buffer_header (b2, STORE);
763 vlib_prefetch_buffer_header (b3, STORE);
765 CLIB_PREFETCH (b2->data, sizeof (ethernet_header_t), LOAD);
766 CLIB_PREFETCH (b3->data, sizeof (ethernet_header_t), LOAD);
778 b0 = vlib_get_buffer (vm, bi0);
779 b1 = vlib_get_buffer (vm, bi1);
781 error0 = error1 = ETHERNET_ERROR_NONE;
782 e0 = vlib_buffer_get_current (b0);
783 type0 = clib_net_to_host_u16 (e0->type);
784 e1 = vlib_buffer_get_current (b1);
785 type1 = clib_net_to_host_u16 (e1->type);
787 /* Set the L2 header offset for all packets */
788 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
789 vnet_buffer (b1)->l2_hdr_offset = b1->current_data;
790 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
791 b1->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
793 /* Speed-path for the untagged case */
794 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
795 && !ethernet_frame_is_any_tagged_x2 (type0,
799 subint_config_t *subint0;
800 u32 sw_if_index0, sw_if_index1;
802 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
803 sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
804 is_l20 = cached_is_l2;
806 /* This is probably wholly unnecessary */
807 if (PREDICT_FALSE (sw_if_index0 != sw_if_index1))
810 /* Now sw_if_index0 == sw_if_index1 */
811 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
813 cached_sw_if_index = sw_if_index0;
814 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
815 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
816 subint0 = &intf0->untagged_subint;
817 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
820 if (PREDICT_TRUE (is_l20 != 0))
822 vnet_buffer (b0)->l3_hdr_offset =
823 vnet_buffer (b0)->l2_hdr_offset +
824 sizeof (ethernet_header_t);
825 vnet_buffer (b1)->l3_hdr_offset =
826 vnet_buffer (b1)->l2_hdr_offset +
827 sizeof (ethernet_header_t);
828 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
829 b1->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
831 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
833 vnet_buffer (b1)->l2.l2_len = sizeof (ethernet_header_t);
/* L3 path: inline my-mac filter for unicast dmacs */
837 if (!ethernet_address_cast (e0->dst_address) &&
838 (hi->hw_address != 0) &&
839 !eth_mac_equal ((u8 *) e0, hi->hw_address))
840 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
841 if (!ethernet_address_cast (e1->dst_address) &&
842 (hi->hw_address != 0) &&
843 !eth_mac_equal ((u8 *) e1, hi->hw_address))
844 error1 = ETHERNET_ERROR_L3_MAC_MISMATCH;
845 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
846 determine_next_node (em, variant, 0, type0, b0,
848 vlib_buffer_advance (b1, sizeof (ethernet_header_t));
849 determine_next_node (em, variant, 0, type1, b1,
855 /* Slow-path for the tagged case */
857 parse_header (variant,
860 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
862 parse_header (variant,
865 &orig_type1, &outer_id1, &inner_id1, &match_flags1);
867 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
868 old_sw_if_index1 = vnet_buffer (b1)->sw_if_index[VLIB_RX];
870 eth_vlan_table_lookups (em,
877 &main_intf0, &vlan_intf0, &qinq_intf0);
879 eth_vlan_table_lookups (em,
886 &main_intf1, &vlan_intf1, &qinq_intf1);
888 identify_subint (hi0,
893 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
895 identify_subint (hi1,
900 qinq_intf1, &new_sw_if_index1, &error1, &is_l21);
902 // Save RX sw_if_index for later nodes
903 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
905 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
906 vnet_buffer (b1)->sw_if_index[VLIB_RX] =
908 ETHERNET_ERROR_NONE ? old_sw_if_index1 : new_sw_if_index1;
910 // Check if there is a stat to take (valid and non-main sw_if_index for pkt 0 or pkt 1)
911 if (((new_sw_if_index0 != ~0)
912 && (new_sw_if_index0 != old_sw_if_index0))
913 || ((new_sw_if_index1 != ~0)
914 && (new_sw_if_index1 != old_sw_if_index1)))
/* byte count = chain length plus any header bytes already consumed */
917 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
918 - vnet_buffer (b0)->l2_hdr_offset;
919 len1 = vlib_buffer_length_in_chain (vm, b1) + b1->current_data
920 - vnet_buffer (b1)->l2_hdr_offset;
/* optimistically add to the running batch ... */
922 stats_n_packets += 2;
923 stats_n_bytes += len0 + len1;
/* ... and back out if either packet belongs to a different subint */
926 (!(new_sw_if_index0 == stats_sw_if_index
927 && new_sw_if_index1 == stats_sw_if_index)))
929 stats_n_packets -= 2;
930 stats_n_bytes -= len0 + len1;
932 if (new_sw_if_index0 != old_sw_if_index0
933 && new_sw_if_index0 != ~0)
934 vlib_increment_combined_counter (vnm->
935 interface_main.combined_sw_if_counters
937 VNET_INTERFACE_COUNTER_RX,
941 if (new_sw_if_index1 != old_sw_if_index1
942 && new_sw_if_index1 != ~0)
943 vlib_increment_combined_counter (vnm->
944 interface_main.combined_sw_if_counters
946 VNET_INTERFACE_COUNTER_RX,
/* both packets on the same subint: flush old batch, start a new one */
951 if (new_sw_if_index0 == new_sw_if_index1)
953 if (stats_n_packets > 0)
955 vlib_increment_combined_counter
956 (vnm->interface_main.combined_sw_if_counters
957 + VNET_INTERFACE_COUNTER_RX,
960 stats_n_packets, stats_n_bytes);
961 stats_n_packets = stats_n_bytes = 0;
963 stats_sw_if_index = new_sw_if_index0;
968 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
971 determine_next_node (em, variant, is_l20, type0, b0, &error0,
973 determine_next_node (em, variant, is_l21, type1, b1, &error1,
977 b0->error = error_node->errors[error0];
978 b1->error = error_node->errors[error1];
980 // verify speculative enqueue
981 vlib_validate_buffer_enqueue_x2 (vm, node, next_index, to_next,
982 n_left_to_next, bi0, bi1, next0,
/* ---- x1 loop: remaining packets one at a time ---- */
986 while (n_left_from > 0 && n_left_to_next > 0)
991 u16 type0, orig_type0;
992 u16 outer_id0, inner_id0;
994 u32 old_sw_if_index0, new_sw_if_index0, len0;
995 vnet_hw_interface_t *hi0;
996 main_intf_t *main_intf0;
997 vlan_intf_t *vlan_intf0;
998 qinq_intf_t *qinq_intf0;
999 ethernet_header_t *e0;
1002 // Prefetch next iteration
1003 if (n_left_from > 1)
1007 p2 = vlib_get_buffer (vm, from[1]);
1008 vlib_prefetch_buffer_header (p2, STORE);
1009 CLIB_PREFETCH (p2->data, CLIB_CACHE_LINE_BYTES, LOAD);
1017 n_left_to_next -= 1;
1019 b0 = vlib_get_buffer (vm, bi0);
1021 error0 = ETHERNET_ERROR_NONE;
1022 e0 = vlib_buffer_get_current (b0);
1023 type0 = clib_net_to_host_u16 (e0->type);
1025 /* Set the L2 header offset for all packets */
1026 vnet_buffer (b0)->l2_hdr_offset = b0->current_data;
1027 b0->flags |= VNET_BUFFER_F_L2_HDR_OFFSET_VALID;
1029 /* Speed-path for the untagged case */
1030 if (PREDICT_TRUE (variant == ETHERNET_INPUT_VARIANT_ETHERNET
1031 && !ethernet_frame_is_tagged (type0)))
1034 subint_config_t *subint0;
1037 sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1038 is_l20 = cached_is_l2;
1040 if (PREDICT_FALSE (cached_sw_if_index != sw_if_index0))
1042 cached_sw_if_index = sw_if_index0;
1043 hi = vnet_get_sup_hw_interface (vnm, sw_if_index0);
1044 intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1045 subint0 = &intf0->untagged_subint;
1046 cached_is_l2 = is_l20 = subint0->flags & SUBINT_CONFIG_L2;
1050 if (PREDICT_TRUE (is_l20 != 0))
1052 vnet_buffer (b0)->l3_hdr_offset =
1053 vnet_buffer (b0)->l2_hdr_offset +
1054 sizeof (ethernet_header_t);
1055 b0->flags |= VNET_BUFFER_F_L3_HDR_OFFSET_VALID;
1056 next0 = em->l2_next;
1057 vnet_buffer (b0)->l2.l2_len = sizeof (ethernet_header_t);
/* L3 path: inline my-mac filter */
1061 if (!ethernet_address_cast (e0->dst_address) &&
1062 (hi->hw_address != 0) &&
1063 !eth_mac_equal ((u8 *) e0, hi->hw_address))
1064 error0 = ETHERNET_ERROR_L3_MAC_MISMATCH;
1065 vlib_buffer_advance (b0, sizeof (ethernet_header_t));
1066 determine_next_node (em, variant, 0, type0, b0,
1072 /* Slow-path for the tagged case */
1073 parse_header (variant,
1076 &orig_type0, &outer_id0, &inner_id0, &match_flags0);
1078 old_sw_if_index0 = vnet_buffer (b0)->sw_if_index[VLIB_RX];
1080 eth_vlan_table_lookups (em,
1087 &main_intf0, &vlan_intf0, &qinq_intf0);
1089 identify_subint (hi0,
1094 qinq_intf0, &new_sw_if_index0, &error0, &is_l20);
1096 // Save RX sw_if_index for later nodes
1097 vnet_buffer (b0)->sw_if_index[VLIB_RX] =
1099 ETHERNET_ERROR_NONE ? old_sw_if_index0 : new_sw_if_index0;
1101 // Increment subinterface stats
1102 // Note that interface-level counters have already been incremented
1103 // prior to calling this function. Thus only subinterface counters
1104 // are incremented here.
1106 // Interface level counters include packets received on the main
1107 // interface and all subinterfaces. Subinterface level counters
1108 // include only those packets received on that subinterface
1109 // Increment stats if the subint is valid and it is not the main intf
1110 if ((new_sw_if_index0 != ~0)
1111 && (new_sw_if_index0 != old_sw_if_index0))
1114 len0 = vlib_buffer_length_in_chain (vm, b0) + b0->current_data
1115 - vnet_buffer (b0)->l2_hdr_offset;
1117 stats_n_packets += 1;
1118 stats_n_bytes += len0;
1120 // Batch stat increments from the same subinterface so counters
1121 // don't need to be incremented for every packet.
1122 if (PREDICT_FALSE (new_sw_if_index0 != stats_sw_if_index))
1124 stats_n_packets -= 1;
1125 stats_n_bytes -= len0;
1127 if (new_sw_if_index0 != ~0)
1128 vlib_increment_combined_counter
1129 (vnm->interface_main.combined_sw_if_counters
1130 + VNET_INTERFACE_COUNTER_RX,
1131 thread_index, new_sw_if_index0, 1, len0);
1132 if (stats_n_packets > 0)
1134 vlib_increment_combined_counter
1135 (vnm->interface_main.combined_sw_if_counters
1136 + VNET_INTERFACE_COUNTER_RX,
1138 stats_sw_if_index, stats_n_packets, stats_n_bytes);
1139 stats_n_packets = stats_n_bytes = 0;
1141 stats_sw_if_index = new_sw_if_index0;
1145 if (variant == ETHERNET_INPUT_VARIANT_NOT_L2)
1148 determine_next_node (em, variant, is_l20, type0, b0, &error0,
1152 b0->error = error_node->errors[error0];
1154 // verify speculative enqueue
1155 vlib_validate_buffer_enqueue_x1 (vm, node, next_index,
1156 to_next, n_left_to_next,
1160 vlib_put_next_frame (vm, node, next_index, n_left_to_next);
1163 // Increment any remaining batched stats
1164 if (stats_n_packets > 0)
1166 vlib_increment_combined_counter
1167 (vnm->interface_main.combined_sw_if_counters
1168 + VNET_INTERFACE_COUNTER_RX,
1169 thread_index, stats_sw_if_index, stats_n_packets, stats_n_bytes);
/* persist the batching subint across frames */
1170 node->runtime_data[0] = stats_sw_if_index;
/* Enqueues the per-ethertype buckets built by eth_input_sort: IP4
   (optionally to the no-checksum node when hardware verified the
   checksum), IP6 and MPLS straight to their L3/L2 nexts, and unknown
   ethertypes back through the full ethernet_input_inline slow path
   after rewinding the buffers to the L2 header.
   NOTE(review): incomplete extraction — the per-id 'id = ...' selector
   lines and braces are missing; '&ethernet_main' appears mojibake'd.
   Code left byte-identical, comments only. */
1174 static_always_inline void
1175 eth_input_enqueue_untagged (vlib_main_t * vm, vlib_node_runtime_t * node,
1176 eth_input_data_t * d, int ip4_cksum_ok, int is_l3)
1178 ethernet_main_t *em = ðernet_main;
1183 if (d->n_bufs_by_etype[id])
1187 next_index = em->l3_next.input_next_ip4;
1188 if (next_index == ETHERNET_INPUT_NEXT_IP4_INPUT && ip4_cksum_ok)
1189 next_index = ETHERNET_INPUT_NEXT_IP4_INPUT_NCS;
1192 next_index = em->l2_next;
1194 vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
1195 next_index, d->n_bufs_by_etype[id]);
1199 if (d->n_bufs_by_etype[id])
1201 next_index = is_l3 ? em->l3_next.input_next_ip6 : em->l2_next;
1202 vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
1203 next_index, d->n_bufs_by_etype[id]);
1207 if (d->n_bufs_by_etype[id])
1209 next_index = is_l3 ? em->l3_next.input_next_mpls : em->l2_next;
1210 vlib_buffer_enqueue_to_single_next (vm, node, d->bufs_by_etype[id],
1211 next_index, d->n_bufs_by_etype[id]);
1214 id = ETYPE_ID_UNKNOWN;
1215 if (d->n_bufs_by_etype[id])
1217 /* in case of l3 interfaces, we already advanced buffer so we need to
1220 eth_input_advance_and_flags (vm, d->bufs_by_etype[id],
1221 d->n_bufs_by_etype[id],
1222 -(i16) sizeof (ethernet_header_t),
1223 ~VNET_BUFFER_F_L3_HDR_OFFSET_VALID, 0);
1224 ethernet_input_inline (vm, node, d->bufs_by_etype[id],
1225 d->n_bufs_by_etype[id],
1226 ETHERNET_INPUT_VARIANT_ETHERNET);
/* ethernet-input node entry point. Frames tagged with a single
   sw_if_index take the vectorized fast path (process -> sort ->
   enqueue, as L2 or L3 depending on the untagged subint config,
   except interfaces in accept-all mode — only the slow path performs
   the dmac check). All other frames go through ethernet_input_inline.
   NOTE(review): incomplete extraction — braces and the accept-all
   fallthrough are missing; '&ethernet_main' appears mojibake'd.
   Code left byte-identical, comments only. */
1230 VLIB_NODE_FN (ethernet_input_node) (vlib_main_t * vm,
1231 vlib_node_runtime_t * node,
1232 vlib_frame_t * frame)
1234 vnet_main_t *vnm = vnet_get_main ();
1235 ethernet_main_t *em = ðernet_main;
1236 u32 *from = vlib_frame_vector_args (frame);
1237 u32 n_packets = frame->n_vectors;
1239 ethernet_input_trace (vm, node, frame);
1241 if (frame->flags & ETH_INPUT_FRAME_F_SINGLE_SW_IF_IDX)
1243 eth_input_data_t data, *d = &data;
1244 ethernet_input_frame_t *ef = vlib_frame_scalar_args (frame);
1245 vnet_hw_interface_t *hi = vnet_get_hw_interface (vnm, ef->hw_if_index);
1246 main_intf_t *intf0 = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1247 subint_config_t *subint0 = &intf0->untagged_subint;
1248 int ip4_cksum_ok = (frame->flags & ETH_INPUT_FRAME_F_IP4_CKSUM_OK) != 0;
1250 if (subint0->flags & SUBINT_CONFIG_L2)
1252 /* untagged packets are treated as L2 */
1253 eth_input_process_frame (vm, from, d->etypes, n_packets, 0);
1254 eth_input_sort (vm, from, n_packets, d);
1255 eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 0);
1259 ethernet_interface_t *ei;
1260 ei = pool_elt_at_index (em->interfaces, hi->hw_instance);
1262 /* currently only slowpath deals with dmac check */
1263 if (ei->flags & ETHERNET_INTERFACE_FLAG_ACCEPT_ALL)
1266 /* untagged packets are treated as L3 */
1267 eth_input_process_frame (vm, from, d->etypes, n_packets, 1);
1268 eth_input_sort (vm, from, n_packets, d);
1269 eth_input_enqueue_untagged (vm, node, d, ip4_cksum_ok, 1);
/* fallback: generic slow path for mixed-interface frames */
1275 ethernet_input_inline (vm, node, from, n_packets,
1276 ETHERNET_INPUT_VARIANT_ETHERNET);
/*
 * ethernet-input-type node function: variant of ethernet-input used when
 * the ethertype has already been extracted; traces the frame and hands
 * all packets to the common per-packet path with the ETHERNET_TYPE variant.
 */
1280 VLIB_NODE_FN (ethernet_input_type_node) (vlib_main_t * vm,
1281 vlib_node_runtime_t * node,
1282 vlib_frame_t * from_frame)
1284 u32 *from = vlib_frame_vector_args (from_frame);
1285 u32 n_packets = from_frame->n_vectors;
1286 ethernet_input_trace (vm, node, from_frame);
1287 ethernet_input_inline (vm, node, from, n_packets,
1288 ETHERNET_INPUT_VARIANT_ETHERNET_TYPE);
/*
 * ethernet-input-not-l2 node function: variant used for packets already
 * known not to be destined to the L2 path; traces the frame and hands all
 * packets to the common per-packet path with the NOT_L2 variant.
 */
1292 VLIB_NODE_FN (ethernet_input_not_l2_node) (vlib_main_t * vm,
1293 vlib_node_runtime_t * node,
1294 vlib_frame_t * from_frame)
1296 u32 *from = vlib_frame_vector_args (from_frame);
1297 u32 n_packets = from_frame->n_vectors;
1298 ethernet_input_trace (vm, node, from_frame);
1299 ethernet_input_inline (vm, node, from, n_packets,
1300 ETHERNET_INPUT_VARIANT_NOT_L2);
1305 // Return the subinterface config struct for the given sw_if_index
1306 // Also return via parameter the appropriate match flags for the
1307 // configured number of tags.
1308 // On error (unsupported or not ethernet) return 0.
//
// Handles, in order: P2P ethernet subints, pipe subints, the default
// subint, untagged/main interfaces, and finally VLAN (dot1q/dot1ad) and
// QinQ subints via per-main-interface vlan/qinq lookup tables that are
// lazily allocated from em->vlan_pool / em->qinq_pool.
//
// NOTE(review): this listing has extraction gaps — the "u32 sw_if_index"
// parameter line, braces, else-branches, the "done:" label and final
// return are missing; "ðernet_main" is mojibake for "&ethernet_main".
1309 static subint_config_t *
1310 ethernet_sw_interface_get_config (vnet_main_t * vnm,
1312 u32 * flags, u32 * unsupported)
1314 ethernet_main_t *em = ðernet_main;
1315 vnet_hw_interface_t *hi;
1316 vnet_sw_interface_t *si;
1317 main_intf_t *main_intf;
1318 vlan_table_t *vlan_table;
1319 qinq_table_t *qinq_table;
1320 subint_config_t *subint = 0;
1322 hi = vnet_get_sup_hw_interface (vnm, sw_if_index);
1324 if (!hi || (hi->hw_class_index != ethernet_hw_interface_class.index))
1327 goto done; // non-ethernet interface
1330 // ensure there's an entry for the main intf (shouldn't really be necessary)
1331 vec_validate (em->main_intfs, hi->hw_if_index);
1332 main_intf = vec_elt_at_index (em->main_intfs, hi->hw_if_index);
1334 // Locate the subint for the given ethernet config
1335 si = vnet_get_sw_interface (vnm, sw_if_index);
1337 if (si->type == VNET_SW_INTERFACE_TYPE_P2P)
1339 p2p_ethernet_main_t *p2pm = &p2p_main;
1340 u32 p2pe_sw_if_index =
1341 p2p_ethernet_lookup (hi->hw_if_index, si->p2p.client_mac);
// First lookup for this client MAC: allocate a subint from the p2p pool
// and remember its pool index on the sw interface.
1342 if (p2pe_sw_if_index == ~0)
1344 pool_get (p2pm->p2p_subif_pool, subint);
1345 si->p2p.pool_index = subint - p2pm->p2p_subif_pool;
1348 subint = vec_elt_at_index (p2pm->p2p_subif_pool, si->p2p.pool_index);
1349 *flags = SUBINT_CONFIG_P2P;
1351 else if (si->type == VNET_SW_INTERFACE_TYPE_PIPE)
1355 pipe = pipe_get (sw_if_index);
1356 subint = &pipe->subint;
1357 *flags = SUBINT_CONFIG_P2P;
// "default" subint catches any tagged packet not matched elsewhere.
1359 else if (si->sub.eth.flags.default_sub)
1361 subint = &main_intf->default_subint;
1362 *flags = SUBINT_CONFIG_MATCH_1_TAG |
1363 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1365 else if ((si->sub.eth.flags.no_tags) || (si->sub.eth.raw_flags == 0))
1367 // if no flags are set then this is a main interface
1368 // so treat as untagged
1369 subint = &main_intf->untagged_subint;
1370 *flags = SUBINT_CONFIG_MATCH_0_TAG;
// Tagged subinterface: resolve via the vlan (and possibly qinq) tables.
1375 // first get the vlan table
1376 if (si->sub.eth.flags.dot1ad)
1378 if (main_intf->dot1ad_vlans == 0)
1380 // Allocate a vlan table from the pool
1381 pool_get (em->vlan_pool, vlan_table);
// Store the pool index (0 is reserved for the invalid table, so 0 means
// "not yet allocated" above).
1382 main_intf->dot1ad_vlans = vlan_table - em->vlan_pool;
1386 // Get ptr to existing vlan table
1388 vec_elt_at_index (em->vlan_pool, main_intf->dot1ad_vlans);
// Same dance for the dot1q table.
1393 if (main_intf->dot1q_vlans == 0)
1395 // Allocate a vlan table from the pool
1396 pool_get (em->vlan_pool, vlan_table);
1397 main_intf->dot1q_vlans = vlan_table - em->vlan_pool;
1401 // Get ptr to existing vlan table
1403 vec_elt_at_index (em->vlan_pool, main_intf->dot1q_vlans);
// Single-tagged subint: non-exact-match also catches extra inner tags.
1407 if (si->sub.eth.flags.one_tag)
1409 *flags = si->sub.eth.flags.exact_match ?
1410 SUBINT_CONFIG_MATCH_1_TAG :
1411 (SUBINT_CONFIG_MATCH_1_TAG |
1412 SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1414 if (si->sub.eth.flags.outer_vlan_id_any)
1416 // not implemented yet
1422 // a single vlan, a common case
1424 &vlan_table->vlans[si->sub.eth.
1425 outer_vlan_id].single_tag_subint;
// Double-tagged subint.
1432 *flags = si->sub.eth.flags.exact_match ?
1433 SUBINT_CONFIG_MATCH_2_TAG :
1434 (SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG);
1436 if (si->sub.eth.flags.outer_vlan_id_any
1437 && si->sub.eth.flags.inner_vlan_id_any)
1439 // not implemented yet
1444 if (si->sub.eth.flags.inner_vlan_id_any)
1446 // a specific outer and "any" inner
1447 // don't need a qinq table for this
1449 &vlan_table->vlans[si->sub.eth.
1450 outer_vlan_id].inner_any_subint;
1451 if (si->sub.eth.flags.exact_match)
1453 *flags = SUBINT_CONFIG_MATCH_2_TAG;
1457 *flags = SUBINT_CONFIG_MATCH_2_TAG |
1458 SUBINT_CONFIG_MATCH_3_TAG;
1463 // a specific outer + specific inner vlan id, a common case
1465 // get the qinq table
1466 if (vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs == 0)
1468 // Allocate a qinq table from the pool
1469 pool_get (em->qinq_pool, qinq_table);
1470 vlan_table->vlans[si->sub.eth.outer_vlan_id].qinqs =
1471 qinq_table - em->qinq_pool;
1475 // Get ptr to existing qinq table
1477 vec_elt_at_index (em->qinq_pool,
1478 vlan_table->vlans[si->sub.
1482 subint = &qinq_table->vlans[si->sub.eth.inner_vlan_id].subint;
/*
 * Admin up/down callback for ethernet sw interfaces: record the
 * sw_if_index in the subint config when the interface comes up, and
 * reset it to ~0 when it goes down (down interfaces never match).
 *
 * NOTE(review): extraction gaps — the dummy_flags/error declarations,
 * braces, and the final "return error;" are not visible in this listing.
 */
1491 static clib_error_t *
1492 ethernet_sw_interface_up_down (vnet_main_t * vnm, u32 sw_if_index, u32 flags)
1494 subint_config_t *subint;
1497 clib_error_t *error = 0;
1499 // Find the config for this subinterface
1501 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1506 // not implemented yet or not ethernet
/* ~0 marks the subint as not matchable while the interface is down. */
1510 subint->sw_if_index =
1511 ((flags & VNET_SW_INTERFACE_FLAG_ADMIN_UP) ? sw_if_index : ~0);
/* Register the callback with the interface infra. */
1517 VNET_SW_INTERFACE_ADMIN_UP_DOWN_FUNCTION (ethernet_sw_interface_up_down);
1520 #ifndef CLIB_MARCH_VARIANT
1521 // Set the L2/L3 mode for the subinterface
//
// l2 != 0 switches the subint to L2 mode; otherwise L3.  For a main
// interface ("port"), L2 mode also widens the tag-match flags so all
// tagged traffic stays on the L2 path; leaving L2 mode narrows them back.
//
// NOTE(review): extraction gaps — return type line, is_port/dummy_flags
// declarations, braces and the port-specific flag assignment statements
// are partially missing from this listing.
1523 ethernet_sw_interface_set_l2_mode (vnet_main_t * vnm, u32 sw_if_index, u32 l2)
1525 subint_config_t *subint;
1529 vnet_sw_interface_t *sw = vnet_get_sw_interface (vnm, sw_if_index);
/* A "port" is a main (non-sub) interface. */
1531 is_port = !(sw->type == VNET_SW_INTERFACE_TYPE_SUB);
1533 // Find the config for this subinterface
1535 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1540 // unimplemented or not ethernet
1544 // Double check that the config we found is for our interface (or the interface is down)
1545 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1549 subint->flags |= SUBINT_CONFIG_L2;
/* Port in L2 mode matches any number of tags. */
1552 SUBINT_CONFIG_MATCH_0_TAG | SUBINT_CONFIG_MATCH_1_TAG
1553 | SUBINT_CONFIG_MATCH_2_TAG | SUBINT_CONFIG_MATCH_3_TAG;
1557 subint->flags &= ~SUBINT_CONFIG_L2;
/* Back to L3: stop claiming tagged traffic on the port. */
1560 ~(SUBINT_CONFIG_MATCH_1_TAG | SUBINT_CONFIG_MATCH_2_TAG
1561 | SUBINT_CONFIG_MATCH_3_TAG);
1569 * Set the L2/L3 mode for the subinterface regardless of port
/*
 * Same as ethernet_sw_interface_set_l2_mode but never adjusts the
 * port-level tag-match flags — only toggles SUBINT_CONFIG_L2.
 *
 * NOTE(review): extraction gaps — return type, dummy_flags declaration,
 * braces and the final return are missing from this listing.
 */
1572 ethernet_sw_interface_set_l2_mode_noport (vnet_main_t * vnm,
1573 u32 sw_if_index, u32 l2)
1575 subint_config_t *subint;
1579 /* Find the config for this subinterface */
1581 ethernet_sw_interface_get_config (vnm, sw_if_index, &dummy_flags,
1586 /* unimplemented or not ethernet */
1591 * Double check that the config we found is for our interface (or the
1592 * interface is down)
1594 ASSERT ((subint->sw_if_index == sw_if_index) | (subint->sw_if_index == ~0));
1598 subint->flags |= SUBINT_CONFIG_L2;
1602 subint->flags &= ~SUBINT_CONFIG_L2;
/*
 * Add/delete callback for ethernet sw (sub)interfaces: on create,
 * validate the subint config (rejecting unsupported configs and
 * duplicate vlans) and initialize it as L3 with the computed match
 * flags; interfaces start admin-down so sw_if_index is set to ~0.
 *
 * NOTE(review): extraction gaps — match_flags declaration, braces,
 * the delete path and the final "return error;" are not visible here.
 */
1610 static clib_error_t *
1611 ethernet_sw_interface_add_del (vnet_main_t * vnm,
1612 u32 sw_if_index, u32 is_create)
1614 clib_error_t *error = 0;
1615 subint_config_t *subint;
1617 u32 unsupported = 0;
1619 // Find the config for this subinterface
1621 ethernet_sw_interface_get_config (vnm, sw_if_index, &match_flags,
1626 // not implemented yet or not ethernet
1629 // this is the NYI case
1630 error = clib_error_return (0, "not implemented yet");
1641 // Initialize the subint
1642 if (subint->flags & SUBINT_CONFIG_VALID)
1644 // Error vlan already in use
1645 error = clib_error_return (0, "vlan is already in use");
1649 // Note that config is L3 by default
1650 subint->flags = SUBINT_CONFIG_VALID | match_flags;
1651 subint->sw_if_index = ~0; // because interfaces are initially down
/* Register the callback with the interface infra. */
1658 VNET_SW_INTERFACE_ADD_DEL_FUNCTION (ethernet_sw_interface_add_del);
/* Error counter strings, generated from the ethernet_error() X-macro
 * entries in error.def; index order matches the ethernet error enum. */
1660 static char *ethernet_error_strings[] = {
1661 #define ethernet_error(n,c,s) s,
1662 #include "error.def"
1663 #undef ethernet_error
/* Node registration for ethernet-input: per-frame scalar args carry the
 * hw_if_index (ethernet_input_frame_t); next nodes come from the
 * foreach_ethernet_input_next X-macro. */
1667 VLIB_REGISTER_NODE (ethernet_input_node) = {
1668 .name = "ethernet-input",
1669 /* Takes a vector of packets. */
1670 .vector_size = sizeof (u32),
/* Per-frame scalar data (hw_if_index etc.) used by the fast path. */
1671 .scalar_size = sizeof (ethernet_input_frame_t),
1672 .n_errors = ETHERNET_N_ERROR,
1673 .error_strings = ethernet_error_strings,
1674 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1676 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1677 foreach_ethernet_input_next
1680 .format_buffer = format_ethernet_header_with_length,
1681 .format_trace = format_ethernet_input_trace,
1682 .unformat_buffer = unformat_ethernet_header,
/* Node registration for ethernet-input-type; shares the same next-node
 * layout as ethernet-input so next indices stay aligned. */
1685 VLIB_REGISTER_NODE (ethernet_input_type_node) = {
1686 .name = "ethernet-input-type",
1687 /* Takes a vector of packets. */
1688 .vector_size = sizeof (u32),
1689 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1691 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1692 foreach_ethernet_input_next
/* Node registration for ethernet-input-not-l2; shares the same next-node
 * layout as ethernet-input so next indices stay aligned. */
1697 VLIB_REGISTER_NODE (ethernet_input_not_l2_node) = {
1698 .name = "ethernet-input-not-l2",
1699 /* Takes a vector of packets. */
1700 .vector_size = sizeof (u32),
1701 .n_next_nodes = ETHERNET_INPUT_N_NEXT,
1703 #define _(s,n) [ETHERNET_INPUT_NEXT_##s] = n,
1704 foreach_ethernet_input_next
1710 #ifndef CLIB_MARCH_VARIANT
/*
 * Force (enable != 0) or stop forcing (enable == 0) all rx traffic on the
 * given hw interface through the ethernet-input node, bypassing any
 * device-level shortcut to e.g. ip4-input.
 */
1712 ethernet_set_rx_redirect (vnet_main_t * vnm,
1713 vnet_hw_interface_t * hi, u32 enable)
1715 // Ensure all packets go to ethernet-input (i.e. untagged ipv4 packets
1716 // don't go directly to ip4-input)
/* ~0 clears the redirect. */
1717 vnet_hw_interface_rx_redirect_to_node
1718 (vnm, hi->hw_if_index, enable ? ethernet_input_node.index : ~0);
1723 * Initialization and registration for the next_by_ethernet structure
/*
 * Initialize an ethertype -> next-index sparse vector (16-bit ethertype
 * key space) and its inverse mapping; DROP and PUNT get the invalid
 * sparse index since they are not real ethertype registrations.
 *
 * NOTE(review): "ðernet_main" below is HTML-entity mojibake for
 * "&ethernet_main"; surrounding braces/returns are elided in this listing.
 */
1727 next_by_ethertype_init (next_by_ethertype_t * l3_next)
1729 l3_next->input_next_by_type = sparse_vec_new
1730 ( /* elt bytes */ sizeof (l3_next->input_next_by_type[0]),
1731 /* bits in index */ BITS (((ethernet_header_t *) 0)->type));
1733 vec_validate (l3_next->sparse_index_by_input_next_index,
1734 ETHERNET_INPUT_NEXT_DROP);
1735 vec_validate (l3_next->sparse_index_by_input_next_index,
1736 ETHERNET_INPUT_NEXT_PUNT);
1737 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_DROP] =
1738 SPARSE_VEC_INVALID_INDEX;
1739 l3_next->sparse_index_by_input_next_index[ETHERNET_INPUT_NEXT_PUNT] =
1740 SPARSE_VEC_INVALID_INDEX;
1743 * Make sure we don't wipe out an ethernet registration by mistake
1744 * Can happen if init function ordering constraints are missing.
/* Guard: init must run before any next_by_ethertype_register call. */
1748 ethernet_main_t *em = ðernet_main;
1749 ASSERT (em->next_by_ethertype_register_called == 0);
1755 // Add an ethertype -> next index mapping to the structure
//
// Validates the sparse vector slot for the ethertype, rebuilds the
// inverse (next index -> sparse index) map, and caches the next index
// for the common IP4/IP6/MPLS ethertypes — unless L3 redirect is active,
// in which case the cached values already point at the redirect node.
//
// NOTE(review): the two "ethernet_main_t *em" declarations at original
// lines 1762 and 1766 suggest elided conditional-compilation lines
// between them; "ðernet_main" is mojibake for "&ethernet_main".
1757 next_by_ethertype_register (next_by_ethertype_t * l3_next,
1758 u32 ethertype, u32 next_index)
1762 ethernet_main_t *em = ðernet_main;
1766 ethernet_main_t *em = ðernet_main;
/* Record that registration has started (checked by init's ASSERT). */
1767 em->next_by_ethertype_register_called = 1;
1770 /* Setup ethernet type -> next index sparse vector mapping. */
1771 n = sparse_vec_validate (l3_next->input_next_by_type, ethertype);
1774 /* Rebuild next index -> sparse index inverse mapping when sparse vector
1776 vec_validate (l3_next->sparse_index_by_input_next_index, next_index);
/* Start at 1: sparse index 0 is the invalid/absent entry. */
1777 for (i = 1; i < vec_len (l3_next->input_next_by_type); i++)
1779 sparse_index_by_input_next_index[l3_next->input_next_by_type[i]] = i;
1781 // do not allow the cached next index's to be updated if L3
1782 // redirect is enabled, as it will have overwritten them
1783 if (!em->redirect_l3)
1785 // Cache common ethertypes directly
1786 if (ethertype == ETHERNET_TYPE_IP4)
1788 l3_next->input_next_ip4 = next_index;
1790 else if (ethertype == ETHERNET_TYPE_IP6)
1792 l3_next->input_next_ip6 = next_index;
1794 else if (ethertype == ETHERNET_TYPE_MPLS)
1796 l3_next->input_next_mpls = next_index;
/*
 * Init function for the ethernet input feature: set up the three input
 * nodes, the ethertype next-index table, and the vlan/qinq parsing
 * pools.  Pool index 0 in each pool is reserved as the "invalid table"
 * sentinel, so a stored table index of 0 means "not allocated".
 */
1803 static clib_error_t *
1804 ethernet_input_init (vlib_main_t * vm)
1806 ethernet_main_t *em = ðernet_main;
/* Allocated only to burn pool index 0; never referenced afterwards. */
1807 __attribute__ ((unused)) vlan_table_t *invalid_vlan_table;
1808 __attribute__ ((unused)) qinq_table_t *invalid_qinq_table;
1810 ethernet_setup_node (vm, ethernet_input_node.index);
1811 ethernet_setup_node (vm, ethernet_input_type_node.index);
1812 ethernet_setup_node (vm, ethernet_input_not_l2_node.index);
1814 next_by_ethertype_init (&em->l3_next);
1816 // Initialize pools and vector for vlan parsing
1817 vec_validate (em->main_intfs, 10); // 10 main interfaces
1818 pool_alloc (em->vlan_pool, 10);
1819 pool_alloc (em->qinq_pool, 1);
1821 // The first vlan pool will always be reserved for an invalid table
1822 pool_get (em->vlan_pool, invalid_vlan_table); // first id = 0
1823 // The first qinq pool will always be reserved for an invalid table
1824 pool_get (em->qinq_pool, invalid_qinq_table); // first id = 0
1829 VLIB_INIT_FUNCTION (ethernet_input_init);
/*
 * Register a graph node to receive packets of the given ethertype.
 * Adds the node as a next of all three ethernet input nodes (asserting
 * the arcs get the same next index in each), records it in the
 * per-ethertype info table and the l3_next sparse map, and propagates
 * the registration to the L2 BVI path.
 *
 * NOTE(review): extraction gaps — return type and the declaration of
 * "i" are not visible; "ðernet_main" is mojibake for "&ethernet_main".
 */
1832 ethernet_register_input_type (vlib_main_t * vm,
1833 ethernet_type_t type, u32 node_index)
1835 ethernet_main_t *em = ðernet_main;
1836 ethernet_type_info_t *ti;
/* Make sure ethernet_init has run before touching em state. */
1840 clib_error_t *error = vlib_call_init_function (vm, ethernet_init);
1842 clib_error_report (error);
1845 ti = ethernet_get_type_info (em, type);
1846 ti->node_index = node_index;
1847 ti->next_index = vlib_node_add_next (vm,
1848 ethernet_input_node.index, node_index);
/* The three input nodes must agree on the next index for this type. */
1849 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1850 ASSERT (i == ti->next_index);
1852 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1853 ASSERT (i == ti->next_index);
1855 // Add the L3 node for this ethertype to the next nodes structure
1856 next_by_ethertype_register (&em->l3_next, type, ti->next_index);
1858 // Call the registration functions for other nodes that want a mapping
1859 l2bvi_register_input_type (vm, type, node_index);
/*
 * Register the node that receives all L2-mode ethernet traffic, adding
 * it as a next of all three input nodes and asserting the next indices
 * stay aligned (em->l2_next holds the shared index).
 *
 * NOTE(review): "ðernet_main" is mojibake for "&ethernet_main";
 * braces and the em->l2_next assignment line are elided in this listing.
 */
1863 ethernet_register_l2_input (vlib_main_t * vm, u32 node_index)
1865 ethernet_main_t *em = ðernet_main;
1869 vlib_node_add_next (vm, ethernet_input_node.index, node_index);
1872 * Even if we never use these arcs, we have to align the next indices...
1874 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1876 ASSERT (i == em->l2_next);
1878 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1879 ASSERT (i == em->l2_next);
1882 // Register a next node for L3 redirect, and enable L3 redirect
//
// Once enabled, all L3 traffic (IP4/IP6/MPLS) from ethernet-input is
// steered to the redirect node: the cached per-ethertype next indices
// are overwritten, and next_by_ethertype_register will refrain from
// updating them while redirect_l3 is set.
//
// NOTE(review): "ðernet_main" is mojibake for "&ethernet_main";
// braces and the third argument of the first vlib_node_add_next call
// are elided in this listing.
1884 ethernet_register_l3_redirect (vlib_main_t * vm, u32 node_index)
1886 ethernet_main_t *em = ðernet_main;
1889 em->redirect_l3 = 1;
1890 em->redirect_l3_next = vlib_node_add_next (vm,
1891 ethernet_input_node.index,
1894 * Change the cached next nodes to the redirect node
1896 em->l3_next.input_next_ip4 = em->redirect_l3_next;
1897 em->l3_next.input_next_ip6 = em->redirect_l3_next;
1898 em->l3_next.input_next_mpls = em->redirect_l3_next;
1901 * Even if we never use these arcs, we have to align the next indices...
1903 i = vlib_node_add_next (vm, ethernet_input_type_node.index, node_index);
1905 ASSERT (i == em->redirect_l3_next);
1907 i = vlib_node_add_next (vm, ethernet_input_not_l2_node.index, node_index);
1909 ASSERT (i == em->redirect_l3_next);
1914 * fd.io coding-style-patch-verification: ON
1917 * eval: (c-set-style "gnu")